hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
949573b4599bf569bf84e6c141760b8c438837a0
123
py
Python
cfg.py
rucnyz/Pikachu
fbfd51d83104a5fe2847e78bc6e82d87275d5c29
[ "MIT" ]
null
null
null
cfg.py
rucnyz/Pikachu
fbfd51d83104a5fe2847e78bc6e82d87275d5c29
[ "MIT" ]
null
null
null
cfg.py
rucnyz/Pikachu
fbfd51d83104a5fe2847e78bc6e82d87275d5c29
[ "MIT" ]
null
null
null
"""配置文件""" '''图片路径''' BLOCK_IMAGE_PATH = 'resources/images/block.png' PIKACHU_IMAGE_PATH = 'resources/images/pikachu.png'
20.5
51
0.731707
16
123
5.375
0.5625
0.209302
0.418605
0.55814
0
0
0
0
0
0
0
0
0.073171
123
5
52
24.6
0.754386
0.03252
0
0
0
0
0.524272
0.524272
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
949fff77fc46edf794334104fc6e534c3be1f908
101
py
Python
tests/__init__.py
afilip1/imagehash
671b066242274ace3bce87c5ff6bbbb2b8eb98b4
[ "BSD-2-Clause" ]
2,338
2015-01-03T08:06:11.000Z
2022-03-29T07:06:00.000Z
tests/__init__.py
afilip1/imagehash
671b066242274ace3bce87c5ff6bbbb2b8eb98b4
[ "BSD-2-Clause" ]
143
2015-01-21T17:55:31.000Z
2022-02-01T09:23:00.000Z
tests/__init__.py
afilip1/imagehash
671b066242274ace3bce87c5ff6bbbb2b8eb98b4
[ "BSD-2-Clause" ]
338
2015-01-28T18:26:19.000Z
2022-03-27T12:54:32.000Z
from __future__ import (absolute_import, division, print_function) from .utils import TestImageHash
25.25
66
0.841584
12
101
6.583333
0.75
0
0
0
0
0
0
0
0
0
0
0
0.108911
101
3
67
33.666667
0.877778
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
1
0
6
94ca8c466bd8029eca105c94d688f362097e883b
156
py
Python
lib/extensions/dcn/modules/__init__.py
shampooma/openseg.pytorch
d1da408a1e870d52c058c359583bc098f7f3d9e2
[ "MIT" ]
1,254
2019-01-03T02:51:22.000Z
2022-03-31T08:36:59.000Z
lib/extensions/dcn/modules/__init__.py
shampooma/openseg.pytorch
d1da408a1e870d52c058c359583bc098f7f3d9e2
[ "MIT" ]
88
2019-02-13T03:43:09.000Z
2022-03-27T08:23:29.000Z
lib/extensions/dcn/modules/__init__.py
shampooma/openseg.pytorch
d1da408a1e870d52c058c359583bc098f7f3d9e2
[ "MIT" ]
211
2019-01-03T13:21:07.000Z
2022-03-22T08:46:34.000Z
from .deform_conv import DeformConv from .modulated_dcn import DeformRoIPooling, ModulatedDeformConv, ModulatedDeformConvPack, ModulatedDeformRoIPoolingPack
78
120
0.903846
13
156
10.692308
0.846154
0
0
0
0
0
0
0
0
0
0
0
0.064103
156
2
120
78
0.952055
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
a210fc11bf7c9401f15cd825ab81a64de9b583e5
9,631
py
Python
models/vamvsnet_high_submodule.py
brandontan99/D2HC-RMVSNet
c4615c2d7c212b9b247da6fc0e0e110344b1b0ce
[ "MIT" ]
91
2020-08-14T15:43:48.000Z
2022-03-24T11:07:40.000Z
models/vamvsnet_high_submodule.py
brandontan99/D2HC-RMVSNet
c4615c2d7c212b9b247da6fc0e0e110344b1b0ce
[ "MIT" ]
15
2020-08-29T02:25:20.000Z
2022-03-13T06:34:11.000Z
models/vamvsnet_high_submodule.py
brandontan99/D2HC-RMVSNet
c4615c2d7c212b9b247da6fc0e0e110344b1b0ce
[ "MIT" ]
7
2020-11-02T12:47:49.000Z
2021-07-27T07:13:27.000Z
import torch import torch.nn as nn import torch.nn.functional as F from .module import * import sys from copy import deepcopy # Multi-scale feature extractor && Coarse To Fine Regression Module class FeatureNetHigh(nn.Module): #Original Paper Setting def __init__(self): super(FeatureNetHigh, self).__init__() self.inplanes = 32 self.conv0 = ConvBnReLU(3, 8, 3, 1, 1) self.conv1 = ConvBnReLU(8, 8, 3, 1, 1) self.conv2 = ConvBnReLU(8, 16, 5, 2, 2) self.conv3 = ConvBnReLU(16, 16, 3, 1, 1) self.conv4 = ConvBnReLU(16, 16, 3, 1, 1) self.conv5 = ConvBnReLU(16, 32, 5, 2, 2) self.conv6 = ConvBnReLU(32, 32, 3, 1, 1) self.conv7 = ConvBnReLU( 32, 32, 5, 2, 2) self.conv8 = ConvBnReLU(32, 32, 3, 1, 1) self.conv9 = ConvBnReLU(32, 64, 5, 2, 2) self.conv10 = ConvBnReLU(64, 64, 3, 1, 1) self.conv11 = ConvBnReLU(64, 64, 5, 2, 2) self.conv12 = ConvBnReLU(64, 64, 3, 1, 1) self.feature1 = nn.Conv2d(32, 32, 3, 1, 1) self.feature2 = nn.Conv2d(32, 32, 3, 1, 1) self.feature3 = nn.Conv2d(64, 64, 3, 1, 1) self.feature4 = nn.Conv2d(64, 64, 3, 1, 1) def forward(self, x): x = self.conv1(self.conv0(x)) x = self.conv4(self.conv3(self.conv2(x))) x = self.conv6(self.conv5(x)) feature1 = self.feature1(x) x = self.conv8(self.conv7(x)) feature2 = self.feature2(x) x = self.conv10(self.conv9(x)) feature3 = self.feature3(x) x = self.conv12(self.conv11(x)) feature4 = self.feature4(x) return [feature1, feature2, feature3, feature4] class FeatureNetHighGN(nn.Module): #Original Paper Setting def __init__(self): super(FeatureNetHighGN, self).__init__() self.inplanes = 32 self.conv0 = ConvGnReLU(3, 8, 3, 1, 1) self.conv1 = ConvGnReLU(8, 8, 3, 1, 1) self.conv2 = ConvGnReLU(8, 16, 5, 2, 2) self.conv3 = ConvGnReLU(16, 16, 3, 1, 1) self.conv4 = ConvGnReLU(16, 16, 3, 1, 1) self.conv5 = ConvGnReLU(16, 32, 5, 2, 2) self.conv6 = ConvGnReLU(32, 32, 3, 1, 1) self.conv7 = ConvGnReLU( 32, 32, 5, 2, 2) self.conv8 = ConvGnReLU(32, 32, 3, 1, 1) self.conv9 = ConvGnReLU(32, 64, 5, 2, 2) self.conv10 = ConvGnReLU(64, 64, 3, 1, 1) 
self.conv11 = ConvGnReLU(64, 64, 5, 2, 2) self.conv12 = ConvGnReLU(64, 64, 3, 1, 1) self.feature1 = nn.Conv2d(32, 32, 3, 1, 1) self.feature2 = nn.Conv2d(32, 32, 3, 1, 1) self.feature3 = nn.Conv2d(64, 64, 3, 1, 1) self.feature4 = nn.Conv2d(64, 64, 3, 1, 1) def forward(self, x): x = self.conv1(self.conv0(x)) x = self.conv4(self.conv3(self.conv2(x))) x = self.conv6(self.conv5(x)) feature1 = self.feature1(x) x = self.conv8(self.conv7(x)) feature2 = self.feature2(x) x = self.conv10(self.conv9(x)) feature3 = self.feature3(x) x = self.conv12(self.conv11(x)) feature4 = self.feature4(x) return [feature1, feature2, feature3, feature4] class RegNetUS0_Coarse2Fine(nn.Module): def __init__(self, origin_size=False, dp_ratio=0.0, image_scale=0.25): super(RegNetUS0_Coarse2Fine, self).__init__() self.origin_size = origin_size self.image_scale = image_scale self.conv0 = ConvBnReLU3D(32, 8) self.conv1 = ConvBnReLU3D(32, 16, stride=2) self.conv2 = ConvBnReLU3D(16, 16) self.conv3 = ConvBnReLU3D(16, 32, stride=2) self.conv4 = ConvBnReLU3D(32, 32) self.conv5 = ConvBnReLU3D(32, 64, stride=2) self.conv6 = ConvBnReLU3D(64, 64) self.conv7 = nn.Sequential( nn.ConvTranspose3d(128, 32, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False), nn.BatchNorm3d(32), nn.ReLU(inplace=True)) self.conv9 = nn.Sequential( nn.ConvTranspose3d(97, 16, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False), nn.BatchNorm3d(16), nn.ReLU(inplace=True)) self.conv11 = nn.Sequential( nn.ConvTranspose3d(49, 8, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False), nn.BatchNorm3d(8), nn.ReLU(inplace=True)) self.prob1 = nn.Conv3d(41, 1, 1,bias=False) self.dropout1 = nn.Dropout3d(p=dp_ratio) self.prob2 = nn.Conv3d(49, 1, 1,bias=False) self.dropout2 = nn.Dropout3d(p=dp_ratio) self.prob3 = nn.Conv3d(97, 1, 1,bias=False) self.dropout3 = nn.Dropout3d(p=dp_ratio) self.prob4 = nn.Conv3d(128, 1, 1,bias=False) self.dropout4 = nn.Dropout3d(p=dp_ratio) #add Drop out def forward(self, x_list): x1, 
x2, x3, x4 = x_list # 32*192, 32*96, 64*48, 64*24 input_shape = x1.shape conv0 = self.conv0(x1) conv1 = self.conv1(x1) conv3 = self.conv3(conv1) conv5 = self.conv5(conv3) x = torch.cat([self.conv6(conv5), x4], 1) prob4 = self.dropout4(self.prob4(x)) #prob4 = self.prob4(x) x = self.conv7(x) + self.conv4(conv3) x = torch.cat([x, x3, F.interpolate(prob4, scale_factor=2, mode='trilinear', align_corners=True)], 1) prob3 = self.dropout3(self.prob3(x)) #prob3 = self.prob3(x) x = self.conv9(x) + self.conv2(conv1) x = torch.cat([x, x2, F.interpolate(prob3, scale_factor=2, mode='trilinear', align_corners=True)], 1) prob2 = self.dropout2(self.prob2(x)) #prob2 = self.prob2(x) x = self.conv11(x) + conv0 x = torch.cat([x, x1, F.interpolate(prob2, scale_factor=2, mode='trilinear', align_corners=True)], 1) if self.origin_size and self.image_scale == 0.50: x = F.interpolate(x, size=(input_shape[2], input_shape[3]*2, input_shape[4]*2), mode='trilinear', align_corners=True) prob1 = self.dropout1(self.prob1(x)) #prob1 = self.prob1(x) # without dropout # if self.origin_size: # x = F.interpolate(x, size=(input_shape[2], input_shape[3]*4, input_shape[4]*4), mode='trilinear', align_corners=True) return [prob1, prob2, prob3, prob4] class RegNetUS0_Coarse2FineGN(nn.Module): def __init__(self, origin_size=False, dp_ratio=0.0, image_scale=0.25): super(RegNetUS0_Coarse2FineGN, self).__init__() self.origin_size = origin_size self.image_scale = image_scale self.conv0 = ConvGnReLU3D(32, 8) self.conv1 = ConvGnReLU3D(32, 16, stride=2) self.conv2 = ConvGnReLU3D(16, 16) self.conv3 = ConvGnReLU3D(16, 32, stride=2) self.conv4 = ConvGnReLU3D(32, 32) self.conv5 = ConvGnReLU3D(32, 64, stride=2) self.conv6 = ConvGnReLU3D(64, 64) self.conv7 = nn.Sequential( nn.ConvTranspose3d(128, 32, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False), #nn.BatchNorm3d(32), nn.GroupNorm(4, 32), nn.ReLU(inplace=True)) self.conv9 = nn.Sequential( nn.ConvTranspose3d(97, 16, kernel_size=3, padding=1, 
output_padding=1, stride=2, bias=False), nn.GroupNorm(2, 16), nn.ReLU(inplace=True)) self.conv11 = nn.Sequential( nn.ConvTranspose3d(49, 8, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False), nn.GroupNorm(1, 8), nn.ReLU(inplace=True)) self.prob1 = nn.Conv3d(41, 1, 1,bias=False) self.dropout1 = nn.Dropout3d(p=dp_ratio) self.prob2 = nn.Conv3d(49, 1, 1,bias=False) self.dropout2 = nn.Dropout3d(p=dp_ratio) self.prob3 = nn.Conv3d(97, 1, 1,bias=False) self.dropout3 = nn.Dropout3d(p=dp_ratio) self.prob4 = nn.Conv3d(128, 1, 1,bias=False) self.dropout4 = nn.Dropout3d(p=dp_ratio) #add Drop out def forward(self, x_list): x1, x2, x3, x4 = x_list # 32*192, 32*96, 64*48, 64*24 # print(x1.shape, x2.shape, x3.shape, x4.shape) input_shape = x1.shape conv0 = self.conv0(x1) conv1 = self.conv1(x1) conv3 = self.conv3(conv1) conv5 = self.conv5(conv3) x = torch.cat([self.conv6(conv5), x4], 1) prob4 = self.dropout4(self.prob4(x)) #prob4 = self.prob4(x) x = self.conv7(x) + self.conv4(conv3) x = torch.cat([x, x3, F.interpolate(prob4, scale_factor=2, mode='trilinear', align_corners=True)], 1) prob3 = self.dropout3(self.prob3(x)) #prob3 = self.prob3(x) x = self.conv9(x) + self.conv2(conv1) x = torch.cat([x, x2, F.interpolate(prob3, scale_factor=2, mode='trilinear', align_corners=True)], 1) prob2 = self.dropout2(self.prob2(x)) #prob2 = self.prob2(x) x = self.conv11(x) + conv0 x = torch.cat([x, x1, F.interpolate(prob2, scale_factor=2, mode='trilinear', align_corners=True)], 1) if self.origin_size and self.image_scale == 0.50: x = F.interpolate(x, size=(input_shape[2], input_shape[3]*2, input_shape[4]*2), mode='trilinear', align_corners=True) prob1 = self.dropout1(self.prob1(x)) #prob1 = self.prob1(x) # without dropout # if self.origin_size: # x = F.interpolate(x, size=(input_shape[2], input_shape[3]*4, input_shape[4]*4), mode='trilinear', align_corners=True) return [prob1, prob2, prob3, prob4]
38.218254
131
0.585194
1,372
9,631
4.027697
0.094023
0.011582
0.013029
0.027868
0.869707
0.869707
0.847991
0.74629
0.74629
0.730366
0
0.111935
0.271831
9,631
251
132
38.370518
0.67603
0.077147
0
0.662921
0
0
0.008123
0
0
0
0
0
0
1
0.044944
false
0
0.033708
0
0.123596
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
bf4cf7ee8d67b8444fcdd6c7632c197ccf5a62fe
363
py
Python
utils/exporters/blender/addons/io_three/exceptions.py
wenluzhizhi/threeEx
82b1795f9f73bb47fd3c49befc6606944f79d639
[ "MIT" ]
2,162
2018-02-23T12:15:07.000Z
2022-03-31T09:52:41.000Z
utils/exporters/blender/addons/io_three/exceptions.py
superguigui/three.js
c18be1eca38a1f3c779e8dcb168edf06ee9441ad
[ "MIT" ]
241
2018-03-13T17:13:45.000Z
2022-03-26T03:06:59.000Z
utils/exporters/blender/addons/io_three/exceptions.py
superguigui/three.js
c18be1eca38a1f3c779e8dcb168edf06ee9441ad
[ "MIT" ]
500
2018-02-24T01:34:55.000Z
2022-03-30T10:41:43.000Z
class ThreeError(Exception): pass class UnimplementedFeatureError(ThreeError): pass class ThreeValueError(ThreeError): pass class UnsupportedObjectType(ThreeError): pass class GeometryError(ThreeError): pass class MaterialError(ThreeError): pass class SelectionError(ThreeError): pass class NGonError(ThreeError): pass class BufferGeometryError(ThreeError): pass
36.3
49
0.85124
36
363
8.583333
0.333333
0.23301
0.430421
0
0
0
0
0
0
0
0
0
0.07438
363
9
50
40.333333
0.919643
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
1
0
0
1
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
1
0
0
6
bf786306f2ab7d3c5b7701cc81d170522a365d7e
176
py
Python
src/KicadSymGen/draw/__init__.py
krtkr/altera_kicad_gen
0a688df5d718bcd2c40946fa3538e8a7b20427a3
[ "MIT" ]
null
null
null
src/KicadSymGen/draw/__init__.py
krtkr/altera_kicad_gen
0a688df5d718bcd2c40946fa3538e8a7b20427a3
[ "MIT" ]
null
null
null
src/KicadSymGen/draw/__init__.py
krtkr/altera_kicad_gen
0a688df5d718bcd2c40946fa3538e8a7b20427a3
[ "MIT" ]
null
null
null
from .DrawItem import * from .Field import * from .Writer import * from .Library import * from .Pin import * from .Rectangle import * from .Text import * from .Symbol import *
19.555556
24
0.727273
24
176
5.333333
0.416667
0.546875
0
0
0
0
0
0
0
0
0
0
0.181818
176
8
25
22
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
bfa14468706050e1ac1206594dd0261da672358e
112
py
Python
dnspython/e164.py
dineshkumar2509/learning-python
e8af11ff0b396da4c3f2cfe21d14131bae4b2adb
[ "MIT" ]
86
2015-06-13T16:53:55.000Z
2022-03-24T20:56:42.000Z
dnspython/e164.py
pei-zheng-yi/learning-python
55e350dfe44cf04f7d4408e76e72d2f467bd42ce
[ "MIT" ]
9
2015-05-27T07:52:44.000Z
2022-03-29T21:52:40.000Z
dnspython/e164.py
pei-zheng-yi/learning-python
55e350dfe44cf04f7d4408e76e72d2f467bd42ce
[ "MIT" ]
124
2015-12-10T01:17:18.000Z
2021-11-08T04:03:38.000Z
#!/usr/bin/env python import dns.e164 n = dns.e164.from_e164("+1 555 1212") print n print dns.e164.to_e164(n)
14
37
0.705357
23
112
3.347826
0.608696
0.272727
0
0
0
0
0
0
0
0
0
0.237113
0.133929
112
7
38
16
0.556701
0.178571
0
0
0
0
0.120879
0
0
0
0
0
0
0
null
null
0
0.25
null
null
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
6
7822cb3b1bea06ce5c35d2dddad1cdb7cd2f3ad0
40
py
Python
favorite-animals/james.py
jasonstewartpariveda/learn-git-1
ae981f5a3d787860240ce658f46f1d98d0caf76e
[ "MIT" ]
1
2021-09-29T18:48:12.000Z
2021-09-29T18:48:12.000Z
favorite-animals/james.py
jasonstewartpariveda/learn-git-1
ae981f5a3d787860240ce658f46f1d98d0caf76e
[ "MIT" ]
21
2021-09-27T17:19:45.000Z
2021-09-30T04:07:26.000Z
favorite-animals/james.py
jasonstewartpariveda/learn-git-1
ae981f5a3d787860240ce658f46f1d98d0caf76e
[ "MIT" ]
192
2021-09-27T17:10:51.000Z
2021-10-05T03:06:36.000Z
print("My favorite animal is a mermaid")
40
40
0.775
7
40
4.428571
1
0
0
0
0
0
0
0
0
0
0
0
0.125
40
1
40
40
0.885714
0
0
0
0
0
0.756098
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
78427439a3b854f912e667e0decd18444e64ef88
136
py
Python
contants.py
m-mone/fiction
8b04f21272003af03d5cb38f8bfddc7f50a1e862
[ "Apache-2.0" ]
null
null
null
contants.py
m-mone/fiction
8b04f21272003af03d5cb38f8bfddc7f50a1e862
[ "Apache-2.0" ]
null
null
null
contants.py
m-mone/fiction
8b04f21272003af03d5cb38f8bfddc7f50a1e862
[ "Apache-2.0" ]
null
null
null
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'
68
135
0.75
27
136
3.666667
0.888889
0.10101
0
0
0
0
0
0
0
0
0
0.214876
0.110294
136
1
136
136
0.603306
0
0
0
0
1
0.882353
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
1
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
784b038a96a0c0fc7c4a6af7cc5089a699e630da
159
py
Python
flair/models/__init__.py
amagge/flair
4cdc41da77297531f8a9ebe6f47ae9ac8a1eb620
[ "MIT" ]
1
2021-01-07T07:30:21.000Z
2021-01-07T07:30:21.000Z
flair/models/__init__.py
amagge/flair
4cdc41da77297531f8a9ebe6f47ae9ac8a1eb620
[ "MIT" ]
1
2021-03-01T17:14:03.000Z
2021-03-01T17:14:03.000Z
flair/models/__init__.py
amagge/flair
4cdc41da77297531f8a9ebe6f47ae9ac8a1eb620
[ "MIT" ]
2
2021-02-24T19:58:46.000Z
2021-02-25T10:53:23.000Z
from .sequence_tagger_model import SequenceTagger, MultiTagger from .language_model import LanguageModel from .text_classification_model import TextClassifier
39.75
62
0.893082
18
159
7.611111
0.666667
0.240876
0
0
0
0
0
0
0
0
0
0
0.081761
159
3
63
53
0.938356
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
7857c5863c85f6c6ff9d8ca803c71cf7b61d8735
5,133
py
Python
test.py
JingZhang918/MC-Option-Pricing
4ac1f17cc8e67faf5628314f91662da53f019a85
[ "MIT" ]
null
null
null
test.py
JingZhang918/MC-Option-Pricing
4ac1f17cc8e67faf5628314f91662da53f019a85
[ "MIT" ]
null
null
null
test.py
JingZhang918/MC-Option-Pricing
4ac1f17cc8e67faf5628314f91662da53f019a85
[ "MIT" ]
null
null
null
import unittest from financial_instruments import europeanOption, barrierOption from MCPricer import MCPricer class MyTestCase(unittest.TestCase): # european option test def test_european_option(self): optionEU = europeanOption() optionEU.sigma = .3 optionEU.K = 40 optionEU.T = 7/365 optionEU.r = .01 optionEU.q = 0 drift = .1 N = 10 #simulation steps # MC pricer optionEU.S = 38 optionEU.type = "call" [price, delta, gamma, vega] = MCPricer(optionEU, drift, N).get_price_greeks() self.assertAlmostEqual(price, 7.4155541306977835) self.assertTrue(0 <= delta <= 1) self.assertTrue(vega >= 0) optionEU.type = "put" [price, delta, gamma, vega] = MCPricer(optionEU, drift, N).get_price_greeks() self.assertAlmostEqual(price, 4.94932879566411) self.assertTrue(-1 <= delta <= 0) self.assertTrue(vega >= 0) optionEU.S = 42 optionEU.type = "call" [price, delta, gamma, vega] = MCPricer(optionEU, drift, N).get_price_greeks() self.assertAlmostEqual(price, 10.040123830350968) self.assertTrue(0 <= delta <= 1) self.assertTrue(vega >= 0) optionEU.type = "put" [price, delta, gamma, vega] = MCPricer(optionEU, drift, N).get_price_greeks() self.assertAlmostEqual(price, 3.1045729234458883) self.assertTrue(-1 <= delta <= 0) self.assertTrue(vega >= 0) # Black Scholes Model optionEU.S = 38 optionEU.type = "call" [price, delta, gamma, vega] = optionEU.get_BS_price_greeks() self.assertAlmostEqual(price, 0.08539668795131128) self.assertTrue(0 <= delta <= 1) self.assertTrue(vega >= 0) optionEU.type = "put" [price, delta, gamma, vega] = optionEU.get_BS_price_greeks() self.assertAlmostEqual(price, 2.077726190625249) self.assertTrue(-1 <= delta <= 0) self.assertTrue(vega >= 0) optionEU.S = 42 optionEU.type = "call" [price, delta, gamma, vega] = optionEU.get_BS_price_greeks() self.assertAlmostEqual(price, 2.107370356043525) self.assertTrue(0 <= delta <= 1) self.assertTrue(vega >= 0) optionEU.type = "put" [price, delta, gamma, vega] = optionEU.get_BS_price_greeks() self.assertAlmostEqual(price, 
0.09969985871746534) self.assertTrue(-1 <= delta <= 0) self.assertTrue(vega >= 0) # # barrier option test def test_barrier_option(self): #parameters optionBA = barrierOption(42, "up-in") optionBA.S = 38 optionBA.sigma = .3 optionBA.K = 40 optionBA.T = 7 / 365 optionBA.r = .01 optionBA.q = 0 optionBA.type = "call" # optionBA.get_d1_d2() drift = .1 N = 10 #simulation steps [price, delta, gamma, vega] = MCPricer(optionBA, drift, N).get_price_greeks() self.assertAlmostEqual(price, 7.4155541306977835) self.assertTrue(0 <= delta <= 1) self.assertTrue(vega >= 0) optionBA.type = "put" [price, delta, gamma, vega] = MCPricer(optionBA, drift, N).get_price_greeks() self.assertAlmostEqual(price, 0.6626594883295722) self.assertTrue(-1 <= delta <= 0) self.assertTrue(vega >= 0) optionBA.barrier_type = "up-out" optionBA.type = "call" # only profit if st between [40,42] [price, delta, gamma, vega] = MCPricer(optionBA, drift, N).get_price_greeks() self.assertAlmostEqual(price, 0.0) self.assertTrue(0 <= delta <= 1) self.assertTrue(vega >= 0) optionBA.type = "put" [price, delta, gamma, vega] = MCPricer(optionBA, drift, N).get_price_greeks() self.assertAlmostEqual(price, 4.286669307334537) self.assertTrue(-1 <= delta <= 0) self.assertTrue(vega >= 0) optionBA.S = 42 optionBA.barrier = 38 optionBA.barrier_type = "down-in" optionBA.type = "call" [price, delta, gamma, vega] = MCPricer(optionBA, drift, N).get_price_greeks() self.assertAlmostEqual(price, .0) self.assertTrue(0 <= delta <= 1) self.assertTrue(vega >= 0) optionBA.type = "put" [price, delta, gamma, vega] = MCPricer(optionBA, drift, N).get_price_greeks() self.assertAlmostEqual(price, 3.054008493464713) self.assertTrue(-1 <= delta <= 0) self.assertTrue(vega >= 0) optionBA.barrier_type = "down-out" optionBA.type = "call" [price, delta, gamma, vega] = MCPricer(optionBA, drift, N).get_price_greeks() self.assertAlmostEqual(price, 10.040123830350968) self.assertTrue(0 <= delta <= 1) self.assertTrue(vega >= 0) optionBA.type = "put" 
[price, delta, gamma, vega] = MCPricer(optionBA, drift, N).get_price_greeks() self.assertAlmostEqual(price, 0.050564429981175496) self.assertTrue(-1 <= delta <= 0) self.assertTrue(vega >= 0) if __name__ == '__main__': unittest.main()
36.664286
85
0.606078
580
5,133
5.27069
0.144828
0.146549
0.078508
0.099444
0.759895
0.759895
0.744194
0.744194
0.744194
0.703631
0
0.089262
0.268849
5,133
139
86
36.928058
0.725286
0.032729
0
0.666667
0
0
0.018167
0
0
0
0
0
0.421053
1
0.017544
false
0
0.026316
0
0.052632
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
6
786edc382c1d2d3379cf7ebd707a772e04ee40b6
30
py
Python
spmuck/blog/models/__init__.py
spmuck/spmuck.com
e462e29f9c6702203b5f6904effabc8fc7280601
[ "Apache-2.0" ]
null
null
null
spmuck/blog/models/__init__.py
spmuck/spmuck.com
e462e29f9c6702203b5f6904effabc8fc7280601
[ "Apache-2.0" ]
null
null
null
spmuck/blog/models/__init__.py
spmuck/spmuck.com
e462e29f9c6702203b5f6904effabc8fc7280601
[ "Apache-2.0" ]
null
null
null
from .blogpage import BlogPage
30
30
0.866667
4
30
6.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.1
30
1
30
30
0.962963
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
787f7aa38ed65b770b2c62e0f290675548ac0136
33
py
Python
olgaming/games/tictactoe/__init__.py
OctaveLauby/olgaming
e6e7780bfefa466facb535e4346ecaa1a555f8f1
[ "MIT" ]
null
null
null
olgaming/games/tictactoe/__init__.py
OctaveLauby/olgaming
e6e7780bfefa466facb535e4346ecaa1a555f8f1
[ "MIT" ]
null
null
null
olgaming/games/tictactoe/__init__.py
OctaveLauby/olgaming
e6e7780bfefa466facb535e4346ecaa1a555f8f1
[ "MIT" ]
null
null
null
from .tictactoe import TicTacToe
16.5
32
0.848485
4
33
7
0.75
0
0
0
0
0
0
0
0
0
0
0
0.121212
33
1
33
33
0.965517
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
789c51582ff5072a5b60eac9bf3cbdbb6bc4e8c6
122
py
Python
Books/GodOfPython/P00_OriginalSource/ch10/camera.py
Tim232/Python-Things
05f0f373a4cf298e70d9668c88a6e3a9d1cd8146
[ "MIT" ]
2
2020-12-05T07:42:55.000Z
2021-01-06T23:23:18.000Z
Books/GodOfPython/P00_OriginalSource/ch10/smtpkg7/camera/camera.py
Tim232/Python-Things
05f0f373a4cf298e70d9668c88a6e3a9d1cd8146
[ "MIT" ]
null
null
null
Books/GodOfPython/P00_OriginalSource/ch10/smtpkg7/camera/camera.py
Tim232/Python-Things
05f0f373a4cf298e70d9668c88a6e3a9d1cd8146
[ "MIT" ]
null
null
null
# camera.py def photo(): print("Take photo") photo() print("camera.py's module name is", __name__) # 추가(모듈의 이름을 출력)
17.428571
63
0.647541
20
122
3.75
0.7
0.213333
0
0
0
0
0
0
0
0
0
0
0.180328
122
6
64
20.333333
0.75
0.196721
0
0
0
0
0.378947
0
0
0
0
0
0
1
0.25
true
0
0
0
0.25
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
1
0
6
78aa1b24c7d4f18d66ca83a35eaf9470314c8666
130
py
Python
nort/util.py
taDachs/nort
e057198a521227ba8f60f49ca25a321a4ddad26e
[ "MIT" ]
null
null
null
nort/util.py
taDachs/nort
e057198a521227ba8f60f49ca25a321a4ddad26e
[ "MIT" ]
null
null
null
nort/util.py
taDachs/nort
e057198a521227ba8f60f49ca25a321a4ddad26e
[ "MIT" ]
null
null
null
# stolen from https://github.com/fmauch/catmux/blob/master/catmux/prefix.py def get_prefix(): return __file__.split("lib")[0]
32.5
75
0.738462
20
130
4.55
0.9
0
0
0
0
0
0
0
0
0
0
0.008475
0.092308
130
3
76
43.333333
0.762712
0.561538
0
0
0
0
0.054545
0
0
0
0
0
0
1
0.5
true
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
1
0
0
6
78b9239ee95b92e8422032a360eea6e64647f830
12,404
py
Python
Experiments/ST_MGCN/Runner_techniques_analysis_30_STMGCN.py
TempAnonymous/Context_Analysis
bbeba1ed7ea7001c22a12721fc4f390d4cc01a6e
[ "MIT" ]
3
2021-06-29T06:18:18.000Z
2021-09-07T03:11:35.000Z
Experiments/ST_MGCN/Runner_techniques_analysis_30_STMGCN.py
TempAnonymous/Context_Analysis
bbeba1ed7ea7001c22a12721fc4f390d4cc01a6e
[ "MIT" ]
null
null
null
Experiments/ST_MGCN/Runner_techniques_analysis_30_STMGCN.py
TempAnonymous/Context_Analysis
bbeba1ed7ea7001c22a12721fc4f390d4cc01a6e
[ "MIT" ]
null
null
null
import os import warnings warnings.filterwarnings("ignore") # ############################################# # # BenchMark Bike # # ############################################# bike_shared_params_st_mgcn = ('python ST_MGCN_Obj.py ' '--Dataset Bike ' '--CT 6 ' '--PT 7 ' '--TT 4 ' '--K 1 ' '--L 1 ' '--Graph Distance-Correlation-Interaction ' '--LSTMUnits 64 ' '--LSTMLayers 3 ' '--DataRange All ' '--TrainDays 365 ' '--threshold_correlation 0 ' '--threshold_distance 1000 ' '--threshold_interaction 500 ' '--Epoch 10000 ' '--Train True ' '--lr 5e-4 ' '--patience 0.1 ' '--ESlength 100 ' '--BatchSize 16 ' '--MergeWay sum ' ) os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method not-not-not ' ' --DataRange 0.5 --TrainDays 183 --MergeIndex 6 --CodeVersion not_external_30 ') os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method not-not-concat ' ' --DataRange 0.5 --TrainDays 183 --MergeIndex 6 --CodeVersion not_not_concat_30 ') os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method emb-not-concat ' ' --DataRange 0.5 --TrainDays 183 --MergeIndex 6 --CodeVersion emb_not_concat_30 ') os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method multi-not-concat ' ' --DataRange 0.5 --TrainDays 183 --MergeIndex 6 --CodeVersion multi_not_concat_30 ') os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method not-linear-add ' ' --DataRange 0.5 --TrainDays 183 --MergeIndex 6 --CodeVersion not_linear_add_30 ') os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method not-linear-gating ' ' --DataRange 0.5 --TrainDays 183 --MergeIndex 6 --CodeVersion not_linear_gating_30 ') os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_lstm_len 4 --external_method lstm-linear-add ' ' --DataRange 0.5 --TrainDays 183 --MergeIndex 6 --CodeVersion lstm_linear_add_30 ') os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method emb-linear-add ' ' --DataRange 0.5 --TrainDays 
183 --MergeIndex 6 --CodeVersion emb_linear_add_30') os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method emb-linear-gating ' ' --DataRange 0.5 --TrainDays 183 --MergeIndex 6 --CodeVersion emb_linear_gating_30 ') os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method multi-linear-add ' ' --DataRange 0.5 --TrainDays 183 --MergeIndex 6 --CodeVersion multi_linear_add_30 ') os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method multi-linear-gating ' ' --DataRange 0.5 --TrainDays 183 --MergeIndex 6 --CodeVersion multi_linear_gating_30 ') os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_lstm_len 4 --external_method lstm-not-concat ' ' --DataRange 0.5 --TrainDays 183 --MergeIndex 6 --CodeVersion lstm4_not_concat_30 ') os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_lstm_len 4 --external_method lstm-linear-gating ' ' --DataRange 0.5 --TrainDays 183 --MergeIndex 6 --CodeVersion lstm4_linear_gating_30 ') os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method earlyconcat ' ' --DataRange 0.5 --TrainDays 183 --MergeIndex 6 --CodeVersion earlyconcat_30 ') os.system(bike_shared_params_st_mgcn + ' --City Chicago --external_method earlyadd ' ' --DataRange 0.5 --TrainDays 183 --MergeIndex 6 --CodeVersion earlyadd_30 ') # # ############################################### # # BenchMark Metro # ############################################### metro_shared_params_st_mgcn = ('python ST_MGCN_Obj.py ' '--Dataset Metro ' '--CT 6 ' '--PT 7 ' '--TT 4 ' '--K 1 ' '--L 1 ' '--Graph Distance-Correlation-Line ' '--LSTMUnits 64 ' '--LSTMLayers 3 ' '--DataRange All ' '--TrainDays All ' '--threshold_correlation 0.7 ' '--threshold_distance 5000 ' '--threshold_interaction 30 ' '--Epoch 10000 ' '--Train True ' '--lr 1e-4 ' '--patience 0.1 ' '--ESlength 100 ' '--BatchSize 16 ' '--MergeWay sum ' ) os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-not-not ' ' 
--MergeIndex 6 --CodeVersion not_external_30 ') os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-not-concat ' ' --MergeIndex 6 --CodeVersion not_not_concat_30 ') os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method emb-not-concat ' ' --MergeIndex 6 --CodeVersion emb_not_concat_30 ') os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method multi-not-concat ' ' --MergeIndex 6 --CodeVersion multi_not_concat_30 ') os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-linear-add ' ' --MergeIndex 6 --CodeVersion not_linear_add_30 ') os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method not-linear-gating ' ' --MergeIndex 6 --CodeVersion not_linear_gating_30 ') os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_lstm_len 4 --external_method lstm-linear-add ' ' --MergeIndex 6 --CodeVersion lstm_linear_add_30 ') os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method emb-linear-add ' ' --MergeIndex 6 --CodeVersion emb_linear_add_30 ') os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method emb-linear-gating ' ' --MergeIndex 6 --CodeVersion emb_linear_gating_30 ') os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method multi-linear-add ' ' --MergeIndex 6 --CodeVersion multi_linear_add_30 ') os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method multi-linear-gating ' ' --MergeIndex 6 --CodeVersion multi_linear_gating_30 ') os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_lstm_len 4 --external_method lstm-not-concat ' ' --MergeIndex 6 --CodeVersion lstm4_not_concat_30 ') os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_lstm_len 4 --external_method lstm-linear-gating ' ' --MergeIndex 6 --CodeVersion lstm4_linear_gating_30 ') os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method earlyconcat ' ' 
--MergeIndex 6 --CodeVersion earlyconcat_30 ') os.system(metro_shared_params_st_mgcn + ' --City Shanghai --external_method earlyadd ' ' --MergeIndex 6 --CodeVersion earlyadd_30 ') # # ############################################### # # # BenchMark ChargeStation # # ############################################### cs_shared_params_st_mgcn = ('python ST_MGCN_Obj.py ' '--Dataset ChargeStation ' '--CT 6 ' '--PT 7 ' '--TT 4 ' '--K 1 ' '--L 1 ' '--Graph Distance-Correlation ' '--LSTMUnits 64 ' '--LSTMLayers 3 ' '--DataRange All ' '--TrainDays All ' '--threshold_correlation 0.1 ' '--threshold_distance 1000 ' '--threshold_interaction 500 ' '--Epoch 10000 ' '--Train True ' '--lr 5e-4 ' '--patience 0.1 ' '--ESlength 100 ' '--BatchSize 16 ' '--MergeWay max ' ) os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-not-not ' ' --MergeIndex 1 --CodeVersion not_external_30 ') os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-not-concat ' ' --MergeIndex 1 --CodeVersion not_not_concat_30 ') os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method emb-not-concat ' ' --MergeIndex 1 --CodeVersion emb_not_concat_30 ') os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method multi-not-concat ' ' --MergeIndex 1 --CodeVersion multi_not_concat_30 ') os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-linear-add ' ' --MergeIndex 1 --CodeVersion not_linear_add_30 ') os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method not-linear-gating ' ' --MergeIndex 1 --CodeVersion not_linear_gating_30 ') os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_lstm_len 4 --external_method lstm-linear-add ' ' --MergeIndex 1 --CodeVersion lstm_linear_add_30 ') os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method emb-linear-add ' ' --MergeIndex 1 --CodeVersion emb_linear_add_30 ') os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method 
emb-linear-gating ' ' --MergeIndex 1 --CodeVersion emb_linear_gating_30 ') os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method multi-linear-add ' ' --MergeIndex 1 --CodeVersion multi_linear_add_30 ') os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method multi-linear-gating ' ' --MergeIndex 1 --CodeVersion multi_linear_gating_30 ') os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_lstm_len 4 --external_method lstm-not-concat ' ' --MergeIndex 1 --CodeVersion lstm4_not_concat_30 ') os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_lstm_len 4 --external_method lstm-linear-gating ' ' --MergeIndex 1 --CodeVersion lstm4_linear_gating_30 ') os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method earlyconcat ' ' --MergeIndex 1 --CodeVersion earlyconcat_30 ') os.system(cs_shared_params_st_mgcn + ' --City Beijing --external_method earlyadd ' ' --MergeIndex 1 --CodeVersion earlyadd_30 ')
54.884956
127
0.525879
1,220
12,404
5.027049
0.068852
0.049894
0.109571
0.140877
0.952226
0.940975
0.89369
0.850318
0.828958
0.771238
0
0.03989
0.355289
12,404
225
128
55.128889
0.727023
0.00516
0
0.302469
0
0
0.516239
0.027162
0
0
0
0
0
1
0
false
0
0.012346
0
0.012346
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
78d23c4b47f2031f2f995d65d142aae594cfdcea
16,745
py
Python
DC_CDN_IJCAI21.py
abhirajasp/CDCN
c9863775b1c1bffd91f956b5b2c6c78abfc988ec
[ "MIT" ]
463
2020-03-08T22:13:11.000Z
2022-03-30T08:46:26.000Z
DC_CDN_IJCAI21.py
abhirajasp/CDCN
c9863775b1c1bffd91f956b5b2c6c78abfc988ec
[ "MIT" ]
53
2020-03-12T03:31:17.000Z
2022-03-31T07:15:53.000Z
DC_CDN_IJCAI21.py
abhirajasp/CDCN
c9863775b1c1bffd91f956b5b2c6c78abfc988ec
[ "MIT" ]
159
2020-03-10T09:01:39.000Z
2022-03-28T12:30:40.000Z
''' Code of 'Dual-Cross Central Difference Network for Face Anti-Spoofing' By Zitong Yu, 2021 If you use the code, please cite: @inproceedings{yu2021dual, title={Dual-Cross Central Difference Network for Face Anti-Spoofing}, author={Yu, Zitong and Qin, Yunxiao and ZHoa, Hengshuang and Li, Xiaobai and Zhao, Guoying}, booktitle= {IJCAI}, year = {2021} } Only for research purpose, and commercial use is not allowed. MIT License Copyright (c) 2021 ''' import math import torch import torch.nn.functional as F import torch.utils.model_zoo as model_zoo from torch import nn from torch.nn import Parameter import pdb import numpy as np class Conv2d_Hori_Veri_Cross(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=False, theta=0.7): super(Conv2d_Hori_Veri_Cross, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(1, 5), stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.theta = theta def forward(self, x): [C_out,C_in,H_k,W_k] = self.conv.weight.shape tensor_zeros = torch.FloatTensor(C_out, C_in, 1).fill_(0).cuda() conv_weight = torch.cat((tensor_zeros, self.conv.weight[:,:,:,0], tensor_zeros, self.conv.weight[:,:,:,1], self.conv.weight[:,:,:,2], self.conv.weight[:,:,:,3], tensor_zeros, self.conv.weight[:,:,:,4], tensor_zeros), 2) conv_weight = conv_weight.contiguous().view(C_out, C_in, 3, 3) out_normal = F.conv2d(input=x, weight=conv_weight, bias=self.conv.bias, stride=self.conv.stride, padding=self.conv.padding) if math.fabs(self.theta - 0.0) < 1e-8: return out_normal else: #pdb.set_trace() [C_out,C_in, kernel_size,kernel_size] = self.conv.weight.shape kernel_diff = self.conv.weight.sum(2).sum(2) kernel_diff = kernel_diff[:, :, None, None] out_diff = F.conv2d(input=x, weight=kernel_diff, bias=self.conv.bias, stride=self.conv.stride, padding=0, groups=self.conv.groups) return out_normal - self.theta * out_diff class Conv2d_Diag_Cross(nn.Module): def 
__init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=1, groups=1, bias=False, theta=0.7): super(Conv2d_Diag_Cross, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(1, 5), stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.theta = theta def forward(self, x): [C_out,C_in,H_k,W_k] = self.conv.weight.shape tensor_zeros = torch.FloatTensor(C_out, C_in, 1).fill_(0).cuda() conv_weight = torch.cat((self.conv.weight[:,:,:,0], tensor_zeros, self.conv.weight[:,:,:,1], tensor_zeros, self.conv.weight[:,:,:,2], tensor_zeros, self.conv.weight[:,:,:,3], tensor_zeros, self.conv.weight[:,:,:,4]), 2) conv_weight = conv_weight.contiguous().view(C_out, C_in, 3, 3) out_normal = F.conv2d(input=x, weight=conv_weight, bias=self.conv.bias, stride=self.conv.stride, padding=self.conv.padding) if math.fabs(self.theta - 0.0) < 1e-8: return out_normal else: #pdb.set_trace() [C_out,C_in, kernel_size,kernel_size] = self.conv.weight.shape kernel_diff = self.conv.weight.sum(2).sum(2) kernel_diff = kernel_diff[:, :, None, None] out_diff = F.conv2d(input=x, weight=kernel_diff, bias=self.conv.bias, stride=self.conv.stride, padding=0, groups=self.conv.groups) return out_normal - self.theta * out_diff class C_CDN(nn.Module): def __init__(self, basic_conv=Conv2d_Hori_Veri_Cross, theta=0.8): super(C_CDN, self).__init__() self.conv1 = nn.Sequential( basic_conv(3, 64, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(64), nn.ReLU(), ) self.Block1 = nn.Sequential( basic_conv(64, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), basic_conv(128, 196, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(196), nn.ReLU(), basic_conv(196, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2, padding=0), ) self.Block2 = nn.Sequential( 
basic_conv(128, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), basic_conv(128, 196, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(196), nn.ReLU(), basic_conv(196, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2, padding=0), ) self.Block3 = nn.Sequential( basic_conv(128, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), basic_conv(128, 196, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(196), nn.ReLU(), basic_conv(196, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2, padding=0), ) self.lastconv1 = nn.Sequential( basic_conv(128*3, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), ) self.lastconv2 = nn.Sequential( basic_conv(128, 64, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(64), nn.ReLU(), ) self.lastconv3 = nn.Sequential( basic_conv(64, 1, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), #nn.Conv2d(64, 1, kernel_size=1, stride=1, padding=0, bias=False), nn.ReLU(), ) self.downsample32x32 = nn.Upsample(size=(32, 32), mode='bilinear') def forward(self, x): # x [3, 256, 256] x_input = x x = self.conv1(x) x_Block1 = self.Block1(x) # x [128, 128, 128] x_Block1_32x32 = self.downsample32x32(x_Block1) # x [128, 32, 32] x_Block2 = self.Block2(x_Block1) # x [128, 64, 64] x_Block2_32x32 = self.downsample32x32(x_Block2) # x [128, 32, 32] x_Block3 = self.Block3(x_Block2) # x [128, 32, 32] x_Block3_32x32 = self.downsample32x32(x_Block3) # x [128, 32, 32] x_concat = torch.cat((x_Block1_32x32,x_Block2_32x32,x_Block3_32x32), dim=1) # x [128*3, 32, 32] #pdb.set_trace() x = self.lastconv1(x_concat) # x [128, 32, 32] x = self.lastconv2(x) # x [64, 32, 
32] x = self.lastconv3(x) # x [1, 32, 32] depth = x.squeeze(1) return depth class DC_CDN(nn.Module): def __init__(self, basic_conv1=Conv2d_Hori_Veri_Cross, basic_conv2=Conv2d_Diag_Cross, theta=0.8): super(DC_CDN, self).__init__() self.conv1 = nn.Sequential( basic_conv1(3, 64, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(64), nn.ReLU(), ) self.Block1 = nn.Sequential( basic_conv1(64, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), basic_conv1(128, 196, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(196), nn.ReLU(), basic_conv1(196, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2, padding=0), ) self.Block2 = nn.Sequential( basic_conv1(128, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), basic_conv1(128, 196, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(196), nn.ReLU(), basic_conv1(196, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2, padding=0), ) self.Block3 = nn.Sequential( basic_conv1(128, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), basic_conv1(128, 196, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(196), nn.ReLU(), basic_conv1(196, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2, padding=0), ) self.lastconv1 = nn.Sequential( basic_conv1(128*3, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), ) self.lastconv2 = nn.Sequential( basic_conv1(128, 64, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(64), nn.ReLU(), ) self.lastconv3 = 
nn.Sequential( #basic_conv1(64, 1, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.Conv2d(128, 1, kernel_size=1, stride=1, padding=0, bias=False), nn.ReLU(), ) # 2nd stream self.conv1_2 = nn.Sequential( basic_conv2(3, 64, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(64), nn.ReLU(), ) self.Block1_2 = nn.Sequential( basic_conv2(64, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), basic_conv2(128, 196, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(196), nn.ReLU(), basic_conv2(196, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2, padding=0), ) self.Block2_2 = nn.Sequential( basic_conv2(128, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), basic_conv2(128, 196, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(196), nn.ReLU(), basic_conv2(196, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2, padding=0), ) self.Block3_2 = nn.Sequential( basic_conv2(128, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), basic_conv2(128, 196, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(196), nn.ReLU(), basic_conv2(196, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2, padding=0), ) self.lastconv1_2 = nn.Sequential( basic_conv2(128*3, 128, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(128), nn.ReLU(), ) self.lastconv2_2 = nn.Sequential( basic_conv2(128, 64, kernel_size=3, stride=1, padding=1, bias=False, theta= theta), nn.BatchNorm2d(64), nn.ReLU(), ) #self.lastconv3_2 = nn.Sequential( # basic_conv2(64, 1, 
kernel_size=3, stride=1, padding=1, bias=False, theta= theta), # #nn.Conv2d(64, 1, kernel_size=1, stride=1, padding=0, bias=False), # nn.ReLU(), #) #self.HP_branch1 = Parameter(torch.ones([3,1])) self.HP_branch1 = Parameter(torch.zeros([3,1])) #self.HP_branch2 = Parameter(torch.ones([3,1])) self.HP_branch2 = Parameter(torch.zeros([3,1])) self.downsample32x32 = nn.Upsample(size=(32, 32), mode='bilinear') def forward(self, x): # x [3, 256, 256] x_input = x # 1st stream x = self.conv1(x_input) x_2 = self.conv1_2(x_input) x_Block1 = self.Block1(x) # x [128, 128, 128] x_Block1_2 = self.Block1_2(x_2) # x [128, 128, 128] # fusion1 x_Block1_new = F.sigmoid(self.HP_branch1[0])*x_Block1 + (1-F.sigmoid(self.HP_branch1[0]))*x_Block1_2 x_Block1_2_new = F.sigmoid(self.HP_branch2[0])*x_Block1_2 + (1-F.sigmoid(self.HP_branch2[0]))*x_Block1 x_Block2 = self.Block2(x_Block1) # x [128, 64, 64] x_Block2_2 = self.Block2_2(x_Block1_2) # x [128, 64, 64] # fusion2 x_Block2_new = F.sigmoid(self.HP_branch1[1])*x_Block2 + (1-F.sigmoid(self.HP_branch1[1]))*x_Block2_2 x_Block2_2_new = F.sigmoid(self.HP_branch2[1])*x_Block2_2 + (1-F.sigmoid(self.HP_branch2[1]))*x_Block2 x_Block3 = self.Block3(x_Block2) # x [128, 32, 32] x_Block3_2 = self.Block3_2(x_Block2_2) # x [128, 32, 32] # fusion3 x_Block3_new = F.sigmoid(self.HP_branch1[2])*x_Block3 + (1-F.sigmoid(self.HP_branch1[2]))*x_Block3_2 x_Block3_2_new = F.sigmoid(self.HP_branch2[2])*x_Block3_2 + (1-F.sigmoid(self.HP_branch2[2]))*x_Block3 x_Block1_32x32 = self.downsample32x32(x_Block1_new) # x [128, 32, 32] x_Block2_32x32 = self.downsample32x32(x_Block2_new) # x [128, 32, 32] x_Block3_32x32 = self.downsample32x32(x_Block3_new) # x [128, 32, 32] x_concat = torch.cat((x_Block1_32x32,x_Block2_32x32,x_Block3_32x32), dim=1) # x [128*3, 32, 32] x = self.lastconv1(x_concat) # x [128, 32, 32] depth1 = self.lastconv2(x) # x [64, 32, 32] #x = self.lastconv3(x) # x [1, 32, 32] #map_x_1 = x.squeeze(1) # 2nd stream x_Block1_32x32 = 
self.downsample32x32(x_Block1_2_new) # x [128, 32, 32] x_Block2_32x32 = self.downsample32x32(x_Block2_2_new) # x [128, 32, 32] x_Block3_32x32 = self.downsample32x32(x_Block3_2_new) # x [128, 32, 32] x_concat = torch.cat((x_Block1_32x32,x_Block2_32x32,x_Block3_32x32), dim=1) # x [128*3, 32, 32] x = self.lastconv1_2(x_concat) # x [128, 32, 32] depth2 = self.lastconv2_2(x) # x [64, 32, 32] # fusion depth = torch.cat((depth1,depth2), dim=1) depth = self.lastconv3(depth) # x [1, 32, 32] depth = depth.squeeze(1) return depth if __name__ == '__main__': inputs = torch.randn(1,3,256,256).cuda() model_C_CDN = C_CDN(basic_conv=Conv2d_Hori_Veri_Cross, theta=0.8).cuda() depth = model_C_CDN(inputs) model_C_CDN = C_CDN(basic_conv=Conv2d_Diag_Cross, theta=0.8).cuda() depth = model_C_CDN(inputs) model_DC_CDN = DC_CDN(basic_conv1=Conv2d_Hori_Veri_Cross, basic_conv2=Conv2d_Diag_Cross, theta=0.8).cuda() depth = model_DC_CDN(inputs) pdb.set_trace()
39.032634
227
0.563213
2,265
16,745
3.981015
0.074614
0.065432
0.068315
0.077298
0.896972
0.879561
0.862593
0.831097
0.779306
0.753798
0
0.102922
0.303135
16,745
429
228
39.032634
0.669809
0.097223
0
0.64
0
0
0.001594
0
0
0
0
0
0
1
0.029091
false
0
0.029091
0
0.094545
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
15416b43de6cd779ca949b24ddbd0646f2ae6037
26
py
Python
flfm/shell/__init__.py
m-flak/flfm
e73943dc014e9af87a5c170d17dee15e1c6609bd
[ "Apache-2.0" ]
5
2019-10-24T05:40:26.000Z
2021-01-06T01:41:08.000Z
flfm/shell/__init__.py
m-flak/flfm
e73943dc014e9af87a5c170d17dee15e1c6609bd
[ "Apache-2.0" ]
null
null
null
flfm/shell/__init__.py
m-flak/flfm
e73943dc014e9af87a5c170d17dee15e1c6609bd
[ "Apache-2.0" ]
null
null
null
from .routes import shell
13
25
0.807692
4
26
5.25
1
0
0
0
0
0
0
0
0
0
0
0
0.153846
26
1
26
26
0.954545
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
1566691c72ed8b34e355e97cfc508dde2c45bc16
2,907
py
Python
pirates/leveleditor/worldData/bilgewater_guildhall_interior_a.py
itsyaboyrocket/pirates
6ca1e7d571c670b0d976f65e608235707b5737e3
[ "BSD-3-Clause" ]
3
2021-02-25T06:38:13.000Z
2022-03-22T07:00:15.000Z
pirates/leveleditor/worldData/bilgewater_guildhall_interior_a.py
itsyaboyrocket/pirates
6ca1e7d571c670b0d976f65e608235707b5737e3
[ "BSD-3-Clause" ]
null
null
null
pirates/leveleditor/worldData/bilgewater_guildhall_interior_a.py
itsyaboyrocket/pirates
6ca1e7d571c670b0d976f65e608235707b5737e3
[ "BSD-3-Clause" ]
1
2021-02-25T06:38:17.000Z
2021-02-25T06:38:17.000Z
# uncompyle6 version 3.2.0 # Python bytecode 2.4 (62061) # Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)] # Embedded file name: pirates.leveleditor.worldData.bilgewater_guildhall_interior_a from pandac.PandaModules import Point3, VBase3 objectStruct = {'Objects': {'1155866758.05sdnaik0': {'Type': 'Building Interior', 'Name': 'bilgewater_guildhall_interior_a', 'Objects': {'1156912518.59sdnaik': {'Type': 'Barrel', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(0.879, 15.628, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/barrel'}}, '1156912537.84sdnaik': {'Type': 'Crate', 'Hpr': VBase3(28.285, 0.0, 0.0), 'Pos': Point3(-6.481, 10.324, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/crate'}}, '1156912551.03sdnaik': {'Type': 'Crate', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-2.958, 9.399, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/crate'}}, '1156918681.66sdnaik': {'Type': 'Patrol Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '2', 'Pos': Point3(2.041, 24.861, 0.0), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1156918727.22sdnaik': {'Type': 'Skeleton', 'AvId': 1, 'AvTrack': 0, 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(1.715, 20.64, 0.0), 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Start State': 'Walk'}, '1156970961.41sdnaik': {'Type': 'Animal', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(2.22, 5.799, 0.0), 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Species': 'Pig', 'Start State': 'Walk'}, '1156970980.98sdnaik': {'Type': 'Animal', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-14.216, -4.079, 0.0), 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Species': 'Rooster', 'Start State': 'Walk'}, '1156970994.78sdnaik': {'Type': 'Animal', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(-28.945, 26.753, 0.0), 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Species': 'Pig', 'Start State': 'Walk'}}, 'Visual': {'Model': 
'models/buildings/interior_shanty_guildhall'}}}, 'Node Links': [], 'Layers': {}, 'ObjectIds': {'1155866758.05sdnaik0': '["Objects"]["1155866758.05sdnaik0"]', '1156912518.59sdnaik': '["Objects"]["1155866758.05sdnaik0"]["Objects"]["1156912518.59sdnaik"]', '1156912537.84sdnaik': '["Objects"]["1155866758.05sdnaik0"]["Objects"]["1156912537.84sdnaik"]', '1156912551.03sdnaik': '["Objects"]["1155866758.05sdnaik0"]["Objects"]["1156912551.03sdnaik"]', '1156918681.66sdnaik': '["Objects"]["1155866758.05sdnaik0"]["Objects"]["1156918681.66sdnaik"]', '1156918727.22sdnaik': '["Objects"]["1155866758.05sdnaik0"]["Objects"]["1156918727.22sdnaik"]', '1156970961.41sdnaik': '["Objects"]["1155866758.05sdnaik0"]["Objects"]["1156970961.41sdnaik"]', '1156970980.98sdnaik': '["Objects"]["1155866758.05sdnaik0"]["Objects"]["1156970980.98sdnaik"]', '1156970994.78sdnaik': '["Objects"]["1155866758.05sdnaik0"]["Objects"]["1156970994.78sdnaik"]'}}
484.5
2,614
0.650155
414
2,907
4.545894
0.289855
0.048884
0.047821
0.046759
0.284272
0.284272
0.277365
0.277365
0.269394
0.269394
0
0.257666
0.080151
2,907
6
2,614
484.5
0.446148
0.081527
0
0
0
0
0.55964
0.247562
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
1
1
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
ec6bef0bfd07b896bf7dc1e9c6e1f64224d7fe0e
20
py
Python
__init__.py
hamiltonparker/Learning
2dd77bd222758db961c0987e4ad4e828daf2c508
[ "MIT" ]
null
null
null
__init__.py
hamiltonparker/Learning
2dd77bd222758db961c0987e4ad4e828daf2c508
[ "MIT" ]
3
2017-08-31T23:44:54.000Z
2017-09-19T04:24:37.000Z
__init__.py
hamiltonparker/learning
2dd77bd222758db961c0987e4ad4e828daf2c508
[ "MIT" ]
null
null
null
import HNFGen as hg
10
19
0.8
4
20
4
1
0
0
0
0
0
0
0
0
0
0
0
0.2
20
1
20
20
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
ec9a1f73c7a29726f28d4bc65a81a2ddf2ba1572
401
py
Python
strops/schemes/views/mapping/__init__.py
ckoerber/strops
2131354fd6822b3aa7b7d9c3c0db79723b06b8ca
[ "BSD-3-Clause" ]
1
2020-12-29T19:57:47.000Z
2020-12-29T19:57:47.000Z
strops/schemes/views/mapping/__init__.py
ckoerber/strops
2131354fd6822b3aa7b7d9c3c0db79723b06b8ca
[ "BSD-3-Clause" ]
13
2020-06-29T11:15:59.000Z
2021-09-22T19:18:36.000Z
strops/schemes/views/mapping/__init__.py
ckoerber/strops
2131354fd6822b3aa7b7d9c3c0db79723b06b8ca
[ "BSD-3-Clause" ]
null
null
null
"""Views associated with connecting operators at a source scale to a target scale.""" from strops.schemes.views.mapping.index import IndexView # noqa from strops.schemes.views.mapping.scales import ( # noqa PickSourceScaleView, PickTargetScaleView, ) from strops.schemes.views.mapping.branch import PickBranchView # noqa from strops.schemes.views.mapping.present import PresentView # noqa
44.555556
85
0.793017
50
401
6.36
0.52
0.125786
0.213836
0.27673
0.389937
0.207547
0
0
0
0
0
0
0.129676
401
8
86
50.125
0.911175
0.249377
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.571429
0
0.571429
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
ecdbd86e8fe2c0d100da33966efa83239bdba064
111
py
Python
psy/settings/__init__.py
cegfdb/IRT
20fcde3b385bce1644fecab7cdc8bda5beacda03
[ "MIT" ]
169
2017-08-29T01:35:49.000Z
2022-03-01T05:03:02.000Z
psy/settings/__init__.py
a854367688/pypsy
f055fe1f4901b654d99d9a776152e8192e014f5f
[ "MIT" ]
8
2017-12-05T05:20:35.000Z
2021-10-03T05:40:45.000Z
psy/settings/__init__.py
a854367688/pypsy
f055fe1f4901b654d99d9a776152e8192e014f5f
[ "MIT" ]
67
2017-09-01T04:18:54.000Z
2022-02-24T08:21:18.000Z
from psy.settings.mirt import GH_POINT_DT, X_NODES, X_WEIGHTS from psy.settings.cat import TRIPLETS_PERMUTATION
55.5
61
0.864865
19
111
4.789474
0.736842
0.153846
0.32967
0
0
0
0
0
0
0
0
0
0.081081
111
2
62
55.5
0.892157
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
01ba06ef716f5278eb8fc912cceb8bc80b1a8795
92
py
Python
awegan/options/__init__.py
atomicoo/EnhanceIMG
8c009fbb6c5461ff6d7f30bdacec72232639c7f2
[ "MIT" ]
35
2021-04-20T21:14:25.000Z
2022-03-31T08:27:35.000Z
awegan/options/__init__.py
Real798/EnhanceIMG
8c009fbb6c5461ff6d7f30bdacec72232639c7f2
[ "MIT" ]
2
2021-05-13T05:34:59.000Z
2021-09-23T09:07:32.000Z
awegan/options/__init__.py
Real798/EnhanceIMG
8c009fbb6c5461ff6d7f30bdacec72232639c7f2
[ "MIT" ]
7
2021-05-10T12:08:42.000Z
2022-02-24T10:06:05.000Z
from options.train_options import TrainOptions from options.test_options import TestOptions
30.666667
46
0.891304
12
92
6.666667
0.583333
0.275
0
0
0
0
0
0
0
0
0
0
0.086957
92
2
47
46
0.952381
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
01c0adf80716ea3638a87f631b4832a3652d3188
77
py
Python
src/backend/lib/demo/user_ids.py
robyn-thomas/span-gdg-2021-hackathon
09bf8cdaf21ec9e8a83ea7de5076c4e26bae64d0
[ "MIT" ]
null
null
null
src/backend/lib/demo/user_ids.py
robyn-thomas/span-gdg-2021-hackathon
09bf8cdaf21ec9e8a83ea7de5076c4e26bae64d0
[ "MIT" ]
null
null
null
src/backend/lib/demo/user_ids.py
robyn-thomas/span-gdg-2021-hackathon
09bf8cdaf21ec9e8a83ea7de5076c4e26bae64d0
[ "MIT" ]
null
null
null
# Jayan Comparison ID's jayan_ids = "1467151701325004801,801093845882523648"
25.666667
52
0.831169
8
77
7.875
0.875
0
0
0
0
0
0
0
0
0
0
0.528571
0.090909
77
2
53
38.5
0.371429
0.272727
0
0
0
0
0.703704
0.703704
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
01f8fd648f5e063d4563db9581204c30bcd24ddb
21
py
Python
web/settings/settings.py
beniaminonobile/www.albertifra.it
721b6125bbe56e806cf3abd5270d9c5cd85034be
[ "MIT" ]
null
null
null
web/settings/settings.py
beniaminonobile/www.albertifra.it
721b6125bbe56e806cf3abd5270d9c5cd85034be
[ "MIT" ]
null
null
null
web/settings/settings.py
beniaminonobile/www.albertifra.it
721b6125bbe56e806cf3abd5270d9c5cd85034be
[ "MIT" ]
null
null
null
from .common import *
21
21
0.761905
3
21
5.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.142857
21
1
21
21
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
bf09757183ff8301ba869c3472bf6c729bf5c25b
261
py
Python
include/__init__.py
MLI-lab/Robustness-CS
8ef26795ffd02824a2cf0f9496887554484a8b08
[ "Apache-2.0" ]
18
2021-05-16T21:50:58.000Z
2021-12-23T14:52:02.000Z
include/__init__.py
MLI-lab/Robustness-CS
8ef26795ffd02824a2cf0f9496887554484a8b08
[ "Apache-2.0" ]
null
null
null
include/__init__.py
MLI-lab/Robustness-CS
8ef26795ffd02824a2cf0f9496887554484a8b08
[ "Apache-2.0" ]
3
2021-04-08T06:47:32.000Z
2021-10-15T12:22:03.000Z
from .transforms import * from .decoder_parallel_conv import * from .decoder_conv import * from .decoder_skip import * from .fit import * from .fits import * from .helpers import * from .mri_helpers import * from .runner import * from .runner_untrained import *
26.1
36
0.773946
36
261
5.444444
0.361111
0.459184
0.260204
0.214286
0
0
0
0
0
0
0
0
0.149425
261
10
37
26.1
0.882883
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
bf27db95b6bc597b60b50f534eee7a86de261f5e
155
py
Python
81_pr9_09.py
AmreshTripathy/Python
e86420fef7f52da393be5b50ac2f13bddfeb3306
[ "Apache-2.0" ]
4
2021-05-27T05:06:09.000Z
2021-06-12T17:12:47.000Z
81_pr9_09.py
AmreshTripathy/Python
e86420fef7f52da393be5b50ac2f13bddfeb3306
[ "Apache-2.0" ]
null
null
null
81_pr9_09.py
AmreshTripathy/Python
e86420fef7f52da393be5b50ac2f13bddfeb3306
[ "Apache-2.0" ]
null
null
null
with open('copy.txt') as f: print (f.read()) with open('copy.txt', 'w') as f: f.write('') with open('copy.txt') as f: print (f.read())
19.375
33
0.522581
27
155
3
0.37037
0.296296
0.444444
0.555556
0.691358
0.691358
0.691358
0.691358
0.691358
0
0
0
0.245161
155
8
34
19.375
0.692308
0
0
0.666667
0
0
0.167785
0
0
0
0
0
0
1
0
true
0
0
0
0
0.333333
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
171e69809d73a3dddab54a89799ecfbc4f935dde
126
py
Python
application/python/dobot/__init__.py
afifswaidan/open-dobot
377dab0f1803b0cd78dd619e5bdb5eca77edaeaf
[ "MIT" ]
148
2016-03-22T13:21:09.000Z
2021-07-15T12:28:16.000Z
application/python/dobot/__init__.py
afifswaidan/open-dobot
377dab0f1803b0cd78dd619e5bdb5eca77edaeaf
[ "MIT" ]
37
2016-03-27T03:20:31.000Z
2021-11-17T00:20:26.000Z
application/python/dobot/__init__.py
afifswaidan/open-dobot
377dab0f1803b0cd78dd619e5bdb5eca77edaeaf
[ "MIT" ]
71
2016-03-26T08:14:06.000Z
2022-02-18T06:51:57.000Z
from dobot.DobotDriver import DobotDriver from dobot.DobotSDK import Dobot from dobot.DobotKinematics import DobotKinematics
25.2
49
0.873016
15
126
7.333333
0.4
0.245455
0
0
0
0
0
0
0
0
0
0
0.103175
126
4
50
31.5
0.973451
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
172d64a8dcd79b83eb4053c4500e353aa7fb7faf
6,862
py
Python
gnuradio-3.7.13.4/gr-filter/python/filter/qa_fir_filter.py
v1259397/cosmic-gnuradio
64c149520ac6a7d44179c3f4a38f38add45dd5dc
[ "BSD-3-Clause" ]
1
2021-03-09T07:32:37.000Z
2021-03-09T07:32:37.000Z
gnuradio-3.7.13.4/gr-filter/python/filter/qa_fir_filter.py
v1259397/cosmic-gnuradio
64c149520ac6a7d44179c3f4a38f38add45dd5dc
[ "BSD-3-Clause" ]
null
null
null
gnuradio-3.7.13.4/gr-filter/python/filter/qa_fir_filter.py
v1259397/cosmic-gnuradio
64c149520ac6a7d44179c3f4a38f38add45dd5dc
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python # # Copyright 2008,2010,2012,2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # from gnuradio import gr, gr_unittest, filter, blocks def fir_filter(x, taps, decim=1): y = [] x2 = (len(taps)-1)*[0,] + x for i in range(0, len(x), decim): yi = 0 for j in range(len(taps)): yi += taps[len(taps)-1-j] * x2[i+j] y.append(yi) return y class test_filter(gr_unittest.TestCase): def setUp(self): self.tb = gr.top_block () def tearDown(self): self.tb = None def test_fir_filter_fff_001(self): decim = 1 taps = 20*[0.5, 0.5] src_data = 40*[1, 2, 3, 4] expected_data = fir_filter(src_data, taps, decim) src = blocks.vector_source_f(src_data) op = filter.fir_filter_fff(decim, taps) dst = blocks.vector_sink_f() self.tb.connect(src, op, dst) self.tb.run() result_data = dst.data() self.assertFloatTuplesAlmostEqual(expected_data, result_data, 5) def test_fir_filter_fff_002(self): decim = 4 taps = 20*[0.5, 0.5] src_data = 40*[1, 2, 3, 4] expected_data = fir_filter(src_data, taps, decim) src = blocks.vector_source_f(src_data) op = filter.fir_filter_fff(decim, taps) dst = blocks.vector_sink_f() self.tb.connect(src, op, dst) self.tb.run() result_data = dst.data() self.assertFloatTuplesAlmostEqual(expected_data, result_data, 5) def test_fir_filter_ccf_001(self): decim = 1 taps = 
20*[0.5, 0.5] src_data = 40*[1+1j, 2+2j, 3+3j, 4+4j] expected_data = fir_filter(src_data, taps, decim) src = blocks.vector_source_c(src_data) op = filter.fir_filter_ccf(decim, taps) dst = blocks.vector_sink_c() self.tb.connect(src, op, dst) self.tb.run() result_data = dst.data() self.assertComplexTuplesAlmostEqual(expected_data, result_data, 5) def test_fir_filter_ccf_002(self): decim = 4 taps = 20*[0.5, 0.5] src_data = 40*[1+1j, 2+2j, 3+3j, 4+4j] expected_data = fir_filter(src_data, taps, decim) src = blocks.vector_source_c(src_data) op = filter.fir_filter_ccf(decim, taps) dst = blocks.vector_sink_c() self.tb.connect(src, op, dst) self.tb.run() result_data = dst.data() self.assertComplexTuplesAlmostEqual(expected_data, result_data, 5) def test_fir_filter_ccc_001(self): decim = 1 taps = 20*[0.5+1j, 0.5+1j] src_data = 40*[1+1j, 2+2j, 3+3j, 4+4j] expected_data = fir_filter(src_data, taps, decim) src = blocks.vector_source_c(src_data) op = filter.fir_filter_ccc(decim, taps) dst = blocks.vector_sink_c() self.tb.connect(src, op, dst) self.tb.run() result_data = dst.data() self.assertComplexTuplesAlmostEqual(expected_data, result_data, 5) def test_fir_filter_ccc_002(self): decim = 1 taps = filter.firdes.low_pass(1, 1, 0.1, 0.01) src_data = 10*[1+1j, 2+2j, 3+3j, 4+4j] expected_data = fir_filter(src_data, taps, decim) src = blocks.vector_source_c(src_data) op = filter.fir_filter_ccc(decim, taps) dst = blocks.vector_sink_c() self.tb.connect(src, op, dst) self.tb.run() result_data = dst.data() self.assertComplexTuplesAlmostEqual(expected_data, result_data, 5) def test_fir_filter_ccc_003(self): decim = 4 taps = 20*[0.5+1j, 0.5+1j] src_data = 40*[1+1j, 2+2j, 3+3j, 4+4j] expected_data = fir_filter(src_data, taps, decim) src = blocks.vector_source_c(src_data) op = filter.fir_filter_ccc(decim, taps) dst = blocks.vector_sink_c() self.tb.connect(src, op, dst) self.tb.run() result_data = dst.data() self.assertComplexTuplesAlmostEqual(expected_data, result_data, 5) def 
test_fir_filter_scc_001(self): decim = 1 taps = 20*[0.5+1j, 0.5+1j] src_data = 40*[1, 2, 3, 4] expected_data = fir_filter(src_data, taps, decim) src = blocks.vector_source_s(src_data) op = filter.fir_filter_scc(decim, taps) dst = blocks.vector_sink_c() self.tb.connect(src, op, dst) self.tb.run() result_data = dst.data() self.assertComplexTuplesAlmostEqual(expected_data, result_data, 5) def test_fir_filter_scc_002(self): decim = 4 taps = 20*[0.5+1j, 0.5+1j] src_data = 40*[1, 2, 3, 4] expected_data = fir_filter(src_data, taps, decim) src = blocks.vector_source_s(src_data) op = filter.fir_filter_scc(decim, taps) dst = blocks.vector_sink_c() self.tb.connect(src, op, dst) self.tb.run() result_data = dst.data() self.assertComplexTuplesAlmostEqual(expected_data, result_data, 5) def test_fir_filter_fsf_001(self): decim = 1 taps = 20*[0.5, 0.5] src_data = 40*[1, 2, 3, 4] expected_data = fir_filter(src_data, taps, decim) expected_data = [int(e) for e in expected_data] src = blocks.vector_source_f(src_data) op = filter.fir_filter_fsf(decim, taps) dst = blocks.vector_sink_s() self.tb.connect(src, op, dst) self.tb.run() result_data = dst.data() self.assertComplexTuplesAlmostEqual(expected_data, result_data, 5) def test_fir_filter_fsf_002(self): decim = 4 taps = 20*[0.5, 0.5] src_data = 40*[1, 2, 3, 4] expected_data = fir_filter(src_data, taps, decim) expected_data = [int(e) for e in expected_data] src = blocks.vector_source_f(src_data) op = filter.fir_filter_fsf(decim, taps) dst = blocks.vector_sink_s() self.tb.connect(src, op, dst) self.tb.run() result_data = dst.data() self.assertComplexTuplesAlmostEqual(expected_data, result_data, 5) if __name__ == '__main__': gr_unittest.run(test_filter, "test_filter.xml")
33.473171
74
0.62285
1,025
6,862
3.956098
0.158049
0.075462
0.027127
0.043403
0.784464
0.772626
0.758816
0.758816
0.758076
0.758076
0
0.049901
0.264063
6,862
204
75
33.637255
0.753069
0.112066
0
0.801325
0
0
0.003788
0
0
0
0
0
0.072848
1
0.092715
false
0.006623
0.006623
0
0.112583
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
175d212b63795ced10efbdeea2d18fcb3232efd4
641
py
Python
app/models.py
sonalnikam/try
26ef8355d652ffd35f63564c3c7665ad0776a0c8
[ "CC0-1.0" ]
null
null
null
app/models.py
sonalnikam/try
26ef8355d652ffd35f63564c3c7665ad0776a0c8
[ "CC0-1.0" ]
null
null
null
app/models.py
sonalnikam/try
26ef8355d652ffd35f63564c3c7665ad0776a0c8
[ "CC0-1.0" ]
null
null
null
""" Definition of models. """ from django.db import models # Create your models here. class Registern(models.Model): Name = models.CharField(max_length=400) Username = models.CharField(max_length=400) Password = models.CharField(max_length=400) CPassword = models.CharField(max_length=400) def __str__(self): return ' '.join([ self. ordering, ]) class car_info(models.Model): location = models.CharField(max_length=400) from_id = models.CharField(max_length=400) to = models.CharField(max_length=400) def __str__(self): return ' '.join([ self. ordering, ])
20.03125
47
0.663027
78
641
5.230769
0.410256
0.257353
0.308824
0.411765
0.620098
0.289216
0.289216
0.289216
0.289216
0.289216
0
0.041833
0.216849
641
32
48
20.03125
0.770916
0.073323
0
0.444444
0
0
0.003407
0
0
0
0
0
0
1
0.111111
false
0.111111
0.055556
0.111111
0.777778
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
1
1
0
0
6
178e129f44e0910e2c2af028f025c2a0f8bbf5e2
192
py
Python
run.py
sofiaele/audio_annotator
c9be96fce1a3ccdb53a73b80b81fc93ce0050901
[ "MIT" ]
null
null
null
run.py
sofiaele/audio_annotator
c9be96fce1a3ccdb53a73b80b81fc93ce0050901
[ "MIT" ]
null
null
null
run.py
sofiaele/audio_annotator
c9be96fce1a3ccdb53a73b80b81fc93ce0050901
[ "MIT" ]
null
null
null
from audio_annotator_generic import app from audio_annotator_generic.utils import create_directories if __name__ == "__main__": create_directories() app.run(host="0.0.0.0", port=9001)
32
60
0.78125
28
192
4.857143
0.607143
0.044118
0.264706
0.367647
0
0
0
0
0
0
0
0.047337
0.119792
192
6
61
32
0.757396
0
0
0
0
0
0.07772
0
0
0
0
0
0
1
0
true
0
0.4
0
0.4
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
1792c82a7afb0ee445c721bf24a1b3e9b541b5e2
12,067
py
Python
tests/fixtures/responses.py
gurdulu/virga
4d641de30ab574823d0326b9904161aaec8845ed
[ "MIT" ]
null
null
null
tests/fixtures/responses.py
gurdulu/virga
4d641de30ab574823d0326b9904161aaec8845ed
[ "MIT" ]
null
null
null
tests/fixtures/responses.py
gurdulu/virga
4d641de30ab574823d0326b9904161aaec8845ed
[ "MIT" ]
null
null
null
import datetime from dateutil.tz import tzutc acm_certificate_list = { 'CertificateSummaryList': [ { 'DomainName': 'my.any-domain.com', 'CertificateArn': 'arn:aws:acm:eu-west-2:012345678:certificate/01234567-abcd-0123-0123-abcdfe01234' } ] } acm_result_find_certificate = { 'CertificateArn': 'arn:aws:acm:eu-west-2:012345678:certificate/01234567-abcd-0123-0123-abcdfe01234', 'CreatedAt': '2017-01-11T09:23:40+01:00', 'DomainName': 'my.any-domain.com', 'DomainValidationOptions': [ { 'DomainName': 'my.any-domain.com', 'ValidationDomain': 'any-domain.com', 'ValidationEmails': [ 'hostmaster@any-domain.com', 'admin@any-domain.com', 'webmaster@any-domain.com', 'postmaster@any-domain.com', 'administrator@any-domain.com' ], 'ValidationStatus': 'SUCCESS' } ], 'InUseBy': [], 'IssuedAt': '2017-01-11T09:25:15+01:00', 'Issuer': 'Amazon', 'KeyAlgorithm': 'RSA-2048', 'NotAfter': '2018-01-12T13:00:00+01:00', 'NotBefore': '2017-01-12T01:00:00+01:00', 'Serial': '00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff', 'SignatureAlgorithm': 'SHA256WITHRSA', 'Status': 'ISSUED', 'Subject': 'CN=my.any-domain.com', 'SubjectAlternativeNames': ['my.any-domain.com'], 'Type': 'AMAZON_ISSUED' } elbv2_describe_load_balancers = { 'LoadBalancers': [ { 'AvailabilityZones': [ { 'SubnetId': 'subnet-0123456', 'ZoneName': 'eu-west-2a' }, { 'SubnetId': 'subnet-0123457', 'ZoneName': 'eu-west-2b'} ], 'CanonicalHostedZoneId': 'ZHURV9DERC5T8', 'CreatedTime': datetime.datetime(2017, 1, 12, 8, 25, 11, 840000, tzinfo=tzutc()), 'DNSName': 'internal-my-elbv2-0123456.eu-west-2.elb.amazonaws.com', 'IpAddressType': 'ipv4', 'LoadBalancerArn': 'arn:aws:elasticloadbalancing:eu-west-2:01234567890:loadbalancer/app/my-elbv2/9987acf27', 'LoadBalancerName': 'my-elbv2', 'Scheme': 'internal', 'SecurityGroups': ['sg-02232883'], 'State': {'Code': 'active'}, 'Type': 'application', 'VpcId': 'vpc-9839873'} ] } elbv2_describe_load_balancer_attributes = { 'Attributes': [ {'Key': 'access_logs.s3.bucket', 'Value': 'bucket'}, {'Key': 
'deletion_protection.enabled', 'Value': 'false'}, {'Key': 'access_logs.s3.prefix', 'Value': 'prefix'}, {'Key': 'idle_timeout.timeout_seconds', 'Value': '60'}, {'Key': 'access_logs.s3.enabled', 'Value': 'false'} ], } elbv2_describe_listeners = { 'Listeners': [ { 'DefaultActions': [ { 'TargetGroupArn': 'arn:aws:elasticloadbalancing:eu-west-2:12345679012:targetgroup/my-app-tg/0bd28872872', 'Type': 'forward' } ], 'ListenerArn': 'arn:aws:elasticloadbalancing:eu-west-2:01234567890:listener/app/my-elbv2/9b54/2ab1', 'LoadBalancerArn': 'arn:aws:elasticloadbalancing:eu-west-2:01234567890:loadbalancer/app/my-elbv2/9987acf27', 'Port': 8080, 'Protocol': 'HTTP' } ] } elbv2_describe_target_groups = { 'TargetGroups': [ { 'HealthCheckIntervalSeconds': 30, 'HealthCheckPath': '/', 'HealthCheckPort': 'traffic-port', 'HealthCheckProtocol': 'HTTP', 'HealthCheckTimeoutSeconds': 5, 'HealthyThresholdCount': 5, 'LoadBalancerArns': [ 'arn:aws:elasticloadbalancing:eu-west-2:01234567890:loadbalancer/app/my-elbv2/9987acf27' ], 'Matcher': {'HttpCode': '200'}, 'Port': 8080, 'Protocol': 'HTTP', 'TargetGroupArn': 'arn:aws:elasticloadbalancing:eu-west-2:12345679012:targetgroup/my-app-tg/0bd28872872', 'TargetGroupName': 'my-app-tg', 'TargetType': 'instance', 'UnhealthyThresholdCount': 2, 'VpcId': 'vpc-9839873' } ] } elbv2_describe_target_group_attributes = { 'Attributes': [ {'Key': 'stickiness.enabled', 'Value': 'true'}, {'Key': 'deregistration_delay.timeout_seconds', 'Value': '300'}, {'Key': 'stickiness.type', 'Value': 'lb_cookie'}, {'Key': 'stickiness.lb_cookie.duration_seconds', 'Value': '86400'} ], } elbv2_describe_tags = { 'TagDescriptions': [ { 'Tags': [ {'Key': 'Environment', 'Value': 'dev'}, {'Key': 'Name', 'Value': 'my-elbv2'}, ] } ] } elbv2_result = { 'LoadBalancers': [ { 'Attributes': [ {'Key': 'access_logs.s3.bucket', 'Value': 'bucket'}, {'Key': 'deletion_protection.enabled', 'Value': 'false'}, {'Key': 'access_logs.s3.prefix', 'Value': 'prefix'}, {'Key': 'idle_timeout.timeout_seconds', 
'Value': '60'}, {'Key': 'access_logs.s3.enabled', 'Value': 'false'} ], 'AvailabilityZones': [ {'SubnetId': 'subnet-0123456', 'ZoneName': 'eu-west-2a'}, {'SubnetId': 'subnet-0123457', 'ZoneName': 'eu-west-2b'} ], 'CanonicalHostedZoneId': 'ZHURV9DERC5T8', 'CreatedTime': datetime.datetime(2017, 1, 12, 8, 25, 11, 840000, tzinfo=tzutc()), 'DNSName': 'internal-my-elbv2-0123456.eu-west-2.elb.amazonaws.com', 'IpAddressType': 'ipv4', 'Listeners': [ { 'DefaultActions': [ { 'TargetGroupArn': 'arn:aws:elasticloadbalancing:eu-west-2:12345679012:targetgroup/my-app-tg/0bd28872872', 'Type': 'forward' } ], 'ListenerArn': 'arn:aws:elasticloadbalancing:eu-west-2:01234567890:listener/app/my-elbv2/9b54/2ab1', 'LoadBalancerArn': 'arn:aws:elasticloadbalancing:eu-west-2:01234567890:loadbalancer/app/my-elbv2/9987acf27', 'Port': 8080, 'Protocol': 'HTTP' } ], 'LoadBalancerArn': 'arn:aws:elasticloadbalancing:eu-west-2:01234567890:loadbalancer/app/my-elbv2/9987acf27', 'LoadBalancerName': 'my-elbv2', 'Scheme': 'internal', 'SecurityGroups': ['sg-02232883'], 'State': {'Code': 'active'}, 'TargetGroups': [ { 'Attributes': [ {'Key': 'stickiness.enabled', 'Value': 'true'}, {'Key': 'deregistration_delay.timeout_seconds', 'Value': '300'}, {'Key': 'stickiness.type', 'Value': 'lb_cookie'}, {'Key': 'stickiness.lb_cookie.duration_seconds', 'Value': '86400'} ], 'HealthCheckIntervalSeconds': 30, 'HealthCheckPath': '/', 'HealthCheckPort': 'traffic-port', 'HealthCheckProtocol': 'HTTP', 'HealthCheckTimeoutSeconds': 5, 'HealthyThresholdCount': 5, 'LoadBalancerArns': [ 'arn:aws:elasticloadbalancing:eu-west-2:01234567890:loadbalancer/app/my-elbv2/9987acf27' ], 'Matcher': {'HttpCode': '200'}, 'Port': 8080, 'Protocol': 'HTTP', 'TargetGroupArn': 'arn:aws:elasticloadbalancing:eu-west-2:12345679012:targetgroup/my-app-tg/0bd28872872', 'TargetGroupName': 'my-app-tg', 'TargetType': 'instance', 'UnhealthyThresholdCount': 2, 'VpcId': 'vpc-9839873' } ], 'Type': 'application', 'VpcId': 'vpc-9839873', 'Tags': [ {'Key': 
'Environment', 'Value': 'dev'}, {'Key': 'Name', 'Value': 'my-elbv2'}, ] }, ] } elb_describe_load_balancers = { 'LoadBalancerDescriptions': [ { 'Subnets': ['subnet-0123456', 'subnet-0123457'], 'CanonicalHostedZoneNameID': 'ZABCDEFG', 'VPCId': 'vpc-0123456', 'ListenerDescriptions': [ { 'Listener': { 'InstancePort': 443, 'LoadBalancerPort': 443, 'Protocol': 'TCP', 'InstanceProtocol': 'TCP' }, 'PolicyNames': [] } ], 'HealthCheck': { 'HealthyThreshold': 2, 'Interval': 30, 'Target': 'HTTPS:443', 'Timeout': 5, 'UnhealthyThreshold': 2 }, 'BackendServerDescriptions': [], 'Instances': [ {'InstanceId': 'i-0123456'} ], 'DNSName': 'internal-my-elb-0123456.eu-west-2.elb.amazonaws.com', 'SecurityGroups': ['sg-0123456'], 'Policies': { 'LBCookieStickinessPolicies': [], 'AppCookieStickinessPolicies': [], 'OtherPolicies': [] }, 'LoadBalancerName': 'my-elb', 'CreatedTime': '2018-04-24T21:44:24.670Z', 'AvailabilityZones': [ 'eu-west-2a', 'eu-west-2b' ], 'Scheme': 'internal', 'SourceSecurityGroup': { 'OwnerAlias': '01234567890', 'GroupName': 'my.example.com' }, } ] } elb_describe_tags = { 'TagDescriptions': [ { 'Tags': [ {'Key': 'Environment', 'Value': 'dev'}, {'Key': 'Name', 'Value': 'my-elb'}, ] } ] } elb_result = { 'LoadBalancerDescriptions': [ { 'Subnets': ['subnet-0123456', 'subnet-0123457'], 'CanonicalHostedZoneNameID': 'ZABCDEFG', 'VPCId': 'vpc-0123456', 'ListenerDescriptions': [ { 'Listener': { 'InstancePort': 443, 'LoadBalancerPort': 443, 'Protocol': 'TCP', 'InstanceProtocol': 'TCP' }, 'PolicyNames': [] } ], 'HealthCheck': { 'HealthyThreshold': 2, 'Interval': 30, 'Target': 'HTTPS:443', 'Timeout': 5, 'UnhealthyThreshold': 2 }, 'BackendServerDescriptions': [], 'Instances': [ {'InstanceId': 'i-0123456'} ], 'DNSName': 'internal-my-elb-0123456.eu-west-2.elb.amazonaws.com', 'SecurityGroups': ['sg-0123456'], 'Policies': { 'LBCookieStickinessPolicies': [], 'AppCookieStickinessPolicies': [], 'OtherPolicies': [] }, 'LoadBalancerName': 'my-elb', 'CreatedTime': '2018-04-24T21:44:24.670Z', 
'AvailabilityZones': [ 'eu-west-2a', 'eu-west-2b' ], 'Scheme': 'internal', 'SourceSecurityGroup': { 'OwnerAlias': '01234567890', 'GroupName': 'my.example.com' }, 'Tags': [ {'Key': 'Environment', 'Value': 'dev'}, {'Key': 'Name', 'Value': 'my-elb'}, ] } ] }
35.180758
133
0.473523
880
12,067
6.432955
0.259091
0.027557
0.022258
0.059353
0.854619
0.826709
0.826709
0.826709
0.826709
0.826709
0
0.101998
0.36952
12,067
342
134
35.283626
0.642087
0
0
0.640379
0
0.047319
0.464739
0.216872
0
0
0
0
0
1
0
false
0
0.006309
0
0.006309
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
bd66c0257f0cb49e8300720d7dea2f89e2605265
30,280
py
Python
stRT/plot/three_d_plot/three_dims_plots.py
Yao-14/stAnalysis
d08483ce581f5b03cfcad8be500aaa64b0293f74
[ "BSD-3-Clause" ]
null
null
null
stRT/plot/three_d_plot/three_dims_plots.py
Yao-14/stAnalysis
d08483ce581f5b03cfcad8be500aaa64b0293f74
[ "BSD-3-Clause" ]
null
null
null
stRT/plot/three_d_plot/three_dims_plots.py
Yao-14/stAnalysis
d08483ce581f5b03cfcad8be500aaa64b0293f74
[ "BSD-3-Clause" ]
null
null
null
import math import re from typing import List, Optional, Tuple, Union import matplotlib as mpl import numpy as np from pyvista import MultiBlock, Plotter, PolyData, UnstructuredGrid try: from typing import Literal except ImportError: from typing_extensions import Literal from ...tdr import collect_model from .three_dims_plotter import ( _set_jupyter, add_legend, add_model, add_outline, add_text, create_plotter, output_plotter, save_plotter, ) def wrap_to_plotter( plotter: Plotter, model: Union[PolyData, UnstructuredGrid, MultiBlock], key: Union[str, list] = None, background: str = "white", cpo: Union[str, list] = "iso", ambient: float = 0.2, opacity: float = 1.0, model_style: Union[Literal["points", "surface", "wireframe"], list] = "surface", model_size: float = 5.0, show_legend: bool = True, legend_kwargs: Optional[dict] = None, show_outline: bool = False, outline_kwargs: Optional[dict] = None, text: Optional[str] = None, text_kwargs: Optional[dict] = None, ): """ What needs to be added to the visualization window. Args: plotter: The plotting object to display pyvista/vtk model. model: A reconstructed model. key: The key under which are the labels. background: The background color of the window. cpo: Camera position of the active render window. Available `cpo` are: * Iterable containing position, focal_point, and view up. E.g.: `[(2.0, 5.0, 13.0), (0.0, 0.0, 0.0), (-0.7, -0.5, 0.3)]` * Iterable containing a view vector. E.g.: ` [-1.0, 2.0, -5.0]` * A string containing the plane orthogonal to the view direction. E.g.: `'xy'`, `'xz'`, `'yz'`, `'yx'`, `'zx'`, `'zy'`, `'iso'` ambient: When lighting is enabled, this is the amount of light in the range of 0 to 1 (default 0.0) that reaches the actor when not directed at the light source emitted from the viewer. opacity: Opacity of the model. If a single float value is given, it will be the global opacity of the model and uniformly applied everywhere - should be between 0 and 1. 
A string can also be specified to map the scalars range to a predefined opacity transfer function (options include: 'linear', 'linear_r', 'geom', 'geom_r'). model_style: Visualization style of the model. One of the following: style='surface', style='wireframe', style='points'. model_size: If model_style=`points`, point size of any nodes in the dataset plotted. If model_style=`wireframe`, thickness of lines. show_legend: whether to add a legend to the plotter. legend_kwargs: A dictionary that will be pass to the `add_legend` function. By default, it is an empty dictionary and the `add_legend` function will use the {"legend_size": None, "legend_loc": "lower right"} as its parameters. Otherwise, you can provide a dictionary that properly modify those keys according to your needs. show_outline: whether to produce an outline of the full extent for the model. outline_kwargs: A dictionary that will be pass to the `add_outline` function. By default, it is an empty dictionary and the `add_legend` function will use the {"outline_width": 5.0, "outline_color": "black", "show_labels": True, "labels_size": 16, "labels_color": "white", "labels_font": "times"} as its parameters. Otherwise, you can provide a dictionary that properly modify those keys according to your needs. text: The text to add the rendering. text_kwargs: A dictionary that will be pass to the `add_text` function. By default, it is an empty dictionary and the `add_legend` function will use the {"text_font": "times", "text_size": 18, "text_color": "black", "text_loc": "upper_left"} as its parameters. Otherwise, you can provide a dictionary that properly modify those keys according to your needs. """ bg_rgb = mpl.colors.to_rgb(background) cbg_rgb = (1 - bg_rgb[0], 1 - bg_rgb[1], 1 - bg_rgb[2]) # Add model(s) to the plotter. add_model( plotter=plotter, model=model, key=key, ambient=ambient, opacity=opacity, model_size=model_size, model_style=model_style, ) # Set the camera position of plotter. 
plotter.camera_position = cpo # Add a legend to the plotter. if show_legend: lg_kwargs = { "legend_size": None, "legend_loc": "lower right", } if not (legend_kwargs is None): lg_kwargs.update( (k, legend_kwargs[k]) for k in lg_kwargs.keys() & legend_kwargs.keys() ) legend_key = key if isinstance(key, str) else key[0] add_legend(plotter=plotter, model=model, key=legend_key, **lg_kwargs) # Add a outline to the plotter. if show_outline: ol_kwargs = { "outline_width": 5.0, "outline_color": cbg_rgb, "show_labels": True, "labels_size": 16, "labels_color": bg_rgb, "labels_font": "times", } if not (outline_kwargs is None): ol_kwargs.update( (k, outline_kwargs[k]) for k in ol_kwargs.keys() & outline_kwargs.keys() ) add_outline(plotter=plotter, model=model, **ol_kwargs) # Add text to the plotter. if not (text is None): t_kwargs = { "text_font": "times", "text_size": 18, "text_color": cbg_rgb, "text_loc": "upper_left", } if not (text_kwargs is None): t_kwargs.update( (k, text_kwargs[k]) for k in t_kwargs.keys() & text_kwargs.keys() ) add_text(plotter=plotter, text=text, **t_kwargs) def three_d_plot( model: Union[PolyData, UnstructuredGrid, MultiBlock], key: Union[str, list] = None, filename: Optional[str] = None, jupyter: Union[ bool, Literal["panel", "none", "pythreejs", "static", "ipygany"] ] = False, off_screen: bool = False, window_size: tuple = (1024, 768), background: str = "white", cpo: Union[str, list] = "iso", ambient: float = 0.2, opacity: float = 1.0, model_style: Union[Literal["points", "surface", "wireframe"], list] = "surface", model_size: float = 5.0, show_legend: bool = True, legend_kwargs: Optional[dict] = None, show_outline: bool = False, outline_kwargs: Optional[dict] = None, text: Optional[str] = None, text_kwargs: Optional[dict] = None, view_up: tuple = (0.5, 0.5, 1), framerate: int = 15, plotter_filename: Optional[str] = None, ): """ Visualize reconstructed 3D model. Args: model: A reconstructed model. key: The key under which are the labels. 
filename: Filename of output file. Writer type is inferred from the extension of the filename. * Output an image file, please enter a filename ending with `.png`, `.tif`, `.tiff`, `.bmp`, `.jpeg`, `.jpg`, `.svg`, `.eps`, `.ps`, `.pdf`, `.tex`. * Output a gif file, please enter a filename ending with `.gif`. * Output a mp4 file, please enter a filename ending with `.mp4`. jupyter: Whether to plot in jupyter notebook. * `'none'` : Do not display in the notebook. * `'pythreejs'` : Show a pythreejs widget * `'static'` : Display a static figure. * `'ipygany'` : Show an ipygany widget * `'panel'` : Show a panel widget. off_screen: Renders off-screen when True. Useful for automated screenshots. window_size: Window size in pixels. The default window_size is `[1024, 768]`. background: The background color of the window. cpo: Camera position of the active render window. Available `cpo` are: * Iterable containing position, focal_point, and view up. E.g.: `[(2.0, 5.0, 13.0), (0.0, 0.0, 0.0), (-0.7, -0.5, 0.3)]` * Iterable containing a view vector. E.g.: ` [-1.0, 2.0, -5.0]` * A string containing the plane orthogonal to the view direction. E.g.: `'xy'`, `'xz'`, `'yz'`, `'yx'`, `'zx'`, `'zy'`, `'iso'` ambient: When lighting is enabled, this is the amount of light in the range of 0 to 1 (default 0.0) that reaches the actor when not directed at the light source emitted from the viewer. opacity: Opacity of the model. If a single float value is given, it will be the global opacity of the model and uniformly applied everywhere - should be between 0 and 1. A string can also be specified to map the scalars range to a predefined opacity transfer function (options include: 'linear', 'linear_r', 'geom', 'geom_r'). model_style: Visualization style of the model. One of the following: style='surface', style='wireframe', style='points'. model_size: If model_style=`points`, point size of any nodes in the dataset plotted. If model_style=`wireframe`, thickness of lines. 
show_legend: whether to add a legend to the plotter. legend_kwargs: A dictionary that will be pass to the `add_legend` function. By default, it is an empty dictionary and the `add_legend` function will use the {"legend_size": None, "legend_loc": "lower right"} as its parameters. Otherwise, you can provide a dictionary that properly modify those keys according to your needs. show_outline: whether to produce an outline of the full extent for the model. outline_kwargs: A dictionary that will be pass to the `add_outline` function. By default, it is an empty dictionary and the `add_legend` function will use the {"outline_width": 5.0, "outline_color": "black", "show_labels": True, "labels_size": 16, "labels_color": "white", "labels_font": "times"} as its parameters. Otherwise, you can provide a dictionary that properly modify those keys according to your needs. text: The text to add the rendering. text_kwargs: A dictionary that will be pass to the `add_text` function. By default, it is an empty dictionary and the `add_legend` function will use the {"text_font": "times", "text_size": 18, "text_color": "black", "text_loc": "upper_left"} as its parameters. Otherwise, you can provide a dictionary that properly modify those keys according to your needs. view_up: The normal to the orbital plane. Only available when filename ending with `.mp4` or `.gif`. framerate: Frames per second. Only available when filename ending with `.mp4` or `.gif`. plotter_filename: The filename of the file where the plotter is saved. Writer type is inferred from the extension of the filename. * Output a gltf file, please enter a filename ending with `.gltf`. * Output a html file, please enter a filename ending with `.html`. * Output an obj file, please enter a filename ending with `.obj`. * Output a vtkjs file, please enter a filename without format. Returns: cpo: List of camera position, focal point, and view up. 
Returned only if filename is None or filename ending with `.png`, `.tif`, `.tiff`, `.bmp`, `.jpeg`, `.jpg`, `.svg`, `.eps`, `.ps`, `.pdf`, `.tex`. img: Numpy array of the last image. Returned only if filename is None or filename ending with `.png`, `.tif`, `.tiff`, `.bmp`, `.jpeg`, `.jpg`, `.svg`, `.eps`, `.ps`, `.pdf`, `.tex`. """ plotter_kws = dict( jupyter=False if jupyter is False else True, window_size=window_size, background=background, ) model_kwargs = dict( background=background, ambient=ambient, opacity=opacity, model_style=model_style, model_size=model_size, show_legend=show_legend, legend_kwargs=legend_kwargs, show_outline=show_outline, outline_kwargs=outline_kwargs, text=text, text_kwargs=text_kwargs, ) # Set jupyter. off_screen1, off_screen2, jupyter_backend = _set_jupyter( jupyter=jupyter, off_screen=off_screen ) # Create a plotting object to display pyvista/vtk model. p = create_plotter(off_screen=off_screen1, **plotter_kws) wrap_to_plotter(plotter=p, model=model, key=key, cpo=cpo, **model_kwargs) cpo = p.show(return_cpos=True, jupyter_backend="none", cpos=cpo) # Create another plotting object to save pyvista/vtk model. p = create_plotter(off_screen=off_screen2, **plotter_kws) wrap_to_plotter(plotter=p, model=model, key=key, cpo=cpo, **model_kwargs) # Save the plotting object. if plotter_filename is not None: save_plotter(plotter=p, filename=plotter_filename) # Output the plotting object. 
return output_plotter( plotter=p, filename=filename, view_up=view_up, framerate=framerate, jupyter=jupyter, ) def three_d_multi_plot( model: Union[PolyData, UnstructuredGrid, MultiBlock], key: Union[str, list] = None, filename: Optional[str] = None, jupyter: Union[ bool, Literal["panel", "none", "pythreejs", "static", "ipygany"] ] = False, off_screen: bool = False, shape: Union[str, list, tuple] = None, window_size: Optional[tuple] = None, background: str = "white", cpo: Union[str, list] = "iso", ambient: float = 0.2, opacity: float = 1.0, model_style: Union[Literal["points", "surface", "wireframe"], list] = "surface", model_size: float = 5.0, show_legend: bool = True, legend_kwargs: Optional[dict] = None, show_outline: bool = False, outline_kwargs: Optional[dict] = None, text: Union[str, list] = None, text_kwargs: Optional[dict] = None, view_up: tuple = (0.5, 0.5, 1), framerate: int = 15, plotter_filename: Optional[str] = None, ): """ Multi-view visualization of reconstructed 3D model. Args: model: A MultiBlock of reconstructed models or a reconstructed model. key: The key under which are the labels. filename: Filename of output file. Writer type is inferred from the extension of the filename. * Output an image file, please enter a filename ending with `.png`, `.tif`, `.tiff`, `.bmp`, `.jpeg`, `.jpg`, `.svg`, `.eps`, `.ps`, `.pdf`, `.tex`. * Output a gif file, please enter a filename ending with `.gif`. * Output a mp4 file, please enter a filename ending with `.mp4`. jupyter: Whether to plot in jupyter notebook. * `'none'` : Do not display in the notebook. * `'pythreejs'` : Show a pythreejs widget * `'static'` : Display a static figure. * `'ipygany'` : Show an ipygany widget * `'panel'` : Show a panel widget. off_screen: Renders off-screen when True. Useful for automated screenshots. shape: Number of sub-render windows inside the main window. Specify two across with shape=(2, 1) and a two by two grid with shape=(2, 2). By default, there is only one render window. 
Can also accept a string descriptor as shape. E.g.: shape="3|1" means 3 plots on the left and 1 on the right, shape="4/2" means 4 plots on top and 2 at the bottom. window_size: Window size in pixels. The default window_size is `[1024, 768]`. background: The background color of the window. cpo: Camera position of the active render window. Available `cpo` are: * Iterable containing position, focal_point, and view up. E.g.: `[(2.0, 5.0, 13.0), (0.0, 0.0, 0.0), (-0.7, -0.5, 0.3)]` * Iterable containing a view vector. E.g.: ` [-1.0, 2.0, -5.0]` * A string containing the plane orthogonal to the view direction. E.g.: `'xy'`, `'xz'`, `'yz'`, `'yx'`, `'zx'`, `'zy'`, `'iso'` ambient: When lighting is enabled, this is the amount of light in the range of 0 to 1 (default 0.0) that reaches the actor when not directed at the light source emitted from the viewer. opacity: Opacity of the model. If a single float value is given, it will be the global opacity of the model and uniformly applied everywhere - should be between 0 and 1. A string can also be specified to map the scalars range to a predefined opacity transfer function (options include: 'linear', 'linear_r', 'geom', 'geom_r'). model_style: Visualization style of the model. One of the following: style='surface', style='wireframe', style='points'. model_size: If model_style=`points`, point size of any nodes in the dataset plotted. If model_style=`wireframe`, thickness of lines. show_legend: whether to add a legend to the plotter. legend_kwargs: A dictionary that will be pass to the `add_legend` function. By default, it is an empty dictionary and the `add_legend` function will use the {"legend_size": None, "legend_loc": "lower right"} as its parameters. Otherwise, you can provide a dictionary that properly modify those keys according to your needs. show_outline: whether to produce an outline of the full extent for the model. outline_kwargs: A dictionary that will be pass to the `add_outline` function. 
By default, it is an empty dictionary and the `add_legend` function will use the {"outline_width": 5.0, "outline_color": "black", "show_labels": True, "labels_size": 16, "labels_color": "white", "labels_font": "times"} as its parameters. Otherwise, you can provide a dictionary that properly modify those keys according to your needs. text: The text to add the rendering. text_kwargs: A dictionary that will be pass to the `add_text` function. By default, it is an empty dictionary and the `add_legend` function will use the {"text_font": "times", "text_size": 18, "text_color": "black", "text_loc": "upper_left"} as its parameters. Otherwise, you can provide a dictionary that properly modify those keys according to your needs. view_up: The normal to the orbital plane. Only available when filename ending with `.mp4` or `.gif`. framerate: Frames per second. Only available when filename ending with `.mp4` or `.gif`. plotter_filename: The filename of the file where the plotter is saved. Writer type is inferred from the extension of the filename. * Output a gltf file, please enter a filename ending with `.gltf`. * Output a html file, please enter a filename ending with `.html`. * Output an obj file, please enter a filename ending with `.obj`. * Output a vtkjs file, please enter a filename without format. 
""" models = model if isinstance(model, MultiBlock) else [model] n_model = len(models) keys = key if isinstance(key, list) else [key] n_key = len(keys) cpos = cpo if isinstance(cpo, list) else [cpo] n_cpo = len(cpos) texts = text if isinstance(text, list) else [text] n_text = len(texts) n_window = max(n_model, n_key, n_cpo, n_text) models = ( collect_model([models[0].copy() for i in range(n_window)]) if len(models) == 1 else models ) keys = keys * n_window if len(keys) == 1 else keys cpos = cpos * n_window if len(cpos) == 1 else cpos texts = texts * n_window if len(texts) == 1 else texts shape = ( (math.ceil(n_window / 4), n_window if n_window < 4 else 4) if shape is None else shape ) if isinstance(shape, (tuple, list)): n_subplots = shape[0] * shape[1] subplots = [] for i in range(n_subplots): col = math.floor(i / shape[1]) ind = i - col * shape[1] subplots.append([col, ind]) else: shape_x, shape_y = re.split("[/|]", shape) n_subplots = int(shape_x) * int(shape_y) subplots = [i for i in range(n_subplots)] if window_size is None: win_x, win_y = ( (shape[1], shape[0]) if isinstance(shape, (tuple, list)) else (1, 1) ) window_size = (512 * win_x, 512 * win_y) plotter_kws = dict( jupyter=False if jupyter is False else True, window_size=window_size, background=background, shape=shape, ) model_kwargs = dict( background=background, ambient=ambient, opacity=opacity, model_style=model_style, model_size=model_size, show_legend=show_legend, legend_kwargs=legend_kwargs, show_outline=show_outline, outline_kwargs=outline_kwargs, text_kwargs=text_kwargs, ) # Set jupyter. off_screen1, off_screen2, jupyter_backend = _set_jupyter( jupyter=jupyter, off_screen=off_screen ) # Create a plotting object to display pyvista/vtk model. 
p = create_plotter(off_screen=off_screen1, **plotter_kws) for model, sub_key, sub_cpo, sub_text, subplot_index in zip( models, keys, cpos, texts, subplots ): p.subplot(subplot_index[0], subplot_index[1]) wrap_to_plotter( plotter=p, model=model, key=sub_key, cpo=sub_cpo, text=sub_text, **model_kwargs ) p.add_axes() # Save the plotting object. if plotter_filename is not None: save_plotter(plotter=p, filename=plotter_filename) # Output the plotting object. return output_plotter( plotter=p, filename=filename, view_up=view_up, framerate=framerate, jupyter=jupyter, ) def three_d_animate( models: Union[List[PolyData or UnstructuredGrid], MultiBlock], key: Optional[str] = None, filename: str = "animate.mp4", jupyter: Union[ bool, Literal["panel", "none", "pythreejs", "static", "ipygany"] ] = False, off_screen: bool = False, window_size: tuple = (1024, 768), background: str = "white", cpo: Union[str, list] = "iso", ambient: float = 0.2, opacity: float = 1.0, model_style: Union[Literal["points", "surface", "wireframe"], list] = "surface", model_size: float = 5.0, show_legend: bool = True, legend_kwargs: Optional[dict] = None, show_outline: bool = False, outline_kwargs: Optional[dict] = None, text: Optional[str] = None, text_kwargs: Optional[dict] = None, framerate: int = 15, plotter_filename: Optional[str] = None, ): """ Animated visualization of 3D reconstruction model. Args: models: A List of reconstructed models or a MultiBlock. key: The key under which are the labels. filename: Filename of output file. Writer type is inferred from the extension of the filename. * Output a gif file, please enter a filename ending with `.gif`. * Output a mp4 file, please enter a filename ending with `.mp4`. jupyter: Whether to plot in jupyter notebook. * `'none'` : Do not display in the notebook. * `'pythreejs'` : Show a pythreejs widget * `'static'` : Display a static figure. * `'ipygany'` : Show an ipygany widget * `'panel'` : Show a panel widget. 
off_screen: Renders off-screen when True. Useful for automated screenshots. window_size: Window size in pixels. The default window_size is `[1024, 768]`. background: The background color of the window. cpo: Camera position of the active render window. Available `cpo` are: * Iterable containing position, focal_point, and view up. E.g.: `[(2.0, 5.0, 13.0), (0.0, 0.0, 0.0), (-0.7, -0.5, 0.3)]` * Iterable containing a view vector. E.g.: ` [-1.0, 2.0, -5.0]` * A string containing the plane orthogonal to the view direction. E.g.: `'xy'`, `'xz'`, `'yz'`, `'yx'`, `'zx'`, `'zy'`, `'iso'` ambient: When lighting is enabled, this is the amount of light in the range of 0 to 1 (default 0.0) that reaches the actor when not directed at the light source emitted from the viewer. opacity: Opacity of the model. If a single float value is given, it will be the global opacity of the model and uniformly applied everywhere - should be between 0 and 1. A string can also be specified to map the scalars range to a predefined opacity transfer function (options include: 'linear', 'linear_r', 'geom', 'geom_r'). model_style: Visualization style of the model. One of the following: style='surface', style='wireframe', style='points'. model_size: If model_style=`points`, point size of any nodes in the dataset plotted. If model_style=`wireframe`, thickness of lines. show_legend: whether to add a legend to the plotter. legend_kwargs: A dictionary that will be pass to the `add_legend` function. By default, it is an empty dictionary and the `add_legend` function will use the {"legend_size": None, "legend_loc": "lower right"} as its parameters. Otherwise, you can provide a dictionary that properly modify those keys according to your needs. show_outline: whether to produce an outline of the full extent for the model. outline_kwargs: A dictionary that will be pass to the `add_outline` function. 
By default, it is an empty dictionary and the `add_legend` function will use the {"outline_width": 5.0, "outline_color": "black", "show_labels": True, "labels_size": 16, "labels_color": "white", "labels_font": "times"} as its parameters. Otherwise, you can provide a dictionary that properly modify those keys according to your needs. text: The text to add the rendering. text_kwargs: A dictionary that will be pass to the `add_text` function. By default, it is an empty dictionary and the `add_legend` function will use the {"text_font": "times", "text_size": 18, "text_color": "black", "text_loc": "upper_left"} as its parameters. Otherwise, you can provide a dictionary that properly modify those keys according to your needs. framerate: Frames per second. Only available when filename ending with `.mp4` or `.gif`. plotter_filename: The filename of the file where the plotter is saved. Writer type is inferred from the extension of the filename. * Output a gltf file, please enter a filename ending with `.gltf`. * Output a html file, please enter a filename ending with `.html`. * Output an obj file, please enter a filename ending with `.obj`. * Output a vtkjs file, please enter a filename without format. """ plotter_kws = dict( jupyter=False if jupyter is False else True, window_size=window_size, background=background, ) model_kwargs = dict( background=background, ambient=ambient, opacity=opacity, model_style=model_style, model_size=model_size, show_legend=show_legend, legend_kwargs=legend_kwargs, show_outline=show_outline, outline_kwargs=outline_kwargs, text=text, text_kwargs=text_kwargs, ) # Set jupyter. off_screen1, off_screen2, jupyter_backend = _set_jupyter( jupyter=jupyter, off_screen=off_screen ) # Check models. blocks = collect_model(models) if isinstance(models, list) else models blocks_name = blocks.keys() # Create a plotting object to display the end model of blocks. 
end_block = blocks[blocks_name[-1]] p = create_plotter(off_screen=off_screen1, **plotter_kws) wrap_to_plotter(plotter=p, model=end_block, key=key, cpo=cpo, **model_kwargs) cpo = p.show(return_cpos=True, cpos=cpo, jupyter_backend="none") # Create another plotting object to save pyvista/vtk model. start_block = blocks[blocks_name[0]] p = create_plotter(off_screen=off_screen2, **plotter_kws) wrap_to_plotter(plotter=p, model=start_block, key=key, cpo=cpo, **model_kwargs) filename_format = filename.split(".")[-1] if filename_format == "gif": p.open_gif(filename) elif filename_format == "mp4": p.open_movie(filename, framerate=framerate, quality=5) for block_name in blocks_name[1:]: block = blocks[block_name] start_block.overwrite(block) wrap_to_plotter(plotter=p, model=start_block, key=key, cpo=cpo, **model_kwargs) p.write_frame() # Save the plotting object. if plotter_filename is not None: save_plotter(plotter=p, filename=plotter_filename)
48.760064
128
0.613871
4,014
30,280
4.519432
0.086198
0.010474
0.003969
0.00441
0.838267
0.82388
0.81291
0.808721
0.800617
0.795326
0
0.01415
0.290489
30,280
620
129
48.83871
0.830246
0.595674
0
0.52648
0
0
0.038151
0
0
0
0
0
0
1
0.012461
false
0
0.034268
0
0.05296
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
bdae698a8ecc2244aef9ffc027ac9ae358ea67ad
9,231
py
Python
mmdeploy/codebase/mmdet/core/bbox/delta_xywh_bbox_coder.py
grimoire/mmdeploy
e84bc30f4a036dd19cb3af854203922a91098e84
[ "Apache-2.0" ]
746
2021-12-27T10:50:28.000Z
2022-03-31T13:34:14.000Z
mmdeploy/codebase/mmdet/core/bbox/delta_xywh_bbox_coder.py
grimoire/mmdeploy
e84bc30f4a036dd19cb3af854203922a91098e84
[ "Apache-2.0" ]
253
2021-12-28T05:59:13.000Z
2022-03-31T18:22:25.000Z
mmdeploy/codebase/mmdet/core/bbox/delta_xywh_bbox_coder.py
grimoire/mmdeploy
e84bc30f4a036dd19cb3af854203922a91098e84
[ "Apache-2.0" ]
147
2021-12-27T10:50:33.000Z
2022-03-30T10:44:20.000Z
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from mmdeploy.core import FUNCTION_REWRITER @FUNCTION_REWRITER.register_rewriter( func_name='mmdet.core.bbox.coder.delta_xywh_bbox_coder.' 'DeltaXYWHBBoxCoder.decode', backend='default') def deltaxywhbboxcoder__decode(ctx, self, bboxes, pred_bboxes, max_shape=None, wh_ratio_clip=16 / 1000): """Rewrite `decode` of `DeltaXYWHBBoxCoder` for default backend. Rewrite this func to call `delta2bbox` directly. Args: bboxes (torch.Tensor): Basic boxes. Shape (B, N, 4) or (N, 4) pred_bboxes (Tensor): Encoded offsets with respect to each roi. Has shape (B, N, num_classes * 4) or (B, N, 4) or (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H when rois is a grid of anchors.Offset encoding follows [1]_. max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. wh_ratio_clip (float, optional): The allowed ratio between width and height. Returns: torch.Tensor: Decoded boxes. """ assert pred_bboxes.size(0) == bboxes.size(0) if pred_bboxes.ndim == 3: assert pred_bboxes.size(1) == bboxes.size(1) from mmdet.core.bbox.coder.delta_xywh_bbox_coder import delta2bbox decoded_bboxes = delta2bbox(bboxes, pred_bboxes, self.means, self.stds, max_shape, wh_ratio_clip, self.clip_border, self.add_ctr_clamp, self.ctr_clamp) return decoded_bboxes @FUNCTION_REWRITER.register_rewriter( func_name='mmdet.core.bbox.coder.delta_xywh_bbox_coder.delta2bbox', # noqa backend='default') def delta2bbox(ctx, rois, deltas, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.), max_shape=None, wh_ratio_clip=16 / 1000, clip_border=True, add_ctr_clamp=False, ctr_clamp=32): """Rewrite `delta2bbox` for default backend. 
Since the need of clip op with dynamic min and max, this function uses clip_bboxes function to support dynamic shape. Args: ctx (ContextCaller): The context with additional information. rois (Tensor): Boxes to be transformed. Has shape (N, 4). deltas (Tensor): Encoded offsets relative to each roi. Has shape (N, num_classes * 4) or (N, 4). Note N = num_base_anchors * W * H, when rois is a grid of anchors. Offset encoding follows [1]_. means (Sequence[float]): Denormalizing means for delta coordinates. Default (0., 0., 0., 0.). stds (Sequence[float]): Denormalizing standard deviation for delta coordinates. Default (1., 1., 1., 1.). max_shape (tuple[int, int]): Maximum bounds for boxes, specifies (H, W). Default None. wh_ratio_clip (float): Maximum aspect ratio for boxes. Default 16 / 1000. clip_border (bool, optional): Whether clip the objects outside the border of the image. Default True. add_ctr_clamp (bool): Whether to add center clamp. When set to True, the center of the prediction bounding box will be clamped to avoid being too far away from the center of the anchor. Only used by YOLOF. Default False. ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. Default 32. Return: bboxes (Tensor): Boxes with shape (N, num_classes * 4) or (N, 4), where 4 represent tl_x, tl_y, br_x, br_y. 
""" means = deltas.new_tensor(means).view(1, -1) stds = deltas.new_tensor(stds).view(1, -1) delta_shape = deltas.shape reshaped_deltas = deltas.view(delta_shape[:-1] + (-1, 4)) denorm_deltas = reshaped_deltas * stds + means dxy = denorm_deltas[..., :2] dwh = denorm_deltas[..., 2:] xy1 = rois[..., None, :2] xy2 = rois[..., None, 2:] pxy = (xy1 + xy2) * 0.5 pwh = xy2 - xy1 dxy_wh = pwh * dxy max_ratio = np.abs(np.log(wh_ratio_clip)) if add_ctr_clamp: dxy_wh = torch.clamp(dxy_wh, max=ctr_clamp, min=-ctr_clamp) dwh = torch.clamp(dwh, max=max_ratio) else: dwh = dwh.clamp(min=-max_ratio, max=max_ratio) # Use exp(network energy) to enlarge/shrink each roi half_gwh = pwh * dwh.exp() * 0.5 # Use network energy to shift the center of each roi gxy = pxy + dxy_wh # Convert center-xy/width/height to top-left, bottom-right xy1 = gxy - half_gwh xy2 = gxy + half_gwh x1 = xy1[..., 0] y1 = xy1[..., 1] x2 = xy2[..., 0] y2 = xy2[..., 1] if clip_border and max_shape is not None: from mmdeploy.codebase.mmdet.deploy import clip_bboxes x1, y1, x2, y2 = clip_bboxes(x1, y1, x2, y2, max_shape) bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size()) return bboxes @FUNCTION_REWRITER.register_rewriter( func_name='mmdet.core.bbox.coder.delta_xywh_bbox_coder.delta2bbox', # noqa backend='ncnn') def delta2bbox__ncnn(ctx, rois, deltas, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.), max_shape=None, wh_ratio_clip=16 / 1000, clip_border=True, add_ctr_clamp=False, ctr_clamp=32): """Rewrite `delta2bbox` for ncnn backend. Batch dimension is not supported by ncnn, but supported by pytorch. ncnn regards the lowest two dimensions as continuous address with byte alignment, so the lowest two dimensions are not absolutely independent. Reshape operator with -1 arguments should operates ncnn::Mat with dimension >= 3. Args: ctx (ContextCaller): The context with additional information. rois (Tensor): Boxes to be transformed. 
Has shape (N, 4) or (B, N, 4) deltas (Tensor): Encoded offsets with respect to each roi. Has shape (B, N, num_classes * 4) or (B, N, 4) or (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H when rois is a grid of anchors.Offset encoding follows [1]_. means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If rois shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. wh_ratio_clip (float): Maximum aspect ratio for boxes. clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. add_ctr_clamp (bool): Whether to add center clamp, when added, the predicted box is clamped is its center is too far away from the original anchor's center. Only used by YOLOF. Default False. ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. Default 32. Return: bboxes (Tensor): Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or (N, num_classes * 4) or (N, 4), where 4 represent tl_x, tl_y, br_x, br_y. 
""" means = deltas.new_tensor(means).view(1, 1, 1, -1).data stds = deltas.new_tensor(stds).view(1, 1, 1, -1).data delta_shape = deltas.shape reshaped_deltas = deltas.view(delta_shape[:-1] + (-1, 4)) denorm_deltas = reshaped_deltas * stds + means dxy = denorm_deltas[..., :2] dwh = denorm_deltas[..., 2:] xy1 = rois[..., None, :2] xy2 = rois[..., None, 2:] pxy = (xy1 + xy2) * 0.5 pwh = xy2 - xy1 dxy_wh = pwh * dxy max_ratio = np.abs(np.log(wh_ratio_clip)) if add_ctr_clamp: dxy_wh = torch.clamp(dxy_wh, max=ctr_clamp, min=-ctr_clamp) dwh = torch.clamp(dwh, max=max_ratio) else: dwh = dwh.clamp(min=-max_ratio, max=max_ratio) # Use exp(network energy) to enlarge/shrink each roi half_gwh = pwh * dwh.exp() * 0.5 # Use network energy to shift the center of each roi gxy = pxy + dxy_wh # Convert center-xy/width/height to top-left, bottom-right xy1 = gxy - half_gwh xy2 = gxy + half_gwh x1 = xy1[..., 0] y1 = xy1[..., 1] x2 = xy2[..., 0] y2 = xy2[..., 1] if clip_border and max_shape is not None: from mmdeploy.codebase.mmdet.deploy import clip_bboxes x1, y1, x2, y2 = clip_bboxes(x1, y1, x2, y2, max_shape) bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size()) return bboxes
40.134783
79
0.600802
1,298
9,231
4.145609
0.181818
0.007062
0.005575
0.017841
0.764914
0.753949
0.74317
0.737224
0.713436
0.676268
0
0.030727
0.294876
9,231
229
80
40.310044
0.795975
0.501137
0
0.770642
0
0
0.045753
0.04153
0
0
0
0
0.018349
1
0.027523
false
0
0.055046
0
0.110092
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
bdd5d3b8670509a7a2812c4c283540d3e5d4d96c
1,422
py
Python
safe_relay_service/relay/migrations/0020_auto_20190514_1211.py
vaporyorg/safe-relay-service
1289c3d31639f83aa2c0110ff3b84a69212b81f5
[ "MIT" ]
5
2021-06-07T14:07:32.000Z
2022-03-26T19:42:45.000Z
safe_relay_service/relay/migrations/0020_auto_20190514_1211.py
vaporyorg/safe-relay-service
1289c3d31639f83aa2c0110ff3b84a69212b81f5
[ "MIT" ]
24
2019-12-11T14:43:38.000Z
2022-03-01T12:37:24.000Z
safe_relay_service/relay/migrations/0020_auto_20190514_1211.py
vaporyorg/safe-relay-service
1289c3d31639f83aa2c0110ff3b84a69212b81f5
[ "MIT" ]
4
2020-05-25T03:30:53.000Z
2021-11-22T06:51:12.000Z
# Generated by Django 2.2.1 on 2019-05-14 12:11 from django.db import migrations import gnosis.eth.django.models class Migration(migrations.Migration): dependencies = [ ('relay', '0019_ethereumevent'), ] operations = [ migrations.AlterField( model_name='ethereumtx', name='_from', field=gnosis.eth.django.models.EthereumAddressField(db_index=True, null=True), ), migrations.AlterField( model_name='ethereumtx', name='to', field=gnosis.eth.django.models.EthereumAddressField(db_index=True, null=True), ), migrations.AlterField( model_name='internaltx', name='_from', field=gnosis.eth.django.models.EthereumAddressField(db_index=True), ), migrations.AlterField( model_name='internaltx', name='contract_address', field=gnosis.eth.django.models.EthereumAddressField(db_index=True, null=True), ), migrations.AlterField( model_name='internaltx', name='to', field=gnosis.eth.django.models.EthereumAddressField(db_index=True, null=True), ), migrations.AlterField( model_name='safemultisigtx', name='to', field=gnosis.eth.django.models.EthereumAddressField(db_index=True, null=True), ), ]
30.913043
90
0.601266
138
1,422
6.07971
0.282609
0.075089
0.125149
0.175209
0.76758
0.76758
0.709178
0.657926
0.657926
0.657926
0
0.018646
0.283404
1,422
45
91
31.6
0.804711
0.031646
0
0.710526
1
0
0.086545
0
0
0
0
0
0
1
0
false
0
0.052632
0
0.131579
0
0
0
0
null
0
0
1
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
da03338ce162282390a173ce080ca31e2a0c0eae
201
py
Python
apps/about/views.py
AleksandrTka4uk/foodgram-project
b3f1762d1ba686d42bc7a2d6c2e4b31150fd55ea
[ "MIT" ]
null
null
null
apps/about/views.py
AleksandrTka4uk/foodgram-project
b3f1762d1ba686d42bc7a2d6c2e4b31150fd55ea
[ "MIT" ]
null
null
null
apps/about/views.py
AleksandrTka4uk/foodgram-project
b3f1762d1ba686d42bc7a2d6c2e4b31150fd55ea
[ "MIT" ]
null
null
null
from django.views.generic.base import TemplateView class AboutView(TemplateView): template_name = 'about/AboutPage.html' class TechView(TemplateView): template_name = 'about/TechPage.html'
20.1
50
0.776119
23
201
6.695652
0.695652
0.25974
0.311688
0.376623
0
0
0
0
0
0
0
0
0.129353
201
9
51
22.333333
0.88
0
0
0
0
0
0.19403
0
0
0
0
0
0
1
0
false
0
0.2
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
6
da175fe6bb86a540616355984973b20ad753d6e7
29
py
Python
src/featurizer/__init__.py
geffy/retailhero-recommender-solution
9e94f313146acd87bd09fe8ab63e4f58f22b9a3e
[ "MIT" ]
11
2020-03-01T22:25:32.000Z
2021-10-19T19:59:25.000Z
src/featurizer/__init__.py
geffy/retailhero-recommender-solution
9e94f313146acd87bd09fe8ab63e4f58f22b9a3e
[ "MIT" ]
null
null
null
src/featurizer/__init__.py
geffy/retailhero-recommender-solution
9e94f313146acd87bd09fe8ab63e4f58f22b9a3e
[ "MIT" ]
1
2021-02-19T19:01:58.000Z
2021-02-19T19:01:58.000Z
from typing import Any, Dict
14.5
28
0.793103
5
29
4.6
1
0
0
0
0
0
0
0
0
0
0
0
0.172414
29
1
29
29
0.958333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
16fe1d44dbc1025223f0b213224e96e5033b7fb6
22,702
py
Python
owl2vec_star/cli.py
KRR-Oxford/OWL2Vec-Star
9fa80b98b5014dd5c775d8e97073972c724123d6
[ "Apache-2.0" ]
44
2020-10-10T05:51:25.000Z
2022-03-25T14:58:10.000Z
owl2vec_star/cli.py
KRR-Oxford/OWL2Vec-Star
9fa80b98b5014dd5c775d8e97073972c724123d6
[ "Apache-2.0" ]
5
2021-03-08T13:00:31.000Z
2022-02-11T19:01:56.000Z
owl2vec_star/cli.py
KRR-Oxford/OWL2Vec-Star
9fa80b98b5014dd5c775d8e97073972c724123d6
[ "Apache-2.0" ]
10
2020-10-28T12:52:00.000Z
2022-03-01T13:10:48.000Z
"""Console script for owl2vec_star.""" import configparser import multiprocessing import os import random import sys import time import click import gensim from owl2vec_star.lib.RDF2Vec_Embed import get_rdf2vec_walks from owl2vec_star.lib.Label import pre_process_words, URI_parse from owl2vec_star.lib.Onto_Projection import Reasoner, OntologyProjection import nltk nltk.download('punkt') @click.group() def main(): pass @main.command() @click.option("--ontology_file", type=click.Path(exists=True), default=None, help="The input ontology for embedding") @click.option("--embedding_dir", type=click.Path(exists=True), default=None, help="The output embedding directory") @click.option("--config_file", type=click.Path(exists=True), default='default.cfg', help="Configuration file") @click.option("--URI_Doc", help="Using URI document", is_flag=True) @click.option("--Lit_Doc", help="Using literal document", is_flag=True) @click.option("--Mix_Doc", help="Using mixture document", is_flag=True) def standalone(ontology_file, embedding_dir, config_file, uri_doc, lit_doc, mix_doc): config = configparser.ConfigParser() config.read(click.format_filename(config_file)) if ontology_file: config['BASIC']['ontology_file'] = click.format_filename(ontology_file) if embedding_dir: config['BASIC']['embedding_dir'] = click.format_filename(embedding_dir) if uri_doc: config['DOCUMENT']['URI_Doc'] = 'yes' if lit_doc: config['DOCUMENT']['Lit_Doc'] = 'yes' if mix_doc: config['DOCUMENT']['Mix_Doc'] = 'yes' if 'cache_dir' not in config['DOCUMENT']: config['DOCUMENT']['cache_dir'] = './cache' if not os.path.exists(config['DOCUMENT']['cache_dir']): os.mkdir(config['DOCUMENT']['cache_dir']) if 'embedding_dir' not in config['BASIC']: config['BASIC']['embedding_dir'] = os.path.join(config['DOCUMENT']['cache_dir'], 'output') start_time = time.time() if ('ontology_projection' in config['DOCUMENT'] and config['DOCUMENT']['ontology_projection'] == 'yes') or \ 'pre_entity_file' not in config['DOCUMENT'] or 
'pre_axiom_file' not in config['DOCUMENT'] or \ 'pre_annotation_file' not in config['DOCUMENT']: print('\n Access the ontology ...') projection = OntologyProjection(config['BASIC']['ontology_file'], reasoner=Reasoner.STRUCTURAL, only_taxonomy=False, bidirectional_taxonomy=True, include_literals=True, avoid_properties=set(), additional_preferred_labels_annotations=set(), additional_synonyms_annotations=set(), memory_reasoner='13351') else: projection = None # Ontology projection if 'ontology_projection' in config['DOCUMENT'] and config['DOCUMENT']['ontology_projection'] == 'yes': print('\nCalculate the ontology projection ...') projection.extractProjection() onto_projection_file = os.path.join(config['DOCUMENT']['cache_dir'], 'projection.ttl') projection.saveProjectionGraph(onto_projection_file) ontology_file = onto_projection_file else: ontology_file = config['BASIC']['ontology_file'] # Extract and save seed entities (classes and individuals) # Or read entities specified by the user if 'pre_entity_file' in config['DOCUMENT']: entities = [line.strip() for line in open(config['DOCUMENT']['pre_entity_file']).readlines()] else: print('\nExtract classes and individuals ...') projection.extractEntityURIs() classes = projection.getClassURIs() individuals = projection.getIndividualURIs() entities = classes.union(individuals) with open(os.path.join(config['DOCUMENT']['cache_dir'], 'entities.txt'), 'w') as f: for e in entities: f.write('%s\n' % e) # Extract axioms in Manchester Syntax if it is not pre_axiom_file is not set if 'pre_axiom_file' not in config['DOCUMENT']: print('\nExtract axioms ...') projection.createManchesterSyntaxAxioms() with open(os.path.join(config['DOCUMENT']['cache_dir'], 'axioms.txt'), 'w') as f: for ax in projection.axioms_manchester: f.write('%s\n' % ax) # If pre_annotation_file is set, directly read annotations # else, read annotations including rdfs:label and other literals from the ontology # Extract annotations: 1) English label of each entity, 
by rdfs:label or skos:preferredLabel # 2) None label annotations as sentences of the literal document uri_label, annotations = dict(), list() if 'pre_annotation_file' in config['DOCUMENT']: with open(config['DOCUMENT']['pre_annotation_file']) as f: for line in f.readlines(): tmp = line.strip().split() if tmp[1] == 'http://www.w3.org/2000/01/rdf-schema#label': uri_label[tmp[0]] = pre_process_words(tmp[2:]) else: annotations.append([tmp[0]] + tmp[2:]) else: print('\nExtract annotations ...') projection.indexAnnotations() for e in entities: if e in projection.entityToPreferredLabels and len(projection.entityToPreferredLabels[e]) > 0: label = list(projection.entityToPreferredLabels[e])[0] uri_label[e] = pre_process_words(words=label.split()) for e in entities: if e in projection.entityToAllLexicalLabels: for v in projection.entityToAllLexicalLabels[e]: if (v is not None) and \ (not (e in projection.entityToPreferredLabels and v in projection.entityToPreferredLabels[e])): annotation = [e] + v.split() annotations.append(annotation) with open(os.path.join(config['DOCUMENT']['cache_dir'], 'annotations.txt'), 'w') as f: for e in projection.entityToPreferredLabels: for v in projection.entityToPreferredLabels[e]: f.write('%s preferred_label %s\n' % (e, v)) for a in annotations: f.write('%s\n' % ' '.join(a)) # read URI document # two parts: walks, axioms (if the axiom file exists) walk_sentences, axiom_sentences, URI_Doc = list(), list(), list() if 'URI_Doc' in config['DOCUMENT'] and config['DOCUMENT']['URI_Doc'] == 'yes': print('\nGenerate URI document ...') walks_ = get_rdf2vec_walks(onto_file=ontology_file, walker_type=config['DOCUMENT']['walker'], walk_depth=int(config['DOCUMENT']['walk_depth']), classes=entities) print('Extracted %d walks for %d seed entities' % (len(walks_), len(entities))) walk_sentences += [list(map(str, x)) for x in walks_] axiom_file = os.path.join(config['DOCUMENT']['cache_dir'], 'axioms.txt') if os.path.exists(axiom_file): for line in 
open(axiom_file).readlines(): axiom_sentence = [item for item in line.strip().split()] axiom_sentences.append(axiom_sentence) print('Extracted %d axiom sentences' % len(axiom_sentences)) URI_Doc = walk_sentences + axiom_sentences # Some entities have English labels # Keep the name of built-in properties (those starting with http://www.w3.org) # Some entities have no labels, then use the words in their URI name def label_item(item): if item in uri_label: return uri_label[item] elif item.startswith('http://www.w3.org'): return [item.split('#')[1].lower()] elif item.startswith('http://'): return URI_parse(uri=item) else: return [item.lower()] # read literal document # two parts: literals in the annotations (subject's label + literal words) # replacing walk/axiom sentences by words in their labels Lit_Doc = list() if 'Lit_Doc' in config['DOCUMENT'] and config['DOCUMENT']['Lit_Doc'] == 'yes': print('\nGenerate literal document ...') for annotation in annotations: processed_words = pre_process_words(annotation[1:]) if len(processed_words) > 0: Lit_Doc.append(label_item(item=annotation[0]) + processed_words) print('Extracted %d annotation sentences' % len(Lit_Doc)) for sentence in walk_sentences: lit_sentence = list() for item in sentence: lit_sentence += label_item(item=item) Lit_Doc.append(lit_sentence) for sentence in axiom_sentences: lit_sentence = list() for item in sentence: lit_sentence += label_item(item=item) Lit_Doc.append(lit_sentence) # read mixture document # for each axiom/walk sentence # - all): for each entity, keep its entity URI, replace the others by label words # - random): randomly select one entity, keep its entity URI, replace the others by label words Mix_Doc = list() if 'Mix_Doc' in config['DOCUMENT'] and config['DOCUMENT']['Mix_Doc'] == 'yes': print('\nGenerate mixture document ...') for sentence in walk_sentences + axiom_sentences: if config['DOCUMENT']['Mix_Type'] == 'all': for index in range(len(sentence)): mix_sentence = list() for i, item in 
enumerate(sentence): mix_sentence += [item] if i == index else label_item(item=item) Mix_Doc.append(mix_sentence) elif config['DOCUMENT']['Mix_Type'] == 'random': random_index = random.randint(0, len(sentence) - 1) mix_sentence = list() for i, item in enumerate(sentence): mix_sentence += [item] if i == random_index else label_item(item=item) Mix_Doc.append(mix_sentence) print('URI_Doc: %d, Lit_Doc: %d, Mix_Doc: %d' % (len(URI_Doc), len(Lit_Doc), len(Mix_Doc))) all_doc = URI_Doc + Lit_Doc + Mix_Doc print('Time for document construction: %s seconds' % (time.time() - start_time)) random.shuffle(all_doc) #Save all_doc (optional) #with open(os.path.join(config['DOCUMENT']['cache_dir'], 'document_sentences.txt'), 'w') as f: # for sentence in all_doc: # for w in sentence: # f.write('%s ' % w) # f.write('\n') # f.close() # learn the language model (train a new model or fine tune the pre-trained model) start_time = time.time() if 'pre_train_model' not in config['MODEL'] or not os.path.exists(config['MODEL']['pre_train_model']): print('\nTrain the language model ...') model_ = gensim.models.Word2Vec(all_doc, size=int(config['MODEL']['embed_size']), window=int(config['MODEL']['window']), workers=multiprocessing.cpu_count(), sg=1, iter=int(config['MODEL']['iteration']), negative=int(config['MODEL']['negative']), min_count=int(config['MODEL']['min_count']), seed=int(config['MODEL']['seed'])) else: print('\nFine-tune the pre-trained language model ...') model_ = gensim.models.Word2Vec.load(config['MODEL']['pre_train_model']) if len(all_doc) > 0: model_.min_count = int(config['MODEL']['min_count']) model_.build_vocab(all_doc, update=True) model_.train(all_doc, total_examples=model_.corpus_count, epochs=int(config['MODEL']['epoch'])) #Gensim format model_.save(config['BASIC']['embedding_dir']+"ontology.embeddings") #Txt format model_.wv.save_word2vec_format(config['BASIC']['embedding_dir']+"ontology.embeddings.txt", binary=False) print('Time for learning the language model: %s 
seconds' % (time.time() - start_time)) print('Model saved. Done!') return 0 @main.command() @click.option("--ontology_dir", type=click.Path(exists=True), default=None, help="The directory of input ontologies for embedding") @click.option("--embedding_dir", type=click.Path(exists=True), default=None, help="The output embedding directory") @click.option("--config_file", type=click.Path(exists=True), default='default_multi.cfg', help="Configuration file") @click.option("--URI_Doc", help="Using URI document", is_flag=True) @click.option("--Lit_Doc", help="Using literal document", is_flag=True) @click.option("--Mix_Doc", help="Using mixture document", is_flag=True) def standalone_multi(ontology_dir, embedding_dir, config_file, uri_doc, lit_doc, mix_doc): # read and combine configurations # overwrite the parameters in the configuration file by the command parameters config = configparser.ConfigParser() config.read(click.format_filename(config_file)) if ontology_dir: config['BASIC']['ontology_dir'] = click.format_filename(ontology_dir) if embedding_dir: config['BASIC']['embedding_dir'] = click.format_filename(embedding_dir) if uri_doc: config['DOCUMENT']['URI_Doc'] = 'yes' if lit_doc: config['DOCUMENT']['Lit_Doc'] = 'yes' if mix_doc: config['DOCUMENT']['Mix_Doc'] = 'yes' if 'cache_dir' not in config['DOCUMENT']: config['DOCUMENT']['cache_dir'] = './cache' if not os.path.exists(config['DOCUMENT']['cache_dir']): os.mkdir(config['DOCUMENT']['cache_dir']) if 'embedding_dir' not in config['BASIC']: config['BASIC']['embedding_dir'] = os.path.join(config['DOCUMENT']['cache_dir'], 'output') start_time = time.time() walk_sentences, axiom_sentences = list(), list() uri_label, annotations = dict(), list() for file_name in os.listdir(config['BASIC']['ontology_dir']): if not file_name.endswith('.owl'): continue ONTO_FILE = os.path.join(config['BASIC']['ontology_dir'], file_name) print('\nProcessing %s' % file_name) projection = OntologyProjection(ONTO_FILE, 
reasoner=Reasoner.STRUCTURAL, only_taxonomy=False, bidirectional_taxonomy=True, include_literals=True, avoid_properties=set(), additional_preferred_labels_annotations=set(), additional_synonyms_annotations=set(), memory_reasoner='13351') # Extract and save seed entities (classes and individuals) print('... Extract entities (classes and individuals) ...') projection.extractEntityURIs() classes = projection.getClassURIs() individuals = projection.getIndividualURIs() entities = classes.union(individuals) with open(os.path.join(config['DOCUMENT']['cache_dir'], 'entities.txt'), 'a') as f: for e in entities: f.write('%s\n' % e) # Extract and save axioms in Manchester Syntax print('... Extract axioms ...') projection.createManchesterSyntaxAxioms() with open(os.path.join(config['DOCUMENT']['cache_dir'], 'axioms.txt'), 'a') as f: for ax in projection.axioms_manchester: axiom_sentence = [item for item in ax.split()] axiom_sentences.append(axiom_sentence) f.write('%s\n' % ax) print('... %d axioms ...' % len(axiom_sentences)) # Read annotations including rdfs:label and other literals from the ontology # Extract annotations: 1) English label of each entity, by rdfs:label or skos:preferredLabel # 2) None label annotations as sentences of the literal document print('... 
Extract annotations ...') projection.indexAnnotations() with open(os.path.join(config['DOCUMENT']['cache_dir'], 'annotations.txt'), 'a') as f: for e in entities: if e in projection.entityToPreferredLabels and len(projection.entityToPreferredLabels[e]) > 0: label = list(projection.entityToPreferredLabels[e])[0] v = pre_process_words(words=label.split()) uri_label[e] = v f.write('%s preferred_label %s\n' % (e, v)) for e in entities: if e in projection.entityToAllLexicalLabels: for v in projection.entityToAllLexicalLabels[e]: if (v is not None) and \ (not (e in projection.entityToPreferredLabels and v in projection.entityToPreferredLabels[ e])): annotation = [e] + v.split() annotations.append(annotation) f.write('%s\n' % ' '.join(annotation)) # project ontology to RDF graph (optionally) and extract walks if 'ontology_projection' in config['DOCUMENT'] and config['DOCUMENT']['ontology_projection'] == 'yes': print('... Calculate the ontology projection ...') projection.extractProjection() onto_projection_file = os.path.join(config['DOCUMENT']['cache_dir'], 'projection.ttl') projection.saveProjectionGraph(onto_projection_file) ONTO_FILE = onto_projection_file print('... Generate walks ...') walks_ = get_rdf2vec_walks(onto_file=ONTO_FILE, walker_type=config['DOCUMENT']['walker'], walk_depth=int(config['DOCUMENT']['walk_depth']), classes=entities) print('... %d walks for %d seed entities ...' 
% (len(walks_), len(entities))) walk_sentences += [list(map(str, x)) for x in walks_] # collect URI documents # two parts: axiom sentences + walk sentences URI_Doc = list() if 'URI_Doc' in config['DOCUMENT'] and config['DOCUMENT']['URI_Doc'] == 'yes': print('Extracted %d axiom sentences' % len(axiom_sentences)) URI_Doc = walk_sentences + axiom_sentences # Some entities have English labels # Keep the name of built-in properties (those starting with http://www.w3.org) # Some entities have no labels, then use the words in their URI name def label_item(item): if item in uri_label: return uri_label[item] elif item.startswith('http://www.w3.org'): return [item.split('#')[1].lower()] elif item.startswith('http://'): return URI_parse(uri=item) else: # return [item.lower()] return '' # read literal document # two parts: literals in the annotations (subject's label + literal words) # replacing walk/axiom sentences by words in their labels Lit_Doc = list() if 'Lit_Doc' in config['DOCUMENT'] and config['DOCUMENT']['Lit_Doc'] == 'yes': print('\n\nGenerate literal document') for annotation in annotations: processed_words = pre_process_words(annotation[1:]) if len(processed_words) > 0: Lit_Doc.append(label_item(item=annotation[0]) + processed_words) print('... Extracted %d annotation sentences ...' 
% len(Lit_Doc)) for sentence in walk_sentences + axiom_sentences: lit_sentence = list() for item in sentence: lit_sentence += label_item(item=item) Lit_Doc.append(lit_sentence) # for each axiom/walk sentence, generate mixture sentence(s) by two strategies: # all): for each entity, keep its entity URI, replace the others by label words # random): randomly select one entity, keep its entity URI, replace the others by label words Mix_Doc = list() if 'Mix_Doc' in config['DOCUMENT'] and config['DOCUMENT']['Mix_Doc'] == 'yes': print('\n\nGenerate mixture document') for sentence in walk_sentences + axiom_sentences: if config['DOCUMENT']['Mix_Type'] == 'all': for index in range(len(sentence)): mix_sentence = list() for i, item in enumerate(sentence): mix_sentence += [item] if i == index else label_item(item=item) Mix_Doc.append(mix_sentence) elif config['DOCUMENT']['Mix_Type'] == 'random': random_index = random.randint(0, len(sentence) - 1) mix_sentence = list() for i, item in enumerate(sentence): mix_sentence += [item] if i == random_index else label_item(item=item) Mix_Doc.append(mix_sentence) print('\n\nURI_Doc: %d, Lit_Doc: %d, Mix_Doc: %d' % (len(URI_Doc), len(Lit_Doc), len(Mix_Doc))) all_doc = URI_Doc + Lit_Doc + Mix_Doc print('Time for document construction: %s seconds' % (time.time() - start_time)) random.shuffle(all_doc) # learn the language model (train a new model or fine tune the pre-trained model) start_time = time.time() if 'pre_train_model' not in config['MODEL'] or not os.path.exists(config['MODEL']['pre_train_model']): print('\n\nTrain the language model') model_ = gensim.models.Word2Vec(all_doc, size=int(config['MODEL']['embed_size']), window=int(config['MODEL']['window']), workers=multiprocessing.cpu_count(), sg=1, iter=int(config['MODEL']['iteration']), negative=int(config['MODEL']['negative']), min_count=int(config['MODEL']['min_count']), seed=int(config['MODEL']['seed'])) else: print('\n\nFine-tune the pre-trained language model') model_ = 
gensim.models.Word2Vec.load(config['MODEL']['pre_train_model']) if len(all_doc) > 0: model_.min_count = int(config['MODEL']['min_count']) model_.build_vocab(all_doc, update=True) model_.train(all_doc, total_examples=model_.corpus_count, epochs=int(config['MODEL']['epoch'])) #Gensim format model_.save(config['BASIC']['embedding_dir']+"ontology.embeddings") #Txt format model_.wv.save_word2vec_format(config['BASIC']['embedding_dir']+"ontology.embeddings.txt", binary=False) print('Time for learning the language model: %s seconds' % (time.time() - start_time)) print('Model saved. Done!') if __name__ == "__main__": print("ciao") sys.exit(main()) # pragma: no cover
49.352174
131
0.615012
2,708
22,702
4.998892
0.104874
0.062052
0.025264
0.029253
0.830612
0.810372
0.785329
0.777425
0.764497
0.755485
0
0.003861
0.258479
22,702
459
132
49.459695
0.800285
0.125363
0
0.664723
0
0
0.190433
0.002324
0
0
0
0
0
1
0.014577
false
0.002915
0.034985
0
0.075802
0.104956
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e52e122ee4bec9f90dbb6985750499cafe7fd5f4
170
py
Python
flask_arch/builtins/sql.py
ToraNova/flask-arch
ff95c4b49a2d954ae69d21853f646792a72918ed
[ "MIT" ]
null
null
null
flask_arch/builtins/sql.py
ToraNova/flask-arch
ff95c4b49a2d954ae69d21853f646792a72918ed
[ "MIT" ]
null
null
null
flask_arch/builtins/sql.py
ToraNova/flask-arch
ff95c4b49a2d954ae69d21853f646792a72918ed
[ "MIT" ]
null
null
null
from ..user import SQLRole from sqlalchemy.ext.declarative import declarative_base default_base = declarative_base() class DefaultRole(SQLRole, default_base): pass
21.25
55
0.811765
21
170
6.380952
0.571429
0.223881
0
0
0
0
0
0
0
0
0
0
0.123529
170
7
56
24.285714
0.899329
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0.2
0.4
0
0.6
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
6
e5851127ec3f2ce3cc73ceff089452ca12c8ec83
310
py
Python
test/acceptance/features/steps/util.py
growi/service-binding-operator
f10f7f8838049b0c4e9fe04aa6dbce151296b908
[ "Apache-2.0" ]
null
null
null
test/acceptance/features/steps/util.py
growi/service-binding-operator
f10f7f8838049b0c4e9fe04aa6dbce151296b908
[ "Apache-2.0" ]
94
2021-03-11T14:08:13.000Z
2022-03-14T09:04:33.000Z
test/acceptance/features/steps/util.py
growi/service-binding-operator
f10f7f8838049b0c4e9fe04aa6dbce151296b908
[ "Apache-2.0" ]
1
2021-11-17T16:04:56.000Z
2021-11-17T16:04:56.000Z
import os from string import Template def scenario_id(context): return f"{os.path.basename(os.path.splitext(context.scenario.filename)[0]).lower()}-{context.scenario.line}" def substitute_scenario_id(context, text="$scenario_id"): return Template(text).substitute(scenario_id=scenario_id(context))
28.181818
112
0.774194
43
310
5.44186
0.465116
0.213675
0.217949
0
0
0
0
0
0
0
0
0.003534
0.087097
310
10
113
31
0.823322
0
0
0
0
0.166667
0.354839
0.316129
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
e5cbc0f43c1096b762625a6a661a402680eec65c
32
py
Python
base24_builder/__init__.py
Base24/base24-builder-python-portable
b99a432a7c28ccbff597964db97e435262bedbd9
[ "MIT" ]
2
2020-11-27T15:37:14.000Z
2021-01-21T16:18:32.000Z
base24_builder/__init__.py
Base24/base24-builder-python
fcd25fa8c30c43e83256be1d87182b97685d0d5c
[ "MIT" ]
null
null
null
base24_builder/__init__.py
Base24/base24-builder-python
fcd25fa8c30c43e83256be1d87182b97685d0d5c
[ "MIT" ]
null
null
null
"""CLI """ from .cli import run
10.666667
20
0.59375
5
32
3.8
0.8
0
0
0
0
0
0
0
0
0
0
0
0.1875
32
2
21
16
0.730769
0.09375
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
e5fe537cd750ae2734243b551c6c4e96dac985f7
170
py
Python
scapy_helper/helpers/to_list.py
NexSabre/scapy_helper
8c239bad4e081e3f2f47ec2c152a689a10ff78d0
[ "MIT" ]
1
2020-12-30T09:21:33.000Z
2020-12-30T09:21:33.000Z
scapy_helper/helpers/to_list.py
NexSabre/scapy_helper
8c239bad4e081e3f2f47ec2c152a689a10ff78d0
[ "MIT" ]
4
2021-01-13T18:23:41.000Z
2021-10-19T19:40:41.000Z
scapy_helper/helpers/to_list.py
NexSabre/scapy_helper
8c239bad4e081e3f2f47ec2c152a689a10ff78d0
[ "MIT" ]
null
null
null
from scapy_helper.helpers.utils import _layer2dict def to_list(packet, extend=False): return [_layer2dict(packet.getlayer(x)) for x in range(len(packet.layers()))]
28.333333
81
0.764706
25
170
5.04
0.84
0
0
0
0
0
0
0
0
0
0
0.013245
0.111765
170
5
82
34
0.821192
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
f912cccddeddd863531d9ac86c9f38c3870e80f5
37
py
Python
codingbat.com/Warmup-1/front3.py
ahmedelq/PythonicAlgorithms
ce10dbb6e1fd0ea5c922a932b0f920236aa411bf
[ "MIT" ]
null
null
null
codingbat.com/Warmup-1/front3.py
ahmedelq/PythonicAlgorithms
ce10dbb6e1fd0ea5c922a932b0f920236aa411bf
[ "MIT" ]
null
null
null
codingbat.com/Warmup-1/front3.py
ahmedelq/PythonicAlgorithms
ce10dbb6e1fd0ea5c922a932b0f920236aa411bf
[ "MIT" ]
null
null
null
def front3(str): return str[:3] * 3
18.5
20
0.621622
7
37
3.285714
0.714286
0
0
0
0
0
0
0
0
0
0
0.1
0.189189
37
2
20
18.5
0.666667
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
0059cdd2e95aa85d3dd81f17a99c67c8ae983329
59
py
Python
lib/clckwrkbdgr/winnt/test/test_winnt.py
umi0451/dotfiles
c618811be788d995fe01f6a16b355828d7efdd36
[ "MIT" ]
2
2017-04-16T14:54:17.000Z
2020-11-12T04:15:00.000Z
lib/clckwrkbdgr/winnt/test/test_winnt.py
clckwrkbdgr/dotfiles
292dac8c3211248b490ddbae55fe2adfffcfcf58
[ "MIT" ]
null
null
null
lib/clckwrkbdgr/winnt/test/test_winnt.py
clckwrkbdgr/dotfiles
292dac8c3211248b490ddbae55fe2adfffcfcf58
[ "MIT" ]
null
null
null
import clckwrkbdgr.winnt import clckwrkbdgr.winnt.schtasks
19.666667
33
0.881356
7
59
7.428571
0.571429
0.653846
0.846154
0
0
0
0
0
0
0
0
0
0.067797
59
2
34
29.5
0.945455
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
00b692564a5becb49f6fba628166f300155e0c28
36
py
Python
utils/__init__.py
bogvak/dash-holoniq-components
fee11b37b742c7a41d90b1af9eb76b1576ae8f01
[ "Apache-2.0" ]
null
null
null
utils/__init__.py
bogvak/dash-holoniq-components
fee11b37b742c7a41d90b1af9eb76b1576ae8f01
[ "Apache-2.0" ]
null
null
null
utils/__init__.py
bogvak/dash-holoniq-components
fee11b37b742c7a41d90b1af9eb76b1576ae8f01
[ "Apache-2.0" ]
null
null
null
from .logger import logging, log
9
33
0.722222
5
36
5.2
1
0
0
0
0
0
0
0
0
0
0
0
0.222222
36
3
34
12
0.928571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
00e3c4b5722fda86767f22def5d965c89f617a9b
98
py
Python
server-master/backend/setup/mockObjects.py
nicodenner/ifeed-pse
5f47e974c031a78a2e83bf5ad3add66425000933
[ "MIT" ]
1
2021-10-16T09:50:13.000Z
2021-10-16T09:50:13.000Z
server-master/backend/setup/mockObjects.py
NicoD31/ifeed-pse
5f47e974c031a78a2e83bf5ad3add66425000933
[ "MIT" ]
null
null
null
server-master/backend/setup/mockObjects.py
NicoD31/ifeed-pse
5f47e974c031a78a2e83bf5ad3add66425000933
[ "MIT" ]
null
null
null
import django from app.models import * from .helper import * def createMockObjects(): pass
10.888889
24
0.72449
12
98
5.916667
0.75
0
0
0
0
0
0
0
0
0
0
0
0.204082
98
8
25
12.25
0.910256
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
true
0.2
0.6
0
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
00f52e3c2df2ad1173d17b11e564490278aed13f
45
py
Python
acora/nfa2dfa.py
msabramo/acora
7111065b8bae236ac4a34436f014433938f91fa1
[ "BSD-3-Clause" ]
1
2015-11-02T18:00:59.000Z
2015-11-02T18:00:59.000Z
acora/nfa2dfa.py
msabramo/acora
7111065b8bae236ac4a34436f014433938f91fa1
[ "BSD-3-Clause" ]
null
null
null
acora/nfa2dfa.py
msabramo/acora
7111065b8bae236ac4a34436f014433938f91fa1
[ "BSD-3-Clause" ]
null
null
null
from _nfa2dfa import insert_keyword, nfa2dfa
22.5
44
0.866667
6
45
6.166667
0.833333
0
0
0
0
0
0
0
0
0
0
0.05
0.111111
45
1
45
45
0.875
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
dae4ecd1aac473b0cc149efc79e055d2f1a0c8c3
15,295
py
Python
src/dataprotection/azext_dataprotection/generated/_params.py
Caoxuyang/azure-cli-extensions
d2011261f29033cb31a1064256727d87049ab423
[ "MIT" ]
null
null
null
src/dataprotection/azext_dataprotection/generated/_params.py
Caoxuyang/azure-cli-extensions
d2011261f29033cb31a1064256727d87049ab423
[ "MIT" ]
9
2022-03-25T19:35:49.000Z
2022-03-31T06:09:47.000Z
src/dataprotection/azext_dataprotection/generated/_params.py
Caoxuyang/azure-cli-extensions
d2011261f29033cb31a1064256727d87049ab423
[ "MIT" ]
1
2022-03-10T22:13:02.000Z
2022-03-10T22:13:02.000Z
# -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- # pylint: disable=too-many-lines # pylint: disable=too-many-statements from azure.cli.core.commands.parameters import ( tags_type, get_enum_type, resource_group_name_type, get_location_type ) from azure.cli.core.commands.validators import ( get_default_location_from_resource_group, validate_file_or_dict ) from azext_dataprotection.action import ( AddStorageSettings, AddBackupPolicy, AddDataSourceInfo, AddDataSourceSetInfo, AddSecretStoreBasedAuthCredentials, AddPolicyParameters ) def load_arguments(self, _): with self.argument_context('dataprotection backup-vault show') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') with self.argument_context('dataprotection backup-vault create') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.') c.argument('e_tag', type=str, help='Optional ETag.') c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, validator=get_default_location_from_resource_group) c.argument('tags', tags_type) c.argument('type_', options_list=['--type'], type=str, help='The identityType which can be either ' 'SystemAssigned or None', arg_group='Identity') c.argument('storage_settings', action=AddStorageSettings, nargs='+', help='Storage Settings') with self.argument_context('dataprotection backup-vault update') as c: c.argument('resource_group_name', resource_group_name_type) 
c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') c.argument('tags', tags_type) c.argument('type_', options_list=['--type'], type=str, help='The identityType which can be either ' 'SystemAssigned or None', arg_group='Identity') with self.argument_context('dataprotection backup-vault delete') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') with self.argument_context('dataprotection backup-vault wait') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') with self.argument_context('dataprotection backup-policy list') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.') with self.argument_context('dataprotection backup-policy show') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') c.argument('backup_policy_name', options_list=['--name', '-n', '--backup-policy-name'], type=str, help='Name ' 'of the policy', id_part='child_name_1') with self.argument_context('dataprotection backup-policy create') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.') c.argument('backup_policy_name', options_list=['--name', '-n', '--backup-policy-name'], type=str, help='Name ' 'of the policy') c.argument('backup_policy', action=AddBackupPolicy, nargs='+', help='Rule based backup policy', arg_group='Properties') with self.argument_context('dataprotection backup-policy delete') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') c.argument('backup_policy_name', 
options_list=['--name', '-n', '--backup-policy-name'], type=str, help='Name ' 'of the policy', id_part='child_name_1') with self.argument_context('dataprotection backup-instance list') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.') with self.argument_context('dataprotection backup-instance show') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') c.argument('backup_instance_name', options_list=['--name', '-n', '--backup-instance-name'], type=str, help='The name of the backup instance', id_part='child_name_1') with self.argument_context('dataprotection backup-instance create') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.') c.argument('backup_instance_name', options_list=['--name', '-n', '--backup-instance-name'], type=str, help='The name of the backup instance') c.argument('friendly_name', type=str, help='Gets or sets the Backup Instance friendly name.') c.argument('data_source_info', action=AddDataSourceInfo, nargs='+', help='Gets or sets the data source ' 'information.') c.argument('data_source_set_info', action=AddDataSourceSetInfo, nargs='+', help='Gets or sets the data source ' 'set information.') c.argument('secret_store_based_auth_credentials', action=AddSecretStoreBasedAuthCredentials, nargs='+', help='Secret store based authentication credentials.', arg_group='DatasourceAuthCredentials') c.argument('validation_type', arg_type=get_enum_type(['ShallowValidation', 'DeepValidation']), help='Specifies ' 'the type of validation. 
In case of DeepValidation, all validations from /validateForBackup API ' 'will run again.') c.argument('object_type', type=str, help='') c.argument('policy_id', type=str, help='', arg_group='Policy Info') c.argument('policy_parameters', action=AddPolicyParameters, nargs='+', help='Policy parameters for the backup ' 'instance', arg_group='Policy Info') with self.argument_context('dataprotection backup-instance delete') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') c.argument('backup_instance_name', options_list=['--name', '-n', '--backup-instance-name'], type=str, help='The name of the backup instance', id_part='child_name_1') with self.argument_context('dataprotection backup-instance adhoc-backup') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') c.argument('backup_instance_name', options_list=['--name', '-n', '--backup-instance-name'], type=str, help='The name of the backup instance', id_part='child_name_1') c.argument('rule_name', type=str, help='Specify backup policy rule name.', arg_group='Backup Rule Options') c.argument('retention_tag_override', type=str, help='Specify retention override tag.', arg_group='Backup Rule ' 'Options Trigger Option') with self.argument_context('dataprotection backup-instance restore trigger') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') c.argument('backup_instance_name', options_list=['--name', '-n', '--backup-instance-name'], type=str, help='The name of the backup instance', id_part='child_name_1') c.argument('parameters', options_list=['--restore-request-object'], type=validate_file_or_dict, help='Request ' 'body for operation Expected value: json-string/@json-file.') with self.argument_context('dataprotection 
backup-instance resume-protection') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') c.argument('backup_instance_name', options_list=['--name', '-n', '--backup-instance-name'], type=str, help='The name of the backup instance', id_part='child_name_1') with self.argument_context('dataprotection backup-instance stop-protection') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') c.argument('backup_instance_name', options_list=['--name', '-n', '--backup-instance-name'], type=str, help='The name of the backup instance', id_part='child_name_1') with self.argument_context('dataprotection backup-instance suspend-backup') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') c.argument('backup_instance_name', options_list=['--name', '-n', '--backup-instance-name'], type=str, help='The name of the backup instance', id_part='child_name_1') with self.argument_context('dataprotection backup-instance validate-for-backup') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') c.argument('friendly_name', type=str, help='Gets or sets the Backup Instance friendly name.', arg_group='Backup Instance') c.argument('data_source_info', action=AddDataSourceInfo, nargs='+', help='Gets or sets the data source ' 'information.', arg_group='Backup Instance') c.argument('data_source_set_info', action=AddDataSourceSetInfo, nargs='+', help='Gets or sets the data source ' 'set information.', arg_group='Backup Instance') c.argument('secret_store_based_auth_credentials', action=AddSecretStoreBasedAuthCredentials, nargs='+', help='Secret store based authentication credentials.', 
arg_group='DatasourceAuthCredentials') c.argument('validation_type', arg_type=get_enum_type(['ShallowValidation', 'DeepValidation']), help='Specifies ' 'the type of validation. In case of DeepValidation, all validations from /validateForBackup API ' 'will run again.', arg_group='Backup Instance') c.argument('object_type', type=str, help='', arg_group='Backup Instance') c.argument('policy_id', type=str, help='', arg_group='Backup Instance Policy Info') c.argument('policy_parameters', action=AddPolicyParameters, nargs='+', help='Policy parameters for the backup ' 'instance', arg_group='Backup Instance Policy Info') with self.argument_context('dataprotection backup-instance validate-for-restore') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') c.argument('backup_instance_name', options_list=['--name', '-n', '--backup-instance-name'], type=str, help='The name of the backup instance', id_part='child_name_1') c.argument('restore_request_object', type=validate_file_or_dict, help='Gets or sets the restore request ' 'object. 
Expected value: json-string/@json-file.') with self.argument_context('dataprotection backup-instance wait') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') c.argument('backup_instance_name', options_list=['--name', '-n', '--backup-instance-name'], type=str, help='The name of the backup instance', id_part='child_name_1') with self.argument_context('dataprotection recovery-point list') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.') c.argument('backup_instance_name', type=str, help='The name of the backup instance') c.argument('filter_', options_list=['--filter'], type=str, help='OData filter options.') c.argument('skip_token', type=str, help='skipToken Filter.') with self.argument_context('dataprotection recovery-point show') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') c.argument('backup_instance_name', type=str, help='The name of the backup instance', id_part='child_name_1') c.argument('recovery_point_id', type=str, help='Id of the recovery point.', id_part='child_name_2') with self.argument_context('dataprotection job list') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.') with self.argument_context('dataprotection job show') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') c.argument('job_id', type=str, help='The Job ID. This is a GUID-formatted string (e.g. 
' '00000000-0000-0000-0000-000000000000).', id_part='child_name_1') with self.argument_context('dataprotection restorable-time-range find') as c: c.argument('resource_group_name', resource_group_name_type) c.argument('vault_name', type=str, help='The name of the backup vault.', id_part='name') c.argument('backup_instance_name', type=str, help='The name of the backup instance', id_part='child_name_1') c.argument('source_data_store_type', help='Gets or sets the type of the source data store.', arg_type=get_enum_type(['OperationalStore', 'VaultStore', 'ArchiveStore'])) c.argument('start_time', type=str, help='Start time for the List Restore Ranges request. ISO 8601 format.') c.argument('end_time', type=str, help='End time for the List Restore Ranges request. ISO 8601 format.')
68.28125
121
0.67205
1,984
15,295
4.977319
0.097782
0.093873
0.065722
0.068354
0.849823
0.835038
0.819949
0.774278
0.769823
0.738633
0
0.004455
0.192808
15,295
223
122
68.587444
0.795399
0.033083
0
0.535519
0
0
0.424488
0.03676
0
0
0
0
0
1
0.005464
false
0
0.016393
0
0.021858
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
97a9d99bccccc7f85235bad1d78f31b883e1cbd2
205
py
Python
Module-11-Functions/functions-01-basics.py
CodingGearsCourses/PythonProgrammingFundamentals
40e562e143802d997e1f0129bc8b52d5d0931728
[ "MIT" ]
1
2021-12-23T07:52:08.000Z
2021-12-23T07:52:08.000Z
Module-11-Functions/functions-01-basics.py
CodingGearsCourses/PythonProgrammingFundamentals
40e562e143802d997e1f0129bc8b52d5d0931728
[ "MIT" ]
null
null
null
Module-11-Functions/functions-01-basics.py
CodingGearsCourses/PythonProgrammingFundamentals
40e562e143802d997e1f0129bc8b52d5d0931728
[ "MIT" ]
null
null
null
# Functions - Basics def print_message(): print(" Alert! ".center(30, "*")) print(" Hello World!") print(" How are you?!") print("-----") print_message() print_message() print_message()
15.769231
37
0.595122
23
205
5.130435
0.565217
0.40678
0.432203
0.40678
0
0
0
0
0
0
0
0.012121
0.195122
205
12
38
17.083333
0.70303
0.087805
0
0.375
0
0
0.221622
0
0
0
0
0
0
1
0.125
true
0
0
0
0.125
1
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
97c5937c3690cf328478d3d355fd67bf270f4d6e
25
py
Python
src/iranlowo/__init__.py
Niger-Volta-LTI/iranlowo
0046b61105ffadfff21dd8b37754b9d95177fbf8
[ "MIT" ]
17
2019-07-05T20:30:35.000Z
2022-02-28T10:00:24.000Z
src/iranlowo/__init__.py
Olamyy/iranlowo
1feb123988a8afac3ac53c7acfb72df862c4bc18
[ "MIT" ]
17
2019-07-06T09:10:10.000Z
2020-11-13T08:30:37.000Z
src/iranlowo/__init__.py
ruohoruotsi/iranlowo
0046b61105ffadfff21dd8b37754b9d95177fbf8
[ "MIT" ]
7
2019-07-01T01:59:07.000Z
2020-11-27T17:12:46.000Z
from iranlowo import adr
12.5
24
0.84
4
25
5.25
1
0
0
0
0
0
0
0
0
0
0
0
0.16
25
1
25
25
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
97ca63f7782f6853ee20f870c3c77138a6155f6f
8,149
py
Python
src/rule/dynamic_oracle.py
zsLin177/IRNet_dynamic_temp
17fb455e9766376b4ec0c4c1d53dedcc5710450a
[ "MIT" ]
null
null
null
src/rule/dynamic_oracle.py
zsLin177/IRNet_dynamic_temp
17fb455e9766376b4ec0c4c1d53dedcc5710450a
[ "MIT" ]
null
null
null
src/rule/dynamic_oracle.py
zsLin177/IRNet_dynamic_temp
17fb455e9766376b4ec0c4c1d53dedcc5710450a
[ "MIT" ]
null
null
null
# 用来在使用dynamic oracle进行训练的时候, # 根据当前已经生成的action序列(partical AST tree)和current objective, # 来生成新的objective from src.rule.lf import build_tree, build_sketch_tree from src.rule.semQL import Sup, Sel, Order, Root, Filter, A, N, C, T, Root1 import random Keywords = ['des', 'asc', 'and', 'or', 'sum', 'min', 'max', 'avg', 'none', '=', '!=', '<', '>', '<=', '>=', 'between', 'like', 'not_like'] + [ 'in', 'not_in', 'count', 'intersect', 'union', 'except' ] def preorder_travel_all(node, lst): lst.append(node) for child in node.children: preorder_travel_all(child, lst) return lst def generate(node, selected_C): if(isinstance(node, A)): idx = random.randint(0, len(selected_C)-1) selected_C[idx].set_parent(node) node.add_children(selected_C[idx]) return elif(isinstance(node, Root1)): child = Root(5) child.set_parent(node) node.add_children(child) generate(child, selected_C) elif(isinstance(node, Root)): child = Sel(0) child.set_parent(node) node.add_children(child) generate(child, selected_C) elif(isinstance(node, Sel)): child = N(0) child.set_parent(node) node.add_children(child) generate(child, selected_C) elif(isinstance(node, N) or isinstance(node, Order) or isinstance(node, Sup) or isinstance(node, Filter)): child = A(0) child.set_parent(node) node.add_children(child) generate(child, selected_C) def generate_sketch(node): if (isinstance(node, N) or isinstance(node, Order) or isinstance(node, Sup) or isinstance(node, Filter)): return elif (isinstance(node, Root1)): child = Root(5) child.set_parent(node) node.add_children(child) generate_sketch(child) elif (isinstance(node, Root)): child = Sel(0) child.set_parent(node) node.add_children(child) generate_sketch(child) elif (isinstance(node, Sel)): child = N(0) child.set_parent(node) node.add_children(child) generate_sketch(child) def derive_sketch(nodes_type): lst = [] for node_type in nodes_type: if (node_type == Root1): node = Root1(3) elif (node_type == Root): node = Root(5) elif (node_type == N): node = N(0) # 
此处存疑,或许也可以是包含selected_A中所有的A elif (node_type == Sel): node = Sel(0) elif (node_type == Filter): id = random.randint(2, 10) node = Filter(id) elif (node_type == Order): id = random.randint(0, 1) node = Order(id) elif (node_type == Sup): id = random.randint(0, 1) node = Sup(id) generate_sketch(node) lst.append(node) return lst def derive(nodes_type, selected_C): lst = [] for node_type in nodes_type: if(node_type == Root1): node = Root1(3) elif(node_type == Root): node = Root(5) elif(node_type == N): node = N(0) # 此处存疑,或许也可以是包含selected_A中所有的A elif(node_type == A): node = A(0) elif(node_type == Sel): node = Sel(0) elif(node_type == Filter): id = random.randint(2, 10) node = Filter(id) elif(node_type == Order): id = random.randint(0, 1) node = Order(id) elif(node_type == Sup): id = random.randint(0, 1) node = Sup(id) generate(node, selected_C) lst.append(node) return lst def adjust(action_seq, current_obj): ''' action_seq:目前模型已经生成的action序列,类型不是字符串 current_obj:当前的object action序列,类型不是字符串 return:新的object action序列 # current_obj需要调整的也就是把action_o为根的子树换成以action_p为根的子树 ''' if(action_seq[-1] == current_obj[len(action_seq)-1]): return current_obj already_correct = action_seq[0:-1] action_p = action_seq[-1] # action_o = current_obj[len(already_correct)] current_obj_tree = build_tree(current_obj) # 建成了树的结构 node_lst = [] preorder_travel_all(current_obj_tree, node_lst) selected_C = [] for node in node_lst: if(isinstance(node, C)): selected_C.append(node) node_o = node_lst[len(already_correct)] p_children = action_p.get_next_action() o_children = node_o.children p_plus_children_type = [] for p_child in p_children: flag = 0 for i in range(len(o_children)-1, -1, -1): if(isinstance(o_children[i], p_child)): o_children[i].set_parent(action_p) action_p.add_children(o_children[i]) o_children.pop(i) flag = 1 break if(flag == 0): p_plus_children_type.append(p_child) new_children = derive(p_plus_children_type, selected_C) for new_child in new_children: new_child.set_parent(action_p) 
action_p.add_children(new_child) parent = node_o.parent parent.children.remove(node_o) action_p.set_parent(parent) parent.add_children(action_p) new_node_lst = [] preorder_travel_all(current_obj_tree, new_node_lst) # print(new_node_lst) return new_node_lst # print(new_node_lst) # print(action_p) def adjust_sketch(action_seq, current_obj): ''' action_seq:目前模型已经生成的action序列,类型不是字符串 current_obj:当前的object action序列,类型不是字符串 return:新的object action序列 # current_obj需要调整的也就是把action_o为根的子树换成以action_p为根的子树 ''' idx = 0 flag = 0 for idx in range(len(action_seq)): if(action_seq[idx] != current_obj[idx]): flag = 1 break if(flag == 0): return current_obj action_p = action_seq[idx] # action_o = current_obj[len(already_correct)] current_obj_tree = build_sketch_tree(current_obj) # 建成了树的结构 node_lst = [] preorder_travel_all(current_obj_tree, node_lst) # selected_C = [] # for node in node_lst: # if(isinstance(node,C)): # selected_C.append(node) node_o = node_lst[idx] p_children = action_p.get_next_action() o_children = node_o.children p_plus_children_type = [] for p_child in p_children: if(p_child == C or p_child == T or p_child == A): continue flag = 0 for i in range(len(o_children)-1, -1, -1): if(isinstance(o_children[i], p_child)): o_children[i].set_parent(action_p) action_p.add_children(o_children[i]) o_children.pop(i) flag = 1 break if(flag == 0): p_plus_children_type.append(p_child) new_children = derive_sketch(p_plus_children_type) for new_child in new_children: new_child.set_parent(action_p) action_p.add_children(new_child) parent = node_o.parent if(parent): parent.children.remove(node_o) action_p.set_parent(parent) parent.add_children(action_p) new_node_lst = [] preorder_travel_all(current_obj_tree, new_node_lst) # print(new_node_lst) for node in new_node_lst: node.parent = None node.children = [] return new_node_lst else: new_node_lst = [] preorder_travel_all(action_p, new_node_lst) for node in new_node_lst: node.parent = None node.children = [] return new_node_lst # 
print(new_node_lst) # print(action_p) if __name__ == '__main__': # correct_s = "Root1(3) Root(4) Sel(0) N(2) A(0) C(3) T(1) A(0) C(9) T(1) A(0) C(12) T(1) Order(0) A(0) C(12) T(1)".split() correct = [Root1(3), Root(3), Sel(0), N(0), Filter(0), Filter( 0), Filter(2), Root(3), Sel(0), N(0), Filter(2), Filter(2)] # predicted_s = 'Root1(3) Root(4) Sel(0) N(2) A(0) C(4)'.split() predicted = [Root1(3), Root(3), Sel(0), N(0), Filter( 0), Filter(2), Filter(2), Root(3), Sel(0), N(0), Filter(0)] print(predicted) print(correct) print(adjust_sketch(predicted, correct))
30.070111
142
0.597497
1,102
8,149
4.167877
0.108893
0.038101
0.032658
0.02961
0.780753
0.771827
0.742216
0.742216
0.741999
0.731548
0
0.019142
0.281998
8,149
270
143
30.181481
0.765852
0.118297
0
0.721393
0
0
0.013502
0
0
0
0
0
0
1
0.034826
false
0
0.014925
0
0.099502
0.014925
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
c11c17ad8b26e16c21d6a7fa0fa4b48f2bea1672
72
py
Python
calamari_ocr/test/__init__.py
jacektl/calamari
980477aefe4e56f7fc373119c1b38649798d8686
[ "Apache-2.0" ]
922
2018-07-06T05:18:22.000Z
2022-03-22T12:38:32.000Z
calamari_ocr/test/__init__.py
jacektl/calamari
980477aefe4e56f7fc373119c1b38649798d8686
[ "Apache-2.0" ]
267
2018-07-14T22:10:41.000Z
2022-03-28T18:38:43.000Z
calamari_ocr/test/__init__.py
jacektl/calamari
980477aefe4e56f7fc373119c1b38649798d8686
[ "Apache-2.0" ]
227
2018-07-06T07:42:16.000Z
2022-02-27T05:29:59.000Z
from tfaip.util.testing.setup import setup_test_init setup_test_init()
18
52
0.847222
12
72
4.75
0.666667
0.315789
0.45614
0
0
0
0
0
0
0
0
0
0.083333
72
3
53
24
0.863636
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
c15d5c68cd54abe62d9dfb92dbb7c87f8f0cb64e
236
py
Python
fn_whois_rdap/fn_whois_rdap/util/config.py
rudimeyer/resilient-community-apps
7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00
[ "MIT" ]
1
2020-08-25T03:43:07.000Z
2020-08-25T03:43:07.000Z
fn_whois_rdap/fn_whois_rdap/util/config.py
rudimeyer/resilient-community-apps
7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00
[ "MIT" ]
1
2019-07-08T16:57:48.000Z
2019-07-08T16:57:48.000Z
fn_whois_rdap/fn_whois_rdap/util/config.py
rudimeyer/resilient-community-apps
7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # (c) Copyright IBM Corp. 2019. All Rights Reserved. """Generate a default configuration-file section for fn_whois_rdap""" from __future__ import print_function def config_section_data(): return None
21.454545
69
0.720339
32
236
5.03125
0.96875
0
0
0
0
0
0
0
0
0
0
0.025641
0.173729
236
11
70
21.454545
0.8
0.580508
0
0
1
0
0
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0.333333
1
0.333333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
1
1
0
1
1
1
0
0
6
c15fa28e05b48a82039fe8eaac04b1e6b58a5072
148
py
Python
hardware/button/__init__.py
magnusnordlander/silvia-pi
3b927f73f8c8608a17f1f0e6458d06eff0f1d09a
[ "MIT" ]
16
2020-06-09T22:34:18.000Z
2021-02-09T15:31:16.000Z
hardware/button/__init__.py
magnusnordlander/silvia-pi
3b927f73f8c8608a17f1f0e6458d06eff0f1d09a
[ "MIT" ]
null
null
null
hardware/button/__init__.py
magnusnordlander/silvia-pi
3b927f73f8c8608a17f1f0e6458d06eff0f1d09a
[ "MIT" ]
1
2020-09-03T15:21:15.000Z
2020-09-03T15:21:15.000Z
from .EmulatedRandomButton import EmulatedRandomButton try: from .GpioSwitchButton import GpioSwitchButton except ModuleNotFoundError: pass
24.666667
54
0.837838
12
148
10.333333
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.135135
148
6
55
24.666667
0.96875
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.2
0.4
0
0.4
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
0
0
0
6
c16699e8afbda8607fc77f35a9d1c78913fee338
4,271
py
Python
tests/test_cell_heating.py
kevinm387/openmc_tally_unit_converter
46720b0dd2cf572d74d69dcb73877d362983f23b
[ "MIT" ]
null
null
null
tests/test_cell_heating.py
kevinm387/openmc_tally_unit_converter
46720b0dd2cf572d74d69dcb73877d362983f23b
[ "MIT" ]
null
null
null
tests/test_cell_heating.py
kevinm387/openmc_tally_unit_converter
46720b0dd2cf572d74d69dcb73877d362983f23b
[ "MIT" ]
null
null
null
import unittest import openmc_tally_unit_converter as otuc import pytest import openmc class TestUsage(unittest.TestCase): def setUp(self): # loads in the statepoint file containing tallies statepoint = openmc.StatePoint(filepath="statepoint.2.h5") self.my_tally = statepoint.get_tally(name="2_heating") def test_cell_tally_heating_base_units(self): # returns the tally with base units result = otuc.process_tally(tally=self.my_tally) assert len(result) == 2 assert result[0].units == "electron_volt / source_particle" assert result[1].units == "electron_volt / source_particle" assert isinstance(result[0][0].magnitude, float) assert isinstance(result[1][0].magnitude, float) def test_cell_tally_heating_no_processing(self): # returns the tally with base units result = otuc.process_tally( tally=self.my_tally, required_units="eV / source_particle" ) assert len(result) == 2 assert result[0].units == "electron_volt / source_particle" assert result[1].units == "electron_volt / source_particle" assert isinstance(result[0][0].magnitude, float) assert isinstance(result[1][0].magnitude, float) def test_cell_tally_heating_fusion_power_processing(self): # returns the tally with scalled based units (MeV instead of eV) result = otuc.process_tally( source_strength=4.6e17, # neutrons per 1.3MJ pulse tally=self.my_tally, required_units="eV / second", ) assert len(result) == 2 assert result[0].units == "electron_volt / second" assert result[1].units == "electron_volt / second" assert isinstance(result[0][0].magnitude, float) assert isinstance(result[1][0].magnitude, float) def test_cell_tally_heating_pulse_processing(self): # returns the tally with scalled based units (MeV instead of eV) result = otuc.process_tally( source_strength=4.6e17, # neutrons per 1.3MJ pulse tally=self.my_tally, required_units="eV / pulse", ) assert len(result) == 2 assert result[0].units == "electron_volt / pulse" assert result[1].units == "electron_volt / pulse" def 
test_cell_tally_heating_pulse_processing_and_scaling(self): # returns the tally with scalled based units (MeV instead of eV) result = otuc.process_tally( source_strength=4.6e17, # neutrons per 1.3MJ pulse tally=self.my_tally, required_units="MeV / pulse", ) assert len(result) == 2 assert result[0].units == "megaelectron_volt / pulse" assert result[1].units == "megaelectron_volt / pulse" def test_cell_tally_heating_fusion_power_processing_and_scaling(self): # returns the tally with scalled based units (MeV instead of eV) result = otuc.process_tally( source_strength=4.6e17, # neutrons per 1.3MJ pulse tally=self.my_tally, required_units="MeV / second", ) assert len(result) == 2 assert result[0].units == "megaelectron_volt / second" assert result[1].units == "megaelectron_volt / second" def test_cell_tally_heating_fusion_power_processing_and_conversion(self): # returns the tally with normalisation per pulse and conversion to joules result = otuc.process_tally( source_strength=1.3e6, tally=self.my_tally, required_units="joule / second" ) assert len(result) == 2 assert result[0].units == "joule / second" assert result[1].units == "joule / second" def test_cell_tally_heating_pulse_processing_and_conversion(self): # returns the tally with normalisation per pulse and conversion to joules result = otuc.process_tally( source_strength=1.3e6, tally=self.my_tally, required_units="joules / pulse", # joules or joule can be requested ) assert len(result) == 2 assert result[0].units == "joule / pulse" assert result[1].units == "joule / pulse" if __name__ == "__main__": unittest.main()
36.194915
87
0.650199
530
4,271
5.030189
0.154717
0.072018
0.037134
0.048012
0.872468
0.84021
0.778695
0.764441
0.73931
0.674044
0
0.021746
0.257083
4,271
117
88
36.504274
0.818468
0.150784
0
0.425
0
0
0.135659
0
0
0
0
0
0.375
1
0.1125
false
0
0.05
0
0.175
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
c18dd1b337bbfc8671f98004eb2cdca181194963
98
py
Python
lib/hachoir/wx/tree_view/__init__.py
0x20Man/Watcher3
4656b42bc5879a3741bb95f534b7c6612a25264d
[ "Apache-2.0" ]
320
2017-03-28T23:33:45.000Z
2022-02-17T08:45:01.000Z
lib/hachoir/wx/tree_view/__init__.py
0x20Man/Watcher3
4656b42bc5879a3741bb95f534b7c6612a25264d
[ "Apache-2.0" ]
300
2017-03-28T19:22:54.000Z
2021-12-01T01:11:55.000Z
lib/hachoir/wx/tree_view/__init__.py
0x20Man/Watcher3
4656b42bc5879a3741bb95f534b7c6612a25264d
[ "Apache-2.0" ]
90
2017-03-29T16:12:43.000Z
2022-03-01T06:23:48.000Z
from .tree_view import tree_view_t # noqa from .tree_view_setup import setup_tree_view # noqa
32.666667
53
0.795918
17
98
4.176471
0.411765
0.450704
0.338028
0
0
0
0
0
0
0
0
0
0.163265
98
2
54
49
0.865854
0.091837
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
c1a78afa13c8b618d299b4e0da540ab4ebdeb7e7
2,820
py
Python
tests/seahub/options/test_models.py
saukrIppl/newsea
0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603
[ "Apache-2.0" ]
2
2017-06-21T09:46:55.000Z
2018-05-30T10:07:32.000Z
tests/seahub/options/test_models.py
saukrIppl/newsea
0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603
[ "Apache-2.0" ]
null
null
null
tests/seahub/options/test_models.py
saukrIppl/newsea
0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603
[ "Apache-2.0" ]
1
2020-10-01T04:11:41.000Z
2020-10-01T04:11:41.000Z
from seahub.test_utils import BaseTestCase
from seahub.options.models import (UserOptions, KEY_USER_GUIDE,
                                   VAL_USER_GUIDE_ON, VAL_USER_GUIDE_OFF,
                                   KEY_DEFAULT_REPO)


class UserOptionsManagerTest(BaseTestCase):
    """Tests for the UserOptions manager helpers.

    Relies on the BaseTestCase fixtures ``self.user`` and ``self.repo``
    (provided by seahub.test_utils — not visible here, TODO confirm).
    """

    def test_is_user_guide_enabled(self):
        # No record yet: the guide is reported as enabled by default.
        assert UserOptions.objects.is_user_guide_enabled(self.user.email) is True

        # An explicit OFF record disables it.
        UserOptions.objects.create(email=self.user.email,
                                   option_key=KEY_USER_GUIDE,
                                   option_val=VAL_USER_GUIDE_OFF)
        assert UserOptions.objects.is_user_guide_enabled(self.user.email) is False

    def test_is_user_guide_enabled_with_multiple_records(self):
        # Create two conflicting records for the same user/key.
        UserOptions.objects.create(email=self.user.email,
                                   option_key=KEY_USER_GUIDE,
                                   option_val=VAL_USER_GUIDE_OFF)
        UserOptions.objects.create(email=self.user.email,
                                   option_key=KEY_USER_GUIDE,
                                   option_val=VAL_USER_GUIDE_ON)
        assert len(UserOptions.objects.filter(email=self.user.email,
                                              option_key=KEY_USER_GUIDE)) == 2

        assert UserOptions.objects.is_user_guide_enabled(self.user.email) is True
        # Side effect asserted below: the query collapses the duplicate
        # rows down to a single record.
        assert len(UserOptions.objects.filter(email=self.user.email,
                                              option_key=KEY_USER_GUIDE)) == 1

    def test_get_default_repo(self):
        # Starts with no default-repo record for this user.
        assert len(UserOptions.objects.filter(email=self.user.email,
                                              option_key=KEY_DEFAULT_REPO)) == 0

        UserOptions.objects.create(email=self.user.email,
                                   option_key=KEY_DEFAULT_REPO,
                                   option_val=self.repo.id)
        assert len(UserOptions.objects.filter(email=self.user.email,
                                              option_key=KEY_DEFAULT_REPO)) == 1
        assert UserOptions.objects.get_default_repo(self.user.email) is not None

    def test_get_default_repo_with_multiple_records(self):
        assert len(UserOptions.objects.filter(email=self.user.email,
                                              option_key=KEY_DEFAULT_REPO)) == 0

        # Two identical default-repo rows for the same user.
        UserOptions.objects.create(email=self.user.email,
                                   option_key=KEY_DEFAULT_REPO,
                                   option_val=self.repo.id)
        UserOptions.objects.create(email=self.user.email,
                                   option_key=KEY_DEFAULT_REPO,
                                   option_val=self.repo.id)
        assert len(UserOptions.objects.filter(email=self.user.email,
                                              option_key=KEY_DEFAULT_REPO)) == 2

        assert UserOptions.objects.get_default_repo(self.user.email) is not None
        # Side effect asserted below: the lookup de-duplicates the rows.
        assert len(UserOptions.objects.filter(email=self.user.email,
                                              option_key=KEY_DEFAULT_REPO)) == 1
52.222222
103
0.621277
331
2,820
4.996979
0.126888
0.195889
0.141475
0.141475
0.865175
0.837364
0.807134
0.807134
0.807134
0.807134
0
0.003555
0.301773
2,820
53
104
53.207547
0.836465
0
0
0.658537
0
0
0
0
0
0
0
0
0.292683
1
0.097561
false
0
0.04878
0
0.170732
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
c1c54ac6214a1a47d875889dd6b41105b30346f5
46
py
Python
2/week4/5.py
briannice/logiscool-python
00cf772072f574d297ed487e8edc9bb0158b6c68
[ "Apache-2.0" ]
null
null
null
2/week4/5.py
briannice/logiscool-python
00cf772072f574d297ed487e8edc9bb0158b6c68
[ "Apache-2.0" ]
null
null
null
2/week4/5.py
briannice/logiscool-python
00cf772072f574d297ed487e8edc9bb0158b6c68
[ "Apache-2.0" ]
null
null
null
def rec(n):
    """Return the sum n + (n-1) + ... + 1 via recursion.

    Bug fix: the original had no base case, so every call — including the
    module-level ``rec(10)`` below — recursed until RecursionError.
    Terminate at ``n <= 0`` (also makes non-positive inputs safe).
    """
    if n <= 0:
        return 0
    return n + rec(n - 1)


rec(10)
7.666667
23
0.521739
10
46
2.4
0.6
0.333333
0
0
0
0
0
0
0
0
0
0.090909
0.282609
46
5
24
9.2
0.636364
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0.333333
0.666667
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
6
c1cb0bde4849daa659eb75e2b655dff74ee9a1ff
21
py
Python
lib/theme.py
nsde/mctools
54e44409879bb8ab96981b9c1439670e1997791b
[ "MIT" ]
5
2020-09-29T15:15:40.000Z
2020-10-20T16:36:12.000Z
lib/theme.py
nsde/mctools
54e44409879bb8ab96981b9c1439670e1997791b
[ "MIT" ]
1
2020-10-02T21:19:27.000Z
2020-10-02T21:19:27.000Z
lib/theme.py
nsde/mctools
54e44409879bb8ab96981b9c1439670e1997791b
[ "MIT" ]
null
null
null
def theme():
    """Placeholder theme hook; intentionally does nothing yet."""
    return None
10.5
12
0.571429
3
21
4
1
0
0
0
0
0
0
0
0
0
0
0
0.285714
21
2
13
10.5
0.8
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
0
0
0
0
0
6
c1d7ad17faecd3f25e5648461a6013b2620e159a
5,295
py
Python
tests/test_reading_dataset_from_txt.py
jakub-tomczak/ror
cf9ab38a2d66f4816a1289b9726911960059fce7
[ "MIT" ]
null
null
null
tests/test_reading_dataset_from_txt.py
jakub-tomczak/ror
cf9ab38a2d66f4816a1289b9726911960059fce7
[ "MIT" ]
null
null
null
tests/test_reading_dataset_from_txt.py
jakub-tomczak/ror
cf9ab38a2d66f4816a1289b9726911960059fce7
[ "MIT" ]
null
null
null
from ror.Relation import INDIFFERENCE, PREFERENCE
from ror.data_loader import RORParameter, read_dataset_from_txt
import unittest
from ror.Dataset import Dataset, RORDataset
import numpy as np


class TestTxtDatasetReader(unittest.TestCase):
    """Tests for reading ROR datasets from txt files.

    Fix: the original defined ``test_reading_dataset_from_txt`` twice with
    byte-identical bodies; the second definition silently shadowed the
    first (flake8 F811), so only one copy ever ran. The duplicate is
    removed — runtime behavior is unchanged.
    """

    def test_reading_dataset_from_txt(self):
        """A plain dataset parses into a RORDataset with the expected cells."""
        loading_result = read_dataset_from_txt("tests/datasets/example.txt")
        data = loading_result.dataset
        self.assertIs(type(data), RORDataset)
        self.assertEqual(len(data.criteria), 2)
        self.assertEqual(data.criteria[0][0], "MaxSpeed")
        self.assertEqual(data.criteria[0][1], "g")
        self.assertEqual(data.criteria[1][0], "FuelCons")
        self.assertEqual(data.criteria[1][1], "c")
        self.assertEqual(len(data.alternatives), 5)
        self.assertEqual(data.alternatives[0], "b01")
        self.assertEqual(data.alternatives[4], "b05")
        self.assertIs(type(data.matrix[0, 0]), np.float64)
        self.assertEqual(data.matrix[0, 0], 90)
        self.assertIs(type(data.matrix[4, 0]), np.float64)
        self.assertEqual(data.matrix[4, 0], 83)
        # cost type criteria are reversed
        # (multiplied by -1 so we can treat them as gain type criteria)
        self.assertIs(type(data.matrix[4, 1]), np.float64)
        self.assertEqual(data.matrix[4, 1], -26)
        self.assertIs(type(data.matrix[0, 1]), np.float64)
        self.assertEqual(data.matrix[0, 1], -27)

    def test_reading_dataset_from_txt_with_preferences(self):
        """Preference and intensity relations are parsed alongside the matrix."""
        loading_result = read_dataset_from_txt("tests/datasets/ror_dataset.txt")
        data = loading_result.dataset
        self.assertIs(type(data), RORDataset)
        self.assertEqual(len(data.criteria), 2)
        self.assertEqual(data.criteria[0][0], "MaxSpeed")
        self.assertEqual(data.criteria[0][1], "g")
        self.assertEqual(data.criteria[1][0], "FuelCons")
        self.assertEqual(data.criteria[1][1], "c")
        self.assertEqual(len(data.alternatives), 14)
        self.assertEqual(data.alternatives[0], "b01")
        self.assertEqual(data.alternatives[4], "b05")
        self.assertIs(type(data.matrix[0, 0]), np.float64)
        self.assertEqual(data.matrix[0, 0], 90)
        self.assertIs(type(data.matrix[4, 0]), np.float64)
        self.assertEqual(data.matrix[4, 0], 83)
        self.assertEqual(len(data.preferenceRelations), 3)
        self.assertEqual(data.preferenceRelations[0].relation, INDIFFERENCE)
        self.assertEqual(data.preferenceRelations[0].alternative_1, "b01")
        self.assertEqual(data.preferenceRelations[0].alternative_2, "b02")
        self.assertEqual(data.preferenceRelations[1].relation, PREFERENCE)
        self.assertEqual(data.preferenceRelations[1].alternative_1, "b06")
        self.assertEqual(data.preferenceRelations[1].alternative_2, "b03")
        self.assertEqual(data.preferenceRelations[2].relation, PREFERENCE)
        self.assertEqual(data.preferenceRelations[2].alternative_1, "b08")
        self.assertEqual(data.preferenceRelations[2].alternative_2, "b07")
        self.assertEqual(len(data.intensityRelations), 1)
        self.assertEqual(data.intensityRelations[0].relation, PREFERENCE)
        self.assertEqual(data.intensityRelations[0].alternative_1, "b04")
        self.assertEqual(data.intensityRelations[0].alternative_2, "b08")
        self.assertEqual(data.intensityRelations[0].alternative_3, "b07")
        self.assertEqual(data.intensityRelations[0].alternative_4, "b06")

    def test_reading_with_preferences(self):
        """Solver parameters in the file are exposed via loading_result.parameters."""
        loading_result = read_dataset_from_txt("tests/datasets/ror_dataset_with_parameters.txt")
        data = loading_result.dataset
        parameters = loading_result.parameters
        self.assertIs(type(data), RORDataset)
        self.assertEqual(len(data.criteria), 2)
        self.assertEqual(len(data.alternatives), 14)
        self.assertEqual(len(data.preferenceRelations), 3)
        self.assertEqual(len(data.intensityRelations), 1)
        self.assertAlmostEqual(parameters[RORParameter.EPS], 2e-11)
        self.assertAlmostEqual(parameters[RORParameter.INITIAL_ALPHA], 0.1)
46.447368
96
0.682342
654
5,295
5.437309
0.126911
0.227784
0.224409
0.07874
0.879921
0.829021
0.679415
0.658324
0.619798
0.619798
0
0.043257
0.183569
5,295
114
97
46.447368
0.77932
0.035316
0
0.696629
0
0
0.045063
0.025078
0
0
0
0
0.786517
1
0.044944
false
0
0.05618
0
0.11236
0
0
0
0
null
1
1
0
1
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
6
de0a43ded272756910f0067260f0b014efe14338
1,208
py
Python
rotation.py
FanaticalFighter/cubinator
67a2a0109000d7cf0cbd0e5550a4c16fd19318fa
[ "MIT" ]
null
null
null
rotation.py
FanaticalFighter/cubinator
67a2a0109000d7cf0cbd0e5550a4c16fd19318fa
[ "MIT" ]
null
null
null
rotation.py
FanaticalFighter/cubinator
67a2a0109000d7cf0cbd0e5550a4c16fd19318fa
[ "MIT" ]
null
null
null
from point import *
import math


def _rotated(point, matrix):
    # Delegate to the point's own rotation hook with the given 3x3 matrix.
    return point.return_rotation(matrix)


def rotate_about_x_clockwise(point):
    """Rotate *point* 90 degrees about the x axis (clockwise per this module's naming)."""
    return _rotated(point, [[1, 0, 0], [0, 0, -1], [0, 1, 0]])


def rotate_about_x_counter_clockwise(point):
    """Rotate *point* 90 degrees about the x axis, opposite direction."""
    return _rotated(point, [[1, 0, 0], [0, 0, 1], [0, -1, 0]])


def rotate_about_y_clockwise(point):
    """Rotate *point* 90 degrees about the y axis (clockwise per this module's naming)."""
    return _rotated(point, [[0, 0, 1], [0, 1, 0], [-1, 0, 0]])


def rotate_about_y_counter_clockwise(point):
    """Rotate *point* 90 degrees about the y axis, opposite direction."""
    return _rotated(point, [[0, 0, -1], [0, 1, 0], [1, 0, 0]])


def rotate_about_z_clockwise(point):
    """Rotate *point* 90 degrees about the z axis (clockwise per this module's naming)."""
    return _rotated(point, [[0, -1, 0], [1, 0, 0], [0, 0, 1]])


def rotate_about_z_counter_clockwise(point):
    """Rotate *point* 90 degrees about the z axis, opposite direction."""
    return _rotated(point, [[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
26.844444
49
0.522351
146
1,208
4.054795
0.116438
0.054054
0.070946
0.054054
0.930743
0.918919
0.89527
0.89527
0.89527
0.89527
0
0.069588
0.357616
1,208
44
50
27.454545
0.693299
0
0
0.375
0
0
0
0
0
0
0
0
0
1
0.1875
false
0
0.0625
0
0.4375
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
a9bca7de7544546c5ad12095d75659ec955585c1
498
py
Python
highlightDemo/test.py
zhaouv/vscode-markdown-everywhere
52ad5d80a8850fd266d3e84b93a6673476d267bf
[ "Apache-2.0" ]
7
2021-01-21T09:20:20.000Z
2022-02-25T11:09:06.000Z
highlightDemo/test.py
zhaouv/vscode-markdown-everywhere
52ad5d80a8850fd266d3e84b93a6673476d267bf
[ "Apache-2.0" ]
6
2020-08-10T04:46:58.000Z
2021-05-16T14:21:35.000Z
highlightDemo/test.py
zhaouv/vscode-markdown-everywhere
52ad5d80a8850fd266d3e84b93a6673476d267bf
[ "Apache-2.0" ]
null
null
null
def asd(): pass # [markdown] # # title # + content # content def dsa(): pass # MD # title # MD content # MD + list a=1 # %% [markdown] # # highlight python markdown cell # for the vscode-python data-science feature v=1 def abc(): """ xxx xxx xxx xxx """ pass ''' def asd(): pass # [markdown] # # title # + content # content def dsa(): pass # MD # title # MD content # MD + list v=1 def abc(): """ xxx xxx xxx xxx """ pass '''
8.440678
44
0.52008
64
498
4.046875
0.359375
0.138996
0.138996
0.138996
0.718147
0.718147
0.718147
0.718147
0.718147
0.532819
0
0.008982
0.329317
498
59
45
8.440678
0.766467
0.345382
0
0.375
0
0
0
0
0
0
0
0
0
1
0.375
false
0.375
0
0
0.375
0
0
0
0
null
0
0
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
6
a9d80734bcc9c0bfaff09f4f8d9d93cb56357c3d
6,675
py
Python
test/functional/test_cli.py
fedden/pluribus
73fb394b26623c897459ffa3e66d7a5cb47e9962
[ "MIT" ]
2
2020-01-12T07:59:56.000Z
2020-01-13T10:04:26.000Z
test/functional/test_cli.py
fedden/pluribus
73fb394b26623c897459ffa3e66d7a5cb47e9962
[ "MIT" ]
null
null
null
test/functional/test_cli.py
fedden/pluribus
73fb394b26623c897459ffa3e66d7a5cb47e9962
[ "MIT" ]
null
null
null
import os import pickle import shlex from typing import List import pytest from click.testing import CliRunner from poker_ai.cli.runner import cli os.environ["TESTING_SUITE"] = "1" pickle_dir = os.environ.get("LUT_DIR", os.path.abspath("research/blueprint_algo/")) @pytest.mark.parametrize("strategy_interval", [1]) @pytest.mark.parametrize("n_iterations", [5]) @pytest.mark.parametrize("lcfr_threshold", [0]) @pytest.mark.parametrize("discount_interval", [1]) @pytest.mark.parametrize("prune_threshold", [1]) @pytest.mark.parametrize("c", [0]) @pytest.mark.parametrize("n_players", [2]) @pytest.mark.parametrize("dump_iteration", [1]) @pytest.mark.parametrize("update_threshold", [0]) def test_train_multiprocess_async( strategy_interval: int, n_iterations: int, lcfr_threshold: int, discount_interval: int, prune_threshold: int, c: int, n_players: int, dump_iteration: int, update_threshold: int, ): """Test we can call the syncronous multiprocessing training CLI.""" runner = CliRunner() with runner.isolated_filesystem(): cli_str: str = f"""train start \ --strategy_interval {strategy_interval} \ --n_iterations {n_iterations} \ --lcfr_threshold {lcfr_threshold} \ --discount_interval {discount_interval} \ --prune_threshold {prune_threshold} \ --c {c} \ --n_players {n_players} \ --dump_iteration {dump_iteration} \ --update_threshold {update_threshold} \ --pickle_dir {pickle_dir} \ --multi_process \ --async_update_strategy \ --async_cfr \ --async_discount \ --async_serialise \ --nickname test """ cli_args: List[str] = shlex.split(cli_str) result = runner.invoke(cli, cli_args, catch_exceptions=True) @pytest.mark.parametrize("strategy_interval", [1]) @pytest.mark.parametrize("n_iterations", [5]) @pytest.mark.parametrize("lcfr_threshold", [0]) @pytest.mark.parametrize("discount_interval", [1]) @pytest.mark.parametrize("prune_threshold", [1]) @pytest.mark.parametrize("c", [0]) @pytest.mark.parametrize("n_players", [2]) @pytest.mark.parametrize("dump_iteration", [1]) 
@pytest.mark.parametrize("update_threshold", [0]) def test_train_multiprocess_sync( strategy_interval: int, n_iterations: int, lcfr_threshold: int, discount_interval: int, prune_threshold: int, c: int, n_players: int, dump_iteration: int, update_threshold: int, ): """Test we can call the syncronous multiprocessing training CLI.""" runner = CliRunner() with runner.isolated_filesystem(): cli_str: str = f"""train start \ --strategy_interval {strategy_interval} \ --n_iterations {n_iterations} \ --lcfr_threshold {lcfr_threshold} \ --discount_interval {discount_interval} \ --prune_threshold {prune_threshold} \ --c {c} \ --n_players {n_players} \ --dump_iteration {dump_iteration} \ --update_threshold {update_threshold} \ --pickle_dir {pickle_dir} \ --multi_process \ --sync_update_strategy \ --sync_cfr \ --sync_discount \ --sync_serialise \ --nickname test """ cli_args: List[str] = shlex.split(cli_str) result = runner.invoke(cli, cli_args, catch_exceptions=True) @pytest.mark.parametrize("strategy_interval", [1]) @pytest.mark.parametrize("n_iterations", [5]) @pytest.mark.parametrize("lcfr_threshold", [0]) @pytest.mark.parametrize("discount_interval", [1]) @pytest.mark.parametrize("prune_threshold", [1]) @pytest.mark.parametrize("c", [0]) @pytest.mark.parametrize("n_players", [2]) @pytest.mark.parametrize("dump_iteration", [1]) @pytest.mark.parametrize("update_threshold", [0]) def test_train_singleprocess( strategy_interval: int, n_iterations: int, lcfr_threshold: int, discount_interval: int, prune_threshold: int, c: int, n_players: int, dump_iteration: int, update_threshold: int, ): """Test we can call the syncronous multiprocessing training CLI.""" runner = CliRunner() with runner.isolated_filesystem(): cli_str: str = f"""train start \ --strategy_interval {strategy_interval} \ --n_iterations {n_iterations} \ --lcfr_threshold {lcfr_threshold} \ --discount_interval {discount_interval} \ --prune_threshold {prune_threshold} \ --c {c} \ --n_players {n_players} \ 
--dump_iteration {dump_iteration} \ --update_threshold {update_threshold} \ --pickle_dir {pickle_dir} \ --single_process \ --nickname test """ cli_args: List[str] = shlex.split(cli_str) result = runner.invoke(cli, cli_args, catch_exceptions=True) # TODO(fedden): Figure out a way to test the terminal game. # from os import kill, getpid # from multiprocessing import Queue, Process # from time import sleep # from threading import Timer # from signal import SIGINT # def test_terminal(): # """Test we can call the Terminal game.""" # n_secs_to_run: int = 5 # queue: Queue = Queue() # # runner = CliRunner() # cli_str: str = "play --pickle_dir . --debug_quick_start" # cli_args: List[str] = shlex.split(cli_str) # # def background(): # """Use a killable background process.""" # Timer(n_secs_to_run, lambda: kill(getpid(), SIGINT)).start() # result = runner.invoke(cli, cli_args, catch_exceptions=False) # queue.put(result) # # process = Process(target=background) # process.start() # while process.is_alive(): # sleep(0.1) # else: # result = queue.get() # import ipdb # # ipdb.set_trace() # assert result["exit_code"] == 0 # assert ( # "Results can be inconsistent, as execution was terminated" in results["output"] # )
37.083333
90
0.581873
690
6,675
5.391304
0.184058
0.072581
0.152419
0.070968
0.756989
0.752688
0.752688
0.752688
0.733065
0.733065
0
0.006826
0.297678
6,675
179
91
37.290503
0.786689
0.185768
0
0.839695
0
0
0.518567
0.012811
0
0
0
0.005587
0
1
0.022901
false
0
0.053435
0
0.076336
0.007634
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e70828378ae63848adf03c8f66e2ee3b43fb1618
120
py
Python
py/qaviton/scripts/examples/new_project/pages/home.py
qaviton/qaviton
112f1620af36e09031909bd36b7e388df577b75b
[ "Apache-2.0" ]
9
2018-09-06T10:27:55.000Z
2020-01-02T16:50:13.000Z
py/qaviton/scripts/examples/new_project/pages/home.py
qaviton/qaviton
112f1620af36e09031909bd36b7e388df577b75b
[ "Apache-2.0" ]
6
2019-06-05T09:44:21.000Z
2022-03-11T23:26:41.000Z
py/qaviton/scripts/examples/new_project/pages/home.py
qaviton/qaviton
112f1620af36e09031909bd36b7e388df577b75b
[ "Apache-2.0" ]
9
2018-09-21T14:47:40.000Z
2021-12-21T01:37:20.000Z
from tests.pages.components.page import Page
from tests.config.locators import locator


class HomePage(Page):
    """Page object for the home page.

    Currently inherits all behavior from Page unchanged; ``locator`` is
    imported but unused here — presumably for element locators to be added
    later (TODO confirm).
    """
    pass
17.142857
44
0.791667
17
120
5.588235
0.705882
0.189474
0
0
0
0
0
0
0
0
0
0
0.141667
120
6
45
20
0.92233
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.25
0.5
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
e72ffe065f91171d4da0b21bf3f019f50bcfeda6
163
py
Python
webFrameworkTest/tornadoProject/TornadoProject/routers.py
belingud/sources
9275cc653caf0422a50724f50d075b33c919db36
[ "Apache-2.0" ]
null
null
null
webFrameworkTest/tornadoProject/TornadoProject/routers.py
belingud/sources
9275cc653caf0422a50724f50d075b33c919db36
[ "Apache-2.0" ]
8
2019-08-11T16:24:06.000Z
2020-03-06T15:11:56.000Z
webFrameworkTest/tornadoProject/TornadoProject/routers.py
belingud/sources
9275cc653caf0422a50724f50d075b33c919db36
[ "Apache-2.0" ]
null
null
null
# Aggregate the per-app URL patterns into one project-wide routing table.
from Users.routers import urlpatterns as user_urlpatterns
from App.routers import urlpatterns as app_urlpatterns

# App routes are listed first, followed by Users routes.
urlpatterns = app_urlpatterns + user_urlpatterns
32.6
57
0.865031
21
163
6.52381
0.380952
0.189781
0.350365
0.379562
0
0
0
0
0
0
0
0
0.110429
163
4
58
40.75
0.944828
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
e730fc38fa81dc9b7571d0489add84c0d20e82e6
131
py
Python
app/main/__init__.py
GraceOswal/pitch-perfect
d781c6e0f55c11f2a5e5dceb952f6b2de3c47c3b
[ "MIT" ]
null
null
null
app/main/__init__.py
GraceOswal/pitch-perfect
d781c6e0f55c11f2a5e5dceb952f6b2de3c47c3b
[ "MIT" ]
null
null
null
app/main/__init__.py
GraceOswal/pitch-perfect
d781c6e0f55c11f2a5e5dceb952f6b2de3c47c3b
[ "MIT" ]
null
null
null
from flask import Blueprint

# Blueprint for the app's "main" section; registered elsewhere by the
# application package (TODO confirm where).
main = Blueprint('main', __name__)

# NOTE(review): these imports come after the Blueprint is created —
# presumably so the imported view/error modules can reference ``main``
# (a common circular-import workaround); keep them at the bottom.
from . views import *
from app import views
from app import error
16.375
34
0.763359
19
131
5.052632
0.473684
0.270833
0.270833
0
0
0
0
0
0
0
0
0
0.175573
131
7
35
18.714286
0.888889
0
0
0
0
0
0.030534
0
0
0
0
0
0
1
0
false
0
0.8
0
0.8
0.4
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
e737249a5d453d5a0c8098652bee6d3375413447
12,658
py
Python
adminTest.py
joeyw526/Personal
52849526810f9b11947aeabafe56ecabbc68f04f
[ "MIT" ]
null
null
null
adminTest.py
joeyw526/Personal
52849526810f9b11947aeabafe56ecabbc68f04f
[ "MIT" ]
null
null
null
adminTest.py
joeyw526/Personal
52849526810f9b11947aeabafe56ecabbc68f04f
[ "MIT" ]
null
null
null
from admin import Admin from db import Session import unittest from user import User from datetime import * from sqlalchemy import exc import random import string # volunteer contains: name, email, passwordhash, phone, last_active, # birthdate=None, permissions, bio=None, gender=None, # vhours=None, neighborhood=None, interests=None, # skills=None, education=None, availability=None, events=None class AdminTests(unittest.TestCase): #checks if the volunteer's fields are initialized correctly def test_01_init(self): N=10 email = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(N)) + '@gmail.com' mickey = Admin('Mickey Mouse', email, 'mouse', '0765434567', True, birthdate=date(2006, 6, 6), bio='Peace Walt', gender='Male') self.assertTrue(mickey.name == 'Mickey Mouse') #self.assertTrue(mickey.email == 'wood.jos@husky.neu.edu') #self.assertTrue(mickey.passwordhash == 'mouse') self.assertTrue(mickey.phone == '0765434567') self.assertTrue(mickey.master) #self.assertTrue(mickey.last_active == ) #self.assertTrue(mickey.birthdate == '06/06/2006') self.assertTrue(mickey.permissions == 'admin') self.assertTrue(mickey.bio == 'Peace Walt') self.assertTrue(mickey.gender == 'Male') #test object write to the database. 
def test_02_db_write(self): N=15 email = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(N)) + '@gmail.com' mickey = Admin('Mickey Mouse', email, 'mouse', '0765434567', True, birthdate=date(2006, 6, 6), bio='Peace Walt', gender='Male') s = Session() try: s.add(mickey) s.commit() s.close() self.assertTrue(True) except exc.SQLAlchemyError: self.assertTrue(False) # checks if the volunteer was added to the database after initialization def test_03_queryName(self): session = Session() mickey = Admin('Mickey Mouse', 'mickey@disney.com', 'mouse', '0765434567', True, birthdate=date(2006, 6, 6), bio='Peace Walt', gender='Male') sickey = session.query(Admin).filter_by(name='Mickey Mouse').first() self.assertTrue(mickey.name == sickey.name) #self.assertTrue(mickey.email == sickey.email) #self.assertTrue(mickey.passwordhash == sickey.passwordhash) self.assertTrue(mickey.phone == sickey.phone) self.assertTrue(mickey.master) #self.assertTrue(mickey.last_active == ) self.assertTrue(mickey.birthdate == sickey.birthdate) self.assertTrue(mickey.permissions == sickey.permissions) self.assertTrue(mickey.bio == sickey.bio) self.assertTrue(mickey.gender == sickey.gender) # checks if the volunteer can be queried by phone def test_05_queryPhone(self): session = Session() mickey = Admin('Mickey Mouse', 'mickey@disney.com', 'mouse', '0765434567', True, birthdate=date(2006, 6, 6), bio='Peace Walt', gender='Male') sickey = session.query(Admin).filter_by(name='Mickey Mouse').first() self.assertTrue(mickey.name == sickey.name) #cself.assertTrue(mickey.email == sickey.email) #self.assertTrue(mickey.passwordhash == sickey.passwordhash) self.assertTrue(mickey.phone == sickey.phone) self.assertTrue(mickey.master) #self.assertTrue(mickey.last_active == ) self.assertTrue(mickey.birthdate == sickey.birthdate) self.assertTrue(mickey.permissions == sickey.permissions) self.assertTrue(mickey.bio == sickey.bio) self.assertTrue(mickey.gender == 
sickey.gender) def test_06_updating_name(self): session = Session() mickey = session.query(User).filter_by(name='Mickey Mouse').first() q = session.query(User).filter_by(id=mickey.id) q = q.update({"name":"Wood Joey"}) mickey = session.query(User).filter_by(id=mickey.id).first() self.assertTrue(mickey.name == 'Wood Joey') session.close() def test_07_updating_email(self): session = Session() mickey = session.query(User).filter_by(name='Mickey Mouse').first() q = session.query(User).filter_by(id=mickey.id) q = q.update({"email":"jos.wood1@husky.neu.edu"}) mickey = session.query(User).filter_by(id=mickey.id).first() self.assertTrue(mickey.email == 'jos.wood1@husky.neu.edu') session.close() def test_08_phone_long(self): N=10 email = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(N)) + '@gmail.com' self.assertRaises(ValueError, Admin, 'Mickey Mouse', email, 'mouse', '07654345677', True, birthdate=date(2006, 6, 6), bio='Peace Walt', gender='Male') def test_09_phone_short(self): N=10 email = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(N)) + '@gmail.com' self.assertRaises(ValueError, Admin, 'Mickey Mouse', email, 'mouse', '076543456', True, birthdate=date(2006, 6, 6), bio='Peace Walt', gender='Male') def test_10_phone_letters(self): N=10 email = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(N)) + '@gmail.com' self.assertRaises(ValueError, Admin, 'Mickey Mouse', email, 'mouse', 'abcdefghij', True, birthdate=date(2006, 6, 6), bio='Peace Walt', gender='Male') #unit test for password hashing def test_11_password_hash(self): session = Session() N=10 email = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(N)) + '@gmail.com' mickey = Admin('Mickey Mouse', email, 'mouse', '0765434567', True, birthdate=date(2006, 6, 6), bio='Peace Walt', gender='Male') try: 
session.add(mickey) session.commit() rickey = session.query(Admin).filter_by(phone='0765434567').first() self.assertTrue(mickey.passwordhash != 'mouse') self.assertTrue(rickey.passwordhash != 'mouse') self.assertTrue(mickey.check_password('mouse')) self.assertFalse(mickey.check_password('mouse2')) session.close() self.assertTrue(True) except exc.SQLAlchemyError: self.assertTrue(False) # # Email is valid # def test_phone_number_symbol(self): # joey = Volunteer('Joey Wood', 'wood.jos@husky.neu.edu', 'lit', '3015559721', '05/26/1990', # bio='Snell rhymes with hell', gender='Male', vhours=0, neighborhood="Back Bay", interests="Teaching", skills="Teaching", education="College", # availability="Mondays @ 3pm - 6pm", events="") # self.assertRaises(ValueError, 'Email must be vald') # # Phone is a string of 10 ints # def test_phone_number_symbol(self): # joey = Volunteer('Joey Wood', 'wood.jos@husky.neu.edu', 'lit', '3015559721', '05/26/1990', # bio='Snell rhymes with hell', gender='Male', vhours=0, neighborhood="Back Bay", interests="Teaching", skills="Teaching", education="College", # availability="Mondays @ 3pm - 6pm", events="") # self.assertRaises(ValueError, 'Phone numbers must be a string of 10 integers') # # Phone is a string of 10 ints # def test_phone_number<10(self): # joey = Volunteer('Joey Wood', 'wood.jos@husky.neu.edu', 'lit', '3015559721', '05/26/1990', # bio='Snell rhymes with hell', gender='Male', vhours=0, neighborhood="Back Bay", interests="Teaching", skills="Teaching", education="College", # availability="Mondays @ 3pm - 6pm", events="") # self.assertRaises(ValueError, 'Phone numbers must be a string of 10 integers') # Phone is a string of 10 ints # def test_phone_number>10(self): # joey = Volunteer('Joey Wood', 'wood.jos@husky.neu.edu', 'lit', '3015559721', '05/26/1990', # bio='Snell rhymes with hell', gender='Male', vhours=0, neighborhood="Back Bay", interests="Teaching", skills="Teaching", education="College", # availability="Mondays @ 3pm - 6pm", 
events="") # self.assertRaises(ValueError, 'Phone numbers must be a string of 10 integers') # # joey.last_active_is a string - should be in the form mm/dd/yyyy, hh:mm # def test_last_active_format0(self): # joey = Volunteer('Joey Wood', 'wood.jos@husky.neu.edu', 'lit', '3015559721', '05/26/1990', # bio='Snell rhymes with hell', gender='Male', vhours=0, neighborhood="Back Bay", interests="Teaching", skills="Teaching", education="College", # availability="Mondays @ 3pm - 6pm", events="") # self.assertRaises(ValueError, 'last active must be in the form mm/dd/yyyy hh:mm') # # joey.last_active_is a string - should be in the form mm/dd/yyyy, hh:mm # def test_last_active_format1(self): # joey = Volunteer('Joey Wood', 'wood.jos@husky.neu.edu', 'lit', '3015559721', '05/26/1990', # bio='Snell rhymes with hell', gender='Male', vhours=0, neighborhood="Back Bay", interests="Teaching", skills="Teaching", education="College", # availability="Mondays @ 3pm - 6pm", events="") # self.assertRaises(ValueError, 'last active must be in the form mm/dd/yyyy hh:mm') # # joey.last_active_must be in the past # def test_last_active_past(self): # joey = Volunteer('Joey Wood', 'wood.jos@husky.neu.edu', 'lit', '3015559721', '05/26/1990' # bio='Snell rhymes with hell', gender='Male', vhours=0, neighborhood="Back Bay", interests="Teaching", skills="Teaching", education="College", # availability="Mondays @ 3pm - 6pm", events="") # self.assertRaises(ValueError, 'last active must be in the form mm/dd/yyyy hh:mm') # # joey.birthday is a string - should be in form mm/dd/yyyy # def test_birthday_format0(self): # joey = Volunteer('Joey Wood', 'wood.jos@husky.neu.edu', 'lit', '3015559721', '05/26/1990', # bio='Snell rhymes with hell', gender='Male', vhours=0, neighborhood="Back Bay", interests="Teaching", skills="Teaching", education="College", # availability="Mondays @ 3pm - 6pm", events="") # self.assertRaises(ValueError, 'birthday must be in the form mm/dd/yyyy') # # joey.birthday is a string - should 
be in the form mm/dd/yyyy # def test_birthday_format1(self): # joey = Volunteer('Joey Wood', 'wood.jos@husky.neu.edu', 'lit', '3015559721', '05/26/1990', # bio='Snell rhymes with hell', gender='Male', vhours=0, neighborhood="Back Bay", interests="Teaching", skills="Teaching", education="College", # availability="Mondays @ 3pm - 6pm", events="") # self.assertRaises(ValueError, 'birthday must be in the form mm/dd/yyyy') # # joey.birthday is a string of letters - should be in the past # def test_birthday_past(self): # joey = Volunteer('Joey Wood', 'wood.jos@husky.neu.edu', 'lit', '3015559721', '05/26/1990', # bio='Snell rhymes with hell', gender='Male', vhours=0, neighborhood="Back Bay", interests="Teaching", skills="Teaching", education="College", # availability="Mondays @ 3pm - 6pm", events="") # self.assertRaises(ValueError, 'birthday must be in the past') # These tests require the Interest and Skills Enumerations to be created # # joey.interests should exist in the interests table # def test_interests_exists(self): # session = Session() # self.assertEqual(self.joey.interests, session.query(Interests).filter_by(name=self.joey.interests).first()) # session.close() # # # joey.skills should exist in the skills table # def test_skills_exists(self): # session = Session() # self.assertEqual(self.joey.skills, session.query(Skills).filter_by(name=self.joey.skills).first()) # session.close() if __name__ == '__main__': unittest.main()
51.877049
167
0.622926
1,518
12,658
5.125165
0.121871
0.06838
0.084833
0.021208
0.803728
0.786504
0.775193
0.756684
0.741131
0.741131
0
0.042591
0.237636
12,658
243
168
52.090535
0.763627
0.498815
0
0.583333
0
0
0.099904
0.007377
0
0
0
0
0.305556
1
0.092593
false
0.046296
0.074074
0
0.175926
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e7511ea5f51a988256bd02d1b83507612d5f5721
6,040
py
Python
notorhot/contrib/write_in/_tests/integration.py
sbnoemi/notorhot
e8a90a41147a511f6d0f4ab99a2e30ab92b5e70b
[ "BSD-3-Clause" ]
3
2015-02-11T16:49:50.000Z
2020-04-30T17:33:18.000Z
notorhot/contrib/write_in/_tests/integration.py
sbnoemi/notorhot
e8a90a41147a511f6d0f4ab99a2e30ab92b5e70b
[ "BSD-3-Clause" ]
null
null
null
notorhot/contrib/write_in/_tests/integration.py
sbnoemi/notorhot
e8a90a41147a511f6d0f4ab99a2e30ab92b5e70b
[ "BSD-3-Clause" ]
null
null
null
import datetime from mock import Mock, patch from django.test import TestCase from django.forms import ValidationError from django import forms from notorhot._tests.factories import mixer from notorhot.contrib.write_in.models import DefaultWriteIn from notorhot.contrib.write_in.views import WriteInDefaultView, \ WriteInThanksView class URLConfMixin(object): urls = 'notorhot.contrib.write_in._tests.urls' class WriteInDefaultViewTestCase(URLConfMixin, TestCase): def test_get_with_category(self): cat1 = mixer.blend('notorhot.CandidateCategory', slug='cat-slug') response = self.client.get('/write-in/cat-slug/') self.assertEqual(response.status_code, 200) self.assertIsInstance(response.context['view'], WriteInDefaultView) self.assertIsNotNone(response.context['category']) self.assertEqual(response.context['category'], cat1) self.assertIsNotNone(response.context['form']) self.assertEqual(response.context['form']._meta.model, DefaultWriteIn) self.assertTemplateUsed(response, 'write_in/defaultwritein_create.html') def test_get_non_public_category(self): cat1 = mixer.blend('notorhot.CandidateCategory', slug='cat-slug', \ is_public=False) response = self.client.get('/write-in/cat-slug/') self.assertEqual(response.status_code, 200) self.assertIsInstance(response.context['view'], WriteInDefaultView) self.assertIsNotNone(response.context['category']) self.assertEqual(response.context['category'], cat1) self.assertIsNotNone(response.context['form']) self.assertEqual(response.context['form']._meta.model, DefaultWriteIn) self.assertTemplateUsed(response, 'write_in/defaultwritein_create.html') def test_get_without_category(self): response = self.client.get('/write-in/') self.assertEqual(response.status_code, 200) self.assertIsInstance(response.context['view'], WriteInDefaultView) self.assertIsNone(response.context['category']) self.assertIsNotNone(response.context['form']) self.assertEqual(response.context['form']._meta.model, DefaultWriteIn) self.assertTemplateUsed(response, 
'write_in/defaultwritein_create.html') def test_get_invalid_category(self): # We should probably 404, but I'm still trying to figure out how to # do that without having to catch an exception in every CategoryMixin # subclass, and this is not ideal behavior, but neither is it # pathological. cat1 = mixer.blend('notorhot.CandidateCategory', slug='cat-slug', \ is_public=False) response = self.client.get('/write-in/wrong-slug/') self.assertEqual(response.status_code, 200) self.assertIsInstance(response.context['view'], WriteInDefaultView) self.assertIsNone(response.context['category']) self.assertIsNotNone(response.context['form']) self.assertEqual(response.context['form']._meta.model, DefaultWriteIn) self.assertTemplateUsed(response, 'write_in/defaultwritein_create.html') def test_invalid_form(self): with patch.object(forms.ModelForm, 'is_valid') as mock_is_valid: mock_is_valid.return_value = False response = self.client.post('/write-in/', data={}) self.assertEqual(response.status_code, 200) self.assertIsInstance(response.context['view'], WriteInDefaultView) self.assertIsNone(response.context['category']) self.assertIsNotNone(response.context['form']) self.assertEqual(response.context['form']._meta.model, DefaultWriteIn) self.assertTemplateUsed(response, 'write_in/defaultwritein_create.html') def test_success(self): cat1 = mixer.blend('notorhot.CandidateCategory', slug='cat-slug', id=1) self.assertEqual(DefaultWriteIn.objects.count(), 0) with patch.object(forms.ModelForm, 'is_valid') as mock_is_valid: mock_is_valid.return_value = True response = self.client.post('/write-in/', data={ 'candidate_name': 'candidate', 'submitter_name': 'submitter', 'submitter_email': 'submitter@example.com', 'category': 1, }) self.assertEqual(response.status_code, 302) self.assertRedirects(response, '/write-in/cat-slug/thanks/') self.assertEqual(DefaultWriteIn.objects.count(), 1) self.assertEqual(cat1.defaultwritein_write_ins.count(), 1) class WriteInThanksViewTestCase(URLConfMixin, 
TestCase): def test_success(self): cat1 = mixer.blend('notorhot.CandidateCategory', slug='cat-slug') response = self.client.get('/write-in/cat-slug/thanks/') self.assertEqual(response.status_code, 200) self.assertIsInstance(response.context['view'], WriteInThanksView) self.assertIsNotNone(response.context['category']) self.assertEqual(response.context['category'], cat1) self.assertTemplateUsed(response, 'write_in/thanks.html') def test_invalid_category(self): response = self.client.get('/write-in/cat-slug/thanks/') self.assertEqual(response.status_code, 404) def test_non_public_category(self): cat1 = mixer.blend('notorhot.CandidateCategory', slug='cat-slug', is_public=False) response = self.client.get('/write-in/cat-slug/thanks/') self.assertEqual(response.status_code, 200) self.assertIsInstance(response.context['view'], WriteInThanksView) self.assertIsNotNone(response.context['category']) self.assertEqual(response.context['category'], cat1) self.assertTemplateUsed(response, 'write_in/thanks.html')
46.10687
84
0.668543
627
6,040
6.323764
0.185008
0.105927
0.104414
0.065826
0.793695
0.755359
0.755359
0.729887
0.716772
0.716772
0
0.009723
0.216722
6,040
131
85
46.10687
0.828366
0.034437
0
0.597938
0
0
0.156314
0.088195
0
0
0
0
0.494845
1
0.092784
false
0
0.082474
0
0.216495
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
6
e7b04a90ff0c5ed8f46a427472bf5156c8a718da
1,772
py
Python
pac_settings.py
maximilan/test
6cc8240f207efa332fe846a1ba2e9a1c6556f07f
[ "MIT" ]
null
null
null
pac_settings.py
maximilan/test
6cc8240f207efa332fe846a1ba2e9a1c6556f07f
[ "MIT" ]
null
null
null
pac_settings.py
maximilan/test
6cc8240f207efa332fe846a1ba2e9a1c6556f07f
[ "MIT" ]
null
null
null
q1 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] q2 = [0,3,1,1,1,0,1,1,1,0,1,1,1,1,0,0] q3 = [0,0,1,0,1,1,1,0,1,1,1,0,0,1,0,0] q4 = [0,1,1,1,1,0,1,0,1,0,1,0,0,1,0,0] q5 = [0,1,0,0,0,0,1,0,1,1,1,1,1,1,1,0] q6 = [0,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0] q7 = [0,1,0,0,1,0,0,1,0,1,1,0,2,0,1,0] q8 = [0,1,1,1,1,1,1,1,0,1,0,0,1,0,1,0] q9 = [0,0,1,0,0,1,0,0,0,1,1,1,1,1,1,0] q10 = [0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0] q11 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] q = [q1,q2,q3,q4,q5,q6,q7,q8,q9,q10,q11] t1 = [0,0,0,0,0,0,0,0,0,0,0,0] t2 = [0,3,1,1,1,1,1,1,1,1,1,0] t3 = [0,1,0,1,0,1,0,0,1,0,1,0] t4 = [0,1,1,1,0,1,0,1,1,0,1,0] t5 = [0,1,0,1,0,1,1,1,0,0,1,0] t6 = [0,1,0,1,0,1,1,0,0,0,1,0] t7 = [0,1,1,1,1,1,1,1,1,1,1,0] t8 = [0,1,0,1,0,1,0,0,1,0,1,0] t9 = [0,1,0,1,0,1,0,0,1,0,1,0] t10 = [0,1,1,1,1,1,2,0,1,0,1,0] t11 = [0,1,0,0,0,0,0,0,1,0,1,0] t12 = [0,1,1,1,1,1,1,1,1,1,1,0] t13 = [0,0,0,0,0,0,0,0,0,0,0,0] t = [t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13] w1 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] w2 = [0,0,0,0,1,1,1,0,1,1,1,0,0,0,0] w3 = [0,0,0,0,1,0,1,0,1,0,1,0,0,0,0] w4 = [0,1,1,1,1,1,1,1,1,1,1,1,1,1,0] w5 = [0,1,0,0,1,0,1,0,1,0,1,0,0,1,0] w6 = [0,1,1,1,1,1,1,1,1,1,1,1,1,3,0] w7 = [0,0,0,0,1,0,1,0,1,0,1,0,0,0,0] w8 = [0,0,0,0,1,0,1,0,1,0,1,0,0,0,0] w9 = [0,0,0,0,1,1,1,0,1,1,1,0,0,0,0] w10 = [0,0,0,0,2,0,0,0,0,0,0,0,0,0,0] w11 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] w = [w1,w2,w3,w4,w5,w6,w7,w8,w9,w10,w11] settings = [w,t,q] ghostnumber = [6,6,4] class Setting(): def __init__(self,canvas, level): self.setting = settings[level-1] self.ghostnumber = ghostnumber[level-1] def return_setting(self): return self.setting def return_ghostnumber(self): return self.ghostnumber def return_maxlevel(self): return len(settings)
31.642857
60
0.507336
616
1,772
1.448052
0.090909
0.365471
0.406951
0.434978
0.543722
0.53139
0.506726
0.430493
0.41704
0.375561
0
0.379845
0.126411
1,772
55
61
32.218182
0.196382
0
0
0
0
0
0
0
0
0
0
0
0
1
0.08
false
0
0
0.06
0.16
0
0
0
1
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e7d94fd30214979c9c2b51dcd959bf495f95949a
6,233
py
Python
forms-flow-api/src/api/resources/task.py
gitter-badger/forms-flow-ai
d012566902120a24d02a7c1dad9053fefd17d24d
[ "Apache-2.0" ]
null
null
null
forms-flow-api/src/api/resources/task.py
gitter-badger/forms-flow-ai
d012566902120a24d02a7c1dad9053fefd17d24d
[ "Apache-2.0" ]
11
2021-06-02T04:42:50.000Z
2022-02-14T07:24:15.000Z
forms-flow-api/src/api/resources/task.py
gitter-badger/forms-flow-ai
d012566902120a24d02a7c1dad9053fefd17d24d
[ "Apache-2.0" ]
null
null
null
"""API endpoints for managing task resource.""" import logging import sys, traceback from http import HTTPStatus from flask import request from flask_restx import Namespace, Resource from api.services import TaskService from api.utils.auth import auth from api.utils.util import cors_preflight API = Namespace("Task", description="Task") @cors_preflight("GET,OPTIONS") @API.route("", methods=["GET", "OPTIONS"]) class TaskList(Resource): """Resource for managing tasks.""" @staticmethod @auth.require def get(): """List all tasks.""" return ( ( { "tasks": TaskService.get_all_tasks( token=request.headers["Authorization"] ) } ), HTTPStatus.OK, ) @cors_preflight("GET,OPTIONS") @API.route("/<string:task_id>", methods=["GET", "OPTIONS"]) class Task(Resource): """Resource for managing tasks.""" @staticmethod @auth.require def get(task_id): """List specific tasks.""" return ( ( { "task": TaskService.get_task( task_id=task_id, token=request.headers["Authorization"] ) } ), HTTPStatus.OK, ) @cors_preflight("POST,OPTIONS") @API.route("/<string:task_id>/claim", methods=["POST", "OPTIONS"]) class TaskClaim(Resource): """Resource for claim task.""" @staticmethod @auth.require def post(task_id): """Claim a task.""" request_json = request.get_json() try: return ( ( { "tasks": TaskService.claim_task( task_id=task_id, data=request_json, token=request.headers["Authorization"], ) } ), HTTPStatus.OK, ) except KeyError as err: exc_traceback = sys.exc_info() response, status = ( { "type": "Invalid Request Object", "message": "Required fields are not passed", "errors": err.messages, }, HTTPStatus.BAD_REQUEST, ) logging.exception(response) logging.exception(err) # traceback.print_tb(exc_traceback) return response, status except BaseException as err: exc_traceback = sys.exc_info() response, status = { "type": "Bad request error", "message": "Invalid request data object", }, HTTPStatus.BAD_REQUEST logging.exception(response) logging.exception(err) # traceback.print_tb(exc_traceback) return 
response, status @cors_preflight("POST,OPTIONS") @API.route("/<string:task_id>/unclaim", methods=["POST", "OPTIONS"]) class TaskUnClaim(Resource): """Resource for claim task.""" @staticmethod @auth.require def post(task_id): """Unclaim a task.""" request_json = request.get_json() try: return ( ( { "tasks": TaskService.unclaim_task( task_id=task_id, data=request_json, token=request.headers["Authorization"], ) } ), HTTPStatus.OK, ) except KeyError as err: exc_traceback = sys.exc_info() response, status = ( { "type": "Invalid Request Object", "message": "Required fields are not passed", "errors": err.messages, }, HTTPStatus.BAD_REQUEST, ) logging.exception(response) logging.exception(err) # traceback.print_tb(exc_traceback) except BaseException as err: exc_traceback = sys.exc_info() response, status = { "type": "Bad request error", "message": "Invalid request data object", }, HTTPStatus.BAD_REQUEST logging.exception(response) logging.exception(err) # traceback.print_tb(exc_traceback) return response, status @cors_preflight("POST,OPTIONS") @API.route("/<string:task_id>/complete", methods=["POST", "OPTIONS"]) class TaskComplete(Resource): """Resource for claim task.""" @staticmethod @auth.require def post(task_id): """Complete a task.""" request_json = request.get_json() try: return ( ( { "tasks": TaskService.complete_task( task_id=task_id, data=request_json, token=request.headers["Authorization"], ) } ), HTTPStatus.OK, ) except KeyError as err: exc_traceback = sys.exc_info() response, status = ( { "type": "Invalid Request Object", "message": "Required fields are not passed", "errors": err.messages, }, HTTPStatus.BAD_REQUEST, ) logging.exception(response) logging.exception(err) # traceback.print_tb(exc_traceback) return response, status except BaseException as err: exc_traceback = sys.exc_info() response, status = { "type": "Bad request error", "message": "Invalid request data object", }, HTTPStatus.BAD_REQUEST logging.exception(response) logging.exception(err) # 
traceback.print_tb(exc_traceback) return response, status
28.591743
79
0.485641
521
6,233
5.679463
0.15547
0.032443
0.016222
0.034471
0.803312
0.797905
0.77729
0.77729
0.743156
0.728287
0
0
0.413926
6,233
217
80
28.723502
0.810019
0.074282
0
0.626506
0
0
0.117534
0.012962
0
0
0
0
0
1
0.03012
false
0.018072
0.048193
0
0.168675
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
8213c0a2cd676b7925cff0db9e231e4ebd8d7fff
391
py
Python
src/test/python/testDataSetRepo/bad_provider/library/a.py
ninjapapa/SMV2
42cf9f176c3ec0bed61f66fbf859c18d97027dd6
[ "Apache-2.0" ]
null
null
null
src/test/python/testDataSetRepo/bad_provider/library/a.py
ninjapapa/SMV2
42cf9f176c3ec0bed61f66fbf859c18d97027dd6
[ "Apache-2.0" ]
34
2022-02-26T04:27:34.000Z
2022-03-29T23:05:47.000Z
src/test/python/testDataSetRepo/bad_provider/library/a.py
ninjapapa/SMV2
42cf9f176c3ec0bed61f66fbf859c18d97027dd6
[ "Apache-2.0" ]
null
null
null
from smv.provider import SmvProvider class MyBaseProvider(SmvProvider): @staticmethod def provider_type(): return "aaa" # ERROR: two classes below with same provider fqn "aaa.bbb" class MyConcreteProvider1(MyBaseProvider): @staticmethod def provider_type(): return "bbb" class MyConcreteProvider2(MyBaseProvider): @staticmethod def provider_type(): return "bbb"
23
59
0.751918
42
391
6.928571
0.52381
0.154639
0.237113
0.278351
0.457045
0.343643
0.343643
0
0
0
0
0.006116
0.163683
391
16
60
24.4375
0.883792
0.14578
0
0.5
0
0
0.027108
0
0
0
0
0
0
1
0.3
true
0
0.1
0.3
0.7
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
1
0
0
6
821d0858e2fa55c3e13895153fe68a8dd4ba647a
48
py
Python
pi/stream_processor/__init__.py
DebasishMaji/PI
e293982cae8f8755d28d7b3de22966dc74759b90
[ "Apache-2.0" ]
null
null
null
pi/stream_processor/__init__.py
DebasishMaji/PI
e293982cae8f8755d28d7b3de22966dc74759b90
[ "Apache-2.0" ]
null
null
null
pi/stream_processor/__init__.py
DebasishMaji/PI
e293982cae8f8755d28d7b3de22966dc74759b90
[ "Apache-2.0" ]
null
null
null
from .producer import * from .consumer import *
16
23
0.75
6
48
6
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.166667
48
2
24
24
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
413741323ef8e113796b0a8221b2aeedc83e9f0a
175
py
Python
thirdparty/blender_autocomplete-master/2.82/gpu/__init__.py
Ray1184/HPMSBatch
3852710e7366361cb9e90f471ddccbbce5ffe8ee
[ "MIT" ]
null
null
null
thirdparty/blender_autocomplete-master/2.82/gpu/__init__.py
Ray1184/HPMSBatch
3852710e7366361cb9e90f471ddccbbce5ffe8ee
[ "MIT" ]
null
null
null
thirdparty/blender_autocomplete-master/2.82/gpu/__init__.py
Ray1184/HPMSBatch
3852710e7366361cb9e90f471ddccbbce5ffe8ee
[ "MIT" ]
null
null
null
import sys import typing from . import shader from . import matrix from . import types GPU_DYNAMIC_MIST_ENABLE: float = None '''See bpy.types.WorldMistSettings.use_mist. '''
19.444444
48
0.777143
25
175
5.28
0.68
0.227273
0
0
0
0
0
0
0
0
0
0
0.137143
175
8
49
21.875
0.874172
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.833333
0
0.833333
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
418443a00118cc909e4e522b5db6cf35a9de23cc
96
py
Python
venv/lib/python3.8/site-packages/poetry/core/_vendor/pyrsistent/_pbag.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
2
2022-03-13T01:58:52.000Z
2022-03-31T06:07:54.000Z
venv/lib/python3.8/site-packages/poetry/core/_vendor/pyrsistent/_pbag.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
19
2021-11-20T04:09:18.000Z
2022-03-23T15:05:55.000Z
venv/lib/python3.8/site-packages/poetry/core/_vendor/pyrsistent/_pbag.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
null
null
null
/home/runner/.cache/pip/pool/33/85/67/472ce5852a9636bcda895e5ba65442c79097802c0a35f9f2b9f33323f5
96
96
0.895833
9
96
9.555556
1
0
0
0
0
0
0
0
0
0
0
0.479167
0
96
1
96
96
0.416667
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
6
4185f468d4b96607988fcad74a6edf93c5ec13ab
20
py
Python
program.py
vyahello/flask-template
446eb37f9b48d7eb89821ee9913baae77b0b462e
[ "MIT" ]
11
2019-08-18T09:02:52.000Z
2019-08-29T00:10:22.000Z
program.py
vyahello/flask-template
446eb37f9b48d7eb89821ee9913baae77b0b462e
[ "MIT" ]
8
2019-08-18T10:51:59.000Z
2020-05-09T21:01:59.000Z
program.py
vyahello/flask-template
446eb37f9b48d7eb89821ee9913baae77b0b462e
[ "MIT" ]
2
2018-06-23T03:07:31.000Z
2018-06-23T03:22:28.000Z
from src import app
10
19
0.8
4
20
4
1
0
0
0
0
0
0
0
0
0
0
0
0.2
20
1
20
20
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
41c1a2ef5f6c4cfea8a719372a30a77438d36e00
4,869
py
Python
test_google_calendar.py
jezhiggins/conference-calendar
2a579cd37b109c2875fe806e84efe53ec1a7019e
[ "MIT" ]
1
2020-03-02T13:15:27.000Z
2020-03-02T13:15:27.000Z
test_google_calendar.py
jezhiggins/conference-calendar
2a579cd37b109c2875fe806e84efe53ec1a7019e
[ "MIT" ]
52
2020-02-24T22:03:42.000Z
2021-07-30T05:51:35.000Z
test_google_calendar.py
jezhiggins/conference-calendar
2a579cd37b109c2875fe806e84efe53ec1a7019e
[ "MIT" ]
2
2020-03-01T16:58:18.000Z
2020-03-03T23:08:36.000Z
from google_calendar import parse_event_body, build_event_body from datetime import date from models import Event def test_parse(): response = { 'start': { 'date': '2000-01-01', }, 'end': { 'date': '2000-01-01', }, 'extendedProperties': { 'shared': { 'website': 'https://google.com', } }, 'description': 'foo\n\n<a href="https://google.com">https://google.com</a>', 'summary': 'test', } actual = parse_event_body(response) expected = Event( start_date=date(2000, 1, 1), end_date=date(2000, 1, 1), website='https://google.com', description='foo', title='test' ) assert actual == expected def test_build(): event = Event( start_date=date(2000, 1, 1), end_date=date(2000, 1, 1), website='https://google.com', description='foo', title='test' ) actual = build_event_body(event) expected = { 'start': { 'date': '2000-01-01', }, 'end': { 'date': '2000-01-01', }, 'extendedProperties': { 'shared': { 'website': 'https://google.com', } }, 'description': 'foo\n\n<a href="https://google.com">https://google.com</a>', 'summary': 'test', } assert actual == expected def test_build_multiday(): """ The end date in the API is the following day, not the last day of the conference https://developers.google.com/calendar/v3/reference/events/insert """ event = Event( start_date=date(2000, 1, 1), end_date=date(2000, 1, 2), website='https://google.com', description='foo', title='test' ) actual = build_event_body(event) expected = { 'start': { 'date': '2000-01-01', }, 'end': { 'date': '2000-01-03', }, 'extendedProperties': { 'shared': { 'website': 'https://google.com', } }, 'description': 'foo\n\n<a href="https://google.com">https://google.com</a>', 'summary': 'test', } assert actual == expected def test_build_multiday_on_month_boundary(): """ The end date in the API is the following day, not the last day of the conference https://developers.google.com/calendar/v3/reference/events/insert """ event = Event( start_date=date(2000, 12, 30), end_date=date(2000, 12, 31), website='https://google.com', 
description='the conference where we discuss dodgy datetime arithmetic', title='new years eve conf' ) actual = build_event_body(event) expected = { 'start': { 'date': '2000-12-30', }, 'end': { 'date': '2001-01-01', }, 'extendedProperties': { 'shared': { 'website': 'https://google.com', } }, 'description': 'the conference where we discuss dodgy datetime arithmetic\n\n<a href="https://google.com">https://google.com</a>', 'summary': 'new years eve conf', } assert actual == expected def test_parse_multiday(): response = { 'start': { 'date': '2000-01-01', }, 'end': { 'date': '2000-01-03', }, 'extendedProperties': { 'shared': { 'website': 'https://google.com', } }, 'description': 'foo\n\n<a href="https://google.com">https://google.com</a>', 'summary': 'test', } actual = parse_event_body(response) expected = Event( start_date=date(2000, 1, 1), end_date=date(2000, 1, 2), website='https://google.com', description='foo', title='test' ) assert actual == expected def test_parse_single_day_with_weird_end_date(): """ If the end date is 1 day after the start date, I think it's still supposed to be a single day event. https://developers.google.com/calendar/v3/reference/events/insert """ response = { 'start': { 'date': '2000-01-01', }, 'end': { 'date': '2000-01-02', }, 'extendedProperties': { 'shared': { 'website': 'https://google.com', } }, 'description': 'foo\n\n<a href="https://google.com">https://google.com</a>', 'summary': 'test', } actual = parse_event_body(response) expected = Event( start_date=date(2000, 1, 1), end_date=date(2000, 1, 1), website='https://google.com', description='foo', title='test' ) assert actual == expected
24.715736
138
0.507086
524
4,869
4.622137
0.156489
0.10033
0.138728
0.104046
0.882742
0.882742
0.858382
0.856317
0.856317
0.803055
0
0.054271
0.333949
4,869
197
139
24.715736
0.692569
0.094681
0
0.647059
0
0.039216
0.293536
0
0
0
0
0
0.039216
1
0.039216
false
0
0.019608
0
0.058824
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
68b873054b669b7df95ebe31a4bf12587d2125fc
13,084
py
Python
venv/lib/python3.7/site-packages/pystan/tests/test_extract.py
vchiapaikeo/prophet
e8c250ca7bfffc280baa7dabc80a2c2d1f72c6a7
[ "MIT" ]
null
null
null
venv/lib/python3.7/site-packages/pystan/tests/test_extract.py
vchiapaikeo/prophet
e8c250ca7bfffc280baa7dabc80a2c2d1f72c6a7
[ "MIT" ]
null
null
null
venv/lib/python3.7/site-packages/pystan/tests/test_extract.py
vchiapaikeo/prophet
e8c250ca7bfffc280baa7dabc80a2c2d1f72c6a7
[ "MIT" ]
null
null
null
import unittest

import numpy as np
from numpy.testing import assert_array_equal
# pandas.util.testing was removed in pandas 2.0; pandas.testing is the
# long-standing public location of the same helpers.
from pandas.testing import assert_series_equal

import pystan


class TestExtract(unittest.TestCase):
    """Integration tests for StanFit4Model.extract() and .to_dataframe().

    NOTE: the deprecated aliases ``np.int`` / ``np.float`` (removed in
    NumPy 1.24) have been replaced by the builtin ``int`` / ``float``,
    which NumPy documents as the drop-in replacements.
    """

    @classmethod
    def setUpClass(cls):
        # One shared model/fit for the whole class: sampling is expensive.
        ex_model_code = '''
        parameters {
            real alpha[2,3];
            real beta[2];
        }
        model {
            for (i in 1:2)
                for (j in 1:3)
                    alpha[i, j] ~ normal(0, 1);
            for (i in 1:2)
                beta ~ normal(0, 2);
        }
        '''
        cls.sm = sm = pystan.StanModel(model_code=ex_model_code)
        cls.fit = sm.sampling(chains=4, iter=2000)

    def test_extract_permuted(self):
        ss = self.fit.extract(permuted=True)
        alpha = ss['alpha']
        beta = ss['beta']
        lp__ = ss['lp__']
        self.assertEqual(sorted(ss.keys()), sorted({'alpha', 'beta', 'lp__'}))
        # 4 chains x 1000 post-warmup draws = 4000 permuted samples.
        self.assertEqual(alpha.shape, (4000, 2, 3))
        self.assertEqual(beta.shape, (4000, 2))
        self.assertEqual(lp__.shape, (4000,))
        self.assertTrue((~np.isnan(alpha)).all())
        self.assertTrue((~np.isnan(beta)).all())
        self.assertTrue((~np.isnan(lp__)).all())
        # extract one at a time
        alpha2 = self.fit.extract('alpha', permuted=True)['alpha']
        self.assertEqual(alpha2.shape, (4000, 2, 3))
        np.testing.assert_array_equal(alpha, alpha2)
        beta = self.fit.extract('beta', permuted=True)['beta']
        self.assertEqual(beta.shape, (4000, 2))
        lp__ = self.fit.extract('lp__', permuted=True)['lp__']
        self.assertEqual(lp__.shape, (4000,))

    def test_extract_permuted_false(self):
        fit = self.fit
        ss = fit.extract(permuted=False)
        num_samples = fit.sim['iter'] - fit.sim['warmup']
        # 9 flat parameters: 6 alpha + 2 beta + lp__.
        self.assertEqual(ss.shape, (num_samples, 4, 9))
        self.assertTrue((~np.isnan(ss)).all())

    def test_extract_permuted_false_pars(self):
        fit = self.fit
        ss = fit.extract(pars=['beta'], permuted=False)
        num_samples = fit.sim['iter'] - fit.sim['warmup']
        self.assertEqual(ss['beta'].shape, (num_samples, 4, 2))
        self.assertTrue((~np.isnan(ss['beta'])).all())

    def test_extract_permuted_false_pars_inc_warmup(self):
        fit = self.fit
        ss = fit.extract(pars=['beta'], inc_warmup=True, permuted=False)
        num_samples = fit.sim['iter']
        self.assertEqual(ss['beta'].shape, (num_samples, 4, 2))
        self.assertTrue((~np.isnan(ss['beta'])).all())

    def test_extract_permuted_false_inc_warmup(self):
        fit = self.fit
        ss = fit.extract(inc_warmup=True, permuted=False)
        num_samples = fit.sim['iter']
        self.assertEqual(ss.shape, (num_samples, 4, 9))
        self.assertTrue((~np.isnan(ss)).all())

    def test_extract_thin(self):
        sm = self.sm
        fit = sm.sampling(chains=4, iter=2000, thin=2)
        # permuted True
        ss = fit.extract(permuted=True)
        alpha = ss['alpha']
        beta = ss['beta']
        lp__ = ss['lp__']
        self.assertEqual(sorted(ss.keys()), sorted({'alpha', 'beta', 'lp__'}))
        # thin=2 halves the retained draws: 4 chains x 500 = 2000.
        self.assertEqual(alpha.shape, (2000, 2, 3))
        self.assertEqual(beta.shape, (2000, 2))
        self.assertEqual(lp__.shape, (2000,))
        self.assertTrue((~np.isnan(alpha)).all())
        self.assertTrue((~np.isnan(beta)).all())
        self.assertTrue((~np.isnan(lp__)).all())
        # permuted False
        ss = fit.extract(permuted=False)
        self.assertEqual(ss.shape, (500, 4, 9))
        self.assertTrue((~np.isnan(ss)).all())
        # permuted False inc_warmup True
        ss = fit.extract(inc_warmup=True, permuted=False)
        self.assertEqual(ss.shape, (1000, 4, 9))
        self.assertTrue((~np.isnan(ss)).all())

    def test_extract_dtype(self):
        dtypes = {"alpha": int, "beta": int}
        ss = self.fit.extract(dtypes=dtypes)
        alpha = ss['alpha']
        beta = ss['beta']
        lp__ = ss['lp__']
        self.assertEqual(alpha.dtype, np.dtype(int))
        self.assertEqual(beta.dtype, np.dtype(int))
        self.assertEqual(lp__.dtype, np.dtype(float))

    def test_extract_dtype_permuted_false(self):
        dtypes = {"alpha": int, "beta": int}
        pars = ['alpha', 'beta', 'lp__']
        ss = self.fit.extract(pars=pars, dtypes=dtypes, permuted=False)
        alpha = ss['alpha']
        beta = ss['beta']
        lp__ = ss['lp__']
        self.assertEqual(alpha.dtype, np.dtype(int))
        self.assertEqual(beta.dtype, np.dtype(int))
        self.assertEqual(lp__.dtype, np.dtype(float))

    def test_to_dataframe_permuted_true(self):
        ss = self.fit.extract(permuted=True)
        alpha = ss['alpha']
        beta = ss['beta']
        lp__ = ss['lp__']
        df = self.fit.to_dataframe(permuted=True)
        # 7 index/bookkeeping cols + 9 parameters + 6 sampler diagnostics.
        self.assertEqual(df.shape, (4000, 7 + 9 + 6))
        for idx in range(2):
            for jdx in range(3):
                name = 'alpha[{},{}]'.format(idx + 1, jdx + 1)
                assert_array_equal(df[name].values, alpha[:, idx, jdx])
        for idx in range(2):
            name = 'beta[{}]'.format(idx + 1)
            assert_array_equal(df[name].values, beta[:, idx])
        assert_array_equal(df['lp__'].values, lp__)
        # Test pars argument
        df = self.fit.to_dataframe(pars='alpha', permuted=True)
        self.assertEqual(df.shape, (4000, 7 + 6 + 6))
        for idx in range(2):
            for jdx in range(3):
                name = 'alpha[{},{}]'.format(idx + 1, jdx + 1)
                assert_array_equal(df[name].values, alpha[:, idx, jdx])
        # Test pars and dtype argument
        df = self.fit.to_dataframe(pars='alpha', dtypes={'alpha': int},
                                   permuted=True)
        alpha_int = ss['alpha'].astype(int)
        self.assertEqual(df.shape, (4000, 7 + 6 + 6))
        for idx in range(2):
            for jdx in range(3):
                name = 'alpha[{},{}]'.format(idx + 1, jdx + 1)
                assert_array_equal(df[name].values, alpha_int[:, idx, jdx])

    def test_to_dataframe_permuted_false_inc_warmup_false(self):
        fit = self.fit
        ss = fit.extract(permuted=False)
        df = fit.to_dataframe(permuted=False)
        num_samples = fit.sim['iter'] - fit.sim['warmup']
        num_chains = fit.sim['chains']
        self.assertEqual(df.shape, (num_samples * num_chains, 3 + 9 + 6))
        # Flat-parameter order in ss is column-major over (idx, jdx).
        alpha_index = 0
        for jdx in range(3):
            for idx in range(2):
                name = 'alpha[{},{}]'.format(idx + 1, jdx + 1)
                for n in range(num_chains):
                    assert_array_equal(
                        df.loc[df.chain == n, name].values, ss[:, n, alpha_index]
                    )
                alpha_index += 1
        for idx in range(2):
            name = 'beta[{}]'.format(idx + 1)
            for n in range(num_chains):
                assert_array_equal(
                    df.loc[df.chain == n, name].values, ss[:, n, 6 + idx]
                )
        for n in range(num_chains):
            assert_array_equal(df.loc[df.chain == n, 'lp__'].values, ss[:, n, -1])
        diagnostic_type = {'divergent': int, 'energy': float, 'treedepth': int,
                           'accept_stat': float, 'stepsize': float,
                           'n_leapfrog': int}
        for n in range(num_chains):
            assert_array_equal(
                df.chain.values[n * num_samples:(n + 1) * num_samples],
                n * np.ones(num_samples, dtype=int)
            )
            assert_array_equal(
                df.draw.values[n * num_samples:(n + 1) * num_samples],
                np.arange(num_samples, dtype=int)
            )
            for diag, diag_type in diagnostic_type.items():
                assert_array_equal(
                    df[diag + '__'].values[n * num_samples:(n + 1) * num_samples],
                    fit.get_sampler_params()[n][diag + '__'][-num_samples:].astype(diag_type)
                )

    def test_to_dataframe_permuted_false_inc_warmup_true(self):
        fit = self.fit
        ss = fit.extract(permuted=False, inc_warmup=True)
        df = fit.to_dataframe(permuted=False, inc_warmup=True)
        num_samples = fit.sim['iter']
        num_chains = fit.sim['chains']
        self.assertEqual(df.shape, (num_samples * num_chains, 3 + 9 + 6))
        alpha_index = 0
        for jdx in range(3):
            for idx in range(2):
                name = 'alpha[{},{}]'.format(idx + 1, jdx + 1)
                for n in range(num_chains):
                    assert_array_equal(
                        df.loc[df.chain == n, name].values, ss[:, n, alpha_index]
                    )
                alpha_index += 1
        for idx in range(2):
            name = 'beta[{}]'.format(idx + 1)
            for n in range(num_chains):
                assert_array_equal(
                    df.loc[df.chain == n, name].values, ss[:, n, 6 + idx]
                )
        for n in range(num_chains):
            assert_array_equal(df.loc[df.chain == n, 'lp__'].values, ss[:, n, -1])
            # Leading warmup rows are flagged 1, post-warmup rows 0.
            assert_array_equal(df.loc[
                n * fit.sim['n_save'][n]:n * fit.sim['n_save'][n] + fit.sim['warmup2'][n] - 1,
                'warmup'].values, np.ones(fit.sim['warmup2'][n]))
            assert_array_equal(df.loc[
                n * fit.sim['n_save'][n] + fit.sim['warmup2'][n]:
                (n + 1) * fit.sim['n_save'][n] - 1, 'warmup'].values,
                np.zeros(fit.sim['warmup2'][n]))
        diagnostic_type = {'divergent': int, 'energy': float, 'treedepth': int,
                           'accept_stat': float, 'stepsize': float,
                           'n_leapfrog': int}
        for n in range(num_chains):
            assert_array_equal(
                df.chain.values[n * num_samples:(n + 1) * num_samples],
                n * np.ones(num_samples, dtype=int)
            )
            # Draw numbering starts negative during warmup.
            assert_array_equal(
                df.draw.values[n * num_samples:(n + 1) * num_samples],
                np.arange(num_samples, dtype=int) - int(fit.sim['warmup'])
            )
            for diag, diag_type in diagnostic_type.items():
                assert_array_equal(
                    df[diag + '__'].values[n * num_samples:(n + 1) * num_samples],
                    fit.get_sampler_params()[n][diag + '__'][-num_samples:].astype(diag_type)
                )

    def test_to_dataframe_permuted_false_diagnostics_false(self):
        fit = self.fit
        ss = fit.extract(permuted=False)
        df = fit.to_dataframe(permuted=False, diagnostics=False)
        num_samples = fit.sim['iter'] - fit.sim['warmup']
        num_chains = fit.sim['chains']
        # No diagnostics: only 3 bookkeeping cols + 9 parameters.
        self.assertEqual(df.shape, (num_samples * num_chains, 3 + 9))
        alpha_index = 0
        for jdx in range(3):
            for idx in range(2):
                name = 'alpha[{},{}]'.format(idx + 1, jdx + 1)
                for n in range(num_chains):
                    assert_array_equal(
                        df[name].loc[df.chain == n].values, ss[:, n, alpha_index]
                    )
                alpha_index += 1
        for idx in range(2):
            name = 'beta[{}]'.format(idx + 1)
            for n in range(num_chains):
                assert_array_equal(
                    df[name].loc[df.chain == n].values, ss[:, n, 6 + idx]
                )
        for n in range(num_chains):
            assert_array_equal(df.loc[df.chain == n, 'lp__'].values, ss[:, n, -1])
        for n in range(num_chains):
            assert_array_equal(
                df.chain.values[n * num_samples:(n + 1) * num_samples],
                n * np.ones(num_samples, dtype=int)
            )
            assert_array_equal(
                df.draw.values[n * num_samples:(n + 1) * num_samples],
                np.arange(num_samples, dtype=int)
            )

    def test_to_dataframe_permuted_false_pars(self):
        fit = self.fit
        ss = fit.extract(permuted=False)
        df = fit.to_dataframe(permuted=False, pars='alpha')
        num_samples = fit.sim['iter'] - fit.sim['warmup']
        num_chains = fit.sim['chains']
        self.assertEqual(df.shape, (num_samples * num_chains, 3 + 6 + 6))
        alpha_index = 0
        for jdx in range(3):
            for idx in range(2):
                name = 'alpha[{},{}]'.format(idx + 1, jdx + 1)
                for n in range(num_chains):
                    assert_array_equal(
                        df[name].loc[df.chain == n].values, ss[:, n, alpha_index]
                    )
                alpha_index += 1
        diagnostic_type = {'divergent': int, 'energy': float, 'treedepth': int,
                           'accept_stat': float, 'stepsize': float,
                           'n_leapfrog': int}
        for n in range(num_chains):
            assert_array_equal(
                df.chain.values[n * num_samples:(n + 1) * num_samples],
                n * np.ones(num_samples, dtype=int)
            )
            assert_array_equal(
                df.draw.values[n * num_samples:(n + 1) * num_samples],
                np.arange(num_samples, dtype=int)
            )
            for diag, diag_type in diagnostic_type.items():
                assert_array_equal(
                    df[diag + '__'].values[n * num_samples:(n + 1) * num_samples],
                    fit.get_sampler_params()[n][diag + '__'][-num_samples:].astype(diag_type)
                )
41.801917
104
0.54945
1,704
13,084
4.032277
0.069836
0.071314
0.069859
0.073352
0.868578
0.828846
0.801485
0.78271
0.739339
0.721147
0
0.020854
0.299985
13,084
312
105
41.935897
0.729337
0.009859
0
0.669014
0
0
0.070976
0
0
0
0
0
0.257042
1
0.049296
false
0
0.017606
0
0.070423
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
ec048081853e9bcb89bdbc628e3078c09dc3dba3
99
py
Python
tgtypes/traits/base/inline_query.py
autogram/tgtypes
90f8d0d35d3c372767508e56c20777635e128e38
[ "MIT" ]
null
null
null
tgtypes/traits/base/inline_query.py
autogram/tgtypes
90f8d0d35d3c372767508e56c20777635e128e38
[ "MIT" ]
null
null
null
tgtypes/traits/base/inline_query.py
autogram/tgtypes
90f8d0d35d3c372767508e56c20777635e128e38
[ "MIT" ]
null
null
null
from tgtypes.traits.base.update import UpdateTrait


class InlineQueryTrait(UpdateTrait):
    """Trait for inline-query updates.

    Declares no members of its own; all behavior is inherited unchanged
    from :class:`UpdateTrait`.
    """
16.5
50
0.808081
11
99
7.272727
0.909091
0
0
0
0
0
0
0
0
0
0
0
0.131313
99
5
51
19.8
0.930233
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
ec50e2680265a2bcdecef8ee688ec2b1dda30429
101
py
Python
tests/t23/test_n122.py
ablearthy/ege2021kp-problem-solution
02fcf24adb1df2a92d19a73aaf9e335145169ba5
[ "Unlicense" ]
null
null
null
tests/t23/test_n122.py
ablearthy/ege2021kp-problem-solution
02fcf24adb1df2a92d19a73aaf9e335145169ba5
[ "Unlicense" ]
null
null
null
tests/t23/test_n122.py
ablearthy/ege2021kp-problem-solution
02fcf24adb1df2a92d19a73aaf9e335145169ba5
[ "Unlicense" ]
null
null
null
from ege_problem_solution.t23.n122 import solve


def test_solve():
    """solve(31, 1001) must reproduce the known reference answer."""
    expected = 56
    actual = solve(31, 1001)
    assert actual == expected
16.833333
47
0.732673
16
101
4.4375
0.875
0
0
0
0
0
0
0
0
0
0
0.154762
0.168317
101
5
48
20.2
0.690476
0
0
0
0
0
0
0
0
0
0
0
0.333333
1
0.333333
true
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
6
6b90046700146e8182ec0d685c2e02c3d908962b
87
py
Python
src/data/__init__.py
LCS2-IIITD/Code-mixed-classification
10ff6b9af034770b0c821b5fa3470d3ab4bb9957
[ "MIT" ]
null
null
null
src/data/__init__.py
LCS2-IIITD/Code-mixed-classification
10ff6b9af034770b0c821b5fa3470d3ab4bb9957
[ "MIT" ]
1
2022-03-04T04:11:52.000Z
2022-03-04T04:11:52.000Z
src/data/__init__.py
LCS2-IIITD/Hinglish_offense_detection-Neurocomputing2021
54d8e70d42cbc4597a4f4cc859e633618df57f30
[ "MIT" ]
1
2021-11-24T02:01:00.000Z
2021-11-24T02:01:00.000Z
from .data_utils import * from .preprocessing import * from .custom_tokenizers import *
29
32
0.804598
11
87
6.181818
0.636364
0.294118
0
0
0
0
0
0
0
0
0
0
0.126437
87
3
32
29
0.894737
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
6bb58bdf0f8670a15982885bb5fa641bfdffeec3
18
py
Python
plugins/default/metasploit_attacks/metasploit_mimikatz_t1003/metasploit_mimikatz_t1003.py
Thorsten-Sick/PurpleDome
297d746ef2e17a4207f8274b7fccbe2ce43c4a5f
[ "MIT" ]
7
2021-11-30T19:54:29.000Z
2022-03-05T23:15:23.000Z
plugins/default/metasploit_attacks/metasploit_mimikatz_t1003/metasploit_mimikatz_t1003.py
Thorsten-Sick/PurpleDome
297d746ef2e17a4207f8274b7fccbe2ce43c4a5f
[ "MIT" ]
null
null
null
plugins/default/metasploit_attacks/metasploit_mimikatz_t1003/metasploit_mimikatz_t1003.py
Thorsten-Sick/PurpleDome
297d746ef2e17a4207f8274b7fccbe2ce43c4a5f
[ "MIT" ]
2
2021-11-30T11:16:27.000Z
2022-02-02T13:36:01.000Z
# TODO: implement this module (currently an empty placeholder).
9
17
0.722222
2
18
6.5
1
0
0
0
0
0
0
0
0
0
0
0
0.166667
18
1
18
18
0.866667
0.833333
0
null
0
null
0
0
null
0
0
1
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
1
0
0
0
1
0
0
0
0
0
0
6
d42dc05f07df961f2c9c928161703fd742c42580
3,983
py
Python
helpscout/endpoints/reports/conversation.py
Gogen120/helpscout
7e884247f5cd59c75b12792e331b25e9873a4207
[ "MIT" ]
null
null
null
helpscout/endpoints/reports/conversation.py
Gogen120/helpscout
7e884247f5cd59c75b12792e331b25e9873a4207
[ "MIT" ]
null
null
null
helpscout/endpoints/reports/conversation.py
Gogen120/helpscout
7e884247f5cd59c75b12792e331b25e9873a4207
[ "MIT" ]
null
null
null
from typing import Dict

from helpscout.endpoints.endpoint import Endpoint


class Conversation(Endpoint):
    """Conversation report endpoint.

    Every public method GETs one sub-path of the conversations report API
    and returns the processed JSON payload; the shared request/unwrap
    plumbing lives in :meth:`_conversations_report`.
    """

    def _conversations_report(self, subpath: str, **params) -> Dict:
        """GET ``{base_url}/conversations{subpath}`` and unwrap the result.

        All keyword arguments are forwarded to ``base_get_request`` as
        query parameters.
        """
        response = self.base_get_request(
            f"{self.base_url}/conversations{subpath}", **params
        )
        return self.process_get_result(response)

    def overall_report(self, start: str, end: str, **kwargs) -> Dict:
        """Get conversation overall report.

        Doc page: https://developer.helpscout.com/mailbox-api/endpoints/reports/conversations/reports-conversations-overall/
        """
        return self._conversations_report("", start=start, end=end, **kwargs)

    def volumes_by_channel(self, start: str, end: str, **kwargs) -> Dict:
        """Get conversation volumes by channel.

        Doc page: https://developer.helpscout.com/mailbox-api/endpoints/reports/conversations/reports-conversations-volume-by-channel/
        """
        return self._conversations_report(
            "/volume-by-channel", start=start, end=end, **kwargs
        )

    def busiest_time_of_day(self, start: str, end: str, **kwargs) -> Dict:
        """Get conversation busiest time of day.

        Doc page: https://developer.helpscout.com/mailbox-api/endpoints/reports/conversations/reports-conversations-busy-times/
        """
        return self._conversations_report(
            "/busy-times", start=start, end=end, **kwargs
        )

    def drilldown(self, start: str, end: str, **kwargs) -> Dict:
        """Get conversation drilldown.

        Doc page: https://developer.helpscout.com/mailbox-api/endpoints/reports/conversations/reports-conversations-drilldown/
        """
        return self._conversations_report(
            "/drilldown", start=start, end=end, **kwargs
        )

    def drilldown_by_field(
        self, start: str, end: str, field: str, fieldid: int, **kwargs
    ) -> Dict:
        """Get conversation drilldown by field.

        Doc page: https://developer.helpscout.com/mailbox-api/endpoints/reports/conversations/reports-conversations-field-drilldown/
        """
        return self._conversations_report(
            "/fields-drilldown",
            start=start,
            end=end,
            field=field,
            fieldid=fieldid,
            **kwargs,
        )

    def new(self, start: str, end: str, **kwargs) -> Dict:
        """Get new conversations.

        Doc page: https://developer.helpscout.com/mailbox-api/endpoints/reports/conversations/reports-conversations-new/
        """
        return self._conversations_report("/new", start=start, end=end, **kwargs)

    def new_drilldown(self, start: str, end: str, **kwargs) -> Dict:
        """Get new conversations drilldown.

        Doc page: https://developer.helpscout.com/mailbox-api/endpoints/reports/conversations/reports-conversations-new-drilldown/
        """
        return self._conversations_report(
            "/new-drilldown", start=start, end=end, **kwargs
        )

    def received_messages(self, start: str, end: str, **kwargs) -> Dict:
        """Get conversation received messages statistics.

        Doc page: https://developer.helpscout.com/mailbox-api/endpoints/reports/conversations/reports-conversations-received-messages/
        """
        return self._conversations_report(
            "/received-messages", start=start, end=end, **kwargs
        )
35.5625
134
0.638966
440
3,983
5.670455
0.122727
0.128257
0.038477
0.048096
0.838076
0.807214
0.807214
0.807214
0.807214
0.697796
0
0
0.241778
3,983
111
135
35.882883
0.826159
0.32237
0
0.474576
0
0
0.128725
0.128725
0
0
0
0
0
1
0.135593
false
0
0.033898
0
0.322034
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
d461b8ee48d53dd2671b90a93b5a0d699f03c58e
1,655
py
Python
app/images/fix.py
semisided1/rms12
c278cadaada8350d584e17e4fff0f98005281ea2
[ "BSD-3-Clause" ]
2
2016-11-14T16:50:44.000Z
2016-11-14T16:50:49.000Z
app/images/fix.py
dirtslayer/rms06
c1a25af17d241e0df4e1f3d1db3587f8fd80449b
[ "BSD-3-Clause" ]
null
null
null
app/images/fix.py
dirtslayer/rms06
c1a25af17d241e0df4e1f3d1db3587f8fd80449b
[ "BSD-3-Clause" ]
null
null
null
import glob
import os
import subprocess

# This script fixes jpg files that make imagemin-jpegtran fail with
#   "Error: Invalid SOS parameters for sequential JPEG"
# (the original file carried the full node stack trace).  Round-tripping
# each jpg through png with GraphicsMagick rewrites the JPEG so jpegtran
# accepts it.
#
# Fixes over the original:
# * list-form subprocess.run replaces os.system with %-interpolated shell
#   strings, so filenames containing spaces/metacharacters are passed
#   safely as single arguments;
# * the unused local `l` is removed and the repeated
#   os.path.splitext(os.path.basename(...)) chains are computed once;
# * the temporary png is removed with os.remove instead of shelling out
#   to `rm`.

for jpg_name in glob.glob('*.jpg'):
    print(jpg_name)
    if os.path.isfile(jpg_name):
        stem = os.path.splitext(os.path.basename(jpg_name))[0]
        png_name = stem + '.png'
        subprocess.run(['gm', 'convert', jpg_name, png_name])
        subprocess.run(['gm', 'convert', png_name, jpg_name])
        # Best-effort cleanup: skip silently if the conversion never
        # produced the intermediate png (matches the original rm behavior
        # of not aborting the loop).
        if os.path.isfile(png_name):
            os.remove(png_name)
40.365854
117
0.654985
254
1,655
4.23622
0.318898
0.061338
0.078067
0.089219
0.895911
0.886617
0.831784
0.803903
0.765799
0.765799
0
0.061391
0.183082
1,655
40
118
41.375
0.734467
0.687009
0
0
0
0
0.110429
0
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0.125
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
2e3702949ff46208e637661d8e1bd1f66516abc6
1,744
py
Python
30-39/38. validators/validators.py
dcragusa/PythonMorsels
5f75b51a68769036e4004e9ccdada6b220124ab6
[ "MIT" ]
1
2021-11-30T05:03:24.000Z
2021-11-30T05:03:24.000Z
30-39/38. validators/validators.py
dcragusa/PythonMorsels
5f75b51a68769036e4004e9ccdada6b220124ab6
[ "MIT" ]
null
null
null
30-39/38. validators/validators.py
dcragusa/PythonMorsels
5f75b51a68769036e4004e9ccdada6b220124ab6
[ "MIT" ]
2
2021-04-18T05:26:43.000Z
2021-11-28T18:46:43.000Z
from abc import ABC, abstractmethod
from weakref import WeakKeyDictionary

# Sentinel meaning "no default was supplied" (None may be a valid default).
MISSING = object()


class Validator(ABC):
    """Data-descriptor base class that validates values on assignment.

    Subclasses implement :meth:`validate`, which must raise on invalid
    values.  Per-instance values are stored in a WeakKeyDictionary so the
    descriptor does not keep owner instances alive.
    """

    def __init__(self, default=MISSING):
        self.default = default
        # instance -> stored value
        self.data = WeakKeyDictionary()
        # owner class -> attribute name (set via __set_name__), used only
        # to build a helpful AttributeError message.
        self.class_name_map = WeakKeyDictionary()

    def __set_name__(self, owner, name):
        self.class_name_map[owner] = name

    def __get__(self, obj, objtype):
        if obj in self.data:
            return self.data[obj]
        elif self.default is not MISSING:
            return self.default
        else:
            raise AttributeError(
                f"'{objtype.__name__}' object has no attribute '{self.class_name_map[objtype]}'"
            )

    def __set__(self, obj, val):
        # Validate first so an invalid value is never stored.
        self.validate(val)
        self.data[obj] = val

    @abstractmethod
    def validate(self, val):
        """Raise an exception if *val* is invalid for this descriptor."""
        # Fixed: the original `return NotImplementedError` silently
        # returned the exception class instead of raising it.
        raise NotImplementedError


class PositiveNumber(Validator):
    """Validator accepting only numbers strictly greater than zero."""

    def validate(self, val):
        if val <= 0:
            raise ValueError('Positive number required.')
28.129032
116
0.617546
199
1,744
5.140704
0.221106
0.086022
0.076246
0.093842
0.749756
0.749756
0.749756
0.749756
0.749756
0.662757
0
0.001582
0.275229
1,744
61
117
28.590164
0.807753
0.40711
0
0.074074
0
0
0.10089
0.031652
0
0
0
0
0
1
0.222222
false
0
0.074074
0.037037
0.481481
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
6
2e3852983cb0e33917027c0e971003fb3c15399c
2,363
py
Python
qcdb/tests/test_fci_h2o_2.py
loriab/qccddb
d9e156ef8b313ac0633211fc6b841f84a3ddde24
[ "BSD-3-Clause" ]
8
2019-03-28T11:54:59.000Z
2022-03-19T03:31:37.000Z
qcdb/tests/test_fci_h2o_2.py
loriab/qccddb
d9e156ef8b313ac0633211fc6b841f84a3ddde24
[ "BSD-3-Clause" ]
39
2018-10-31T23:02:18.000Z
2021-12-12T22:11:37.000Z
qcdb/tests/test_fci_h2o_2.py
loriab/qccddb
d9e156ef8b313ac0633211fc6b841f84a3ddde24
[ "BSD-3-Clause" ]
9
2018-03-12T20:51:50.000Z
2022-02-28T15:18:34.000Z
import qcdb

from .utils import *

# Reference values for 6-31G H2O FCI (nuclear repulsion, SCF, and CI totals).
_refnuc = 9.2342185209120
_refscf = -75.985323665263
_refci = -76.1210978474779
_refcorr = _refci - _refscf


def system_water():
    """Build the shared water molecule (bohr) and sanity-check its NRE."""
    h2o = qcdb.set_molecule(
        """
O .0000000000 .0000000000 -.0742719254
H .0000000000 -1.4949589982 -1.0728640373
H .0000000000 1.4949589982 -1.0728640373
units bohr
"""
    )
    h2o.update_geometry()
    assert compare_values(_refnuc, h2o.nuclear_repulsion_energy(), 9, "Nuclear repulsion energy")
    return h2o


def _assert_reference_energies(h2o, E):
    """Shared post-run assertions: NRE plus HF/FCI/CI totals and correlations.

    Extracted because the psi4 and gamess tests previously duplicated this
    identical block of seven assertions.
    """
    assert compare_values(_refnuc, h2o.nuclear_repulsion_energy(), 9, "nre")
    assert compare_values(_refscf, qcdb.variable("HF total energy"), 8, "hf total energy")
    assert compare_values(_refci, E, 7, "return E")
    assert compare_values(_refci, qcdb.variable("FCI TOTAL ENERGY"), 7, "fci total energy")
    assert compare_values(_refcorr, qcdb.variable("FCI CORRELATION ENERGY"), 7, "fci correlation energy")
    assert compare_values(_refci, qcdb.variable("CI TOTAL ENERGY"), 7, "ci total energy")
    assert compare_values(_refcorr, qcdb.variable("CI CORRELATION ENERGY"), 7, "ci correlation energy")


@using("psi4")
def test_fci_rhf_psi4():
    #! 6-31G H2O Test FCI Energy Point
    h2o = system_water()
    qcdb.set_options(
        {
            "basis": "6-31G",
            #'psi4_detci__icore': 0,
        }
    )
    E = qcdb.energy("p4-fci", molecule=h2o)
    _assert_reference_energies(h2o, E)


@using("gamess")
def test_fci_rhf_gamess():
    #! 6-31G H2O Test FCI Energy Point
    h2o = system_water()
    qcdb.set_options(
        {
            "basis": "6-31G",
            # 'gamess_cidet__ncore': 0,
            "freeze_core": False,
        }
    )
    E = qcdb.energy("gms-fci", molecule=h2o)
    _assert_reference_energies(h2o, E)
32.819444
105
0.657639
296
2,363
5.040541
0.22973
0.130697
0.191019
0.134048
0.763405
0.763405
0.719169
0.719169
0.719169
0.684987
0
0.098752
0.220059
2,363
71
106
33.28169
0.710798
0.048667
0
0.444444
0
0
0.224299
0
0
0
0
0
0.333333
1
0.066667
false
0
0.044444
0
0.133333
0
0
0
0
null
0
1
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
5cf7b73944d0ad5d57523e65372b8e7b523edd3c
11,065
py
Python
test/integration/trec_cast.py
eugene-yang/ir_datasets
2b5a42edfb9ab8c4ee8f11674ffe14d60f41ec1e
[ "Apache-2.0" ]
null
null
null
test/integration/trec_cast.py
eugene-yang/ir_datasets
2b5a42edfb9ab8c4ee8f11674ffe14d60f41ec1e
[ "Apache-2.0" ]
null
null
null
test/integration/trec_cast.py
eugene-yang/ir_datasets
2b5a42edfb9ab8c4ee8f11674ffe14d60f41ec1e
[ "Apache-2.0" ]
null
null
null
import re import unittest from ir_datasets.formats import TrecQrel, GenericDoc, GenericScoredDoc from ir_datasets.datasets.trec_cast import Cast2019Query, Cast2020Query from .base import DatasetIntegrationTest class TestTrecCast(DatasetIntegrationTest): def test_docs(self): self._test_docs('trec-cast/v0', count=47696605, items={ 0: GenericDoc('WAPO_b2e89334-33f9-11e1-825f-dabc29fd7071-1', re.compile('^NEW ORLEANS — Whenever a Virginia Tech offensive coach is asked how the most prolific receiving duo .{1}n school history came to be, inevitably the first road game in 2008 against North Carolina comes up\\.$', flags=48)), 9: GenericDoc('WAPO_b2e89334-33f9-11e1-825f-dabc29fd7071-10', re.compile('^“There’s just some things that we were held back from being able to show,” Boykin said, “that we’re .{102}n Blackmon\\. I feel like they’re great athletes, but at the same time we’re right up there with them\\.$', flags=48)), 9074161: GenericDoc('MARCO_0', re.compile('^The presence of communication amid scientific minds was equally important to the success of the Manh.{125}nd engineers is what their success truly meant; hundreds of thousands of innocent lives obliterated\\.$', flags=48)), 9074170: GenericDoc('MARCO_9', re.compile("^One of the main reasons Hanford was selected as a site for the Manhattan Project's B Reactor was its.{13} the Columbia River, the largest river flowing into the Pacific Ocean from the North American coast\\.$", flags=48)), 47696604: GenericDoc('CAR_ffffffb9eec6224bef5da06e829eef59a37748c6', re.compile('^Fisher recommended Louis as First Sea Lord: "He is the most capable administrator in the Admiralty\'s.{472}that would prepare the navy\'s plans in case of war\\. 
He was promoted to full admiral on 13 July 1912\\.$', flags=48)), }) self._test_docs('trec-cast/v1', count=38622444, items={ 0: GenericDoc('MARCO_0', re.compile('^The presence of communication amid scientific minds was equally important to the success of the Manh.{125}nd engineers is what their success truly meant; hundreds of thousands of innocent lives obliterated\\.$', flags=48)), 9: GenericDoc('MARCO_9', re.compile("^One of the main reasons Hanford was selected as a site for the Manhattan Project's B Reactor was its.{13} the Columbia River, the largest river flowing into the Pacific Ocean from the North American coast\\.$", flags=48)), 38622443: GenericDoc('CAR_ffffffb9eec6224bef5da06e829eef59a37748c6', re.compile('^Fisher recommended Louis as First Sea Lord: "He is the most capable administrator in the Admiralty\'s.{472}that would prepare the navy\'s plans in case of war\\. He was promoted to full admiral on 13 July 1912\\.$', flags=48)), }) def test_queries(self): self._test_queries('trec-cast/v0/train', count=269, items={ 0: Cast2019Query('1_1', "What is a physician's assistant?", 1, 1, "Career choice for Nursing and Physician's Assistant", "Considering career options for becoming a physician's assistant vs a nurse. Discussion topics include required education (including time, cost), salaries, and which is better overall."), 9: Cast2019Query('1_10', 'Is a PA above a NP?', 1, 10, "Career choice for Nursing and Physician's Assistant", "Considering career options for becoming a physician's assistant vs a nurse. 
Discussion topics include required education (including time, cost), salaries, and which is better overall."), 268: Cast2019Query('30_7', 'Tell me about how I can share files.', 30, 7, 'Linux and Windows', 'A comparison of Windows and Linux, followed by some tips regarding software installation etc.'), }) self._test_queries('trec-cast/v0/train/judged', count=120, items={ 0: Cast2019Query('1_1', "What is a physician's assistant?", 1, 1, "Career choice for Nursing and Physician's Assistant", "Considering career options for becoming a physician's assistant vs a nurse. Discussion topics include required education (including time, cost), salaries, and which is better overall."), 9: Cast2019Query('1_10', 'Is a PA above a NP?', 1, 10, "Career choice for Nursing and Physician's Assistant", "Considering career options for becoming a physician's assistant vs a nurse. Discussion topics include required education (including time, cost), salaries, and which is better overall."), 119: Cast2019Query('30_7', 'Tell me about how I can share files.', 30, 7, 'Linux and Windows', 'A comparison of Windows and Linux, followed by some tips regarding software installation etc.'), }) self._test_queries('trec-cast/v1/2019', count=479, items={ 0: Cast2019Query('31_1', 'What is throat cancer?', 31, 1, 'head and neck cancer', 'A person is trying to compare and contrast types of cancer in the throat, esophagus, and lungs.'), 9: Cast2019Query('32_1', 'What are the different types of sharks?', 32, 1, 'sharks', 'Information about sharks including several of the main types of sharks, their biological properties including size (whether they have teeth), as well as adaptations. 
This includes difference between sharks and whales.'), 478: Cast2019Query('80_10', 'What was the impact of the expedition?', 80, 10, 'Lewis and Clark expedition', 'Information about the Lewis and Clark expedition, findings, and its significance in US history.'), }) self._test_queries('trec-cast/v1/2019/judged', count=173, items={ 0: Cast2019Query('31_1', 'What is throat cancer?', 31, 1, 'head and neck cancer', 'A person is trying to compare and contrast types of cancer in the throat, esophagus, and lungs.'), 9: Cast2019Query('32_1', 'What are the different types of sharks?', 32, 1, 'sharks', 'Information about sharks including several of the main types of sharks, their biological properties including size (whether they have teeth), as well as adaptations. This includes difference between sharks and whales.'), 172: Cast2019Query('79_9', 'What are modern examples of conflict theory?', 79, 9, 'sociology', 'Information about the field of sociology including important people, theories, and how they relate to one another.'), }) self._test_queries('trec-cast/v1/2020', count=216, items={ 0: Cast2020Query('81_1', 'How do you know when your garage door opener is going bad?', 'How do you know when your garage door opener is going bad?', 'How do you know when your garage door opener is going bad?', 'MARCO_5498474', 81, 1), 9: Cast2020Query('82_2', 'What are the pros and cons?', 'What are the pros and cons of GMO Food labeling?', 'What are the pros and cons of GMO food labeling?', 'CAR_bafb3c1c72e23c444e182cac4e0ea9e4330d21c9', 82, 2), 215: Cast2020Query('105_9', 'What else motivates the Black Lives Matter movement?', 'What else motivates the Black Lives Matter movement?', 'What else motivates the Black Lives Matter movement?', 'MARCO_801480', 105, 9), }) self._test_queries('trec-cast/v1/2020/judged', count=208, items={ 0: Cast2020Query('81_1', 'How do you know when your garage door opener is going bad?', 'How do you know when your garage door opener is going bad?', 'How do you 
know when your garage door opener is going bad?', 'MARCO_5498474', 81, 1), 9: Cast2020Query('82_2', 'What are the pros and cons?', 'What are the pros and cons of GMO Food labeling?', 'What are the pros and cons of GMO food labeling?', 'CAR_bafb3c1c72e23c444e182cac4e0ea9e4330d21c9', 82, 2), 207: Cast2020Query('105_9', 'What else motivates the Black Lives Matter movement?', 'What else motivates the Black Lives Matter movement?', 'What else motivates the Black Lives Matter movement?', 'MARCO_801480', 105, 9), }) def test_qrels(self): self._test_qrels('trec-cast/v0/train', count=2399, items={ 0: TrecQrel('1_1', 'MARCO_955948', 2, '0'), 9: TrecQrel('1_1', 'MARCO_4903530', 0, '0'), 2398: TrecQrel('30_7', 'MARCO_4250016', 0, '0'), }) self._test_qrels('trec-cast/v0/train/judged', count=2399, items={ 0: TrecQrel('1_1', 'MARCO_955948', 2, '0'), 9: TrecQrel('1_1', 'MARCO_4903530', 0, '0'), 2398: TrecQrel('30_7', 'MARCO_4250016', 0, '0'), }) self._test_qrels('trec-cast/v1/2019', count=29350, items={ 0: TrecQrel('31_1', 'CAR_116d829c4c800c2fc70f11692fec5e8c7e975250', 0, 'Q0'), 9: TrecQrel('31_1', 'CAR_40c64256e988c8103550008f4e9b7ce436d9536d', 2, 'Q0'), 29349: TrecQrel('79_9', 'MARCO_8795237', 3, 'Q0'), }) self._test_qrels('trec-cast/v1/2019/judged', count=29350, items={ 0: TrecQrel('31_1', 'CAR_116d829c4c800c2fc70f11692fec5e8c7e975250', 0, 'Q0'), 9: TrecQrel('31_1', 'CAR_40c64256e988c8103550008f4e9b7ce436d9536d', 2, 'Q0'), 29349: TrecQrel('79_9', 'MARCO_8795237', 3, 'Q0'), }) self._test_qrels('trec-cast/v1/2020', count=40451, items={ 0: TrecQrel('81_1', 'CAR_3add84966af079ed84e8b2fc412ad1dc27800127', 1, '0'), 9: TrecQrel('81_1', 'MARCO_1381086', 1, '0'), 40450: TrecQrel('105_9', 'MARCO_8757526', 0, '0'), }) self._test_qrels('trec-cast/v1/2020/judged', count=40451, items={ 0: TrecQrel('81_1', 'CAR_3add84966af079ed84e8b2fc412ad1dc27800127', 1, '0'), 9: TrecQrel('81_1', 'MARCO_1381086', 1, '0'), 40450: TrecQrel('105_9', 'MARCO_8757526', 0, '0'), }) def test_scoreddocs(self): 
self._test_scoreddocs('trec-cast/v0/train', count=269000, items={ 0: GenericScoredDoc('1_1', 'MARCO_955948', -5.32579), 9: GenericScoredDoc('1_1', 'CAR_87772d4208721133d00d7d62f4eaaf164da5b4e3', -5.44505), 268999: GenericScoredDoc('30_7', 'WAPO_595c1be2ba9e3b1e66d552a174219c12-3', -7.07828), }) self._test_scoreddocs('trec-cast/v0/train/judged', count=120000, items={ 0: GenericScoredDoc('1_1', 'MARCO_955948', -5.32579), 9: GenericScoredDoc('1_1', 'CAR_87772d4208721133d00d7d62f4eaaf164da5b4e3', -5.44505), 119999: GenericScoredDoc('30_7', 'WAPO_595c1be2ba9e3b1e66d552a174219c12-3', -7.07828), }) self._test_scoreddocs('trec-cast/v1/2019', count=479000, items={ 0: GenericScoredDoc('31_1', 'MARCO_789620', -5.71312), 9: GenericScoredDoc('31_1', 'MARCO_291004', -5.88053), 478999: GenericScoredDoc('80_10', 'CAR_268dcb1c6bc4326f81500513e0ad9d11acb2a693', -5.23496), }) self._test_scoreddocs('trec-cast/v1/2019/judged', count=173000, items={ 0: GenericScoredDoc('31_1', 'MARCO_789620', -5.71312), 9: GenericScoredDoc('31_1', 'MARCO_291004', -5.88053), 172999: GenericScoredDoc('79_9', 'MARCO_1431776', -6.75024), }) if __name__ == '__main__': unittest.main()
98.794643
321
0.686037
1,525
11,065
4.88459
0.236721
0.020405
0.014767
0.015304
0.832863
0.824137
0.814337
0.767217
0.763861
0.763861
0
0.144205
0.195301
11,065
111
322
99.684685
0.692273
0
0
0.538462
0
0.115385
0.597379
0.08423
0
0
0
0
0
1
0.038462
false
0
0.076923
0
0.125
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
cf4df7fb8da337f404f1f0186cc410c44c79e867
123
py
Python
c++/py-binder/call_trivial.py
markliou/tool_scripts
d9f7d8f23edeb294dac1c9d29a2d7358751922b7
[ "Apache-2.0" ]
null
null
null
c++/py-binder/call_trivial.py
markliou/tool_scripts
d9f7d8f23edeb294dac1c9d29a2d7358751922b7
[ "Apache-2.0" ]
null
null
null
c++/py-binder/call_trivial.py
markliou/tool_scripts
d9f7d8f23edeb294dac1c9d29a2d7358751922b7
[ "Apache-2.0" ]
1
2017-08-04T00:44:56.000Z
2017-08-04T00:44:56.000Z
import trivial_functions_in_c a = [float(i * 2) for i in range(32)] print(trivial_functions_in_c.pyDoubleArrayInPy(a, 32))
30.75
54
0.780488
22
123
4.090909
0.636364
0.355556
0.4
0.422222
0
0
0
0
0
0
0
0.045455
0.105691
123
4
54
30.75
0.772727
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0.333333
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
d89f0b3f3dceace2e04c61f32be0d0571d2d1d8e
75
py
Python
FreeTAKServer/model/FTSModel/fts_protocol_object.py
logikal/FreeTakServer
c0916ce65781b5c60079d6440e52db8fc6ee0467
[ "MIT" ]
27
2020-05-01T01:45:59.000Z
2020-07-03T00:17:13.000Z
FreeTAKServer/model/FTSModel/fts_protocol_object.py
logikal/FreeTakServer
c0916ce65781b5c60079d6440e52db8fc6ee0467
[ "MIT" ]
34
2020-04-26T11:25:52.000Z
2020-07-03T21:06:34.000Z
FreeTAKServer/model/FTSModel/fts_protocol_object.py
logikal/FreeTakServer
c0916ce65781b5c60079d6440e52db8fc6ee0467
[ "MIT" ]
15
2020-05-01T01:46:07.000Z
2020-07-03T12:14:04.000Z
from abc import ABC, abstractmethod class FTSProtocolObject(ABC): pass
18.75
35
0.786667
9
75
6.555556
0.777778
0
0
0
0
0
0
0
0
0
0
0
0.16
75
4
36
18.75
0.936508
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
d8b13117841e30c32e3c2e95b043b4ee590f9efe
32
py
Python
benford_law/__init__.py
rafaelmata357/benford
1273f95c9b0eba705550f32e847f705b6254e2ac
[ "MIT" ]
3
2021-01-06T10:44:32.000Z
2021-05-23T17:46:21.000Z
benford_law/__init__.py
rafaelmata357/benford
1273f95c9b0eba705550f32e847f705b6254e2ac
[ "MIT" ]
2
2020-12-27T20:58:24.000Z
2021-01-04T21:52:58.000Z
benford_law/__init__.py
rafaelmata357/benford
1273f95c9b0eba705550f32e847f705b6254e2ac
[ "MIT" ]
null
null
null
from .benford_law import Benford
32
32
0.875
5
32
5.4
0.8
0
0
0
0
0
0
0
0
0
0
0
0.09375
32
1
32
32
0.931034
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
d8c03cd5a2f2bd20c21fef18cf2789823e67a175
38
py
Python
app/account/__init__.py
ta4tsering/pyrrha-bo
d5afbe4b37d4d2ad5b5bb4129b1dccaeb50c9b17
[ "MIT" ]
16
2018-11-16T13:48:20.000Z
2020-11-13T21:28:06.000Z
app/account/__init__.py
ta4tsering/pyrrha-bo
d5afbe4b37d4d2ad5b5bb4129b1dccaeb50c9b17
[ "MIT" ]
179
2018-11-16T12:43:05.000Z
2022-03-31T08:52:22.000Z
app/account/__init__.py
ta4tsering/pyrrha-bo
d5afbe4b37d4d2ad5b5bb4129b1dccaeb50c9b17
[ "MIT" ]
21
2019-02-17T15:56:29.000Z
2022-03-28T09:27:57.000Z
from app.account.views import account
19
37
0.842105
6
38
5.333333
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.105263
38
1
38
38
0.941176
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
2b26ff47e56c7ffa21a99a6d8aeeac17bb99d931
204
py
Python
kanban/admin.py
Seunghan-Kim/africa_elephant
62acbc570cc40c43f38e521f751490ff6ae3ffc8
[ "MIT" ]
null
null
null
kanban/admin.py
Seunghan-Kim/africa_elephant
62acbc570cc40c43f38e521f751490ff6ae3ffc8
[ "MIT" ]
12
2020-03-24T17:57:35.000Z
2022-02-10T12:00:00.000Z
kanban/admin.py
Seunghan-Kim/africa_elephant
62acbc570cc40c43f38e521f751490ff6ae3ffc8
[ "MIT" ]
1
2019-12-07T02:27:18.000Z
2019-12-07T02:27:18.000Z
from django.contrib import admin from .models import Board, Card, Column, Column_top30 admin.site.register(Board) admin.site.register(Column) admin.site.register(Card) admin.site.register(Column_top30)
22.666667
53
0.813725
30
204
5.466667
0.4
0.219512
0.414634
0.280488
0
0
0
0
0
0
0
0.02139
0.083333
204
8
54
25.5
0.855615
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
2b6184afaaa33902282a8f0a0f108623ca7d5273
90
py
Python
pension/app.py
sarafilipacosta/py-pension-scheme
6e244446efacacc5bce5e94cbcc07231753d126e
[ "MIT" ]
null
null
null
pension/app.py
sarafilipacosta/py-pension-scheme
6e244446efacacc5bce5e94cbcc07231753d126e
[ "MIT" ]
2
2020-01-28T22:40:22.000Z
2021-02-07T13:00:02.000Z
pension/app.py
sarafilipacosta/py-pension-scheme
6e244446efacacc5bce5e94cbcc07231753d126e
[ "MIT" ]
1
2020-01-25T00:44:14.000Z
2020-01-25T00:44:14.000Z
'''Pension scheme main report generator''' def run(): print('Pension scheme start.')
18
42
0.677778
11
90
5.545455
0.818182
0.42623
0
0
0
0
0
0
0
0
0
0
0.166667
90
4
43
22.5
0.813333
0.4
0
0
0
0
0.4375
0
0
0
0
0
0
1
0.5
true
0
0
0
0.5
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
1
0
6
995b2dfda19e70042f8e90592d0fb95a8db6c7e1
127
py
Python
game/pacman/directions.py
loudest/twitch-pacman
22950d53cadc85d316a3d859a866afc0d82d04fa
[ "MIT" ]
1
2019-05-25T08:41:13.000Z
2019-05-25T08:41:13.000Z
game/pacman/directions.py
loudest/twitch-pacman
22950d53cadc85d316a3d859a866afc0d82d04fa
[ "MIT" ]
null
null
null
game/pacman/directions.py
loudest/twitch-pacman
22950d53cadc85d316a3d859a866afc0d82d04fa
[ "MIT" ]
null
null
null
def enum(**enums): return type('Enum', (), enums) # Movement Directions Directions = enum(RIGHT=1, LEFT=2, UP=3, DOWN=4)
18.142857
48
0.645669
19
127
4.315789
0.789474
0.219512
0
0
0
0
0
0
0
0
0
0.037736
0.165354
127
6
49
21.166667
0.735849
0.149606
0
0
0
0
0.038095
0
0
0
0
0
0
1
0.333333
false
0
0
0.333333
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
99959a597507d108c61f3239293f8519ae965e64
9,126
py
Python
classification_tagging/utilities/data_selection.py
arkel23/yuwu
4dcf0e18693e09a947569ddcc7cb3ff00c7c674a
[ "MIT" ]
67
2021-01-22T02:29:40.000Z
2022-03-13T11:25:07.000Z
classification_tagging/utilities/data_selection.py
arkel23/yuwu
4dcf0e18693e09a947569ddcc7cb3ff00c7c674a
[ "MIT" ]
1
2021-03-14T10:35:19.000Z
2021-03-15T09:55:24.000Z
classification_tagging/utilities/data_selection.py
arkel23/yuwu
4dcf0e18693e09a947569ddcc7cb3ff00c7c674a
[ "MIT" ]
9
2021-01-22T13:50:11.000Z
2022-01-12T13:28:38.000Z
import os import ast import random import pandas as pd from PIL import Image from PIL import ImageFile import torch import torch.utils.data as data from torchvision import transforms from transformers import BertTokenizer from .custom_tokenizer import CustomTokenizer ImageFile.LOAD_TRUNCATED_IMAGES = True def load_data(args, split): transform = None if args.dataset_name == 'moeImouto': dataset = moeImouto(args, split=split, transform=transform) elif args.dataset_name == 'cartoonFace': dataset = cartoonFace(root=args.dataset_path, image_size=args.image_size, split=split, transform=transform) elif args.dataset_name == 'danbooruFaces' or args.dataset_name == 'danbooruFull': dataset = danbooruFacesFull(args, split=split, transform=transform) dataset_loader = data.DataLoader(dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.no_cpu_workers, drop_last=True) return dataset, dataset_loader def get_transform(split, image_size): if split == 'train': transform = transforms.Compose([ transforms.Resize((image_size+32, image_size+32)), transforms.RandomCrop((image_size, image_size)), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1), transforms.ToTensor(), transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) ]) else: transform = transforms.Compose([ transforms.Resize((image_size, image_size)), transforms.ToTensor(), transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) ]) return transform class danbooruFacesFull(data.Dataset): ''' https://github.com/arkel23/Danbooru2018AnimeCharacterRecognitionDataset_Revamped ''' def __init__(self, args, split='train', transform=None): super().__init__() self.dataset_name = args.dataset_name self.root = os.path.abspath(args.dataset_path) self.image_size = args.image_size self.split = split self.transform = transform self.tokenizer_method = args.tokenizer self.max_text_seq_len = args.max_text_seq_len self.shuffle = args.shuffle_tokens if 
self.split=='train': print('Train set') self.set_dir = os.path.join(self.root, 'labels', 'train.csv') if self.transform is None: self.transform = get_transform(split='train', image_size=self.image_size) elif self.split=='val': print('Validation set') self.set_dir = os.path.join(self.root, 'labels', 'val.csv') if self.transform is None: self.transform = get_transform(split='test', image_size=self.image_size) else: print('Test set') self.set_dir = os.path.join(self.root, 'labels', 'test.csv') if self.transform is None: self.transform = get_transform(split='test', image_size=self.image_size) if self.max_text_seq_len: if self.tokenizer_method == 'wp': self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') elif self.tokenizer_method == 'tag': self.tokenizer = CustomTokenizer( vocab_path=os.path.join(args.dataset_path, 'labels', 'vocab.pkl'), max_text_seq_len=args.max_text_seq_len) self.set_dir = self.set_dir.replace('.csv', '_tags.csv') self.df = pd.read_csv(self.set_dir) else: self.df = pd.read_csv(self.set_dir, sep=',', header=None, names=['class_id', 'dir'], dtype={'class_id': 'UInt16', 'dir': 'object'}) self.targets = self.df['class_id'].to_numpy() self.data = self.df['dir'].to_numpy() self.classes = pd.read_csv(os.path.join(self.root, 'labels', 'classid_classname.csv'), sep=',', header=None, names=['class_id', 'class_name'], dtype={'class_id': 'UInt16', 'class_name': 'object'}) self.num_classes = len(self.classes) def __getitem__(self, idx): if torch.is_tensor(idx): idx = idx.tolist() img_dir, target = self.data[idx], self.targets[idx] if self.dataset_name == 'danbooruFaces': img_dir = os.path.join(self.root, 'faces', img_dir) elif self.dataset_name == 'danbooruFull': img_dir = os.path.join(self.root, 'fullMin256', img_dir) img = Image.open(img_dir) if self.transform: img = self.transform(img) if self.max_text_seq_len: caption = ast.literal_eval(self.df.iloc[idx].tags_cat0) if self.shuffle: random.shuffle(caption) if self.tokenizer_method == 'wp': 
caption = ' '.join(caption) # originally joined by '[SEP]' caption = self.tokenizer(caption, return_tensors='pt', padding='max_length', max_length=self.max_text_seq_len, truncation=True)['input_ids'] elif self.tokenizer_method == 'tag': caption = self.tokenizer(caption) return img, target, caption else: return img, target def __len__(self): return len(self.targets) class moeImouto(data.Dataset): ''' https://www.kaggle.com/mylesoneill/tagged-anime-illustrations/home http://www.nurs.or.jp/~nagadomi/animeface-character-dataset/ https://github.com/nagadomi/lbpcascade_animeface ''' def __init__(self, args, split='train', transform=None): super().__init__() self.dataset_name = args.dataset_name self.root = os.path.abspath(args.dataset_path) self.image_size = args.image_size self.split = split self.transform = transform self.tokenizer_method = args.tokenizer self.max_text_seq_len = args.max_text_seq_len self.shuffle = args.shuffle_tokens if self.split=='train': print('Train set') self.set_dir = os.path.join(self.root, 'train.csv') if self.transform is None: self.transform = get_transform(split='train', image_size=self.image_size) else: print('Test set') self.set_dir = os.path.join(self.root, 'test.csv') if self.transform is None: self.transform = get_transform(split='test', image_size=self.image_size) if self.max_text_seq_len: if self.tokenizer_method == 'wp': self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') elif self.tokenizer_method == 'tag': self.tokenizer = CustomTokenizer( vocab_path=os.path.join(args.dataset_path, 'labels', 'vocab.pkl'), max_text_seq_len=args.max_text_seq_len) self.set_dir = self.set_dir.replace('.csv', '_tags.csv') self.df = pd.read_csv(self.set_dir) else: self.df = pd.read_csv(self.set_dir, sep=',', header=None, names=['class_id', 'dir'], dtype={'class_id': 'UInt16', 'dir': 'object'}) self.targets = self.df['class_id'].to_numpy() self.data = self.df['dir'].to_numpy() self.classes = pd.read_csv(os.path.join(self.root, 
'classid_classname.csv'), sep=',', header=None, names=['class_id', 'class_name'], dtype={'class_id': 'UInt16', 'class_name': 'object'}) self.num_classes = len(self.classes) def __getitem__(self, idx): if torch.is_tensor(idx): idx = idx.tolist() img_dir, target = self.data[idx], self.targets[idx] img_dir = os.path.join(self.root, 'data', img_dir) img = Image.open(img_dir) if self.transform: img = self.transform(img) if self.max_text_seq_len: caption = ast.literal_eval(self.df.iloc[idx].tags_cat0) if self.shuffle: random.shuffle(caption) if self.tokenizer_method == 'wp': caption = ' '.join(caption) # originally joined by '[SEP]' caption = self.tokenizer(caption, return_tensors='pt', padding='max_length', max_length=self.max_text_seq_len, truncation=True)['input_ids'] elif self.tokenizer_method == 'tag': caption = self.tokenizer(caption) return img, target, caption else: return img, target def __len__(self): return len(self.targets) class cartoonFace(data.Dataset): ''' http://challenge.ai.iqiyi.com/detail?raceId=5def69ace9fcf68aef76a75d https://github.com/luxiangju-PersonAI/iCartoonFace ''' def __init__(self, root, image_size=128, split='train', transform=None): super().__init__() self.root = os.path.abspath(root) self.image_size = image_size self.split = split self.transform = transform if self.split=='train': print('Train set') self.set_dir = os.path.join(self.root, 'train.csv') if self.transform is None: self.transform = get_transform(split='train', image_size=self.image_size) else: print('Test set') self.set_dir = os.path.join(self.root, 'test.csv') if self.transform is None: self.transform = get_transform(split='test', image_size=self.image_size) self.df = pd.read_csv(self.set_dir, sep=',', header=None, names=['class_id', 'dir'], dtype={'class_id': 'UInt16', 'dir': 'object'}) self.targets = self.df['class_id'].to_numpy() self.data = self.df['dir'].to_numpy() self.classes = pd.read_csv(os.path.join(self.root, 'classid_classname.csv'), sep=',', header=None, 
names=['class_id', 'class_name'], dtype={'class_id': 'UInt16', 'class_name': 'object'}) self.num_classes = len(self.classes) def __getitem__(self, idx): if torch.is_tensor(idx): idx = idx.tolist() img_dir, target = self.data[idx], self.targets[idx] img_dir = os.path.join(self.root, 'data', img_dir) img = Image.open(img_dir) if self.transform: img = self.transform(img) return img, target def __len__(self): return len(self.targets)
32.24735
89
0.697348
1,298
9,126
4.701849
0.134052
0.044241
0.026217
0.029821
0.785679
0.767328
0.766344
0.735868
0.713092
0.713092
0
0.009331
0.154504
9,126
282
90
32.361702
0.781623
0.047776
0
0.772093
0
0
0.093671
0.007277
0
0
0
0
0
1
0.051163
false
0
0.051163
0.013953
0.162791
0.032558
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
99a70aefb8669d973a0e442806910d2dd1dff004
23
py
Python
tasks/us/census/acs_columns/__init__.py
CartoDB/bigmetadata
a32325382500f23b8a607e4e02cc0ec111360869
[ "BSD-3-Clause" ]
45
2015-12-14T03:05:55.000Z
2021-06-29T22:46:40.000Z
tasks/us/census/acs_columns/__init__.py
CartoDB/bigmetadata
a32325382500f23b8a607e4e02cc0ec111360869
[ "BSD-3-Clause" ]
480
2016-02-19T15:58:44.000Z
2021-09-10T16:38:56.000Z
tasks/us/census/acs_columns/__init__.py
CartoDB/bigmetadata
a32325382500f23b8a607e4e02cc0ec111360869
[ "BSD-3-Clause" ]
13
2016-08-09T21:03:02.000Z
2020-04-29T23:40:20.000Z
from .columns import *
11.5
22
0.73913
3
23
5.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.173913
23
1
23
23
0.894737
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
510e97554ad36f1a26ca37980613a4aa8f23c125
377
py
Python
etna/transforms/outliers/__init__.py
Pacman1984/etna
9b3ccb980e576d56858f14aca2e06ce2957b0fa9
[ "Apache-2.0" ]
96
2021-09-05T06:29:34.000Z
2021-11-07T15:22:54.000Z
etna/transforms/outliers/__init__.py
Pacman1984/etna
9b3ccb980e576d56858f14aca2e06ce2957b0fa9
[ "Apache-2.0" ]
188
2021-09-06T15:59:58.000Z
2021-11-17T09:34:16.000Z
etna/transforms/outliers/__init__.py
Pacman1984/etna
9b3ccb980e576d56858f14aca2e06ce2957b0fa9
[ "Apache-2.0" ]
8
2021-09-06T09:18:35.000Z
2021-11-11T21:18:39.000Z
from etna.transforms.outliers.base import OutliersTransform from etna.transforms.outliers.point_outliers import DensityOutliersTransform from etna.transforms.outliers.point_outliers import MedianOutliersTransform from etna.transforms.outliers.point_outliers import PredictionIntervalOutliersTransform from etna.transforms.outliers.sequence_outliers import SAXOutliersTransform
62.833333
87
0.907162
39
377
8.666667
0.333333
0.118343
0.266272
0.384615
0.399408
0.399408
0.399408
0
0
0
0
0
0.05305
377
5
88
75.4
0.946779
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
514120f742edfccc0099324b681c3384239453de
190
py
Python
src/log_utils/__init__.py
knkgun/federalist-garden-build
6cbe9cfc736717cdb30f0c81066516b6d99eca52
[ "CC0-1.0" ]
5
2017-12-23T16:22:13.000Z
2020-08-24T16:02:22.000Z
src/log_utils/__init__.py
knkgun/federalist-garden-build
6cbe9cfc736717cdb30f0c81066516b6d99eca52
[ "CC0-1.0" ]
133
2017-06-27T21:38:01.000Z
2022-03-22T21:19:18.000Z
src/log_utils/__init__.py
knkgun/federalist-garden-build
6cbe9cfc736717cdb30f0c81066516b6d99eca52
[ "CC0-1.0" ]
12
2017-07-14T02:39:58.000Z
2021-12-25T00:10:48.000Z
'''Logging stuff''' from .get_logger import get_logger, init_logging from .delta_to_mins_secs import delta_to_mins_secs __all__ = [ 'delta_to_mins_secs', 'get_logger', 'init_logging']
23.75
55
0.773684
29
190
4.448276
0.413793
0.209302
0.255814
0.348837
0
0
0
0
0
0
0
0
0.121053
190
7
56
27.142857
0.772455
0.068421
0
0
0
0
0.233918
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
6
5ac4704f14aac9dfaa553dddab16fae8b6d3f7a4
1,016
py
Python
Dice_Simulator.py
sanrock123/Dice-Simulator-Game
99f8efc28ab2b7ff1e1093c59162cae89fde0e64
[ "MIT" ]
null
null
null
Dice_Simulator.py
sanrock123/Dice-Simulator-Game
99f8efc28ab2b7ff1e1093c59162cae89fde0e64
[ "MIT" ]
null
null
null
Dice_Simulator.py
sanrock123/Dice-Simulator-Game
99f8efc28ab2b7ff1e1093c59162cae89fde0e64
[ "MIT" ]
null
null
null
import random x = "y" while x=="y": no=random.randint(1,6) if no==1: print("[-----]") print("[ ]") print("[ 0 ]") print("[ ]") print("[-----]") if no == 2: print("[-----]") print("[ 0 ]") print("[ ]") print("[ 0 ]") print("[-----]") if no == 3: print("[-----]") print("[ ]") print("[0 0 0]") print("[ ]") print("[-----]") if no == 4: print("[-----]") print("[0 0]") print("[ ]") print("[0 0]") print("[-----]") if no == 5: print("[-----]") print("[0 0]") print("[ 0 ]") print("[0 0]") print("[-----]") if no == 6: print("[-----]") print("[0 0 0]") print("[ ]") print("[0 0 0]") print("[-----]") x=input("press y to roll again and n to exit:") print("\n")
20.32
52
0.273622
94
1,016
2.957447
0.255319
0.467626
0.356115
0.258993
0.611511
0.334532
0.165468
0
0
0
0
0.051786
0.448819
1,016
49
53
20.734694
0.444643
0
0
0.714286
0
0
0.246063
0
0
0
0
0
0
1
0
false
0
0.02381
0
0.02381
0.738095
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
6
5ad6687c9623e6d226c06925f7d37252bb0e5f6d
10,721
py
Python
MLlib/activations.py
Udit-git-acc/ML-DL-implementation
7514038c76d8e4293554110c604b3336f01356eb
[ "BSD-3-Clause" ]
48
2020-08-05T09:49:21.000Z
2022-01-16T06:06:57.000Z
MLlib/activations.py
Udit-git-acc/ML-DL-implementation
7514038c76d8e4293554110c604b3336f01356eb
[ "BSD-3-Clause" ]
111
2020-08-06T08:18:38.000Z
2021-10-06T20:05:04.000Z
MLlib/activations.py
Udit-git-acc/ML-DL-implementation
7514038c76d8e4293554110c604b3336f01356eb
[ "BSD-3-Clause" ]
122
2020-08-05T16:59:23.000Z
2022-01-21T04:08:15.000Z
import MLlib import numpy as np from MLlib import autograd from MLlib.utils.misc_utils import unbroadcast class Sigmoid(autograd.Function): __slots__ = () @staticmethod def forward(ctx, input): if not (type(input).__name__ == 'Tensor'): raise RuntimeError("Expected a Tensor, got {}. Please use " "Sigmoid.activation() for non-Tensor data" .format(type(input).__name__)) requires_grad = input.requires_grad output = 1 / (1 + np.exp(-input.data)) output = MLlib.Tensor(output, requires_grad=requires_grad, is_leaf=not requires_grad) if requires_grad: ctx.save_for_backward(output) return output @staticmethod def backward(ctx, grad_output): o = ctx.saved_tensors[0] grad_o = o.data * (1 - o.data) * grad_output.data grad_o = MLlib.Tensor(unbroadcast(grad_o, o.shape)) return grad_o @staticmethod def activation(X): """ Apply Sigmoid on X Vector. PARAMETERS ========== X: ndarray(dtype=float, ndim=1) Array containing Input Values. RETURNS ======= ndarray(dtype=float,ndim=1) Output Vector after Vectorised Operation. """ return 1 / (1 + np.exp(-X)) @staticmethod def derivative(X): """ Calculate derivative of Sigmoid on X Vector. PARAMETERS ========== X: ndarray(dtype=float, ndim=1) Array containing Input Values. RETURNS ======= ndarray(dtype=float,ndim=1) Outputs array of derivatives. """ s = 1 / (1 + np.exp(-X)) ds = s * (1 - s) return ds class TanH(): @staticmethod def activation(X): """ Apply hyperbolic tangent function on X Vector. PARAMETERS ========== X: ndarray(dtype=float, ndim=1) Array containing Input Values. RETURNS ======= ndarray(dtype=float,ndim=1) Output Vector after Vectorised Operation. """ return np.tanh(X) @staticmethod def derivative(X): """ Calculate derivative of hyperbolic tangent function on X Vector. PARAMETERS ========== X: ndarray(dtype=float, ndim=1) Array containing Input Values. RETURNS ======= ndarray(dtype=float,ndim=1) Outputs array of derivatives. 
""" return 1.0 - np.tanh(X)**2 class Softmax(autograd.Function): __slots__ = () @staticmethod def forward(ctx, input): if not (type(input).__name__ == 'Tensor'): raise RuntimeError("Expected a Tensor, got {}. Please use " "Softmax.activation() for non-Tensor data" .format(type(input).__name__)) if len(input.shape) != 2: raise RuntimeError("Expected a batch of data of size (m, classes)" ", got {}".format(input.shape)) requires_grad = input.requires_grad e_x = np.exp(input.data) output = e_x / np.sum(e_x, axis=1, keepdims=True) # axis=1 because we don't want to compute across batch dimension output = MLlib.Tensor(output, requires_grad=requires_grad, is_leaf=not requires_grad) if requires_grad: ctx.save_for_backward(output) return output @staticmethod def backward(ctx, grad_output): output = ctx.saved_tensors[0].data o = -output[..., None] * output[:, None, :] diag_x, diag_y = np.diag_indices_from(o[0]) o[:, diag_y, diag_x] = output * (1.0 - output) grad_o = o.sum(axis=1) grad_o = grad_o * grad_output.data grad_o = MLlib.Tensor(grad_o) return grad_o @staticmethod def activation(X): """ Apply Softmax on X Vector. PARAMETERS ========== X: ndarray(dtype=float, ndim=1) Array containing Input Values. Sum: float Sum of values of Input Array. RETURNS ======= ndarray(dtype=float,ndim=1) Output Vector after Vectorised Operation. """ Sum = np.sum(np.exp(X)) return np.exp(X) / Sum @staticmethod def derivative(X): """ Calculate derivative of Softmax on X Vector. PARAMETERS ========== X: ndarray(dtype=float, ndim=1) Array containing Input Values. Sum: float Sum of values of Input Array. RETURNS ======= ndarray(dtype=float,ndim=1) Output Vector after Vectorised Operation. """ x_vector = X.reshape(X.shape[0], 1) x_matrix = np.tile(x_vector, X.shape[0]) x_der = np.diag(X) - (x_matrix * np.transpose(x_matrix)) return x_der class Softsign(): @staticmethod def activation(X): """ Apply Softsign on X Vector. 
PARAMETERS ========== X: ndarray(dtype=float, ndim=1) Array containing Input Values. RETURNS ======= ndarray(dtype=float,ndim=1) Output Vector after Vectorised Operation. """ return X / (np.abs(X) + 1) @staticmethod def derivative(X): """ Calculate derivative of Softsign on X Vector. PARAMETERS ========== X: ndarray(dtype=float, ndim=1) Array containing Input Values. RETURNS ======= ndarray(dtype=float,ndim=1) Output Vector after Vectorised Operation. """ return 1 / (np.abs(X) + 1)**2 class Relu(autograd.Function): __slots__ = () @staticmethod def forward(ctx, input): if not (type(input).__name__ == 'Tensor'): raise RuntimeError("Expected a Tensor, got {}. Please use " "Relu.activation() for non-Tensor data" .format(type(input).__name__)) requires_grad = input.requires_grad output = np.maximum(input.data, 0) output = MLlib.Tensor(output, requires_grad=requires_grad, is_leaf=not requires_grad) if requires_grad: ctx.save_for_backward(output) return output @staticmethod def backward(ctx, grad_output): o = ctx.saved_tensors[0] grad_o = np.greater(o.data, 0).astype(int) * grad_output.data grad_o = MLlib.Tensor(unbroadcast(grad_o, o.shape)) return grad_o @staticmethod def activation(X): """ Apply Rectified Linear Unit on X Vector. PARAMETERS ========== X: ndarray(dtype=float, ndim=1) Array containing Input Values. RETURNS ======= ndarray(dtype=float,ndim=1) Output Vector after Vectorised Operation. """ return np.maximum(0, X) @staticmethod def derivative(X): """ Calculate derivative of Rectified Linear Unit on X Vector. PARAMETERS ========== X: ndarray(dtype=float, ndim=1) Array containing Input Values. RETURNS ======= ndarray(dtype=float,ndim=1) Outputs array of derivatives. """ return np.greater(X, 0).astype(int) class LeakyRelu(): @staticmethod def activation(X, alpha=0.01): """ Apply Leaky Rectified Linear Unit on X Vector. PARAMETERS ========== X: ndarray(dtype=float, ndim=1) Array containing Input Values. alpha: float Slope for Values of X less than 0. 
RETURNS ======= ndarray(dtype=float,ndim=1) Output Vector after Vectorised Operation. """ return np.maximum(alpha*X, X) @staticmethod def derivative(X, alpha=0.01): """ Calculate derivative of Leaky Rectified Linear Unit on X Vector. PARAMETERS ========== X: ndarray(dtype=float, ndim=1) Array containing Input Values. alpha: float Slope for Values of X less than 0. RETURNS ======= ndarray(dtype=float,ndim=1) Outputs array of derivatives. """ dx = np.greater(X, 0).astype(float) dx[X < 0] = -alpha return dx class Elu(): @staticmethod def activation(X, alpha=1.0): """ Apply Exponential Linear Unit on X Vector. PARAMETERS ========== X: ndarray(dtype=float, ndim=1) Array containing Input Values. alpha: float Curve Constant for Values of X less than 0. RETURNS ======= ndarray(dtype=float,ndim=1) Output Vector after Vectorised Operation. """ if (alpha <= 0): raise AssertionError return np.maximum(0, X) + np.minimum(0, alpha * (np.exp(X) - 1)) def unit_step(X): """ Apply Binary Step Function on X Vector. PARAMETERS ========== X: ndarray(dtype=float, ndim=1) Array containing Input Values. RETURNS ======= ndarray(dtype=float,ndim=1) Output Vector after Vectorised Operation. """ return np.heaviside(X, 1) class Swish(): @staticmethod def activation(X, alpha=1.0): """ Apply Swish activation function on X Vector. PARAMETERS ========== X: ndarray(dtype=float, ndim=1) Array containing Input Values. b: int or float Either constant or trainable parameter according to the model. RETURNS ======= ndarray(dtype=float,ndim=1) Output Vector after Vectorised Operation. """ return X / (1 + np.exp(-(alpha*X))) @staticmethod def derivative(X, alpha=1.0): """ Calculate derivative of Swish activation function on X Vector. PARAMETERS ========== X: ndarray(dtype=float, ndim=1) Array containing Input Values. b: int or float Either constant or trainable parameter according to the model. RETURNS ======= ndarray(dtype=float,ndim=1) Output Vector after Vectorised Operation. 
""" s = 1 / (1 + np.exp(-X)) f = X / (1 + np.exp(-(alpha*X))) df = f + (s * (1 - f)) return df
23.408297
78
0.531574
1,182
10,721
4.733503
0.122673
0.068633
0.09723
0.120107
0.834495
0.796247
0.777301
0.755139
0.708311
0.700268
0
0.013166
0.355284
10,721
457
79
23.459519
0.796296
0.381774
0
0.557143
0
0
0.058493
0
0
0
0
0
0.007143
1
0.157143
false
0
0.028571
0
0.421429
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
5aded85c590b53977e065abbd6294434e2c0cec7
167
py
Python
src/examples/ShowActualPath.py
Gabvaztor/tensorflowCode
e206ea4544552b87c2d43274cea3182f6b385a87
[ "Apache-2.0" ]
4
2019-12-14T08:06:18.000Z
2020-09-12T10:09:31.000Z
src/examples/ShowActualPath.py
Gabvaztor/tensorflowCode
e206ea4544552b87c2d43274cea3182f6b385a87
[ "Apache-2.0" ]
null
null
null
src/examples/ShowActualPath.py
Gabvaztor/tensorflowCode
e206ea4544552b87c2d43274cea3182f6b385a87
[ "Apache-2.0" ]
2
2020-09-12T10:10:07.000Z
2021-09-15T11:58:37.000Z
import os def show_actual_path(): print("Actual Path: \n", os.path.dirname(os.path.abspath(__file__))) print("Actual Path: \n", os.getcwd()) show_actual_path()
33.4
72
0.700599
26
167
4.192308
0.461538
0.366972
0.256881
0.293578
0.330275
0
0
0
0
0
0
0
0.11976
167
5
73
33.4
0.741497
0
0
0
0
0
0.178571
0
0
0
0
0
0
1
0.2
true
0
0.2
0
0.4
0.4
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6