hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
253a94244f337764645b9cbbbaf69bad77675449
85
py
Python
Section 7/function001.py
PacktPublishing/Learning-Python-v-
30fb28dfaaa18815f1b4c0b683e8839da223b195
[ "MIT" ]
1
2021-10-05T19:45:43.000Z
2021-10-05T19:45:43.000Z
Section 7/function001.py
PacktPublishing/Learning-Python-v-
30fb28dfaaa18815f1b4c0b683e8839da223b195
[ "MIT" ]
null
null
null
Section 7/function001.py
PacktPublishing/Learning-Python-v-
30fb28dfaaa18815f1b4c0b683e8839da223b195
[ "MIT" ]
2
2020-09-25T19:56:46.000Z
2021-09-02T11:14:28.000Z
def sum(a, b): c = a+b return c x = 10 y = 50 print "Result of addition ", sum(x,y)
14.166667
37
0.588235
20
85
2.5
0.7
0.08
0
0
0
0
0
0
0
0
0
0.0625
0.247059
85
6
37
14.166667
0.71875
0
0
0
0
0
0.22093
0
0
0
0
0
0
0
null
null
0
0
null
null
0.166667
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
253ee7f0fe823162aff4f0b447e10c654f47bdab
486
py
Python
DAY 1/Set Matrix Zero/Using Dummy Matrix <Python>.py
KishanMishra1/SDE-s-Sheet-
aa372dd6fba56dac00e23cdc0acc9187f0ccca24
[ "Unlicense" ]
null
null
null
DAY 1/Set Matrix Zero/Using Dummy Matrix <Python>.py
KishanMishra1/SDE-s-Sheet-
aa372dd6fba56dac00e23cdc0acc9187f0ccca24
[ "Unlicense" ]
null
null
null
DAY 1/Set Matrix Zero/Using Dummy Matrix <Python>.py
KishanMishra1/SDE-s-Sheet-
aa372dd6fba56dac00e23cdc0acc9187f0ccca24
[ "Unlicense" ]
null
null
null
class Solution: def setZeroes(self, matrix): dummy1=[1]*len(matrix) dummy2=[1]*len(matrix[0]) for i in range(len(matrix)): for j in range(len(matrix[0])): if matrix[i][j]==0: dummy1[i]=0 dummy2[j]=0 for i in range(len(matrix)): for j in range(len(matrix[0])): if dummy1[i]==0 or dummy2[j]==0: matrix[i][j]=0 return matrix
25.578947
48
0.44856
66
486
3.30303
0.287879
0.247706
0.183486
0.293578
0.40367
0.40367
0.40367
0.40367
0.40367
0.40367
0
0.059028
0.407407
486
18
49
27
0.697917
0
0
0.285714
0
0
0
0
0
0
0
0
0
1
0.071429
false
0
0
0
0.214286
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
254627a6acb20b1c3f0830f4cc4e24f2e0f759e6
231
py
Python
HCP_DataProcessor/Data_Process/_PCA_Filter.py
xinyuwang1209/HCP_DataProcessor
75ecaa762a84d4070cd384452c40685d3aa162ed
[ "MIT" ]
null
null
null
HCP_DataProcessor/Data_Process/_PCA_Filter.py
xinyuwang1209/HCP_DataProcessor
75ecaa762a84d4070cd384452c40685d3aa162ed
[ "MIT" ]
null
null
null
HCP_DataProcessor/Data_Process/_PCA_Filter.py
xinyuwang1209/HCP_DataProcessor
75ecaa762a84d4070cd384452c40685d3aa162ed
[ "MIT" ]
null
null
null
import numpy as np import pandas as pd from sklearn.decomposition import PCA def pca_filter(X,n): pca = PCA(n_components=n) pca.fit(X) pd_pca = pd.DataFrame(pca.transform(X)) return pd_pca, pca.explained_variance_
23.1
43
0.731602
39
231
4.179487
0.538462
0.04908
0
0
0
0
0
0
0
0
0
0
0.177489
231
9
44
25.666667
0.857895
0
0
0
0
0
0
0
0
0
0
0
0
1
0.125
false
0
0.375
0
0.625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
2554994b02627d452c8dee6287bb92decc60aa1f
301
py
Python
setup.py
shatakshipachori/Distributions_package
834bdbd896c5cad20ef570771835716f5cb47cf8
[ "MIT" ]
null
null
null
setup.py
shatakshipachori/Distributions_package
834bdbd896c5cad20ef570771835716f5cb47cf8
[ "MIT" ]
null
null
null
setup.py
shatakshipachori/Distributions_package
834bdbd896c5cad20ef570771835716f5cb47cf8
[ "MIT" ]
null
null
null
from setuptools import setup setup(name='distributions-shatakshi700', version='1.2', description='Gaussian and Binomial distributions', packages=['distributions-shatakshi700'], author = 'Shatakshi Pachori', author_email = 'shatakshi700@gmail.com', zip_safe=False)
30.1
56
0.700997
30
301
6.966667
0.8
0.239234
0
0
0
0
0
0
0
0
0
0.044898
0.186047
301
9
57
33.444444
0.808163
0
0
0
0
0
0.428571
0.245847
0
0
0
0
0
1
0
true
0
0.125
0
0.125
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
c2651040659692d04d376f2423bef3c04530fe87
343
py
Python
2021/06/p2.py
jo3-l/advent
22c0e8feb594bcb1d9f36b464bd735c6a8ab4ea0
[ "MIT" ]
null
null
null
2021/06/p2.py
jo3-l/advent
22c0e8feb594bcb1d9f36b464bd735c6a8ab4ea0
[ "MIT" ]
null
null
null
2021/06/p2.py
jo3-l/advent
22c0e8feb594bcb1d9f36b464bd735c6a8ab4ea0
[ "MIT" ]
null
null
null
import re from functools import cache def lmap(f, it): return list(map(f, it)) def ints(it): return lmap(int, it) @cache def F(d, s): reset_at = d - s - 1 if reset_at < 0: return 1 return F(reset_at, 6) + F(reset_at, 8) def solve(input): return sum(F(256, x) for x in ints(re.findall(r"-?\d+", input)))
14.913043
68
0.586006
64
343
3.078125
0.5
0.142132
0.081218
0
0
0
0
0
0
0
0
0.031621
0.262391
343
22
69
15.590909
0.747036
0
0
0
0
0
0.014577
0
0
0
0
0
0
1
0.285714
false
0
0.142857
0.214286
0.785714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
c27e0a5bc108a3b94464fa9fee46c9c9a08f9f7f
236
py
Python
hood_watch/admin.py
JuneMuoti/Hood-watch
2659a0a0b4025e4cbb9680f1de078de7801f46b9
[ "MIT" ]
null
null
null
hood_watch/admin.py
JuneMuoti/Hood-watch
2659a0a0b4025e4cbb9680f1de078de7801f46b9
[ "MIT" ]
null
null
null
hood_watch/admin.py
JuneMuoti/Hood-watch
2659a0a0b4025e4cbb9680f1de078de7801f46b9
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import User,Business,Neighbourhood,Post admin.site.register(User) admin.site.register(Business) admin.site.register(Neighbourhood) admin.site.register(Post) # Register your models here.
19.666667
52
0.813559
32
236
6
0.4375
0.1875
0.354167
0
0
0
0
0
0
0
0
0
0.088983
236
11
53
21.454545
0.893023
0.110169
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
c27e2fefecac84ecec830e65d34c3d713c8d75a4
2,226
py
Python
auto_struct/data_types/enums/base_enum.py
Valmarelox/auto_struct
ec06fc426d468d4d01f300add3081df9eda87f41
[ "MIT" ]
7
2020-09-03T20:54:13.000Z
2022-03-09T01:21:07.000Z
auto_struct/data_types/enums/base_enum.py
Valmarelox/auto_struct
ec06fc426d468d4d01f300add3081df9eda87f41
[ "MIT" ]
null
null
null
auto_struct/data_types/enums/base_enum.py
Valmarelox/auto_struct
ec06fc426d468d4d01f300add3081df9eda87f41
[ "MIT" ]
null
null
null
from struct import Struct from types import FunctionType from typing import Optional, Sequence, Any, Dict from auto_struct.data_types.base.base_type import BaseTypeMeta, BaseType from auto_struct.exceptions.enum import NoSuchEnumElement class BaseEnumMeta(BaseTypeMeta): def __new__(mcs, cls: str, bases: Sequence[type], classdict: Dict[str, Any]): element_type = classdict.get('__ELEMENT_TYPE__', None) if not element_type: for base in bases: if hasattr(base, '__ELEMENT_TYPE__'): element_type = base.__ELEMENT_TYPE__ break else: raise TypeError(f'__ELEMENT_TYPE__ Not defined for class {cls}') values = {} for key in classdict.copy(): if not key.startswith('_') and not isinstance(classdict[key], FunctionType): values[key] = element_type(classdict[key]) classdict['__VALUES__'] = values cls = super().__new__(mcs, cls, bases, classdict) for item in values: setattr(cls, item, cls(cls.__dict__[item])) return cls @property def struct(cls) -> Optional[Struct]: return cls.__ELEMENT_TYPE__.struct class BaseEnum(BaseType, metaclass=BaseEnumMeta): __ELEMENT_TYPE__ = type(None) def __init__(self, value): # TODO: IS this this? self.value = self.__ELEMENT_TYPE__(value) self.verify() def verify(self) -> bool: if self.value not in self.__VALUES__.values(): raise NoSuchEnumElement(f'Value {self.value} not in enum {type(self).__name__}') def __repr__(self): for (key, value) in self.__VALUES__.items(): if self.value == value: return f'{type(self).__name__}.{key}' def __int__(self): return int(self.value) def __str__(self): return str(self.value) def __bytes__(self): return bytes(self.value) def __bool__(self): return bool(self.value) def __eq__(self, other): return type(self) is type(other) and self.value == other.value def to_json(self): return self.value def __hash__(self): return hash(self.value)
29.68
92
0.626685
267
2,226
4.816479
0.273408
0.083981
0.046656
0.021773
0
0
0
0
0
0
0
0
0.271788
2,226
74
93
30.081081
0.793337
0.008535
0
0
0
0
0.075283
0.021769
0
0
0
0.013514
0
1
0.226415
false
0
0.09434
0.150943
0.566038
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
1
0
0
0
3
c2ab9cbd81fe05dd5891887b66cf1d30223353eb
1,314
py
Python
cauldron/session/writing/components/__init__.py
JohnnyPeng18/cauldron
09120c2a4cef65df46f8c0c94f5d79395b3298cd
[ "MIT" ]
90
2016-09-02T15:11:10.000Z
2022-01-02T11:37:57.000Z
cauldron/session/writing/components/__init__.py
JohnnyPeng18/cauldron
09120c2a4cef65df46f8c0c94f5d79395b3298cd
[ "MIT" ]
86
2016-09-23T16:52:22.000Z
2022-03-31T21:39:56.000Z
cauldron/session/writing/components/__init__.py
JohnnyPeng18/cauldron
09120c2a4cef65df46f8c0c94f5d79395b3298cd
[ "MIT" ]
261
2016-12-22T05:36:48.000Z
2021-11-26T12:40:42.000Z
from cauldron.session import projects from cauldron.session.writing.components import bokeh_component from cauldron.session.writing.components import definitions from cauldron.session.writing.components import plotly_component from cauldron.session.writing.components import project_component from cauldron.session.writing.components.definitions import COMPONENT from cauldron.session.writing.components.definitions import WEB_INCLUDE def _get_components(lib_name: str, project: 'projects.Project') -> COMPONENT: if lib_name == 'bokeh': return bokeh_component.create(project) if lib_name == 'plotly': return plotly_component.create(project) # Unknown components will just return as empty components. There used # to be a shared component type that was removed in 1.0.0, but hadn't # been used for a long time before that. If that becomes interesting # again old code can be reviewed to see how shared components once # worked. return COMPONENT([], []) def get(step: 'projects.ProjectStep') -> COMPONENT: """...""" return definitions.merge_components( project_component.create_many(step.project, step.web_includes), *[ _get_components(name, step.project) for name in step.report.library_includes ], )
38.647059
77
0.740487
165
1,314
5.787879
0.4
0.087958
0.139267
0.163351
0.324607
0.324607
0.236649
0.129843
0
0
0
0.002783
0.179604
1,314
33
78
39.818182
0.883117
0.21309
0
0
0
0
0.045898
0
0
0
0
0
0
1
0.095238
false
0
0.333333
0
0.619048
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
c2aedd92889e7976fbd8c27dd8de6a194403044e
45
py
Python
blueking/__init__.py
jin-cc/bastion-test
9feecbe927e5446213ab25b4da4a5eca23cf6bae
[ "Apache-2.0" ]
42
2021-06-16T12:06:03.000Z
2022-03-29T13:18:00.000Z
blueking/__init__.py
jin-cc/bastion-test
9feecbe927e5446213ab25b4da4a5eca23cf6bae
[ "Apache-2.0" ]
3
2020-06-05T20:56:09.000Z
2021-06-10T21:29:05.000Z
blueking/__init__.py
wangzishuo111/bk_prometheus
c6aa16d8a547a3d00fbca317f6846ad35b1297ea
[ "MIT" ]
16
2021-07-13T01:17:57.000Z
2022-03-01T12:39:32.000Z
# -*- coding: utf-8 -*- __author__ = u"蓝鲸智云"
15
23
0.555556
6
45
3.5
1
0
0
0
0
0
0
0
0
0
0
0.027027
0.177778
45
2
24
22.5
0.540541
0.466667
0
0
0
0
0.181818
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
c2f45e527cf984be7dfa85791bbd05f503f58200
333
py
Python
basic/arithmetic.py
anuragarwalkar/basic-python
1de8088b29247a4851c31e1c03fe168945f06951
[ "MIT" ]
null
null
null
basic/arithmetic.py
anuragarwalkar/basic-python
1de8088b29247a4851c31e1c03fe168945f06951
[ "MIT" ]
null
null
null
basic/arithmetic.py
anuragarwalkar/basic-python
1de8088b29247a4851c31e1c03fe168945f06951
[ "MIT" ]
null
null
null
# Arithmetic Operators import math print(math.ceil(2.9)) # 3 print(math.floor(2.9)) # 2 print(10+3) # 13 print(10-3) # 7 print(10/3) # 3.3 Floating point number print(10//3) # 3 Int print(10%3) # 1 print(10*3) # 30 print(10**3) # 1000 10 to the power of 3 print(abs(-2.9)) # 2.9 x = 10 x += 3 print(x) x = 10 + 3 * 2 print(x)
13.32
40
0.606607
73
333
2.767123
0.356164
0.118812
0.277228
0.089109
0
0
0
0
0
0
0
0.203008
0.201201
333
24
41
13.875
0.556391
0.288288
0
0.125
0
0
0
0
0
0
0
0
0
1
0
false
0
0.0625
0
0.0625
0.75
0
0
0
null
0
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
c2f7632e4a6d22e87a4ba06e837c9df0ab2bdf52
569
py
Python
migrations/versions/0297b_change_primary_service.py
davidbgk/notification-api
0ede6a61b48289236d1873124965d2bc22a9b27b
[ "MIT" ]
10
2020-05-04T14:11:06.000Z
2022-02-22T19:06:36.000Z
migrations/versions/0297b_change_primary_service.py
davidbgk/notification-api
0ede6a61b48289236d1873124965d2bc22a9b27b
[ "MIT" ]
554
2020-05-07T21:56:24.000Z
2022-03-31T23:04:51.000Z
migrations/versions/0297b_change_primary_service.py
davidbgk/notification-api
0ede6a61b48289236d1873124965d2bc22a9b27b
[ "MIT" ]
4
2020-08-27T16:43:29.000Z
2021-02-17T22:17:27.000Z
""" Revision ID: 0297b_change_primary_service Revises: 0297a_add_sns_provider Create Date: 2019-07-09 13:01:46.993577 """ from alembic import op import sqlalchemy as sa revision = '0297b_change_primary_service' down_revision = '0297a_add_sns_provider' def upgrade(): op.execute("UPDATE services SET name = 'Notification', email_from = 'notification' where id='d6aa2c68-a2d9-4437-ab19-3ae8eb202553'") def downgrade(): op.execute("UPDATE services SET name = 'GOV.UK Notify', email_from = 'gov.uk.notify' where id='d6aa2c68-a2d9-4437-ab19-3ae8eb202553'")
24.73913
138
0.760984
81
569
5.160494
0.580247
0.052632
0.086124
0.119617
0.330144
0.330144
0.186603
0
0
0
0
0.152
0.121265
569
22
139
25.863636
0.684
0.198594
0
0
0
0.25
0.647191
0.296629
0
0
0
0
0
1
0.25
false
0
0.25
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
c2ffebd56640c555ab280690853007ed121e7766
181
py
Python
tests/test_init.py
xiaojieluo/pelican-manager
9e0839074d9b50faa3ee6a20df1f415e9ba15b0b
[ "Apache-2.0" ]
1
2018-02-01T02:21:51.000Z
2018-02-01T02:21:51.000Z
tests/test_init.py
xiaojieluo/pelican-manager
9e0839074d9b50faa3ee6a20df1f415e9ba15b0b
[ "Apache-2.0" ]
null
null
null
tests/test_init.py
xiaojieluo/pelican-manager
9e0839074d9b50faa3ee6a20df1f415e9ba15b0b
[ "Apache-2.0" ]
null
null
null
from flask import Flask from pelican_manager import make_app def test_make_app(): path = 'tests/pelicanconf.py' app = make_app(path) assert isinstance(app, Flask)
20.111111
36
0.718232
26
181
4.807692
0.576923
0.168
0.176
0
0
0
0
0
0
0
0
0
0.20442
181
8
37
22.625
0.868056
0
0
0
0
0
0.110497
0
0
0
0
0
0.166667
1
0.166667
false
0
0.333333
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
6c10a052fdfd7a3c7592723537df2f1f8919dd78
80
py
Python
tests/experiments/exp_dict.py
jonathanchukinas/fuzzytable
3d574047c3a8b0c28ab6a00436526c92ca1ea6d2
[ "MIT" ]
1
2019-11-22T21:16:34.000Z
2019-11-22T21:16:34.000Z
tests/experiments/exp_dict.py
jonathanchukinas/fuzzytable
3d574047c3a8b0c28ab6a00436526c92ca1ea6d2
[ "MIT" ]
3
2019-11-22T13:16:44.000Z
2019-11-26T19:49:39.000Z
tests/experiments/exp_dict.py
jonathanchukinas/fuzzytable
3d574047c3a8b0c28ab6a00436526c92ca1ea6d2
[ "MIT" ]
null
null
null
a = {'a': 1, 'b': 2} b = a del b['a'] print(a) print(b) c = 5 del a del b, c
6.666667
20
0.4375
21
80
1.666667
0.380952
0.114286
0.285714
0
0
0
0
0
0
0
0
0.053571
0.3
80
11
21
7.272727
0.571429
0
0
0
0
0
0.037975
0
0
0
0
0
0
1
0
false
0
0
0
0
0.25
1
0
1
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
6c1806ade49fd269866f88ff796303c3b0b56bd1
379
py
Python
utils/config.py
a1401358759/my_site
9ed227f825f1c25c903a10271ea429fba1e1ee73
[ "MIT" ]
50
2019-02-19T09:57:07.000Z
2021-11-09T12:02:14.000Z
utils/config.py
a1401358759/my_site
9ed227f825f1c25c903a10271ea429fba1e1ee73
[ "MIT" ]
17
2019-12-13T07:09:53.000Z
2021-12-11T03:57:58.000Z
utils/config.py
a1401358759/my_site
9ed227f825f1c25c903a10271ea429fba1e1ee73
[ "MIT" ]
11
2019-02-19T09:58:08.000Z
2021-03-28T13:22:20.000Z
import os # 数据库配置 MYSQL_HOST = os.getenv('MYSQL_HOST', 'localhost') MYSQL_PORT = os.getenv('MYSQL_PORT', '3306') MYSQL_DATABASE = os.getenv('MYSQL_DATABASE', 'my-site') MYSQL_USER = os.getenv('MYSQL_USER', 'admin') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD', 'root123') # redis配置 REDIS_HOST = os.getenv('REDIS_HOST', '127.0.0.1') REDIS_PORT = os.getenv('REDIS_PORT', '6379')
27.071429
55
0.720317
57
379
4.54386
0.385965
0.216216
0.250965
0
0
0
0
0
0
0
0
0.049708
0.097625
379
13
56
29.153846
0.707602
0.034301
0
0
0
0
0.338843
0
0
0
0
0
0
1
0
false
0.125
0.125
0
0.125
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
3
6c1aaf410d35d52c4541e926d9cd25b359b2f627
4,292
py
Python
app/models.py
tonymontaro/algo-notebook-api
c6c240ca15491374e40a97aee67caf5f7cb9cc01
[ "MIT" ]
null
null
null
app/models.py
tonymontaro/algo-notebook-api
c6c240ca15491374e40a97aee67caf5f7cb9cc01
[ "MIT" ]
null
null
null
app/models.py
tonymontaro/algo-notebook-api
c6c240ca15491374e40a97aee67caf5f7cb9cc01
[ "MIT" ]
null
null
null
"""Application models.""" import os from flask_login import UserMixin from werkzeug.security import generate_password_hash, check_password_hash from app import db, login_manager class DBHelper(object): """Perform common SQLAlchemy tasks.""" @staticmethod def add(item): """Add item to database.""" db.session.add(item) db.session.commit() @staticmethod def delete(item): """Delete an item from the database.""" db.session.delete(item) db.session.commit() class User(UserMixin, db.Model): """User model, used for registration and login.""" id = db.Column(db.Integer, primary_key=True) email = db.Column(db.String(255), unique=True, nullable=False) username = db.Column(db.String(255), nullable=False) role = db.Column(db.String(255), default='user') password = db.Column(db.String(255), nullable=False) algorithms = db.relationship('Algorithm', backref='user', lazy=True) def set_password(self, password): """Set user password hash.""" self.password = generate_password_hash(password) def check_password(self, password): """Verify user's password.""" return check_password_hash(self.password, password) @staticmethod def register(email, password, username=None, role='user'): """Register a user.""" prev_user = User.query.filter_by(email=email).first() if email and password and not prev_user: username = username or email user = User(email=email, username=username, role=role) user.set_password(password) DBHelper.add(user) return user return None @staticmethod def get_user(email, password): """Find and authenticate a user.""" user = User.query.filter_by(email=email).first() if user and user.check_password(password): return user return None class Algorithm(db.Model): """Algorithm model.""" id = db.Column(db.Integer, primary_key=True) title = db.Column(db.String(255), nullable=False) content = db.Column(db.String()) sub_category = db.Column(db.String(255)) user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False) category_id = db.Column(db.Integer, 
db.ForeignKey('category.id')) access = db.Column(db.String(100), default='public') def save(self): DBHelper.add(self) def delete(self): DBHelper.delete(self) @staticmethod def add(**kwargs): """Add item to database.""" algorithm = Algorithm(**kwargs) DBHelper.add(algorithm) return algorithm @staticmethod def get(id_): return Algorithm.query.get(id_) def get_secure_attributes(self): """Return secure attributes as a Dict.""" return { 'id': self.id, 'title': self.title, 'content': self.content, 'category_id': self.category_id, 'sub_category': self.sub_category, 'user_id': self.user_id, 'access': self.access } class Category(db.Model): """Category model.""" id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(255), nullable=False, unique=True) algorithms = db.relationship('Algorithm', backref='category', lazy=True) def save(self): return DBHelper.add(self) def delete(self): return DBHelper.delete(self) @staticmethod def get(id_): return Category.query.get(id_) @staticmethod def add(name): """Add item to database.""" if Category.query.filter_by(name=name).first(): return None category = Category(name=name) DBHelper.add(category) return category @login_manager.user_loader def load_user(user_id): """User loader for Flask-Login.""" return User.query.get(user_id) def seed_db(): """Seed the database.""" admin = User.query.filter_by(username='admin').first() admin_pass = os.getenv('ADMIN_PASSWORD') admin_email = os.getenv('ADMIN_EMAIL') if not admin and admin_pass and admin_email: User.register(username='admin', password=admin_pass, email=admin_email, role='admin')
29.197279
77
0.63164
528
4,292
5.034091
0.191288
0.042137
0.052671
0.054176
0.252445
0.162152
0.141084
0.069601
0.057186
0
0
0.007366
0.240913
4,292
146
78
29.39726
0.808471
0.095294
0
0.244898
1
0
0.039979
0
0
0
0
0
0
1
0.173469
false
0.142857
0.040816
0.040816
0.561224
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
3
6c208afc17aef9a8c9497631a6cb2c9f67254270
470
py
Python
test/functional/test-framework/log/presentation_policy.py
josehu07/open-cas-linux-mf
5c6870be8bbb6816645955b6e479c9b5c7c0074d
[ "BSD-3-Clause-Clear" ]
2
2021-08-13T14:44:45.000Z
2022-01-10T07:41:40.000Z
test/functional/test-framework/log/presentation_policy.py
josehu07/open-cas-linux-mf
5c6870be8bbb6816645955b6e479c9b5c7c0074d
[ "BSD-3-Clause-Clear" ]
null
null
null
test/functional/test-framework/log/presentation_policy.py
josehu07/open-cas-linux-mf
5c6870be8bbb6816645955b6e479c9b5c7c0074d
[ "BSD-3-Clause-Clear" ]
null
null
null
# # Copyright(c) 2019-2020 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause-Clear # class PresentationPolicy: def __init__(self, standard_log, group_begin_func): self.standard = standard_log self.group_begin = group_begin_func def std_log_entry(msg_id, msg, log_result, html_node): pass def group_log_begin(msg_id, msg, html_node): return html_node, html_node null_policy = PresentationPolicy(std_log_entry, group_log_begin)
21.363636
64
0.757447
68
470
4.838235
0.5
0.097264
0.085106
0
0
0
0
0
0
0
0
0.022785
0.159574
470
21
65
22.380952
0.810127
0.178723
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0.111111
0
0.111111
0.555556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
0
0
0
3
6c43a909dc8dfc3030e47ebde6e1dd09283f325d
2,468
py
Python
example_project/example_app/views.py
warrenwestfall/django-custom-table
dab69502661ed272ffb4d4a73aa4c3de3b54805b
[ "MIT" ]
null
null
null
example_project/example_app/views.py
warrenwestfall/django-custom-table
dab69502661ed272ffb4d4a73aa4c3de3b54805b
[ "MIT" ]
null
null
null
example_project/example_app/views.py
warrenwestfall/django-custom-table
dab69502661ed272ffb4d4a73aa4c3de3b54805b
[ "MIT" ]
null
null
null
import json from django.shortcuts import render from django.http import HttpResponse, JsonResponse from custom_table.views import BaseMetadataView, BaseCustomTableView from example_app.models import RestSpaFormatMetadata class RestMetadataListView(BaseMetadataView): def get(self, request): return JsonResponse(self.get_list(), safe=False) def post(self, request): new_record = self.create(json.loads(request.body)) return JsonResponse({'pk': new_record.pk}, status=201) class RestMetadataDetailView(BaseMetadataView): always_update_fields = ['modified'] def get(self, request, name_or_pk): return JsonResponse(self.get_detail(name_or_pk), safe=False) def patch(self, request, name_or_pk): self.update_fields(name_or_pk, json.loads(request.body)) return HttpResponse(status=202) def delete(self, request, name_or_pk): self.delete_record(name_or_pk) return HttpResponse(status=204) class RestCustomTableListView(BaseCustomTableView): include_metadata = False def get(self, request): return JsonResponse(self.get_grid_list(), safe=False) def post(self, request): new_record = self.create(json.loads(request.body)) return JsonResponse({'pk': new_record.pk}, status=201) class RestCustomTableDetailView(BaseCustomTableView): include_metadata = False def get(self, request, pk): return JsonResponse(self.get_detail(pk), safe=False) def patch(self, request, pk): self.update_fields(pk, json.loads(request.body)) return HttpResponse(status=202) def delete(self, request, pk): self.delete_record(pk) return HttpResponse(status=204) class HtmlCustomTableListView(BaseCustomTableView): metadata_model = RestSpaFormatMetadata # queryset = ExampleCustomTable.objects.all() # context_object_name = 'example_custom_table_list' # template_name = 'examplecustomtable_list.html' def get(self, request): return render(request, 'custom_table_list.html', self.get_grid_list()) class HtmlCustomTableDetailView(BaseCustomTableView): metadata_model = RestSpaFormatMetadata # queryset = ExampleCustomTable.objects.all() # 
context_object_name = 'example_custom_table_list' # template_name = 'examplecustomtable_list.html' def get(self, request, pk): return render(request, 'custom_table_edit.html', self.get_detail(pk))
30.097561
78
0.72812
284
2,468
6.137324
0.232394
0.075732
0.034423
0.05852
0.694205
0.653471
0.558233
0.52381
0.421113
0.421113
0
0.008854
0.176256
2,468
81
79
30.469136
0.8485
0.113857
0
0.413043
0
0
0.025688
0.020183
0
0
0
0
0
1
0.26087
false
0
0.108696
0.130435
0.869565
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
665d3dce9f9ec229d0ec0f2bd42acb3c7935387a
81
py
Python
DNA/apps.py
shym98/DNA_circuits
6882adece5b2b70317d47e2495d91890606c6982
[ "MIT" ]
null
null
null
DNA/apps.py
shym98/DNA_circuits
6882adece5b2b70317d47e2495d91890606c6982
[ "MIT" ]
null
null
null
DNA/apps.py
shym98/DNA_circuits
6882adece5b2b70317d47e2495d91890606c6982
[ "MIT" ]
null
null
null
from django.apps import AppConfig class DnaConfig(AppConfig): name = 'DNA'
13.5
33
0.728395
10
81
5.9
0.9
0
0
0
0
0
0
0
0
0
0
0
0.185185
81
5
34
16.2
0.893939
0
0
0
0
0
0.037037
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
665d54ab35847d701eb03a1e4ed5011fb9146fde
1,732
py
Python
mobile/kivy/utils.py
b3j0f/simpleneed
85defc25380f1f320e12285d337dc35f59401ab0
[ "MIT" ]
3
2016-10-26T12:16:10.000Z
2017-02-24T18:24:19.000Z
mobile/kivy/utils.py
b3j0f/simpleneed
85defc25380f1f320e12285d337dc35f59401ab0
[ "MIT" ]
14
2016-10-17T22:24:56.000Z
2017-04-29T17:46:14.000Z
mobile/kivy/utils.py
b3j0f/simpleneed
85defc25380f1f320e12285d337dc35f59401ab0
[ "MIT" ]
null
null
null
import requests from settings import url, IMAGES, DATA from kivy.core.image import Image from kivy.uix.dropdown import DropDown from kivy.uix.slider import Slider from os.path import join class BaseDropDown(DropDown): NAME = None def __init__(self, *args, **kwargs): super(BaseDropDown, self).__init__(*args, **kwargs) self.values = getvalues(self.NAME + 's') self.loadimages() def loadimages(self): self.children = [] map( lambda name: self.add_widget(Image(source=getimage(name))), self.values ) class BaseSlider(Slider): NAME = None def __init__(self, *args, **kwargs): super(BaseSlider, self).__init__(*args, **kwargs) self.values = getvalues(self.NAME + 's') self.max = len(self.values) - 1 def on_value(self, value): self.cursor_image = getimage(self.values[value]) def get(query, params=None): return requests.get(url + query, params=params) def getresults(query): return get(url + query).json()['results'] def post(query, data, files=None): return requests.post(url + query, data=data, files=files) def put(query, data): return requests.put(url + query, data=data) def delete(query): return requests.delete(url + query) def Spinner(Spinner): def __init__(self, *args, **kwargs): super(Spinner, self).__init__(*args, **kwargs) self.values = [item['name'] for item in getresults(self.NAME + 's')] def getvalues(name): return [item[name] for item in getresults(url + name)] def getimage(name, ext='png'): return join(IMAGES, '{0}.{1}'.format(name, ext)) def getdata(name): return join(DATA, name)
17.85567
76
0.639145
224
1,732
4.821429
0.285714
0.055556
0.030556
0.041667
0.255556
0.255556
0.155556
0.155556
0.092593
0.092593
0
0.002234
0.224596
1,732
96
77
18.041667
0.801936
0
0
0.152174
0
0
0.013857
0
0
0
0
0
0
1
0.304348
false
0
0.130435
0.173913
0.695652
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
665f487d002342b10ac9928435e89eb23d30dda6
238
py
Python
songure-api/app/api/api.py
MatthewSaintBull/songure-api
673ed9243c69969c96d08397ec8bc4da9bf46ade
[ "MIT" ]
null
null
null
songure-api/app/api/api.py
MatthewSaintBull/songure-api
673ed9243c69969c96d08397ec8bc4da9bf46ade
[ "MIT" ]
null
null
null
songure-api/app/api/api.py
MatthewSaintBull/songure-api
673ed9243c69969c96d08397ec8bc4da9bf46ade
[ "MIT" ]
null
null
null
from fastapi import APIRouter from app.api.routes import register, login router = APIRouter() router.include_router(login.router, tags=["login"], prefix="/api" ) router.include_router(register.router, tags=["register"], prefix="/api")
26.444444
72
0.756303
31
238
5.741935
0.419355
0.123596
0.213483
0
0
0
0
0
0
0
0
0
0.092437
238
8
73
29.75
0.824074
0
0
0
0
0
0.088235
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
666462ad3a595cc8be33fa8eb97724de6a04e3d7
2,575
py
Python
tests/tests.py
vhajdari/pycdap
355983fa5b3f2958758658a78f13fa06fc6d52ea
[ "Apache-2.0" ]
1
2021-09-01T17:42:40.000Z
2021-09-01T17:42:40.000Z
tests/tests.py
vhajdari/pycdap
355983fa5b3f2958758658a78f13fa06fc6d52ea
[ "Apache-2.0" ]
null
null
null
tests/tests.py
vhajdari/pycdap
355983fa5b3f2958758658a78f13fa06fc6d52ea
[ "Apache-2.0" ]
2
2020-07-18T09:42:17.000Z
2020-12-08T04:13:09.000Z
from pycdap import Pipeline import json # p = Pipeline('http://Vetons-MBP.home:11015') p = Pipeline() p.connect() print '\n=====================================' print 'url: {}'.format(p.url) # print 'default_uri: {}'.format(p.default_uri) print 'status: {}'.format(p.status) print 'version: {}'.format(p.version) print 'namespaces: {}'.format(p.namespaces) print '=====================================\n' p.export(ns='foo') # p.export() # my_pipeline = '/Users/vetoni/Desktop/pipelines/Test1-cdap-data-pipeline.json' # p.upload('foo', '111', my_pipeline) # print dir(p) # print p._Pipeline__check_namespaces('default', 'PRGX') # p.export(ns='all', pipelines='draft') # === LIST === # p.list() # print json.dumps(p.list('json'), indent=2, sort_keys=True) # === APPS === #apps = c.apps() # print json.dumps(p.apps(), indent=2) # return all apps in the all ns # print json.dumps(p.apps('default'), indent=2) # return all apps in the 'default' ns # print json.dumps(p.apps('default', 'PRGX'), indent=2) # return all apps in the 'default' and 'PRGX' ns # print json.dumps(p.apps('foo'), indent=2) # Terminate, 'foo' is not a valid namespace # print json.dumps(p.apps('foo', 'default'), indent=2) # Terminate: even though 'default is valid 'foo' is not # === DRAFTS === # print json.dumps(p.drafts(), indent=2) # return all drafts in all ns # print json.dumps(p.drafts('default'), indent=2) # return all drafts in 'default' ns # print json.dumps(p.drafts('default', 'PRGX'), indent=2) # return all drafts in 'default' ns # print json.dumps(p.drafts('foo'), indent=2) # Terminate, 'foo' is not a valid namespace # print json.dumps(p.drafts('default', 'foo'), indent=2) # Terminate: even though 'default is valid 'foo' is not # print json.dumps(p.drafts('default','default','default'), indent=2) # return all drafts in 'default' ns # === EXPORT === # namespaces = 'n', 'ns', 'namespace' # types = 'p', 'pipeline', 'pipelines', 'type' # app types: ('app', 'apps', 'deployed') # draft types: ('draft', 
'drafts') # p.export() # Exports all pipelines in all namespaces # p.export(namespace='NS1') # Exports all pipelines for namespace NS1 # p.export(n='default', p='app') # Exports all the deployed pipelines for namespace NS1 # p.export(ns='default', type='app') # Exports all the draft pipelines for namespace NS1 # # p.export(ns='default', type='all') # p.export(ns='default', type='all', o='.') # p.export(ns='default', type='app', o='/tmp')
42.213115
114
0.617864
363
2,575
4.358127
0.192837
0.068268
0.106195
0.11378
0.542351
0.517067
0.396966
0.326802
0.286346
0.201011
0
0.011693
0.169709
2,575
60
115
42.916667
0.728251
0.831845
0
0
0
0
0.325397
0.206349
0
0
0
0
0
0
null
null
0
0.181818
null
null
0.545455
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
3
6665a47b1f38a4a760cf42045ebb4af7e7f7adbc
379
py
Python
main.py
xiaohong2019/crawl_image
4fe7d266e5446b1fa5bfca77669dfcf57a54ed68
[ "Apache-2.0" ]
1
2019-06-08T04:54:43.000Z
2019-06-08T04:54:43.000Z
main.py
xiaohong2019/crawl_image
4fe7d266e5446b1fa5bfca77669dfcf57a54ed68
[ "Apache-2.0" ]
null
null
null
main.py
xiaohong2019/crawl_image
4fe7d266e5446b1fa5bfca77669dfcf57a54ed68
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- from crawl_image.run_factory import run_for_url_list if __name__ == '__main__': # run() # run_for_url_list('C:/Users/xh/Desktop/url/1.txt', img_save_path='D:/crawl/image/1', do_last_url_file_name=True) run_for_url_list('C:/Users/xh/Desktop/url/url.txt', img_save_path='D:/crawl/image/real', do_last_url_file_name=True)
37.9
120
0.725594
69
379
3.536232
0.492754
0.122951
0.110656
0.159836
0.631148
0.631148
0.459016
0.254098
0.254098
0
0
0.008798
0.100264
379
9
121
42.111111
0.706745
0.422164
0
0
0
0
0.269767
0.144186
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
6666d08e23459635d000448a92ff88ce0628b5a5
273
py
Python
tests/test_line.py
themiwi/ggplot
b6d23c22d52557b983da8ce7a3a6992501dadcd6
[ "BSD-2-Clause" ]
1,133
2017-01-10T16:58:15.000Z
2022-03-31T14:40:29.000Z
tests/test_line.py
themiwi/ggplot
b6d23c22d52557b983da8ce7a3a6992501dadcd6
[ "BSD-2-Clause" ]
287
2015-01-02T18:54:17.000Z
2017-01-10T14:48:14.000Z
tests/test_line.py
themiwi/ggplot
b6d23c22d52557b983da8ce7a3a6992501dadcd6
[ "BSD-2-Clause" ]
295
2017-01-16T19:16:49.000Z
2022-02-18T14:10:58.000Z
from ggplot import * import pandas as pd import numpy as np import random x = np.arange(100) random.shuffle(x) df = pd.DataFrame({ 'x': x, 'y': np.arange(100) }) print ggplot(df, aes(x='x', y='y')) + geom_line() print ggplot(df, aes(x='x', y='y')) + geom_path()
17.0625
49
0.622711
49
273
3.428571
0.428571
0.035714
0.053571
0.190476
0.285714
0.285714
0.285714
0.285714
0.285714
0
0
0.026786
0.179487
273
15
50
18.2
0.723214
0
0
0
0
0
0.021978
0
0
0
0
0
0
0
null
null
0
0.333333
null
null
0.166667
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
3
666f888c9e06f7478379540a96979284a1c28588
85
py
Python
fortunecookie/__init__.py
ninemoreminutes/django-fortunecookie
7d3cd01a942a08b60f8d095dd57e1322db0c5b9e
[ "BSD-3-Clause" ]
1
2017-06-29T19:50:41.000Z
2017-06-29T19:50:41.000Z
fortunecookie/__init__.py
ninemoreminutes/django-fortunecookie
7d3cd01a942a08b60f8d095dd57e1322db0c5b9e
[ "BSD-3-Clause" ]
1
2020-06-05T19:39:59.000Z
2020-06-05T19:39:59.000Z
fortunecookie/__init__.py
ninemoreminutes/django-fortunecookie
7d3cd01a942a08b60f8d095dd57e1322db0c5b9e
[ "BSD-3-Clause" ]
null
null
null
__version__ = '0.3.0' default_app_config = 'fortunecookie.apps.FortuneCookieConfig'
21.25
61
0.8
10
85
6.2
0.9
0
0
0
0
0
0
0
0
0
0
0.038462
0.082353
85
3
62
28.333333
0.75641
0
0
0
0
0
0.505882
0.447059
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
667d34b2fcd073e20dc76681102ce2c66a8721f3
457
py
Python
documentscraper/request_scraper.py
gfournier/document-scraper
09d0817fea394d439a4c7c6e77fe9bf0bf3d765f
[ "MIT" ]
1
2020-09-15T21:54:11.000Z
2020-09-15T21:54:11.000Z
documentscraper/request_scraper.py
gfournier/document-scraper
09d0817fea394d439a4c7c6e77fe9bf0bf3d765f
[ "MIT" ]
null
null
null
documentscraper/request_scraper.py
gfournier/document-scraper
09d0817fea394d439a4c7c6e77fe9bf0bf3d765f
[ "MIT" ]
null
null
null
import requests from lxml import html from documentscraper.base import ScraperEngineBase class RequestsScraperEngine(ScraperEngineBase): def get_page(self, url: str): response = requests.get(url) response.raise_for_status() return html.fromstring(response.content) def get_element(self, page, xpath: str): pass def navigate(self, page, element): pass def as_string(self, element): pass
20.772727
50
0.68709
53
457
5.830189
0.54717
0.038835
0
0
0
0
0
0
0
0
0
0
0.236324
457
21
51
21.761905
0.885387
0
0
0.214286
0
0
0
0
0
0
0
0
0
1
0.285714
false
0.214286
0.214286
0
0.642857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
3
66984d626a28ddaa2d7b0d36937e3c1290850bb9
289
py
Python
top/api/rest/ShopcatsListGetRequest.py
forestsheep/middleman
34d54f9ffd9d7bcd775a8dcce4f00dd6c5bb1acd
[ "MIT" ]
null
null
null
top/api/rest/ShopcatsListGetRequest.py
forestsheep/middleman
34d54f9ffd9d7bcd775a8dcce4f00dd6c5bb1acd
[ "MIT" ]
null
null
null
top/api/rest/ShopcatsListGetRequest.py
forestsheep/middleman
34d54f9ffd9d7bcd775a8dcce4f00dd6c5bb1acd
[ "MIT" ]
null
null
null
''' Created by auto_sdk on 2016.03.19 ''' from top.api.base import RestApi class ShopcatsListGetRequest(RestApi): def __init__(self,domain='gw.api.taobao.com',port=80): RestApi.__init__(self,domain, port) self.fields = None def getapiname(self): return 'taobao.shopcats.list.get'
24.083333
55
0.747405
43
289
4.813953
0.744186
0.077295
0.135266
0
0
0
0
0
0
0
0
0.039063
0.114187
289
11
56
26.272727
0.769531
0.114187
0
0
0
0
0.165323
0.096774
0
0
0
0
0
1
0.285714
false
0
0.142857
0.142857
0.714286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
66af013c1bee87d5152f1cd3209498f3fd9d5d51
262
py
Python
what_is_the_mixin/demo2_1.py
NightmareQAQ/python-notes
4e766be06073a495ff9654f0dd8c0bb03310c559
[ "MIT" ]
106
2017-05-02T10:25:50.000Z
2022-03-23T14:57:28.000Z
what_is_the_mixin/demo2_1.py
NightmareQAQ/python-notes
4e766be06073a495ff9654f0dd8c0bb03310c559
[ "MIT" ]
2
2021-01-14T15:07:15.000Z
2021-12-21T07:18:05.000Z
what_is_the_mixin/demo2_1.py
NightmareQAQ/python-notes
4e766be06073a495ff9654f0dd8c0bb03310c559
[ "MIT" ]
42
2017-07-31T07:07:38.000Z
2021-12-26T09:36:55.000Z
class HelloMixin: def display(self): print('HelloMixin hello') class SuperHelloMixin: def display(self): print('SuperHello hello') class A(SuperHelloMixin, HelloMixin): pass if __name__ == '__main__': a = A() a.display()
15.411765
37
0.633588
28
262
5.642857
0.5
0.126582
0.177215
0.240506
0
0
0
0
0
0
0
0
0.248092
262
16
38
16.375
0.80203
0
0
0.181818
0
0
0.153846
0
0
0
0
0
0
1
0.181818
false
0.090909
0
0
0.454545
0.181818
1
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
3
66ba7f889c594f4543e6649cf2535c0bbe1d534b
361
py
Python
Week 2/Week2/ex1.3.py
rmit-s3559384-andrew-alvaro/IoT
ec444d0b037ddbd2e3aab01c34ea57fd2bd51d5f
[ "MIT" ]
null
null
null
Week 2/Week2/ex1.3.py
rmit-s3559384-andrew-alvaro/IoT
ec444d0b037ddbd2e3aab01c34ea57fd2bd51d5f
[ "MIT" ]
1
2021-06-01T23:39:58.000Z
2021-06-01T23:39:58.000Z
Week 2/Week2/ex1.3.py
AndrewAlvaro/IoT
ec444d0b037ddbd2e3aab01c34ea57fd2bd51d5f
[ "MIT" ]
null
null
null
def isPalindrome(string): return string == string[::-1] # OR # left_pos = 0 # right_pos = len(string) - 1 # # while right_pos >= left_pos: # if(string[left_pos] != string[right_pos]): # return False # left_pos += 1 # right_pos -= 1 # # return True print(isPalindrome("aza"))
22.5625
53
0.506925
41
361
4.268293
0.414634
0.16
0
0
0
0
0
0
0
0
0
0.021645
0.360111
361
15
54
24.066667
0.735931
0.529086
0
0
0
0
0.020979
0
0
0
0
0
0
1
0.333333
false
0
0
0.333333
0.666667
0.333333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
66cdd668c31abec69ba2dd95f706be5cb64949ef
499
py
Python
mev/api/serializers/resource_types.py
hsph-qbrc/mev-backend
c381800aa7d53d7256e89a4db5a0f9444264e9a6
[ "MIT" ]
2
2021-11-15T08:11:59.000Z
2022-03-12T05:24:23.000Z
mev/api/serializers/resource_types.py
hsph-qbrc/mev-backend
c381800aa7d53d7256e89a4db5a0f9444264e9a6
[ "MIT" ]
37
2020-08-03T14:57:02.000Z
2022-02-25T19:56:40.000Z
mev/api/serializers/resource_types.py
hsph-qbrc/mev-backend
c381800aa7d53d7256e89a4db5a0f9444264e9a6
[ "MIT" ]
2
2021-07-12T03:22:52.000Z
2021-11-15T08:12:01.000Z
import logging from rest_framework import serializers, exceptions logger = logging.getLogger(__name__) class ResourceTypeSerializer(serializers.Serializer): ''' Serializer for describing the types of available Resources that users may choose. ''' resource_type_key = serializers.CharField(max_length=50) resource_type_title = serializers.CharField(max_length=250) resource_type_description = serializers.CharField(max_length=2000) example = serializers.JSONField()
33.266667
70
0.785571
55
499
6.872727
0.672727
0.095238
0.18254
0.230159
0
0
0
0
0
0
0
0.021077
0.144289
499
15
71
33.266667
0.864169
0.162325
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.25
0
0.875
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
dd0aa9b32c7007679473b967673c2bee5ad97270
184
py
Python
Rio_olympics/flags/temp.py
Data-Analytics/data-analytics.github.io
1736745f46e4de941b21fa8cadb0e52ab2abbcad
[ "BSD-3-Clause" ]
12
2015-04-21T21:17:17.000Z
2022-01-31T11:58:25.000Z
Rio_olympics/flags/temp.py
Data-Analytics/data-analytics.github.io
1736745f46e4de941b21fa8cadb0e52ab2abbcad
[ "BSD-3-Clause" ]
null
null
null
Rio_olympics/flags/temp.py
Data-Analytics/data-analytics.github.io
1736745f46e4de941b21fa8cadb0e52ab2abbcad
[ "BSD-3-Clause" ]
16
2015-04-10T16:39:27.000Z
2021-04-04T03:46:51.000Z
import urllib image = urllib.URLopener() for k in xrange(300,400): try: image.retrieve("http://olympicshub.stats.com/flags/48x48/"+str(k)+".png",str(k)+".png") except: print k
26.285714
88
0.679348
29
184
4.310345
0.758621
0.064
0.112
0
0
0
0
0
0
0
0
0.061728
0.119565
184
7
89
26.285714
0.709877
0
0
0
0
0
0.264865
0
0
0
0
0
0
0
null
null
0
0.142857
null
null
0.142857
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
dd11698f25384ae5cba1c6b0ebc18bc6e3aa9934
87
py
Python
queue_messaging/utils/__init__.py
socialwifi/queue-messaging
c400108297823a126e4675fe5b3fb16838e4faaf
[ "BSD-3-Clause" ]
8
2017-01-25T15:51:41.000Z
2019-01-15T13:57:28.000Z
queue_messaging/utils/__init__.py
socialwifi/queue-messaging
c400108297823a126e4675fe5b3fb16838e4faaf
[ "BSD-3-Clause" ]
8
2017-01-25T15:13:19.000Z
2018-08-17T09:57:35.000Z
queue_messaging/utils/__init__.py
socialwifi/queue-messaging
c400108297823a126e4675fe5b3fb16838e4faaf
[ "BSD-3-Clause" ]
2
2017-11-23T09:36:43.000Z
2018-06-07T06:31:47.000Z
from .environment_context import EnvironmentContext __all__ = [EnvironmentContext, ]
17.4
51
0.827586
7
87
9.571429
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.114943
87
4
52
21.75
0.87013
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
dd2351e9ae928b6ac395d84e7a4bfa9409f8df45
276
py
Python
gameGap/urls.py
chackett87/GameGap
7283e49fdf170fdcacdc31fb444b005359c8d3dd
[ "MIT" ]
null
null
null
gameGap/urls.py
chackett87/GameGap
7283e49fdf170fdcacdc31fb444b005359c8d3dd
[ "MIT" ]
22
2015-06-10T01:48:51.000Z
2015-06-23T17:38:57.000Z
gameGap/urls.py
chackett87/GameGap
7283e49fdf170fdcacdc31fb444b005359c8d3dd
[ "MIT" ]
null
null
null
from django.conf.urls import url from .api.views.entry_view import PostView from .api.views.entry_view import CommentView urlpatterns = [ url(r'^entries/$', PostView.as_view(), name="PostViewer"), url(r'^comments/$', CommentView.as_view(), name="CommentViewer") ]
34.5
72
0.724638
37
276
5.297297
0.540541
0.071429
0.122449
0.173469
0.27551
0.27551
0
0
0
0
0
0
0.123188
276
8
73
34.5
0.809917
0
0
0
0
0
0.158845
0
0
0
0
0
0
1
0
false
0
0.428571
0
0.428571
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
dd24f8117bdac543dbc57df4b5a576832f1e55ae
2,796
py
Python
python/ns/py/Errors.py
redpawfx/massiveImporter
2772d1ce530041007d00d8ba4274dccdda7b8900
[ "MIT" ]
2
2018-01-30T07:50:48.000Z
2020-03-10T02:10:38.000Z
python/ns/py/Errors.py
redpawfx/massiveImporter
2772d1ce530041007d00d8ba4274dccdda7b8900
[ "MIT" ]
null
null
null
python/ns/py/Errors.py
redpawfx/massiveImporter
2772d1ce530041007d00d8ba4274dccdda7b8900
[ "MIT" ]
3
2016-10-25T14:29:34.000Z
2021-08-09T13:37:33.000Z
# The MIT License # # Copyright (c) 2008 James Piechota # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. class Error( Exception ): """Base exception class. 
Contains a string with an optional error message.""" def __init__( self, message ): self._message = message def __str__( self ): return self._message def __repr__( self ): return self._message def __unicode__( self ): return self._message def msg( self ): return self._message class UnitializedError( Error ): """Thrown when an unitialized variable is accessed.""" def __init__( self, message ): Error.__init__( self, message ) class BadArgumentError( Error ): """Thrown when an invalid argument is provided.""" def __init__( self, message ): Error.__init__( self, message ) class OutOfBoundsError( Error ): """Thrown when the value of an argument is outside the allow range.""" def __init__( self, message ): Error.__init__( self, message ) class UnsupportedError( Error ): """Thrown when an implemented feature is invoked.""" def __init__( self, message ): Error.__init__( self, message ) class ThirdPartyError( Error ): """Thrown when a third party library has an error.""" def __init__( self, message ): Error.__init__( self, message ) class SilentError( Error ): """Thrown when an error has occurred but no message should be printed. Either there's none to print or something else has already printed it.""" def __init__( self, message ): Error.__init__( self, message ) class AbortError( Error ): """Thrown when an operation has been aborted either by the user or otherwise.""" def __init__( self, message ): Error.__init__( self, message )
29.744681
80
0.7103
373
2,796
5.117962
0.41555
0.115244
0.117863
0.075432
0.192771
0.155055
0.155055
0.155055
0.135149
0
0
0.001816
0.212089
2,796
93
81
30.064516
0.86473
0.577611
0
0.59375
0
0
0
0
0
0
0
0
0
1
0.375
false
0
0
0.125
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
dd4d2accfab502c1ef8d32c511af3c1f72c65c7a
233
py
Python
bitglitter/read/readstate/multiprocess_state_generator.py
eurekaX696/BitGlitter-Python
c13176084ae71af959d8e551886055cdc1827391
[ "MIT" ]
1
2022-02-27T22:02:41.000Z
2022-02-27T22:02:41.000Z
bitglitter/read/readstate/multiprocess_state_generator.py
eurekaX696/BitGlitter-Python
c13176084ae71af959d8e551886055cdc1827391
[ "MIT" ]
null
null
null
bitglitter/read/readstate/multiprocess_state_generator.py
eurekaX696/BitGlitter-Python
c13176084ae71af959d8e551886055cdc1827391
[ "MIT" ]
null
null
null
def multiprocess_state_generator(video_frame_generator, stream_sha256): """Returns a packaged dict object for use in frame_process""" for frame in video_frame_generator: yield {'mode': 'video', 'main_sequence': True}
46.6
71
0.746781
31
233
5.322581
0.709677
0.121212
0.230303
0
0
0
0
0
0
0
0
0.015306
0.158798
233
5
72
46.6
0.826531
0.236052
0
0
0
0
0.127168
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.333333
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
dd6de01c1051e030015dc53597ea89f2625d2535
6,690
py
Python
graphene/types/tests/test_query.py
dialoguemd/graphene
ceffc4de691509968f200065642731fcc4acd217
[ "MIT" ]
null
null
null
graphene/types/tests/test_query.py
dialoguemd/graphene
ceffc4de691509968f200065642731fcc4acd217
[ "MIT" ]
null
null
null
graphene/types/tests/test_query.py
dialoguemd/graphene
ceffc4de691509968f200065642731fcc4acd217
[ "MIT" ]
null
null
null
import json from functools import partial from graphql import execute, Source, parse from ..objecttype import ObjectType from ..inputfield import InputField from ..inputobjecttype import InputObjectType from ..scalars import String, Int from ..schema import Schema from ..structures import List def test_query(): class Query(ObjectType): hello = String(resolver=lambda *_: 'World') hello_schema = Schema(Query) executed = hello_schema.execute('{ hello }') assert not executed.errors assert executed.data == {'hello': 'World'} def test_query_resolve_function(): class Query(ObjectType): hello = String() def resolve_hello(self, args, context, info): return 'World' hello_schema = Schema(Query) executed = hello_schema.execute('{ hello }') assert not executed.errors assert executed.data == {'hello': 'World'} def test_query_arguments(): class Query(ObjectType): test = String(a_str=String(), a_int=Int()) def resolve_test(self, args, context, info): return json.dumps([self, args], separators=(',', ':')) test_schema = Schema(Query) result = test_schema.execute('{ test }', None) assert not result.errors assert result.data == {'test': '[null,{}]'} result = test_schema.execute('{ test(aStr: "String!") }', 'Source!') assert not result.errors assert result.data == {'test': '["Source!",{"a_str":"String!"}]'} result = test_schema.execute('{ test(aInt: -123, aStr: "String!") }', 'Source!') assert not result.errors assert result.data in [ {'test': '["Source!",{"a_str":"String!","a_int":-123}]'}, {'test': '["Source!",{"a_int":-123,"a_str":"String!"}]'} ] def test_query_input_field(): class Input(InputObjectType): a_field = String() recursive_field = InputField(lambda: Input) class Query(ObjectType): test = String(a_input=Input()) def resolve_test(self, args, context, info): return json.dumps([self, args], separators=(',', ':')) test_schema = Schema(Query) result = test_schema.execute('{ test }', None) assert not result.errors assert result.data == {'test': '[null,{}]'} result = 
test_schema.execute('{ test(aInput: {aField: "String!"} ) }', 'Source!') assert not result.errors assert result.data == {'test': '["Source!",{"a_input":{"a_field":"String!"}}]'} result = test_schema.execute('{ test(aInput: {recursiveField: {aField: "String!"}}) }', 'Source!') assert not result.errors assert result.data == {'test': '["Source!",{"a_input":{"recursive_field":{"a_field":"String!"}}}]'} def test_query_middlewares(): class Query(ObjectType): hello = String() other = String() def resolve_hello(self, args, context, info): return 'World' def resolve_other(self, args, context, info): return 'other' def reversed_middleware(next, *args, **kwargs): p = next(*args, **kwargs) return p.then(lambda x: x[::-1]) hello_schema = Schema(Query, middlewares=[reversed_middleware]) executed = hello_schema.execute('{ hello, other }') assert not executed.errors assert executed.data == {'hello': 'dlroW', 'other': 'rehto'} def test_big_list_query_benchmark(benchmark): big_list = range(10000) class Query(ObjectType): all_ints = List(Int) def resolve_all_ints(self, args, context, info): return big_list hello_schema = Schema(Query) big_list_query = partial(hello_schema.execute, '{ allInts }') result = benchmark(big_list_query) assert not result.errors assert result.data == {'allInts': list(big_list)} def test_big_list_query_compiled_query_benchmark(benchmark): big_list = range(100000) class Query(ObjectType): all_ints = List(Int) def resolve_all_ints(self, args, context, info): return big_list hello_schema = Schema(Query) source = Source('{ allInts }') query_ast = parse(source) big_list_query = partial(execute, hello_schema, query_ast) result = benchmark(big_list_query) assert not result.errors assert result.data == {'allInts': list(big_list)} def test_big_list_of_containers_query_benchmark(benchmark): class Container(ObjectType): x = Int() big_container_list = [Container(x=x) for x in range(1000)] class Query(ObjectType): all_containers = List(Container) def 
resolve_all_containers(self, args, context, info): return big_container_list hello_schema = Schema(Query) big_list_query = partial(hello_schema.execute, '{ allContainers { x } }') result = benchmark(big_list_query) assert not result.errors assert result.data == {'allContainers': [{'x': c.x} for c in big_container_list]} def test_big_list_of_containers_multiple_fields_query_benchmark(benchmark): class Container(ObjectType): x = Int() y = Int() z = Int() o = Int() big_container_list = [Container(x=x, y=x, z=x, o=x) for x in range(1000)] class Query(ObjectType): all_containers = List(Container) def resolve_all_containers(self, args, context, info): return big_container_list hello_schema = Schema(Query) big_list_query = partial(hello_schema.execute, '{ allContainers { x, y, z, o } }') result = benchmark(big_list_query) assert not result.errors assert result.data == {'allContainers': [{'x': c.x, 'y': c.y, 'z': c.z, 'o': c.o} for c in big_container_list]} def test_big_list_of_containers_multiple_fields_custom_resolvers_query_benchmark(benchmark): class Container(ObjectType): x = Int() y = Int() z = Int() o = Int() def resolve_x(self, args, context, info): return self.x def resolve_y(self, args, context, info): return self.y def resolve_z(self, args, context, info): return self.z def resolve_o(self, args, context, info): return self.o big_container_list = [Container(x=x, y=x, z=x, o=x) for x in range(1000)] class Query(ObjectType): all_containers = List(Container) def resolve_all_containers(self, args, context, info): return big_container_list hello_schema = Schema(Query) big_list_query = partial(hello_schema.execute, '{ allContainers { x, y, z, o } }') result = benchmark(big_list_query) assert not result.errors assert result.data == {'allContainers': [{'x': c.x, 'y': c.y, 'z': c.z, 'o': c.o} for c in big_container_list]}
29.866071
115
0.639761
834
6,690
4.943645
0.110312
0.035654
0.050934
0.064516
0.79481
0.745816
0.666748
0.657531
0.634004
0.634004
0
0.006342
0.222272
6,690
223
116
30
0.786085
0
0
0.594771
0
0
0.111061
0.03423
0
0
0
0
0.183007
1
0.163399
false
0
0.058824
0.091503
0.555556
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
dd8d15ec2b9244497afcfc2d92a12ebea34556f1
115
py
Python
grayscale-conversion.py
SametSisartenep/opencv-practices
5b4210a4e13d52b3897f9e458a8906cdbdc33b6b
[ "MIT" ]
1
2015-11-08T11:18:12.000Z
2015-11-08T11:18:12.000Z
grayscale-conversion.py
SametSisartenep/opencv-practices
5b4210a4e13d52b3897f9e458a8906cdbdc33b6b
[ "MIT" ]
null
null
null
grayscale-conversion.py
SametSisartenep/opencv-practices
5b4210a4e13d52b3897f9e458a8906cdbdc33b6b
[ "MIT" ]
null
null
null
import cv2 grayImage = cv2.imread('pic2.png', cv2.CV_LOAD_IMAGE_GRAYSCALE) cv2.imwrite('pic2Gray.png', grayImage)
23
63
0.782609
17
115
5.117647
0.705882
0
0
0
0
0
0
0
0
0
0
0.056604
0.078261
115
4
64
28.75
0.764151
0
0
0
0
0
0.173913
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
dd9b7f31a395c1ac127cbbd0ed95dd8482b989a1
544
py
Python
eikon/eikonError.py
tschm/eikon-docker
a72a13591b4f560442ba37d11021133434425848
[ "Apache-2.0" ]
3
2020-05-10T22:15:49.000Z
2021-04-05T19:29:52.000Z
eikon/eikonError.py
tschm/eikon-docker
a72a13591b4f560442ba37d11021133434425848
[ "Apache-2.0" ]
null
null
null
eikon/eikonError.py
tschm/eikon-docker
a72a13591b4f560442ba37d11021133434425848
[ "Apache-2.0" ]
1
2020-07-22T16:54:32.000Z
2020-07-22T16:54:32.000Z
# coding: utf-8 __all__ = ['EikonError'] class EikonError(Exception): """ Base class for exceptions specific to Eikon platform. """ def __init__(self, code, message): """ Parameters ---------- code: int message: string Indicate the sort direction. Possible values are 'asc' or 'desc'. The default value is 'asc' """ self.code = code self.message = message def __str__(self): return 'Error code {} | {}'.format(self.code, self.message)
22.666667
104
0.5625
58
544
5.068966
0.672414
0.081633
0.102041
0
0
0
0
0
0
0
0
0.002667
0.310662
544
23
105
23.652174
0.781333
0.393382
0
0
0
0
0.108108
0
0
0
0
0
0
1
0.285714
false
0
0
0.142857
0.571429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
dda066f72b0c8917716d98e719c39da09b30788c
178
py
Python
unclassed/map_example.py
gabrielcostasilva/python-basic-examples
d9c20887b94b823fc59bc82f250d39d76b918ad8
[ "MIT" ]
null
null
null
unclassed/map_example.py
gabrielcostasilva/python-basic-examples
d9c20887b94b823fc59bc82f250d39d76b918ad8
[ "MIT" ]
null
null
null
unclassed/map_example.py
gabrielcostasilva/python-basic-examples
d9c20887b94b823fc59bc82f250d39d76b918ad8
[ "MIT" ]
null
null
null
vector = [{"name": "John Doe", "age": 37}, {"name": "Anna Doe", "age": 35}] # for item in vector: # print(item["name"]) print(list(map(lambda item: item["name"], vector)))
25.428571
75
0.573034
26
178
3.923077
0.576923
0.117647
0
0
0
0
0
0
0
0
0
0.026846
0.162921
178
6
76
29.666667
0.657718
0.241573
0
0
0
0
0.257576
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
06f23cc4005b9df3f1fbd96a2dfa02d607e6345a
719
py
Python
lagen/nu/direktiv.py
redhog/ferenda
6935e26fdc63adc68b8e852292456b8d9155b1f7
[ "BSD-2-Clause" ]
18
2015-03-12T17:42:44.000Z
2021-12-27T10:32:22.000Z
lagen/nu/direktiv.py
redhog/ferenda
6935e26fdc63adc68b8e852292456b8d9155b1f7
[ "BSD-2-Clause" ]
13
2016-01-27T10:19:07.000Z
2021-12-13T20:24:36.000Z
lagen/nu/direktiv.py
redhog/ferenda
6935e26fdc63adc68b8e852292456b8d9155b1f7
[ "BSD-2-Clause" ]
6
2016-11-28T15:41:29.000Z
2022-01-08T11:16:48.000Z
# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) from builtins import * from ferenda.sources.legal.se import Direktiv as OrigDirektiv from ferenda.sources.legal.se.direktiv import DirTrips as OrigDirTrips from ferenda.sources.legal.se.direktiv import DirAsp as OrigDirAsp from ferenda.sources.legal.se.direktiv import DirRegeringen as OrigDirRegeringen from . import SameAs class DirTrips(OrigDirTrips, SameAs): pass class DirAsp(OrigDirAsp, SameAs): pass class DirRegeringen(OrigDirRegeringen, SameAs): pass class Direktiv(OrigDirektiv): subrepos = DirRegeringen, DirAsp, DirTrips extrabase = SameAs
24.793103
80
0.757997
82
719
6.560976
0.378049
0.081784
0.133829
0.171004
0.263941
0.217472
0.217472
0
0
0
0
0.001681
0.172462
719
28
81
25.678571
0.902521
0.029207
0
0.176471
0
0
0
0
0
0
0
0
0
1
0
false
0.176471
0.411765
0
0.764706
0.058824
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
0
0
0
3
06fe6eb52dbbbcaac9a9cfe5778a8be7e71e1a24
271
py
Python
src/generate_inverter.py
Verkhovskaya/FPGA_planet_physics
1893549e5aea699ad760000b9234434d88181b4c
[ "MIT" ]
6
2018-05-23T17:45:38.000Z
2021-01-05T08:50:20.000Z
src/generate_inverter.py
Verkhovskaya/FPGA_planet_physics
1893549e5aea699ad760000b9234434d88181b4c
[ "MIT" ]
null
null
null
src/generate_inverter.py
Verkhovskaya/FPGA_planet_physics
1893549e5aea699ad760000b9234434d88181b4c
[ "MIT" ]
1
2021-01-05T08:50:37.000Z
2021-01-05T08:50:37.000Z
for x_dist in range(11): for y_dist in range(11): if not ((x_dist == 0)&(y_dist == 0)): print "assign pre_calculated["+str(x_dist)+"]["+str(y_dist)+"] =", print "21'b" + str(bin(int((2**21)*1.0/(y_dist**2+x_dist**2)**(3/2))))[2:]+ ";"
45.166667
91
0.505535
49
271
2.612245
0.44898
0.15625
0.171875
0.203125
0
0
0
0
0
0
0
0.085308
0.221402
271
5
92
54.2
0.521327
0
0
0
0
0
0.118081
0
0
0
0
0
0
0
null
null
0
0
null
null
0.4
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
b0854b3b7354416f86f560df88bac37620b6c180
98
py
Python
cecmap/__init__.py
coldfix/cecmap
f3a3cf0e5b783ae443c0c46cd216bc0ec1598458
[ "Unlicense" ]
null
null
null
cecmap/__init__.py
coldfix/cecmap
f3a3cf0e5b783ae443c0c46cd216bc0ec1598458
[ "Unlicense" ]
null
null
null
cecmap/__init__.py
coldfix/cecmap
f3a3cf0e5b783ae443c0c46cd216bc0ec1598458
[ "Unlicense" ]
null
null
null
import os # Needed for pynput (!): os.environ.setdefault('DISPLAY', ':0') __version__ = '1.0.0'
14
38
0.653061
14
98
4.285714
0.785714
0
0
0
0
0
0
0
0
0
0
0.047619
0.142857
98
6
39
16.333333
0.666667
0.22449
0
0
0
0
0.189189
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
b0889b13f577fd6e5f90d4df57206f9afc8ae3a4
113
py
Python
ch3/exercises/ans3_18.py
chunhua2017/pythonprogrammingdemo
64e4ac2b33c54cde4671291a6203e94cd96de4ba
[ "MIT" ]
4
2020-05-18T05:25:44.000Z
2021-07-30T01:02:39.000Z
ch3/exercises/ans3_18.py
chunhua2017/pythonprogrammingdemo
64e4ac2b33c54cde4671291a6203e94cd96de4ba
[ "MIT" ]
null
null
null
ch3/exercises/ans3_18.py
chunhua2017/pythonprogrammingdemo
64e4ac2b33c54cde4671291a6203e94cd96de4ba
[ "MIT" ]
2
2021-09-15T05:41:05.000Z
2022-01-25T05:44:43.000Z
# Print a five-row, five-column right isosceles triangle of "*" characters.
# (Comment translated from Chinese.)

N = 5


def triangle_rows(n):
    """Return the rows of an n-row right triangle of asterisks."""
    return ["*" * (row + 1) for row in range(n)]


for line in triangle_rows(N):
    print(line)
18.833333
26
0.504425
18
113
3.166667
0.666667
0.245614
0
0
0
0
0
0
0
0
0
0.025
0.292035
113
6
27
18.833333
0.6875
0.176991
0
0
0
0
0.01087
0
0
0
0
0
0
1
0
false
0
0
0
0
0.4
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
b08f0868f6a1a3eca4cddfe0a1cbaa26926da412
247
py
Python
calendary/api/urls.py
kraupn3r/intranet
4cabf6f365ef0ea0f352f67f9322318e161ed265
[ "MIT" ]
null
null
null
calendary/api/urls.py
kraupn3r/intranet
4cabf6f365ef0ea0f352f67f9322318e161ed265
[ "MIT" ]
null
null
null
calendary/api/urls.py
kraupn3r/intranet
4cabf6f365ef0ea0f352f67f9322318e161ed265
[ "MIT" ]
null
null
null
"""URL routes for the calendary API."""
from django.contrib import admin
from django.urls import path, include

from .views import DeventDetailAPIView, CalendarAPIView

# Root serves the calendar overview; individual devents are addressed
# by integer primary key.
urlpatterns = [
    path('', CalendarAPIView.as_view()),
    path('devent/<int:pk>/', DeventDetailAPIView.as_view()),
]
30.875
60
0.753036
28
247
6.571429
0.607143
0.108696
0
0
0
0
0
0
0
0
0
0
0.121457
247
7
61
35.285714
0.847926
0
0
0
0
0
0.064777
0
0
0
0
0
0
1
0
false
0
0.428571
0
0.428571
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
b0981d35fb6c08440312916278fd7381e7fc4b04
510
py
Python
learning_log/migrations/0004_auto_20201214_2054.py
willy-r/learning-log-site
b2156e2b7be93435876367681a9ce587d8fd28eb
[ "MIT" ]
2
2021-11-19T16:57:32.000Z
2021-11-23T15:51:23.000Z
learning_log/migrations/0004_auto_20201214_2054.py
willy-r/learning-log
b2156e2b7be93435876367681a9ce587d8fd28eb
[ "MIT" ]
5
2020-10-21T17:03:14.000Z
2021-09-22T18:59:38.000Z
learning_log/migrations/0004_auto_20201214_2054.py
willy-r/learning-log-site
b2156e2b7be93435876367681a9ce587d8fd28eb
[ "MIT" ]
2
2020-10-02T09:02:44.000Z
2021-06-14T06:05:59.000Z
# Generated by Django 3.0.7 on 2020-12-14 23:54 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('learning_log', '0003_topic_public'), ] operations = [ migrations.AlterModelOptions( name='entry', options={'ordering': ['-date_added'], 'verbose_name_plural': 'entries'}, ), migrations.AlterModelOptions( name='topic', options={'ordering': ['-date_added']}, ), ]
23.181818
84
0.576471
48
510
5.979167
0.729167
0.188153
0.216028
0.167247
0
0
0
0
0
0
0
0.052055
0.284314
510
21
85
24.285714
0.734247
0.088235
0
0.266667
1
0
0.222462
0
0
0
0
0
0
1
0
false
0
0.066667
0
0.266667
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
b0b9b46f4a9ac3817c7992129a9974d310d750a0
4,427
py
Python
src/sequence_jacobian/classes/impulse_dict.py
gboehl/sequence-jacobian
01d177cc254a2ccee57a3ed273117bea58554be2
[ "MIT" ]
null
null
null
src/sequence_jacobian/classes/impulse_dict.py
gboehl/sequence-jacobian
01d177cc254a2ccee57a3ed273117bea58554be2
[ "MIT" ]
null
null
null
src/sequence_jacobian/classes/impulse_dict.py
gboehl/sequence-jacobian
01d177cc254a2ccee57a3ed273117bea58554be2
[ "MIT" ]
null
null
null
"""ImpulseDict class for manipulating impulse responses.""" import numpy as np from .result_dict import ResultDict from ..utilities.ordered_set import OrderedSet from ..utilities.bijection import Bijection from .steady_state_dict import SteadyStateDict class ImpulseDict(ResultDict): def __init__(self, data, internals=None, T=None): if isinstance(data, ImpulseDict): if internals is not None or T is not None: raise ValueError('Supplying ImpulseDict and also internal or T to constructor not allowed') super().__init__(data) self.T = data.T else: if not isinstance(data, dict): raise ValueError('ImpulseDicts are initialized with a `dict` of top-level impulse responses.') super().__init__(data, internals) self.T = (T if T is not None else self.infer_length()) def __getitem__(self, k): return super().__getitem__(k, T=self.T) def __add__(self, other): return self.binary_operation(other, lambda a, b: a + b) def __radd__(self, other): return self.__add__(other) def __sub__(self, other): return self.binary_operation(other, lambda a, b: a - b) def __rsub__(self, other): return self.binary_operation(other, lambda a, b: b - a) def __mul__(self, other): return self.binary_operation(other, lambda a, b: a * b) def __rmul__(self, other): return self.__mul__(other) def __truediv__(self, other): return self.binary_operation(other, lambda a, b: a / b) def __rtruediv__(self, other): return self.binary_operation(other, lambda a, b: b / a) def __neg__(self): return self.unary_operation(lambda a: -a) def __pos__(self): return self def __abs__(self): return self.unary_operation(lambda a: abs(a)) def binary_operation(self, other, op): if isinstance(other, (SteadyStateDict, ImpulseDict)): toplevel = {k: op(v, other[k]) for k, v in self.toplevel.items()} internals = {} for b in self.internals: other_internals = other.internals[b] internals[b] = {k: op(v, other_internals[k]) for k, v in self.internals[b].items()} return ImpulseDict(toplevel, internals, self.T) elif isinstance(other, (float, 
int)): toplevel = {k: op(v, other) for k, v in self.toplevel.items()} internals = {} for b in self.internals: internals[b] = {k: op(v, other) for k, v in self.internals[b].items()} return ImpulseDict(toplevel, internals, self.T) else: return NotImplementedError(f'Can only perform operations with ImpulseDicts and other ImpulseDicts, SteadyStateDicts, or numbers, not {type(other).__name__}') def unary_operation(self, op): toplevel = {k: op(v) for k, v in self.toplevel.items()} internals = {} for b in self.internals: internals[b] = {k: op(v) for k, v in self.internals[b].items()} return ImpulseDict(toplevel, internals, self.T) def pack(self): T = self.T bigv = np.empty(T*len(self.toplevel)) for i, v in enumerate(self.toplevel.values()): bigv[i*T:(i+1)*T] = v return bigv @staticmethod def unpack(bigv, outputs, T): impulse = {} for i, o in enumerate(outputs): impulse[o] = bigv[i*T:(i+1)*T] return ImpulseDict(impulse, T=T) def infer_length(self): lengths = [len(v) for v in self.toplevel.values()] length = max(lengths) if length != min(lengths): raise ValueError(f'Building ImpulseDict with inconsistent lengths {max(lengths)} and {min(lengths)}') return length def get(self, k): """Like __getitem__ but with default of zero impulse""" if isinstance(k, str): return self.toplevel.get(k, np.zeros(self.T)) elif isinstance(k, tuple): raise TypeError(f'Key {k} to {type(self).__name__} cannot be tuple') else: try: return type(self)({ki: self.toplevel.get(ki, np.zeros(self.T)) for ki in k}, T=self.T) except TypeError: raise TypeError(f'Key {k} to {type(self).__name__} needs to be a string or an iterable (list, set, etc) of strings')
38.163793
169
0.609894
587
4,427
4.41908
0.224872
0.046261
0.046261
0.058597
0.350424
0.340786
0.325752
0.298766
0.297224
0.266384
0
0.000625
0.276711
4,427
115
170
38.495652
0.809494
0.023266
0
0.131868
0
0.021978
0.114769
0.014839
0
0
0
0
0
1
0.208791
false
0
0.054945
0.131868
0.505495
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
b0cccdc6152b7b46c83d5ec7da17e3099a79a6ae
1,349
py
Python
tests/unit/test_pathify_by_key_ends.py
xguse/snaketools
ba3b68088bd9bb656b9ad64656a537bc1cfccdb4
[ "MIT" ]
1
2017-11-20T22:58:16.000Z
2017-11-20T22:58:16.000Z
tests/unit/test_pathify_by_key_ends.py
xguse/snaketools
ba3b68088bd9bb656b9ad64656a537bc1cfccdb4
[ "MIT" ]
16
2017-09-13T14:49:26.000Z
2018-06-01T17:03:31.000Z
tests/unit/test_pathify_by_key_ends.py
xguse/snaketools
ba3b68088bd9bb656b9ad64656a537bc1cfccdb4
[ "MIT" ]
null
null
null
"""Unit test the pathify_by_key_ends function.""" from pathlib import Path from snaketools import snaketools from tests.test_snaketools import * # noqa: F403,F401 def test_pathify_this(): """Ensure pathify_this returns expected values.""" assert snaketools.pathify_this("TEXT_FILE") assert snaketools.pathify_this("TEXT_PATH") assert snaketools.pathify_this("TEXT_DIR") assert snaketools.pathify_this("DIR") assert not snaketools.pathify_this("TEXT") def test_pathify_by_key_ends(config_1_dict): """Ensure pathify_by_key_ends returns expected types.""" original = config_1_dict pathified = snaketools.pathify_by_key_ends(dictionary=original) assert isinstance(pathified.COMMON, dict) assert isinstance(pathified.COMMON.RUN_NAME, str) assert isinstance(pathified.COMMON.OUT_DIR, Path) assert isinstance(pathified.COMMON.INTERIM_DIR, Path) assert isinstance(pathified.COMMON.DRAW_RULE, str) assert isinstance(pathified.COMMON.DRAW_PRETTY_NAMES, bool) assert isinstance(pathified.RULE_1, dict) assert isinstance(pathified.RULE_1.PARAMS, dict) assert isinstance(pathified.RULE_1.PARAMS.PARAM_1, int) assert isinstance(pathified.RULE_1.PARAMS.PARAM_2, str) assert isinstance(pathified.RULE_1.IN, dict) assert isinstance(pathified.RULE_1.IN.IN_FILE_1_PATH, Path)
38.542857
67
0.775389
182
1,349
5.494505
0.285714
0.192
0.3
0.186
0.469
0.27
0.126
0
0
0
0
0.01453
0.132691
1,349
34
68
39.676471
0.840171
0.115641
0
0
0
0
0.028037
0
0
0
0
0
0.708333
1
0.083333
false
0
0.125
0
0.208333
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
3
b0db46f07a9f9010949f8796ab0fe4ba861b887d
321
py
Python
curso-em-video/ex111.py
joseluizbrits/sobre-python
316143c341e5a44070a3b13877419082774bd730
[ "MIT" ]
null
null
null
curso-em-video/ex111.py
joseluizbrits/sobre-python
316143c341e5a44070a3b13877419082774bd730
[ "MIT" ]
null
null
null
curso-em-video/ex111.py
joseluizbrits/sobre-python
316143c341e5a44070a3b13877419082774bd730
[ "MIT" ]
null
null
null
# Turning modules into packages (comment translated from Portuguese)

"""Create a PACKAGE called ``uteis`` that has two internal modules named
``moeda`` and ``dado``.  Move all the functions used in ex107, ex108 and
ex109 into this first package and keep everything working.
(Exercise statement translated from Portuguese.)"""

print()
# The printed message (kept verbatim) says: "In this exercise there is no
# code to write"; the escape sequence sets a bold magenta terminal color.
print('\033[1:35m''Nesse exercício não há código para escrever')
print()
29.181818
64
0.785047
49
321
5.142857
0.857143
0
0
0
0
0
0
0
0
0
0
0.054745
0.146417
321
10
65
32.1
0.864964
0.719626
0
0.666667
0
0
0.630952
0
0
0
0
0
0
1
0
true
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
3
9fdba0f2cacf5917a91548ec4aa0dc8c8406e7f8
875
py
Python
mmdet/datasets/pipelines/stack.py
vietnamican/mmdetection
458f593608ec0a416c38f18c743004992c27096d
[ "Apache-2.0" ]
null
null
null
mmdet/datasets/pipelines/stack.py
vietnamican/mmdetection
458f593608ec0a416c38f18c743004992c27096d
[ "Apache-2.0" ]
null
null
null
mmdet/datasets/pipelines/stack.py
vietnamican/mmdetection
458f593608ec0a416c38f18c743004992c27096d
[ "Apache-2.0" ]
null
null
null
import os.path as osp

import mmcv
import numpy as np
import pycocotools.mask as maskUtils

from mmdet.core import BitmapMasks, PolygonMasks
from ..builder import PIPELINES


@PIPELINES.register_module()
class Stack:
    """Pipeline step that stacks a grayscale image into three identical channels."""

    def __init__(self):
        pass

    def __call__(self, results):
        """Replicate the grayscale image in ``results['img']`` across 3 channels.

        BUG FIX: the previous docstring was copy-pasted loader boilerplate
        ("load image and get image meta information") that did not describe
        this transform.

        Args:
            results (dict): Result dict whose ``'img'`` entry is a 2-D
                grayscale array.

        Returns:
            dict: The same dict with ``'img'`` replaced by an ``(H, W, 3)``
            array whose channels are identical copies of the input.
        """
        img = results['img'][..., np.newaxis]
        img = np.concatenate([img, img, img], axis=2)
        results['img'] = img
        return results

    def __repr__(self):
        # BUG FIX: the original opened a parenthesis in the repr string and
        # never closed it; emit a balanced repr.
        return (f'{self.__class__.__name__}('
                f'stack grayscale image into three channels)')
25
72
0.624
105
875
4.980952
0.561905
0.034417
0
0
0
0
0
0
0
0
0
0.001597
0.284571
875
35
73
25
0.833866
0.234286
0
0
0
0
0.116987
0.041667
0
0
0
0
0
1
0.157895
false
0.052632
0.315789
0
0.631579
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
3
9fee7bb626d761dbf408cf926ee49e9a9aab4009
167
py
Python
Python3/IteratorsAndGenerators/count_up_to.py
norbertosanchezdichi/TIL
2e9719ddd288022f53b094a42679e849bdbcc625
[ "MIT" ]
null
null
null
Python3/IteratorsAndGenerators/count_up_to.py
norbertosanchezdichi/TIL
2e9719ddd288022f53b094a42679e849bdbcc625
[ "MIT" ]
null
null
null
Python3/IteratorsAndGenerators/count_up_to.py
norbertosanchezdichi/TIL
2e9719ddd288022f53b094a42679e849bdbcc625
[ "MIT" ]
null
null
null
def count_up_to(max):
    """Generator yielding 1, 2, ... up to and including *max*."""
    current = 1
    while current <= max:
        yield current
        current += 1


# Demo: print the numbers 1 through 5, one per line.
counter = count_up_to(5)
for num in counter:
    print(num)
18.555556
24
0.562874
25
167
3.6
0.56
0.155556
0.2
0
0
0
0
0
0
0
0
0.027523
0.347305
167
9
25
18.555556
0.798165
0
0
0
0
0
0
0
0
0
0
0
0
1
0.125
false
0
0
0
0.125
0.125
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
b039397e732a9d112f1046ceb8eabd0ecbebd388
50,988
py
Python
dark_emulator/darkemu/de_interface.py
DarkQuestCosmology/dark_emulator_public
f0f2eb2fcf3bf95d0e93b3e7239928cc7107a3c2
[ "MIT" ]
13
2021-03-22T11:47:50.000Z
2021-05-19T12:27:32.000Z
dark_emulator/darkemu/de_interface.py
DarkQuestCosmology/dark_emulator_public
f0f2eb2fcf3bf95d0e93b3e7239928cc7107a3c2
[ "MIT" ]
12
2021-05-05T14:24:47.000Z
2021-11-10T17:57:42.000Z
dark_emulator/darkemu/de_interface.py
DarkQuestCosmology/dark_emulator_public
f0f2eb2fcf3bf95d0e93b3e7239928cc7107a3c2
[ "MIT" ]
2
2021-03-28T09:05:41.000Z
2022-02-16T23:55:51.000Z
import os from .cosmo_util import cosmo_class from .cosmo_util import constants from .pklin import pklin_gp from .xinl import xinl_gp from .gamma1 import gamma1_gp from .cross import cross_gp from .auto import auto_gp from .hmf import hmf_gp from .. import pyfftlog_interface import numpy as np from scipy.interpolate import InterpolatedUnivariateSpline as iuspline from scipy import integrate class base_class(object): """base_class The base class of dark emulator. This holds all the individual emulator class objects for different statistical quantities. By passing to the base class object, the cosmological paramters in all the lower-level objects are updated. Args: cparam (numpy array): Cosmological parameters :math:`(\omega_b, \omega_c, \Omega_{de}, \ln(10^{10}A_s), n_s, w)` Attributes: cosmo (class cosmo_class): A class object dealing with the cosmological parameters and some basic cosmological quantities such as expansion and linear growth. pkL (class pklin_gp): A class object that takes care of the linear matter power spectrum g1 (class gamma1_gp): A class object that takes care of the large-scale bias as well as the BAO damping xi_cross (class cross_gp): A class object that takes care of the halo-matter cross correlation function xi_auto (class auto_gp): A class object that takes care of the halo-halo correlation function massfunc (class hmf_gp): A class object that takes care of the halo mass function xiNL (class xinl_gp): A class object that takes care of the nonlinear matter correlation function (experimental) """ def __init__(self): self.cosmo = cosmo_class() self.pkL = pklin_gp() self.g1 = gamma1_gp() self.xi_cross = cross_gp() self.xi_auto = auto_gp() self.massfunc = hmf_gp() self.xiNL = xinl_gp() # initialize emulators with the fiducial model and at z=0 self.set_cosmology(self.cosmo.get_cosmology()) def set_cosmology(self, cparam): """set_cosmology Let the emulator know the cosmological parameters. 
This interface passes the 6 parameters to all the class objects used for the emulation of various halo statistics. The current version supports wCDM cosmologies specified by the 6 parameters as described below. Other parameters are automatically computed: :math:`\Omega_{m}=1-\Omega_{de},` :math:`h=\sqrt{(\omega_b+\omega_c+\omega_{\\nu})/\Omega_m},` where the neutrino density is fixed by :math:`\omega_{\\nu} = 0.00064` corresponding to the mass sum of 0.06 eV. Args: cparam (numpy array): Cosmological parameters :math:`(\omega_b, \omega_c, \Omega_{de}, \ln(10^{10}A_s), n_s, w)` """ self.cosmo.set_cosmology(cparam) self.pkL.set_cosmology(self.cosmo) # self.xiL.set_cosmology(self.cosmo) self.xiNL.set_cosmology(self.cosmo) self.xi_auto.set_cosmology(self.cosmo) self.xi_cross.set_cosmology(self.cosmo) self.massfunc.set_cosmology(self.cosmo) self.g1.set_cosmology(self.cosmo) def _set_cosmology_predefined(self, i): self.set_cosmology(self.cosmo.get_cosmology_predefined(i)) def get_sd(self, z): """get_sd Compute the root mean square of the linear displacement, :math:`\sigma_d`, for the current cosmological model at redshift z. Args: z (float): redshift Returns: float: :math:`\sigma_d` """ return self.Dgrowth_from_z(z)*self.g1.sd0 def mass_to_dens(self, mass_thre, redshift, integration="quad"): """mass_to_dens Convert the halo mass threshold to the cumulative number density for the current cosmological model at redshift z. Args: mass_thre (float): mass threshold in :math:`h^{-1}M_{\odot}` redshift (float): redshift integration (str, optional): type of integration (default: "quad", "trapz" is also supported) Returns: float: halo number density in :math:`[(h^{-1}\mathrm{Mpc})^{-3}]` """ return self.massfunc.mass_to_dens(mass_thre, redshift, integration=integration) def dens_to_mass(self, dens, redshift, nint=20, integration="quad"): """dens_to_mass Convert the cumulative number density to the halo mass threshold for the current cosmological model at redshift z. 
Args: dens (float): halo number density in :math:`(h^{-1}\mathrm{Mpc})^{-3}` redshift (float): redshift nint (int, optional): number of sampling points in log(M) used for interpolation integration (str, optional): type of integration (default: "quad", "trapz" is also supported) Returns: float: mass threshold in :math:`[h^{-1}M_{\odot}]` """ return self.massfunc.dens_to_mass(dens, redshift, nint, integration=integration) def get_f_HMF(self, redshift): """get_f_HMF Compute the multiplicity function :math:`f(\sigma)`, defined through :math:`dn/dM = f(\sigma)\\bar{\\rho}_m/M d \ln \sigma^{-1}/dM`. Args: redshift (float): redshift Returns: (tuple): tuple containing: mass(numpy array): :math:`M_{200b}` mass variance(numpy array): :math:`\sigma(M_{200b)` multiplicity function(numpy array): :math:`f(\sigma)` """ D0 = self.Dgrowth_from_z(redshift) return self.massfunc.Mlist, D0*self.massfunc.sigs0, self.massfunc.f_HMF_func(D0*self.massfunc.sigs0, redshift) def get_nhalo(self, Mmin, Mmax, vol, redshift): """get_nhalo Compute the mean number of halos in a given mass range and volume. Args: Mmin (float): Minimum halo mass in :math:`[h^{-1}M_\odot]` Mmax (float): Maximum halo mass in :math:`[h^{-1}M_\odot]` vol (float): Volume in :math:`[(h^{-1}\mathrm{Mpc})^3]` Returns: float: Number of halos """ return self.massfunc.get_nhalo(Mmin, Mmax, vol, redshift) def get_nhalo_tinker(self, Mmin, Mmax, vol, redshift): """get_nhalo_tinker Compute the mean number of halos in a given mass range and volume based on the fitting formula by Tinker et al. (ApJ 688 (2008) 709). Args: Mmin (float): Minimum halo mass in :math:`[h^{-1}M_\odot]` Mmax (float): Maximum halo mass in :math:`[h^{-1}M_\odot]` vol (float): Volume in :math:`[(h^{-1}\mathrm{Mpc})^3]` Returns: float: Number of halos """ return self.massfunc.get_nhalo_tinker(Mmin, Mmax, vol, redshift) def get_xilin(self, xs): """get_xilin Compute the linear matter correlation function at z=0. 
Args: xs (numpy array): Separations in :math:`[h^{-1}\mathrm{Mpc}]` Returns: numpy array: Correlation function at separations given in the argument xs. """ ks = np.logspace(-3, 3, 300) return pyfftlog_interface.pk2xi_pyfftlog(iuspline(ks, self.pkL.get(ks)))(xs) def _get_xinl_tree(self, xs, redshift): return pyfftlog_interface.pk2xi_pyfftlog(self._get_pkmatter_tree_spline(redshift))(xs) def _get_xinl_direct(self, xs, z): return self.xiNL.get(xs, z) def get_xinl(self, xs, redshift): """get_xinl Compute the nonlinear matter correlation function. Note that this is still in a development phase, and the accuracy has not yet been fully evaluated. Args: xs (numpy array): Separations in :math:`[h^{-1}\mathrm{Mpc}]` Returns: numpy array: Correlation function at separations given in the argument xs. """ xi_dir = self._get_xinl_direct(xs, redshift) xi_tree = self._get_xinl_tree(xs, redshift) rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx()) return xi_dir * np.exp(-(xs/rswitch)**4) + xi_tree * (1-np.exp(-(xs/rswitch)**4)) def get_pknl(self, k, z): """get_pknl Compute the nonlinear matter power spectrum. Note that this is still in a development phase, and the accuracy has not yet been fully evaluated. Args: k (numpy array): Wavenumbers in :math:`[h\mathrm{Mpc}^{-1}]` z (float): redshift Returns: numpy array: Nonlinear matter power spectrum at wavenumbers given in the argument k. """ xs = np.logspace(-3, 3, 2000) xinl = self.get_xinl(xs, z) return pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs, xinl))(k) def get_pklin(self, k): """get_pklin Compute the linear matter power spectrum at z=0. Args: k (numpy array): Wavenumbers in :math:`[h\mathrm{Mpc}^{-1}]` Returns: numpy array: Linear power spectrum at wavenumbers given in the argument k. """ return self.pkL.get(k) def _get_pklin_from_snap(self, x, i): Dp = self._Dgrowth_from_snapnum(i) return Dp**2 * self.pkL.get(x) def get_pklin_from_z(self, k, z): """get_pklin_z Compute the linear matter power spectrum. 
Args: k (numpy array): Wavenumbers in :math:`[h\mathrm{Mpc}^{-1}]` z (float): redshift Returns: numpy array: Linear power spectrum at wavenumbers given in the argument k. """ Dp = self.Dgrowth_from_z(z) return Dp**2 * self.pkL.get(k) def _get_xiauto_tree(self, xs, logdens1, logdens2, redshift): ks = np.logspace(-3, 3, 2000) g1 = self.g1.get(ks, redshift, logdens1) g2 = self.g1.get(ks, redshift, logdens2) pm_lin = self.get_pklin(ks) ph_tree = g1 * g2 * pm_lin return pyfftlog_interface.pk2xi_pyfftlog(iuspline(ks, ph_tree))(xs) def _get_xiauto_direct(self, xs, logdens1, logdens2, redshift): return self.xi_auto.get(xs, redshift, logdens1, logdens2) def get_xiauto(self, xs, logdens1, logdens2, redshift): """get_xiauto Compute the halo-halo correlation function, :math:`\\xi_\mathrm{hh}(x;n_1,n_2)`, bwtween 2 mass threshold halo samples specified by the corresponding cumulative number densities. Args: xs (numpy array): Separations in :math:`[h^{-1}\mathrm{Mpc}]` logdens1 (float): Logarithm of the cumulative halo number density of the first halo sample taken from the most massive, :math:`\log_{10}[n_1/(h^{-1}\mathrm{Mpc})^3]` logdens2 (float): Logarithm of the cumulative halo number density of the second halo sample taken from the most massive, :math:`\log_{10}[n_2/(h^{-1}\mathrm{Mpc})^3]` redshift (float): Redshift at which the correlation function is evaluated Returns: numpy array: Halo correlation function """ xi_tree = self._get_xiauto_tree(xs, logdens1, logdens2, redshift) if logdens1 >= -5.75 and logdens2 >= -5.75: xi_dir = self._get_xiauto_direct(xs, logdens1, logdens2, redshift) rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx()) xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) + \ xi_tree * (1-np.exp(-(xs/rswitch)**4)) elif logdens1 >= -5.75 and logdens2 < -5.75: xi_dir = self._get_xiauto_direct( xs, logdens1, -5.75, redshift) * self.g1.bias_ratio(redshift, logdens2) rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx()) xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) + \ 
xi_tree * (1-np.exp(-(xs/rswitch)**4)) elif logdens1 < -5.75 and logdens2 >= -5.75: xi_dir = self._get_xiauto_direct( xs, -5.75, logdens2, redshift) * self.g1.bias_ratio(redshift, logdens1) rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx()) xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) + \ xi_tree * (1-np.exp(-(xs/rswitch)**4)) else: xi_dir = self._get_xiauto_direct(xs, -5.75, -5.75, redshift) * self.g1.bias_ratio( redshift, logdens1)*self.g1.bias_ratio(redshift, logdens2) rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx()) xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) + \ xi_tree * (1-np.exp(-(xs/rswitch)**4)) return xi_tot def _get_xiauto_spl(self, logdens1, logdens2, redshift): xs = np.logspace(-1, 3., 2000) xi_tree = self._get_xiauto_tree(xs, logdens1, logdens2, redshift) if logdens1 >= -5.75 and logdens2 >= -5.75: xi_dir = self._get_xiauto_direct(xs, logdens1, logdens2, redshift) rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx()) xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) + \ xi_tree * (1-np.exp(-(xs/rswitch)**4)) elif logdens1 >= -5.75 and logdens2 < -5.75: xi_dir = self._get_xiauto_direct( xs, logdens1, -5.75, redshift) * self.g1.bias_ratio(redshift, logdens2) rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx()) xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) + \ xi_tree * (1-np.exp(-(xs/rswitch)**4)) elif logdens1 < -5.75 and logdens2 >= -5.75: xi_dir = self._get_xiauto_direct( xs, -5.75, logdens2, redshift) * self.g1.bias_ratio(redshift, logdens1) rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx()) xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) + \ xi_tree * (1-np.exp(-(xs/rswitch)**4)) else: xi_dir = self._get_xiauto_direct(xs, -5.75, -5.75, redshift) * self.g1.bias_ratio( redshift, logdens1)*self.g1.bias_ratio(redshift, logdens2) rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx()) xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) + \ xi_tree * (1-np.exp(-(xs/rswitch)**4)) return iuspline(xs, xi_tot) def get_xiauto_massthreshold(self, xs, Mthre, redshift): 
"""get_xiauto_massthreshold Compute the halo-halo correlation function, :math:`\\xi_\mathrm{hh}(x;>M_\mathrm{th})`, for a mass threshold halo sample. Args: xs (numpy array): Separations in :math:`[h^{-1}\mathrm{Mpc}]` Mthre (float): Minimum halo mass threshold in :math:`[h^{-1}M_\odot]` redshift (float): Redshift at which the correlation function is evaluated Returns: numpy array: Halo correlation function """ logdens = np.log10(self.mass_to_dens(Mthre, redshift)) return self.get_xiauto(xs, logdens, logdens, redshift) def get_xiauto_mass(self, xs, M1, M2, redshift): """get_xiauto_mass Compute the halo-halo correlation function, :math:`\\xi_\mathrm{hh}(x;M_1,M_2)`, between 2 halo samples with mass :math:`M_1` and :math:`M_2`. Args: xs (numpy array): Separations in :math:`[h^{-1}\mathrm{Mpc}]` M1 (float): Halo mass of the first sample in :math:`[h^{-1}M_\odot]` M2 (float): Halo mass of the second sample in :math:`[h^{-1}M_\odot]` redshift (float): Redshift at which the correlation function is evaluated Returns: numpy array: Halo correlation function """ M1p = M1 * 1.01 M1m = M1 * 0.99 M2p = M2 * 1.01 M2m = M2 * 0.99 dens1p = self.mass_to_dens(M1p, redshift) dens1m = self.mass_to_dens(M1m, redshift) dens2p = self.mass_to_dens(M2p, redshift) dens2m = self.mass_to_dens(M2m, redshift) logdens1p, logdens1m, logdens2p, logdens2m = np.log10( dens1p), np.log10(dens1m), np.log10(dens2p), np.log10(dens2m) ximm = self.get_xiauto(xs, logdens1m, logdens2m, redshift) ximp = self.get_xiauto(xs, logdens1m, logdens2p, redshift) xipm = self.get_xiauto(xs, logdens1p, logdens2m, redshift) xipp = self.get_xiauto(xs, logdens1p, logdens2p, redshift) numer = ximm * dens1m * dens2m - ximp * dens1m * dens2p - \ xipm * dens1p * dens2m + xipp * dens1p * dens2p denom = dens1m * dens2m - dens1m * dens2p - dens1p * dens2m + dens1p * dens2p return numer / denom def _get_phh_tree(self,ks,logdens1,logdens2,redshift): g1 = self.g1.get(ks,redshift,logdens1) g2 = self.g1.get(ks,redshift,logdens2) pm_lin 
= self.get_pklin(ks) ph_tree = g1 * g2 * pm_lin return ph_tree def _get_phh_direct(self,ks,logdens1,logdens2,redshift): xs = np.logspace(-3,3,4000) xihh = self.xi_auto.get(xs,redshift,logdens1,logdens2) return pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs,xihh))(ks) def get_phh(self,ks,logdens1,logdens2,redshift): """get_phh Compute the halo-halo power spectrum :math:`P_{hh}(k;n_1,n_2)` between 2 mass threshold halo samples specified by the corresponding cumulative number densities. Args: ks (numpy array): Wavenumbers in :math:`[h\mathrm{Mpc}^{-1}]` logdens1 (float): Logarithm of the cumulative halo number density of the first halo sample taken from the most massive, :math:`\log_{10}[n_1/(h^{-1}\mathrm{Mpc})^3]` logdens2 (float): Logarithm of the cumulative halo number density of the second halo sample taken from the most massive, :math:`\log_{10}[n_2/(h^{-1}\mathrm{Mpc})^3]` redshift (float): redshift at which the power spectrum is evaluated Returns: numpy array: halo power spectrum in :math:`[(h^{-1}\mathrm{Mpc})^{3}]` """ xs = np.logspace(-3,3,4000) xi_tree = self._get_xiauto_tree(xs,logdens1,logdens2,redshift) rswitch = min(60.,0.5 * self.cosmo.get_BAO_approx()) if logdens1 >= -5.75 and logdens2 >= -5.75: xi_dir = self._get_xiauto_direct(xs,logdens1,logdens2,redshift) xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) + xi_tree * (1-np.exp(-(xs/rswitch)**4)) elif logdens1 >= -5.75 and logdens2 < -5.75: xi_dir = self._get_xiauto_direct(xs,logdens1,-5.75,redshift) * self.g1.bias_ratio(redshift,logdens2) xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) + xi_tree * (1-np.exp(-(xs/rswitch)**4)) elif logdens1 < -5.75 and logdens2 >= -5.75: xi_dir = self._get_xiauto_direct(xs,-5.75,logdens2,redshift) * self.g1.bias_ratio(redshift,logdens1) xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) + xi_tree * (1-np.exp(-(xs/rswitch)**4)) else: xi_dir = self._get_xiauto_direct(xs,-5.75,-5.75,redshift) * self.g1.bias_ratio(redshift,logdens1)*self.g1.bias_ratio(redshift,logdens2) xi_tot = xi_dir * 
np.exp(-(xs/rswitch)**4) + xi_tree * (1-np.exp(-(xs/rswitch)**4)) return pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs,xi_tot))(ks) def _get_phh_tree_cut(self,ks,logdens1,logdens2,redshift): xs = np.logspace(-3,3,4000) xi_tree = self._get_xiauto_tree(xs,logdens1,logdens2,redshift) rswitch = min(60.,0.5 * self.cosmo.get_BAO_approx()) xi_tot = xi_tree * (1-np.exp(-(xs/rswitch)**4)) return pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs,xi_tot))(ks) def _get_phh_direct_cut(self,ks,logdens1,logdens2,redshift): xs = np.logspace(-3,3,4000) rswitch = min(60.,0.5 * self.cosmo.get_BAO_approx()) if logdens1 >= -5.75 and logdens2 >= -5.75: xi_dir = self._get_xiauto_direct(xs,logdens1,logdens2,redshift) xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) elif logdens1 >= -5.75 and logdens2 < -5.75: xi_dir = self._get_xiauto_direct(xs,logdens1,-5.75,redshift) * self.g1.bias_ratio(redshift,logdens2) xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) elif logdens1 < -5.75 and logdens2 >= -5.75: xi_dir = self._get_xiauto_direct(xs,-5.75,logdens2,redshift) * self.g1.bias_ratio(redshift,logdens1) xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) else: xi_dir = self._get_xiauto_direct(xs,-5.75,-5.75,redshift) * self.g1.bias_ratio(redshift,logdens1)*self.g1.bias_ratio(redshift,logdens2) xi_tot = xi_dir * np.exp(-(xs/rswitch)**4) return pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs,xi_tot))(ks) def get_phh_massthreshold(self,ks,Mthre,redshift): """get_phh_massthreshold Compute the halo-halo auto power spectrum :math:`P_{hh}(k;>M_\mathrm{th})` for a mass threshold halo sample. 
Args: ks (numpy array): Wavenumbers in :math:`[h\mathrm{Mpc}^{-1}]` Mthre (float): Minimum halo mass threshold in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the power spectrum is evaluated Returns: numpy array: halo power spectrum in :math:`[(h^{-1}\mathrm{Mpc})^{3}]` """ logdens = np.log10(self.mass_to_dens(Mthre,redshift)) return self.get_phh(ks,logdens,logdens,redshift) def get_phh_mass(self,ks,M1,M2,redshift): """get_phh_mass Compute the halo-halo power spectrum :math:`P_{hh}(k;M_1,M_2)` between 2 halo samples with mass :math:`M_1` and :math:`M_2`. Args: ks (numpy array): Wavenumbers in :math:`[h\mathrm{Mpc}^{-1}]` M1 (float): Halo mass of the first sample in :math:`[h^{-1}M_\odot]` M2 (float): Halo mass of the second sample in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the power spectrum is evaluated Returns: numpy array: halo power spectrum in :math:`[(h^{-1}\mathrm{Mpc})^{3}]` """ M1p = M1 * 1.01 M1m = M1 * 0.99 M2p = M2 * 1.01 M2m = M2 * 0.99 dens1p = self.mass_to_dens(M1p,redshift) dens1m = self.mass_to_dens(M1m,redshift) dens2p = self.mass_to_dens(M2p,redshift) dens2m = self.mass_to_dens(M2m,redshift) logdens1p, logdens1m, logdens2p, logdens2m = np.log10(dens1p), np.log10(dens1m), np.log10(dens2p), np.log10(dens2m) pmm = self.get_phh(ks,logdens1m,logdens2m,redshift) pmp = self.get_phh(ks,logdens1m,logdens2p,redshift) ppm = self.get_phh(ks,logdens1p,logdens2m,redshift) ppp = self.get_phh(ks,logdens1p,logdens2p,redshift) numer = pmm * dens1m * dens2m - pmp * dens1m * dens2p - ppm * dens1p * dens2m + ppp * dens1p * dens2p denom = dens1m * dens2m - dens1m * dens2p - dens1p * dens2m + dens1p * dens2p return numer / denom def get_wauto(self, R2d, logdens1, logdens2, redshift): """get_wauto Compute the projected halo-halo correlation function :math:`w_{hh}(R;n_1,n_2)` for 2 mass threshold halo samples specified by the corresponding cumulative number densities. 
Args: R2d (numpy array): 2 dimensional projected separation in :math:`[h^{-1}\mathrm{Mpc}]` logdens1 (float): Logarithm of the cumulative halo number density of the first halo sample taken from the most massive, :math:`\log_{10}[n_1/(h^{-1}\mathrm{Mpc})^3]` logdens2 (float): Logarithm of the cumulative halo number density of the second halo sample taken from the most massive, :math:`\log_{10}[n_2/(h^{-1}\mathrm{Mpc})^3]` redshift (float): redshift at which the power spectrum is evaluated Returns: numpy array: projected halo correlation function in :math:`[h^{-1}\mathrm{Mpc}]` """ xs = np.logspace(-3, 3, 1000) xi_auto = self.get_xiauto(xs, logdens1, logdens2, redshift) pk_spl = pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs, xi_auto)) return pyfftlog_interface.pk2xiproj_J0_pyfftlog(pk_spl, logkmin=-3.0, logkmax=3.0)(R2d) def get_wauto_cut(self, R2d, logdens1, logdens2, redshift, pimax, integration="quad"): """get_wauto_cut Compute the projected halo-halo correlation function :math:`w_{hh}(R;n_1,n_2)` for 2 mass threshold halo samples specified by the corresponding cumulative number densities. Unlike get_wauto, this function considers a finite width for the radial integration, from :math:`-\pi_\mathrm{max}` to :math:`\pi_\mathrm{max}`. 
Args: R2d (numpy array): 2 dimensional projected separation in :math:`[h^{-1}\mathrm{Mpc}]` logdens1 (float): Logarithm of the cumulative halo number density of the first halo sample taken from the most massive, :math:`\log_{10}[n_1/(h^{-1}\mathrm{Mpc})^3]` logdens2 (float): Logarithm of the cumulative halo number density of the second halo sample taken from the most massive, :math:`\log_{10}[n_2/(h^{-1}\mathrm{Mpc})^3]` redshift (float): redshift at which the power spectrum is evaluated pimax (float): :math:`\pi_\mathrm{max}` for the upper limit of the integral Returns: numpy array: projected halo correlation function in :math:`[h^{-1}\mathrm{Mpc}]` """ xi3d = self._get_xiauto_spl(logdens1, logdens2, redshift) wauto = [] if integration == "quad": for R2dnow in R2d: wauto.append( 2*integrate.quad(lambda t: xi3d(np.sqrt(t**2+R2dnow**2)), 0, pimax, epsabs=1e-4)[0]) elif integration == "trapz": t = np.linspace(0, pimax, 1024) dt = t[1]-t[0] for R2dnow in R2d: wauto.append( 2*integrate.trapz(xi3d(np.sqrt(t**2+R2dnow**2)), dx=dt)) else: raise RuntimeError( "You should specify valid integration algorithm: quad or trapz") return np.array(wauto) def get_wauto_massthreshold(self, R2d, Mthre, redshift): """get_wauto_massthreshold Compute the projected halo-halo correlation function :math:`w_{hh}(R;>M_\mathrm{th})` for a mass threshold halo sample. 
Args: R2d (numpy array): 2 dimensional projected separation in :math:`[h^{-1}\mathrm{Mpc}]` Mthre (float): Minimum halo mass threshold in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the power spectrum is evaluated Returns: numpy array: projected halo correlation function in :math:`[h^{-1}\mathrm{Mpc}]` """ logdens = np.log10(self.mass_to_dens(Mthre, redshift)) return self.get_wauto(R2d, logdens, logdens, redshift) def get_wauto_masthreshold_cut(self, R2d, Mthre, redshift, pimax, integration="quad"): """get_wauto_massthreshold_cut Compute the projected halo-halo correlation function :math:`w_{hh}(R;>M_\mathrm{th})` for a mass threshold halo sample. Unlike get_wauto_massthreshold, this function considers a finite width for the radial integration, from :math:`-\pi_\mathrm{max}` to :math:`\pi_\mathrm{max}`. Args: R2d (numpy array): 2 dimensional projected separation in :math:`[h^{-1}\mathrm{Mpc}]` Mthre (float): Minimum halo mass threshold in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the power spectrum is evaluated pimax (float): :math:`\pi_\mathrm{max}` for the upper limit of the integral Returns: numpy array: projected halo correlation function in :math:`[h^{-1}\mathrm{Mpc}]` """ logdens = np.log10(self.mass_to_dens(Mthre, redshift)) return self.get_wauto_cut(R2d, logdens, logdens, redshift, pimax, integration) def get_wauto_mass(self, R2d, M1, M2, redshift): """get_wauto_mass Compute the projected halo-halo correlation function :math:`w_{hh}(R;M_1,M_2)` for 2 mass threshold halo samples. 
Args: R2d (numpy array): 2 dimensional projected separation in :math:`[h^{-1}\mathrm{Mpc}]` M1 (float): Halo mass of the first sample in :math:`[h^{-1}M_\odot]` M2 (float): Halo mass of the second sample in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the power spectrum is evaluated Returns: numpy array: projected halo correlation function in :math:`[h^{-1}\mathrm{Mpc}]` """ M1p = M1 * 1.01 M1m = M1 * 0.99 M2p = M2 * 1.01 M2m = M2 * 0.99 dens1p = self.mass_to_dens(M1p, redshift) dens1m = self.mass_to_dens(M1m, redshift) dens2p = self.mass_to_dens(M2p, redshift) dens2m = self.mass_to_dens(M2m, redshift) logdens1p, logdens1m, logdens2p, logdens2m = np.log10( dens1p), np.log10(dens1m), np.log10(dens2p), np.log10(dens2m) wmm = self.get_wauto(R2d, logdens1m, logdens2m, redshift) wmp = self.get_wauto(R2d, logdens1m, logdens2p, redshift) wpm = self.get_wauto(R2d, logdens1p, logdens2m, redshift) wpp = self.get_wauto(R2d, logdens1p, logdens2p, redshift) numer = wmm * dens1m * dens2m - wmp * dens1m * dens2p - \ wpm * dens1p * dens2m + wpp * dens1p * dens2p denom = dens1m * dens2m - dens1m * dens2p - dens1p * dens2m + dens1p * dens2p return numer / denom def get_wauto_mass_cut(self, R2d, M1, M2, redshift, pimax): """get_wauto_mass_cut Compute the projected halo-halo correlation function :math:`w_{hh}(R;M_1,M_2)` for 2 mass threshold halo samples. Unlike get_wauto_mass, this function considers a finite width for the radial integration, from :math:`-\pi_\mathrm{max}` to :math:`\pi_\mathrm{max}`. 
Args: R2d (numpy array): 2 dimensional projected separation in :math:`[h^{-1}\mathrm{Mpc}]` M1 (float): Halo mass of the first sample in :math:`[h^{-1}M_\odot]` M2 (float): Halo mass of the second sample in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the power spectrum is evaluated pimax (float): :math:`\pi_\mathrm{max}` for the upper limit of the integral Returns: numpy array: projected halo correlation function in :math:`[h^{-1}\mathrm{Mpc}]` """ M1p = M1 * 1.01 M1m = M1 * 0.99 M2p = M2 * 1.01 M2m = M2 * 0.99 dens1p = self.mass_to_dens(M1p, redshift) dens1m = self.mass_to_dens(M1m, redshift) dens2p = self.mass_to_dens(M2p, redshift) dens2m = self.mass_to_dens(M2m, redshift) logdens1p, logdens1m, logdens2p, logdens2m = np.log10( dens1p), np.log10(dens1m), np.log10(dens2p), np.log10(dens2m) wmm = self.get_wauto_cut(R2d, logdens1m, logdens2m, redshift, pimax) wmp = self.get_wauto_cut(R2d, logdens1m, logdens2p, redshift, pimax) wpm = self.get_wauto_cut(R2d, logdens1p, logdens2m, redshift, pimax) wpp = self.get_wauto_cut(R2d, logdens1p, logdens2p, redshift, pimax) numer = wmm * dens1m * dens2m - wmp * dens1m * dens2p - \ wpm * dens1p * dens2m + wpp * dens1p * dens2p denom = dens1m * dens2m - dens1m * dens2p - dens1p * dens2m + dens1p * dens2p return numer / denom def _get_pkmatter_tree(self, redshift): ks = np.logspace(-3, 3, 2000) g1_dm = self.g1.get_dm(ks, redshift) pm_lin = self.get_pklin(ks) return g1_dm**2 * pm_lin # TN suppressed this because it is a duplication of get_pknl # def get_pmnl(self,ks,redshift): # xs = np.logspace(-3,3,2000) # xi = self.get_xinl(xs,redshift) # return pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs,xi))(ks) def _get_pkmatter_tree_spline(self, redshift): ks = np.logspace(-3, 3, 2000) g1_dm = self.g1.get_dm(ks, redshift) pm_lin = self.get_pklin(ks) return iuspline(ks, g1_dm**2 * pm_lin) def _get_pkcross_tree(self, logdens, redshift): ks = np.logspace(-3, 3, 2000) g1 = self.g1.get(ks, redshift, logdens) g1_dm = 
self.g1.get_dm(ks, redshift) pm_lin = self.get_pklin(ks) return g1*g1_dm * pm_lin def _get_pkcross_tree_spline(self, logdens, redshift): ks = np.logspace(-3, 3, 2000) g1 = self.g1.get(ks, redshift, logdens) g1_dm = self.g1.get_dm(ks, redshift) pm_lin = self.get_pklin(ks) return iuspline(ks, g1*g1_dm * pm_lin) def _get_xicross_tree(self, xs, logdens, redshift): return pyfftlog_interface.pk2xi_pyfftlog(self._get_pkcross_tree_spline(logdens, redshift))(xs) def _get_xicross_direct(self, xs, logdens, redshift): return self.xi_cross.get(xs, redshift, logdens) def get_xicross(self, xs, logdens, redshift): """get_xicross Compute the halo-matter cross correlation function :math:`\\xi_{hm}(x;n_h)` for a mass threshold halo sample specified by the corresponding cumulative number density. Args: xs (numpy array): Separations in :math:`[h^{-1}\mathrm{Mpc}]` logdens (float): Logarithm of the cumulative halo number density of the halo sample taken from the most massive, :math:`\log_{10}[n_h/(h^{-1}\mathrm{Mpc})^3]` redshift (float): redshift at which the power spectrum is evaluated Returns: numpy array: Halo-matter cross correlation function """ xi_dir = self._get_xicross_direct(xs, logdens, redshift) xi_tree = self._get_xicross_tree(xs, logdens, redshift) rswitch = min(60., 0.5 * self.cosmo.get_BAO_approx()) return xi_dir * np.exp(-(xs/rswitch)**4) + xi_tree * (1-np.exp(-(xs/rswitch)**4)) def get_xicross_massthreshold(self, xs, Mthre, redshift): """get_xicross_massthreshold Compute the halo-matter cross correlation function :math:`\\xi_{hm}(x;>M_\mathrm{th})` for a mass threshold halo sample. 
Args: xs (numpy array): Separations in :math:`[h^{-1}\mathrm{Mpc}]` Mthre (float): Minimum mass threshold of a halo sample in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the power spectrum is evaluated Returns: numpy array: Halo-matter cross correlation function """ logdens = np.log10(self.mass_to_dens(Mthre, redshift)) return self.get_xicross(xs, logdens, redshift) def get_xicross_mass(self, xs, M, redshift): """get_xicross_mass Compute the halo-matter cross correlation function :math:`\\xi_{hm}(x;M)` for halos with mass :math:`M`. Args: xs (numpy array): Separations in :math:`[h^{-1}\mathrm{Mpc}]` M (float): Halo mass in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the power spectrum is evaluated Returns: numpy array: Halo-matter cross correlation function """ Mp = M * 1.01 Mm = M * 0.99 logdensp = np.log10(self.mass_to_dens(Mp, redshift)) logdensm = np.log10(self.mass_to_dens(Mm, redshift)) xip = self.get_xicross(xs, logdensp, redshift) xim = self.get_xicross(xs, logdensm, redshift) return (xim * 10**logdensm - xip * 10**logdensp) / (10**logdensm - 10**logdensp) def _get_phm_tree(self,ks,logdens,redshift): g1 = self.g1.get(ks,redshift,logdens) g1_dm = self.g1.get_dm(ks,redshift) pm_lin = self.get_pklin(ks) return g1*g1_dm * pm_lin def _get_phm_direct(self,ks,logdens,redshift): xs = np.logspace(-3,3,2000) return pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs,self.xi_cross.get(xs,redshift,logdens)))(ks) def get_phm(self,ks,logdens,redshift): """get_phm Compute the halo-matter cross power spectrum :math:`P_{hm}(k;n_h)` for a mass threshold halo sample specified by the corresponding cumulative number density. 
Args: ks (numpy array): Wavenumbers in :math:`[h\mathrm{Mpc}^{-1}]` logdens (float): Logarithm of the cumulative halo number density of the halo sample taken from the most massive, :math:`\log_{10}[n_h/(h^{-1}\mathrm{Mpc})^3]` redshift (float): redshift at which the power spectrum is evaluated Returns: numpy array: Halo-matter cross power spectrum in :math:`[(h^{-1}\mathrm{Mpc})^{3}]` """ xs = np.logspace(-4,3,4000) xi_dir = self._get_xicross_direct(xs,logdens,redshift) xi_tree = self._get_xicross_tree(xs,logdens,redshift) rswitch = min(60.,0.5 * self.cosmo.get_BAO_approx()) xi = xi_dir * np.exp(-(xs/rswitch)**4) + xi_tree * (1-np.exp(-(xs/rswitch)**4)) return pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs,xi),logrmin = -4.0, logrmax = 3.0)(ks) def _get_phm_tree_cut(self,ks,logdens,redshift): xs = np.logspace(-4,3,4000) xi_tree = self._get_xicross_tree(xs,logdens,redshift) rswitch = min(60.,0.5 * self.cosmo.get_BAO_approx()) xi = xi_tree * (1-np.exp(-(xs/rswitch)**4)) return pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs,xi),logrmin = -4.0, logrmax = 3.0)(ks) def _get_phm_direct_cut(self,ks,logdens,redshift): xs = np.logspace(-4,3,4000) xi_dir = self._get_xicross_direct(xs,logdens,redshift) rswitch = min(60.,0.5 * self.cosmo.get_BAO_approx()) xi = xi_dir * np.exp(-(xs/rswitch)**4) return pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs,xi),logrmin = -4.0, logrmax = 3.0)(ks) def get_phm_massthreshold(self,ks,Mthre,redshift): """get_phm_massthreshold Compute the halo-matter cross power spectrum :math:`P_{hm}(k;>M_\mathrm{th})` for a mass threshold halo sample. 
Args: ks (numpy array): Wavenumbers in :math:`[h\mathrm{Mpc}^{-1}]` Mthre (float): Minimum halo mass threshold in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the power spectrum is evaluated Returns: numpy array: Halo-matter cross power spectrum in :math:`[(h^{-1}\mathrm{Mpc})^{3}]` """ logdens = np.log10(self.mass_to_dens(Mthre,redshift)) return self.get_phm(ks,logdens,redshift) def get_phm_mass(self,ks,M,redshift): """get_phm_mass Compute the halo-matter cross power spectrum :math:`P_{hm}(k;M)` for halos with mass :math:`M`. Args: ks (numpy array): Wavenumbers in :math:`[h\mathrm{Mpc}^{-1}]` M (float): Halo mass in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the power spectrum is evaluated Returns: numpy array: Halo-matter cross power spectrum in :math:`[(h^{-1}\mathrm{Mpc})^{3}]` """ Mp = M * 1.01 Mm = M * 0.99 logdensp = np.log10(self.mass_to_dens(Mp,redshift)) logdensm = np.log10(self.mass_to_dens(Mm,redshift)) pip = self.get_phm(ks,logdensp,redshift) pim = self.get_phm(ks,logdensm,redshift) return (pim * 10**logdensm - pip * 10**logdensp) / (10**logdensm - 10**logdensp) def _get_DeltaSigma_tree(self, R2d, logdens, redshift): return self.cosmo.get_Omega0() * self.cosmo.rho_cr / 1e12 * pyfftlog_interface.pk2xiproj_J2_pyfftlog(self._get_pkcross_tree_spline(logdens, redshift))(R2d) def _get_DeltaSigma_direct(self, R2d, logdens, redshift): xs = np.logspace(-3, 3, 2000) xi = self._get_xicross_direct(xs, logdens, redshift) pk_spl = pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs, xi)) return self.cosmo.get_Omega0() * self.cosmo.rho_cr / 1e12 * pyfftlog_interface.pk2xiproj_J2_pyfftlog(pk_spl)(R2d) def get_DeltaSigma(self, R2d, logdens, redshift): """get_DeltaSigma Compute the halo-galaxy lensing signal, the excess surface mass density, :math:`\Delta\Sigma(R;n_h)`, for a mass threshold halo sample specified by the corresponding cumulative number density. 
Args: R2d (numpy array): 2 dimensional projected separation in :math:`h^{-1}\mathrm{Mpc}` logdens (float): Logarithm of the cumulative halo number density taken from the most massive, :math:`\log_{10}[n_h/(h^{-1}\mathrm{Mpc})^3]` redshift (float): redshift at which the lens halos are located Returns: numpy array: excess surface mass density in :math:`[h M_\odot \mathrm{pc}^{-2}]` """ xs = np.logspace(-3, 3, 2000) xi_tot = self.get_xicross(xs, logdens, redshift) pk_spl = pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs, xi_tot)) return self.cosmo.get_Omega0() * self.cosmo.rho_cr / 1e12 * pyfftlog_interface.pk2xiproj_J2_pyfftlog(pk_spl)(R2d) def get_DeltaSigma_massthreshold(self, R2d, Mthre, redshift): """get_DeltaSigma_massthreshold Compute the halo-galaxy lensing signal, the excess surface mass density, :math:`\Delta\Sigma(R;>M_\mathrm{th})`, for a mass threshold halo sample. Args: R2d (numpy array): 2 dimensional projected separation in :math:`h^{-1}\mathrm{Mpc}` Mthre (float): Minimum halo mass threshold in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the lens halos are located Returns: numpy array: excess surface mass density in :math:`[h M_\odot \mathrm{pc}^{-2}]` """ logdens = np.log10(self.mass_to_dens(Mthre, redshift)) return self.get_DeltaSigma(R2d, logdens, redshift) def get_DeltaSigma_mass(self, R2d, M, redshift): """get_DeltaSigma_mass Compute the halo-galaxy lensing signal, the excess surface mass density, :math:`\Delta\Sigma(R;M)`, for halos with mass :math:`M`. 
Args: R2d (numpy array): 2 dimensional projected separation in :math:`h^{-1}\mathrm{Mpc}` M (float): Halo mass in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the lens halos are located Returns: numpy array: excess surface mass density in :math:`[h M_\odot \mathrm{pc}^{-2}]` """ Mp = M * 1.01 Mm = M * 0.99 logdensp = np.log10(self.mass_to_dens(Mp, redshift)) logdensm = np.log10(self.mass_to_dens(Mm, redshift)) DSp = self.get_DeltaSigma(R2d, logdensp, redshift) DSm = self.get_DeltaSigma(R2d, logdensm, redshift) return (DSm * 10**logdensm - DSp * 10**logdensp) / (10**logdensm - 10**logdensp) def _get_Sigma_tree(self, R2d, logdens, redshift): return self.cosmo.get_Omega0() * self.cosmo.rho_cr / 1e12 * pyfftlog_interface.pk2xiproj_J0_pyfftlog(self._get_pkcross_tree_spline(logdens, redshift))(R2d) def _get_Sigma_direct(self, R2d, logdens, redshift): xs = np.logspace(-3, 3, 2000) xi = self._get_xicross_direct(xs, logdens, redshift) pk_spl = pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs, xi)) return self.cosmo.get_Omega0() * self.cosmo.rho_cr / 1e12 * pyfftlog_interface.pk2xiproj_J0_pyfftlog(pk_spl)(R2d) def get_Sigma(self, R2d, logdens, redshift): """get_Sigma Compute the surface mass density, :math:`\Sigma(R;n_h)`, for a mass threshold halo sample specified by the corresponding cumulative number density. 
Args: R2d (numpy array): 2 dimensional projected separation in :math:`h^{-1}\mathrm{Mpc}` logdens (float): Logarithm of the cumulative halo number density taken from the most massive, :math:`\log_{10}[n_h/(h^{-1}\mathrm{Mpc})^3]` redshift (float): redshift at which the lens halos are located Returns: numpy array: surface mass density in :math:`[h M_\odot \mathrm{pc}^{-2}]` """ xs = np.logspace(-3, 3, 2000) xi_tot = self._get_xicross(xs, logdens, redshift) pk_spl = pyfftlog_interface.xi2pk_pyfftlog(iuspline(xs, xi_tot)) return self.cosmo.get_Omega0() * self.cosmo.rho_cr / 1e12 * pyfftlog_interface.pk2xiproj_J0_pyfftlog(pk_spl)(R2d) def get_Sigma_massthreshold(self, R2d, Mthre, redshift): """get_Sigma_massthreshold Compute the surface mass density, :math:`\Sigma(R;>M_\mathrm{th})`, for a mass threshold halo sample. Args: R2d (numpy array): 2 dimensional projected separation in :math:`h^{-1}\mathrm{Mpc}` Mthre (float): Minimum halo mass threshold in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the lens halos are located Returns: numpy array: surface mass density in :math:`[h M_\odot \mathrm{pc}^{-2}]` """ logdens = np.log10(self.mass_to_dens(Mthre, redshift)) return self.get_Sigma(R2d, logdens, redshift) def get_Sigma_mass(self, R2d, M, redshift): """get_Sigma_mass Compute the surface mass density, :math:`\Sigma(R;M)`, for halos with mass :math:`M`. 
Args: R2d (numpy array): 2 dimensional projected separation in :math:`h^{-1}\mathrm{Mpc}` M (float): Halo mass in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the lens halos are located Returns: numpy array: surface mass density in :math:`[h M_\odot \mathrm{pc}^{-2}]` """ Mp = M * 1.01 Mm = M * 0.99 logdensp = np.log10(self.mass_to_dens(Mp, redshift)) logdensm = np.log10(self.mass_to_dens(Mm, redshift)) Sp = self.get_Sigma(R2d, logdensp, redshift) Sm = self.get_Sigma(R2d, logdensm, redshift) return (Sm * 10**logdensm - Sp * 10**logdensp) / (10**logdensm - 10**logdensp) def _get_gamma1_dm(self, k, redshift): return self.g1.get_dm(k, redshift) def _get_bd(self, logdens, redshift): return self.g1.get_bd(redshift, logdens) def get_bias(self, logdens, redshift): """get_bias Compute the linear bias for a mass threshold halo sample specified by the corresponding cumulative number density. Args: logdens (float): Logarithm of the cumulative halo number density taken from the most massive, :math:`\log_{10}[n_h/(h^{-1}\mathrm{Mpc})^3]` redshift (float): redshift at which the lens halos are located Returns: float: linear bias factor """ return self.g1.get_bias(redshift, logdens) def get_bias_massthreshold(self, Mth, redshift): """get_bias_massthreshold Compute the linear bias, :math:`b(>M_\mathrm{th})`, for a mass threshold halo sample. Args: Mth (float): Halo mass threshold in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the lens halos are located Returns: float: linear bias factor """ logdens = np.log10(self.mass_to_dens(Mth, redshift)) return self.get_bias(logdens, redshift) def get_bias_mass(self, M, redshift): """get_bias_mass Compute the linear bias for halos with mass :math:`M`. 
Args: M (float): Halo mass in :math:`[h^{-1}M_\odot]` redshift (float): redshift at which the lens halos are located Returns: float: linear bias factor """ Mp = M * 1.01 Mm = M * 0.99 logdensp = np.log10(self.mass_to_dens(Mp, redshift)) logdensm = np.log10(self.mass_to_dens(Mm, redshift)) bp = self.get_bias(logdensp, redshift) bm = self.get_bias(logdensm, redshift) return (bm * 10**logdensm - bp * 10**logdensp) / (10**logdensm - 10**logdensp) def _get_gamma1_h(self, k, logdens, redshift): return self.g1.get(k, redshift, logdens) def Dgrowth_from_z(self, z): """Dgrowth_from_z Compute the linear growth factor, D_+, at redshift z. Normalized to unity at z=0. Args: z: redshift Returns: float: linear growth factor """ return self.cosmo.Dgrowth_from_z(z) def Dgrowth_from_a(self, a): """Dgrowth_from_a Compute the linear growth factor, D_+, at scale factor a. Normalized to unity at z=0. Args: a: scale factor normalized to unity at present. Returns: float: linear growth factor """ return self.cosmo.Dgrowth_from_a(a) def _Dgrowth_from_snapnum(self, i): return self.cosmo.Dgrowth_from_snapnum(i) def f_from_z(self, z): """f_from_z Compute the linear growth rate, :math:`f = \mathrm{d}\ln D_+/\mathrm{d}\ln a`, at redshift z. Args: z: redshift Returns: float: linear growth rate """ return self.cosmo.f_from_z(z) def f_from_a(self, a): """f_from_a Compute the linear growth rate, :math:`f = \mathrm{d}\ln D_+/\mathrm{d}\ln a`, at scale factor a. Args: a: scale factor normalized to unity at present. Returns: float: linear growth rate """ return self.cosmo.f_from_a(a) def _f_from_snapnum(self, i): return self.cosmo.f_from_snampum(i) def get_cosmology(self): """get_cosmology Obtain the cosmological parameters currently set to the emulator. 
Returns: numpy array: Cosmological parameters :math:`(\omega_b, \omega_c, \Omega_{de}, \ln(10^{10}A_s), n_s, w)` """ return self.cosmo.get_cosmology() def get_sigma8(self, logkmin=-4, logkmax=1, nint=100): """get_sigma8 Compute :math:`\sigma_8` for the current cosmology. Args: logkmin (float, optional): log10 of the minimum wavenumber for the integral (default=-4) logkmin (float, optional): log10 of the maximum wavenumber for the integral (default=1) nint (int, optional): Number of samples taken for the trapz integration (default=100) Returns: float: :math:`\sigma_8` """ R = 8. ks = np.logspace(logkmin, logkmax, nint) logks = np.log(ks) kR = ks * R integrant = ks**3*self.get_pklin(ks)*_window_tophat(kR)**2 return np.sqrt(integrate.trapz(integrant, logks)/(2.*np.pi**2)) def _window_tophat(kR): return 3.*(np.sin(kR)-kR*np.cos(kR))/kR**3
45.688172
200
0.615968
7,006
50,988
4.341707
0.06209
0.019331
0.01818
0.016832
0.79979
0.753107
0.719738
0.693339
0.674732
0.659774
0
0.036155
0.262807
50,988
1,115
201
45.729148
0.773092
0.419687
0
0.503282
0
0
0.003354
0
0
0
0
0
0
1
0.16849
false
0
0.028446
0.028446
0.36105
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
b03d6c5a62283a3ce682016786f174bddcb2fc92
111
py
Python
misc/bin2hex.py
tomverbeure/led_matrix
7aba01e3915b7c9398cb5ec78fb4c61e98a81334
[ "Unlicense" ]
2
2021-03-24T13:40:23.000Z
2021-05-16T07:46:46.000Z
misc/bin2hex.py
tomverbeure/led_matrix
7aba01e3915b7c9398cb5ec78fb4c61e98a81334
[ "Unlicense" ]
null
null
null
misc/bin2hex.py
tomverbeure/led_matrix
7aba01e3915b7c9398cb5ec78fb4c61e98a81334
[ "Unlicense" ]
null
null
null
#! /usr/bin/env python3 import sys for line in sys.stdin: print( "{0:02x}".format(int(line.strip(),2)) )
15.857143
50
0.630631
19
111
3.684211
0.894737
0
0
0
0
0
0
0
0
0
0
0.053763
0.162162
111
6
51
18.5
0.698925
0.198198
0
0
0
0
0.079545
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
b044ce395967b2ef88a6405a3f904aa7256c068c
6,214
py
Python
pyacq/core/tests/test_node.py
cimbi/pyacq
b320251f1cf899c1d2cc4fddd5596a1ae835b39d
[ "BSD-3-Clause" ]
20
2015-12-18T05:52:04.000Z
2021-05-22T05:12:24.000Z
pyacq/core/tests/test_node.py
cimbi/pyacq
b320251f1cf899c1d2cc4fddd5596a1ae835b39d
[ "BSD-3-Clause" ]
72
2015-07-17T19:43:36.000Z
2021-09-14T07:37:30.000Z
pyacq/core/tests/test_node.py
cimbi/pyacq
b320251f1cf899c1d2cc4fddd5596a1ae835b39d
[ "BSD-3-Clause" ]
14
2015-06-19T12:07:25.000Z
2021-08-16T14:44:42.000Z
# -*- coding: utf-8 -*- # Copyright (c) 2016, French National Center for Scientific Research (CNRS) # Distributed under the (new) BSD License. See LICENSE for more info. import time import sys from pyacq import create_manager from pyqtgraph.Qt import QtCore, QtGui import pyqtgraph as pg from pyacq.core.tests.fakenodes import FakeSender, FakeReceiver, ReceiverWidget import logging #~ logging.getLogger().level=logging.INFO def test_stream_between_local_nodes(): # create local nodes in QApplication app = pg.mkQApp() sender = FakeSender() stream_spec = dict(protocol='tcp', interface='127.0.0.1', port='*', transfermode='plaindata', streamtype='analogsignal', dtype='float32', shape=(-1, 16), compression ='', scale = None, offset = None, units = '') sender.configure(sample_interval=0.001) sender.outputs['signals'].configure(**stream_spec) # sender.output.configure(**stream_spec) sender.initialize() receiver = FakeReceiver() receiver.configure() receiver.inputs['signals'].connect(sender.outputs['signals']) # receiver.input.connect(sender.output) receiver.initialize() # start them for a while sender.start() receiver.start() def terminate(): sender.stop() receiver.stop() app.quit() timer = QtCore.QTimer(singleShot=True, interval=3000) timer.timeout.connect(terminate) timer.start() app.exec_() def test_stream_between_remote_nodes(): # this is done at Manager level the manager do known the connection man = create_manager(auto_close_at_exit=False) nodegroup = man.create_nodegroup('nodegroup') nodegroup.register_node_type_from_module('pyacq.core.tests.fakenodes', 'FakeSender') nodegroup.register_node_type_from_module('pyacq.core.tests.fakenodes', 'FakeReceiver') # create ndoes sender = nodegroup.create_node('FakeSender', name='sender') stream_spec = dict(protocol='tcp', interface='127.0.0.1', port='*', transfermode='plaindata', streamtype='analogsignal', dtype='float32', shape=(-1, 16), compression='', scale=None, offset=None, units='') sender.configure(sample_interval=0.001) 
sender.outputs['signals'].configure(**stream_spec) sender.initialize() receiver = nodegroup.create_node('FakeReceiver', name='receiver') receiver.configure() receiver.inputs['signals'].connect(sender.outputs['signals']) receiver.initialize() # start them for a while sender.start() receiver.start() #~ print(nodegroup.any_node_running()) time.sleep(2.) sender.stop() receiver.stop() #~ print(nodegroup.any_node_running()) man.close() def test_stream_between_local_and_remote_nodes(): # this is done at Manager level the manager do known the connection man = create_manager(auto_close_at_exit=False) nodegroup = man.create_nodegroup('nodegroup') nodegroup.register_node_type_from_module('pyacq.core.tests.fakenodes', 'FakeSender') # create ndoes sender = nodegroup.create_node('FakeSender', name='sender') stream_spec = dict(protocol='tcp', interface='127.0.0.1', port='*', transfermode='plaindata', streamtype='analogsignal', dtype='float32', shape=(-1, 16), compression ='', scale = None, offset = None, units = '') sender.configure(sample_interval=0.001) sender.output.configure(**stream_spec) sender.initialize() # create local nodes in QApplication app = pg.mkQApp() receiver = FakeReceiver() receiver.configure() receiver.input.connect(sender.output) receiver.initialize() # start them for a while sender.start() receiver.start() def terminate(): sender.stop() receiver.stop() app.quit() timer = QtCore.QTimer(singleShot=True, interval=2000) timer.timeout.connect(terminate) timer.start() app.exec_() man.close() def test_visual_node_both_in_main_qapp_and_remote_qapp(): man = create_manager(auto_close_at_exit=False) nodegroup = man.create_nodegroup('nodegroup') nodegroup.register_node_type_from_module('pyacq.core.tests.fakenodes', 'FakeSender') nodegroup.register_node_type_from_module('pyacq.core.tests.fakenodes', 'ReceiverWidget') # create ndoes sender = nodegroup.create_node('FakeSender', name='sender') stream_spec = dict(protocol='tcp', interface='127.0.0.1', port='*', 
transfermode='plaindata', streamtype='analogsignal', dtype='float32', shape=(-1, 16), compression ='', scale = None, offset = None, units = '') sender.configure(sample_interval=0.001) sender.output.configure(**stream_spec) sender.initialize() # receiver0 is in remote QApp (in nodegroup) receiver0 = nodegroup.create_node('ReceiverWidget', name='receiver0', tag='<b>I am in distant QApp</b>') receiver0.configure() receiver0.input.connect(sender.output) receiver0.initialize() receiver0.show() # receiver1 is in local QApp app = pg.mkQApp() receiver1 = ReceiverWidget(name='receiver1', tag='<b>I am in local QApp</b>') receiver1.configure() receiver1.input.connect(sender.output) receiver1.initialize() receiver1.show() # start them for a while sender.start() receiver0.start() receiver1.start() def terminate(): sender.stop() receiver0.stop() receiver1.stop() receiver1.close() app.quit() timer = QtCore.QTimer(singleShot=True, interval=2000) timer.timeout.connect(terminate) timer.start() app.exec_() receiver0.close() man.close() if __name__ == '__main__': #~ test_stream_between_local_nodes() #~ test_stream_between_remote_nodes() #~ test_stream_between_local_and_remote_nodes() test_visual_node_both_in_main_qapp_and_remote_qapp()
31.383838
108
0.651754
693
6,214
5.678211
0.206349
0.022872
0.021347
0.03507
0.766709
0.701906
0.695299
0.662008
0.629733
0.629733
0
0.020362
0.225459
6,214
197
109
31.543147
0.797216
0.14065
0
0.707317
0
0
0.108978
0.024468
0
0
0
0
0
1
0.056911
false
0
0.056911
0
0.113821
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
b056f02b3c8a44cf228fd1a5f139da867c408bc2
3,023
py
Python
confused_stud/trash.py
4gatepylon/NeuroMHW
9b912aa827d50a9547c8ed06311136c4c26b70b0
[ "MIT" ]
null
null
null
confused_stud/trash.py
4gatepylon/NeuroMHW
9b912aa827d50a9547c8ed06311136c4c26b70b0
[ "MIT" ]
1
2022-01-26T22:47:09.000Z
2022-01-26T22:47:09.000Z
confused_stud/trash.py
4gatepylon/NeuroMHW
9b912aa827d50a9547c8ed06311136c4c26b70b0
[ "MIT" ]
null
null
null
# NOTE this is what they did for the students dataset # Some nonsense to help you select features that will best predict the label # y=pd.get_dummies(df['user-definedlabeln']) # mi_score=mutual_info_classif(df.drop('user-definedlabeln',axis=1),df['user-definedlabeln']) # mi_score=pd.Series(mi_score,index=df.drop('user-definedlabeln',axis=1).columns) # mi_score=(mi_score*100).sort_values(ascending=False) # print(mi_score) # Selects the top 14 features # print(mi_score.head(14).index) # top_fea=['VideoID', 'Attention', 'Alpha2', 'Delta', 'Gamma1', 'Theta', 'Beta1', # 'Alpha1', 'Mediation', 'Gamma2', 'SubjectID', 'Beta2', 'Raw', 'age'] # Set to zero mean and unit variance (i.e. divide by variance). This assumes thin tails. # https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html # df_sc=StandardScaler().fit_transform(df[top_fea]) # TODO pytorch this shit # import tensorflow as tf # from tensorflow import keras # from tensorflow.keras import callbacks,layers # TODO train/test split # from sklearn.model_selection import train_test_split # Xtr,xte,Ytr,yte=train_test_split(df_sc,y,random_state=108,test_size=0.27) # xtr,xval,ytr,yval=train_test_split(Xtr,Ytr,random_state=108,test_size=0.27) # TODO this is their model, probably too big for what we want to run, but I could be wrong! 
# I'm willing to bet their network is overfitted # Model-Building step, stacking the hidden layers # model=keras.Sequential([ # layers.Dense(64,input_shape=(14,),activation='relu'), # layers.BatchNormalization(), # layers.Dropout(0.27), # layers.Dense(124,activation='relu'), # layers.BatchNormalization(), # layers.Dropout(0.3), # layers.Dense(248,activation='relu'), # layers.BatchNormalization(), # layers.Dropout(0.32), # layers.Dense(512,activation='relu'), # layers.BatchNormalization(), # layers.Dropout(0.27), # layers.Dense(664,activation='relu'), # layers.BatchNormalization(), # layers.Dropout(0.3), # layers.Dense(512,activation='relu'), # layers.BatchNormalization(), # layers.Dropout(0.32), # layers.Dense(264,activation='relu'), # layers.BatchNormalization(), # layers.Dropout(0.27), # layers.Dense(124,activation='relu'), # layers.BatchNormalization(), # layers.Dropout(0.3), # layers.Dense(2,activation='sigmoid') # ]) # Compiling the model with Adamax Optimizer # model.compile(optimizer='adamax',loss='binary_crossentropy',metrics='accuracy') # Creating the callback feature to stop the training in-Between, in case of no improvement # call=callbacks.EarlyStopping(patience=20,min_delta=0.0001,restore_best_weights=True) # Fitting the model to the training data # history=model.fit(xtr,ytr,validation_data=(xval,yval),batch_size=28,epochs=150,callbacks=[call]) # Testing on the testing data # model.evaluate(xte,yte) # training=pd.DataFrame(history.history) # training.loc[:,['loss','val_loss']].plot() # training.loc[:,['accuracy','val_accuracy']].plot()
41.986111
98
0.725438
421
3,023
5.123515
0.482185
0.045897
0.074177
0.140936
0.318498
0.29439
0.268428
0.245248
0.245248
0.245248
0
0.032404
0.122064
3,023
72
99
41.986111
0.780332
0.955342
0
null
0
null
0
0
null
0
0
0.013889
null
1
null
true
0
0
null
null
null
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
1
0
0
0
1
0
0
0
0
0
0
3
b05707528f3bfb31a23d84ea25583d12a16d848d
72
py
Python
example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/N/neutron mag. mom. to Bohr magneton ratio.py
kuanpern/jupyterlab-snippets-multimenus
477f51cfdbad7409eab45abe53cf774cd70f380c
[ "BSD-3-Clause" ]
null
null
null
example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/N/neutron mag. mom. to Bohr magneton ratio.py
kuanpern/jupyterlab-snippets-multimenus
477f51cfdbad7409eab45abe53cf774cd70f380c
[ "BSD-3-Clause" ]
null
null
null
example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/N/neutron mag. mom. to Bohr magneton ratio.py
kuanpern/jupyterlab-snippets-multimenus
477f51cfdbad7409eab45abe53cf774cd70f380c
[ "BSD-3-Clause" ]
1
2021-02-04T04:51:48.000Z
2021-02-04T04:51:48.000Z
constants.physical_constants["neutron mag. mom. to Bohr magneton ratio"]
72
72
0.819444
10
72
5.8
0.9
0
0
0
0
0
0
0
0
0
0
0
0.083333
72
1
72
72
0.878788
0
0
0
0
0
0.547945
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
c674a4f9bfa77a8f4c036c38e95be3c2e82ac88d
85
py
Python
locale/pot/api/examples/_autosummary/pyvista-examples-downloads-download_honolulu-1.py
tkoyama010/pyvista-doc-translations
23bb813387b7f8bfe17e86c2244d5dd2243990db
[ "MIT" ]
4
2020-08-07T08:19:19.000Z
2020-12-04T09:51:11.000Z
locale/pot/api/examples/_autosummary/pyvista-examples-downloads-download_honolulu-1.py
tkoyama010/pyvista-doc-translations
23bb813387b7f8bfe17e86c2244d5dd2243990db
[ "MIT" ]
19
2020-08-06T00:24:30.000Z
2022-03-30T19:22:24.000Z
locale/pot/api/examples/_autosummary/pyvista-examples-downloads-download_honolulu-1.py
tkoyama010/pyvista-doc-translations
23bb813387b7f8bfe17e86c2244d5dd2243990db
[ "MIT" ]
1
2021-03-09T07:50:40.000Z
2021-03-09T07:50:40.000Z
from pyvista import examples dataset = examples.download_honolulu() # doctest:+SKIP
28.333333
55
0.8
10
85
6.7
0.9
0
0
0
0
0
0
0
0
0
0
0
0.117647
85
2
56
42.5
0.893333
0.152941
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
c681b4ef118ef937faf38cb281b260bba37da661
603
py
Python
src/pyrin/database/migration/alembic/handlers/current.py
wilsonGmn/pyrin
25dbe3ce17e80a43eee7cfc7140b4c268a6948e0
[ "BSD-3-Clause" ]
null
null
null
src/pyrin/database/migration/alembic/handlers/current.py
wilsonGmn/pyrin
25dbe3ce17e80a43eee7cfc7140b4c268a6948e0
[ "BSD-3-Clause" ]
null
null
null
src/pyrin/database/migration/alembic/handlers/current.py
wilsonGmn/pyrin
25dbe3ce17e80a43eee7cfc7140b4c268a6948e0
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- """ alembic handlers current module. """ from pyrin.database.migration.alembic.decorators import alembic_cli_handler from pyrin.database.migration.alembic.enumerations import AlembicCLIHandlersEnum from pyrin.database.migration.alembic.handlers.base import AlembicReportingCLIHandlerBase @alembic_cli_handler() class CurrentCLIHandler(AlembicReportingCLIHandlerBase): """ current cli handler class. """ def __init__(self): """ initializes an instance of CurrentCLIHandler. """ super().__init__(AlembicCLIHandlersEnum.CURRENT)
26.217391
89
0.746269
56
603
7.821429
0.517857
0.061644
0.116438
0.178082
0.226027
0
0
0
0
0
0
0.001969
0.157546
603
22
90
27.409091
0.860236
0.212272
0
0
0
0
0
0
0
0
0
0
0
1
0.142857
false
0
0.428571
0
0.714286
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
c6a26ff3844b78f260af6516fa1712bb005ca664
265
py
Python
Day1.1.py
m-berk/AdventOfCode2019
73a930fc24a726186364923fc0575c84e19176af
[ "MIT" ]
1
2021-06-16T07:34:30.000Z
2021-06-16T07:34:30.000Z
Day1.1.py
m-berk/AdventOfCode2019
73a930fc24a726186364923fc0575c84e19176af
[ "MIT" ]
null
null
null
Day1.1.py
m-berk/AdventOfCode2019
73a930fc24a726186364923fc0575c84e19176af
[ "MIT" ]
2
2020-09-03T07:47:52.000Z
2021-02-04T21:07:40.000Z
Total_Fuel_Need =0 Data_File = open("Day1_Data.txt") Data_Lines = Data_File.readlines() for i in range(len(Data_Lines)): Data_Lines[i] = int(Data_Lines[i].rstrip('\n')) Total_Fuel_Need += int(Data_Lines[i] / 3) - 2 print(Total_Fuel_Need)
18.928571
52
0.664151
44
265
3.681818
0.5
0.277778
0.240741
0.160494
0
0
0
0
0
0
0
0.018605
0.188679
265
13
53
20.384615
0.734884
0
0
0
0
0
0.06
0
0
0
0
0
0
1
0
false
0
0
0
0
0.142857
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
c6a70cd9c66360df0c460a33b4a253d77fc41336
188
py
Python
bin/sgml.py
jamesbolden/lingua
3824458338c572c83051031208d660958c085944
[ "Apache-2.0" ]
null
null
null
bin/sgml.py
jamesbolden/lingua
3824458338c572c83051031208d660958c085944
[ "Apache-2.0" ]
null
null
null
bin/sgml.py
jamesbolden/lingua
3824458338c572c83051031208d660958c085944
[ "Apache-2.0" ]
null
null
null
from bs4 import BeautifulSoup def parseSGML(file): fd = open(file, "r", encoding="utf-8") bsObj = BeautifulSoup(fd) return [item.get_text() for item in bsObj.findAll("body")]
26.857143
62
0.680851
27
188
4.703704
0.814815
0
0
0
0
0
0
0
0
0
0
0.012903
0.175532
188
6
63
31.333333
0.806452
0
0
0
0
0
0.053191
0
0
0
0
0
0
1
0.2
false
0
0.2
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
3
c6bdd1083746562349d11da1f7f6400c24f74446
797
py
Python
iris_sdk/models/covered_rate_centers.py
NumberAI/python-bandwidth-iris
0e05f79d68b244812afb97e00fd65b3f46d00aa3
[ "MIT" ]
2
2020-04-13T13:47:59.000Z
2022-02-23T20:32:41.000Z
iris_sdk/models/covered_rate_centers.py
bandwidthcom/python-bandwidth-iris
dbcb30569631395041b92917252d913166f7d3c9
[ "MIT" ]
5
2020-09-18T20:59:24.000Z
2021-08-25T16:51:42.000Z
iris_sdk/models/covered_rate_centers.py
bandwidthcom/python-bandwidth-iris
dbcb30569631395041b92917252d913166f7d3c9
[ "MIT" ]
5
2018-12-12T14:39:50.000Z
2020-11-17T21:42:29.000Z
#!/usr/bin/env python from __future__ import division, absolute_import, print_function from future.builtins import super from iris_sdk.models.base_resource import BaseResource from iris_sdk.models.data.covered_rate_centers import CoveredRateCentersData from iris_sdk.models.rate_center import RateCenter XPATH_COVERED_RATE_CENTERS = "/coveredratecenters" class CoveredRateCenters(BaseResource, CoveredRateCentersData): """Covered rate centers""" _xpath = XPATH_COVERED_RATE_CENTERS def __init__(self, parent=None, client=None): super().__init__(parent, client) CoveredRateCentersData.__init__(self, self) def get(self, id): return RateCenter(self).get(id) def list(self, params): return self._get_data(params=params).covered_rate_center
30.653846
76
0.775408
97
797
6
0.412371
0.094502
0.123711
0.087629
0
0
0
0
0
0
0
0
0.143036
797
26
77
30.653846
0.852123
0.051443
0
0
0
0
0.0253
0
0
0
0
0
0
1
0.2
false
0
0.333333
0.133333
0.8
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
3
c6ca0b91e877d7c729801f8cb8936c52893dfa74
24,113
py
Python
SysPy_ver/funcs/_port_map_assignment_check.py
evlog/SysPy
d1ee6e2ca60492d20339c0016a9c24d027170553
[ "CNRI-Python" ]
4
2017-12-28T14:00:16.000Z
2021-01-21T08:53:14.000Z
SysPy_ver/funcs/_port_map_assignment_check.py
evlog/SysPy
d1ee6e2ca60492d20339c0016a9c24d027170553
[ "CNRI-Python" ]
1
2018-07-31T16:27:00.000Z
2018-07-31T16:27:37.000Z
SysPy_ver/funcs/_port_map_assignment_check.py
evlog/SysPy
d1ee6e2ca60492d20339c0016a9c24d027170553
[ "CNRI-Python" ]
2
2015-10-12T09:13:13.000Z
2020-01-06T12:22:55.000Z
""" ***************************************************************************** * H E A D E R I N F O R M A T I O N * * ***************************************************************************** Project Name: SysPy (System Python) http://cgi.di.uoa.gr/~evlog/syspy.html File Name: _port_map_assignment_check.py Created by: Evangelos Logaras ***************************************************************************** * C O P Y R I G H T N O T I C E * * ***************************************************************************** This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; version 2.1 of the License, a copy of which is available from http://www.gnu.org/licenses/old-licenses/lgpl-2.1.txt. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ***************************************************************************** * D E S C R I P T I O N * * ***************************************************************************** Checking if the 2 signals in a port map assignmet are compatible. """ import _MyExceptions reload(_MyExceptions) from pdb import * def port_map_assignment_check(signals, comp_signals, port_map_assignment_signals, func, comp_name, generic_comp, generic_comp_val): """ FUNCTION: port_map_assignment_check(a[], b[]. 
c[], d str, e str, f[], g str) a: design's signal list b: component's signal list c: port map assignment's signals list d: string name of the design function e: string name of the component f: generics' list g: string of the generics' values - Checking if the 2 signals in a port map assignmet are compatible. """ # Python's variable declerations #---------------------------------------------------------------------------------------------------------------------------------- comp_signals_ex = [] left_signal = '' right_signal = '' flag_left_signal = 0 flag_right_signal = 0 flag_signal_slice_length = 0 right_signal_len = 0 left_signal_len = 0 flag_signal_slice_length = 0 sig_doc = '' pos = 0 #---------------------------------------------------------------------------------------------------------------------------------- left_signal = port_map_assignment_signals[0][1] left_signal = left_signal.replace('=', '') left_signal = left_signal.replace(' ', '') print("port_map_assignment_signals:", port_map_assignment_signals) if (port_map_assignment_signals[1][0] == "name_right_binary_slice"): right_signal = port_map_assignment_signals[1][1] right_signal[0] = right_signal[0].replace(' ', '') elif (port_map_assignment_signals[1][0] == "name_right_item"): right_signal = port_map_assignment_signals[1][1] right_signal[0] = right_signal[0].replace(' ', '') else: right_signal = port_map_assignment_signals[1][1] right_signal = right_signal.replace(' ', '') pos = port_map_assignment_signals[0][2] if ((port_map_assignment_signals[1][0] != "name_right") and (port_map_assignment_signals[1][0] != "const_binary_bit") and (port_map_assignment_signals[1][0] != "const_binary_bits") and (port_map_assignment_signals[1][0] != "open_key") and (port_map_assignment_signals[1][0] != "name_right_binary_slice") and (port_map_assignment_signals[1][0] != "name_right_item")): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": wrong assignment type for signal \"" + 
str(right_signal) + "\" .Only binary signals or constants allowed in port assignments.") # Extracting all component's signals in "comp_signals_ex" list #---------------------------------------------------------------------------------------------------------------------------------- for i in range(len(comp_signals)): n = comp_signals[i]['N'].__doc__ if (n.find("str") == 0): comp_signals_ex.append(comp_signals[i]) elif (n.find("list") == 0): for j in range(len(comp_signals[i]['N'])): comp_signals_ex.append({'D': comp_signals[i]['D'], 'T': comp_signals[i]['T'], 'L': comp_signals[i]['L'], 'N': comp_signals[i]['N'][j]}) #---------------------------------------------------------------------------------------------------------------------------------- # Identifying the signals in the port map assignment #---------------------------------------------------------------------------------------------------------------------------------- ## Checking if the left signal has been declared in the component's declaration in "_struct_lib.py" and setting the "flag_left_signal" flag_left_signal = 0 for i in range(len(comp_signals_ex)): if (left_signal == comp_signals_ex[i]['N']): left_signal = comp_signals_ex[i] flag_left_signal = 1 print("left_signal:", left_signal) if (flag_left_signal == 0): raise _MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + left_signal + "\" not declared in component \"" + comp_name + "\".") flag_right_signal = 0 flag_signal_slice_length = 0 ## The existence of the right signal has already been checked in "_signal_declaration_check" for i in range(len(signals)): if (port_map_assignment_signals[1][0] == "name_right_binary_slice"): if ((right_signal[0] == signals[i]['N'])): right_signal[0] = {'D': signals[i]['D'], 'T': signals[i]['T'], 'L': [int(right_signal[1]), int(right_signal[2])], 'N': signals[i]['N']} flag_right_signal = 1 if (signals[i]['L'][0] > signals[i]['L'][1]): if ((int(right_signal[1]) <= 
signals[i]['L'][0]) and (int(right_signal[2]) >= signals[i]['L'][1])): flag_signal_slice_length = 1 elif (signals[i]['L'][0] < signals[i]['L'][1]): if ((int(right_signal[1]) >= signals[i]['L'][0]) and (int(right_signal[2]) <= signals[i]['L'][1])): flag_signal_slice_length = 1 elif (port_map_assignment_signals[1][0] == "name_right_item"): if ((right_signal[0] == signals[i]['N'])): ## Tracking item assignments for binary and array signals if (len(right_signal) == 2): right_signal[0] = {'D': signals[i]['D'], 'T': signals[i]['T'], 'L': 1, 'N': signals[i]['N']} flag_right_signal = 1 if (signals[i]['L'][0] > signals[i]['L'][1]): if ((int(right_signal[1]) <= signals[i]['L'][0]) and (int(right_signal[1]) >= signals[i]['L'][1])): flag_signal_slice_length = 1 elif (signals[i]['L'][0] < signals[i]['L'][1]): if ((int(right_signal[1]) >= signals[i]['L'][0]) and (int(right_signal[1]) <= signals[i]['L'][1])): flag_signal_slice_length = 1 else: if ((right_signal == signals[i]['N'])): right_signal = signals[i] flag_right_signal = 1 if (port_map_assignment_signals[1][0] == "name_right_binary_slice"): right_signal = right_signal[0] elif (port_map_assignment_signals[1][0] == "name_right_item"): right_signal = right_signal[0] print("right_signal:", right_signal) sig_doc = right_signal.__doc__ if ((flag_right_signal == 0) and (right_signal != "\"open\"") and (sig_doc.find("str") != 0)): if (port_map_assignment_signals[1][0] == "name_right_binary_slice"): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + right_signal['N'] + "\" not declared.") elif (port_map_assignment_signals[1][0] == "name_right_item"): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + right_signal['N'] + "\" not declared.") else: raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + right_signal + "\" not declared.") if ((flag_signal_slice_length == 0) and 
(port_map_assignment_signals[1][0] == "name_right_binary_slice")): if (flag_right_signal == 1): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line " + str(pos) + ": signal \"" + right_signal['N'] + "\" is not compatible with the slice assignment \"" + right_signal['N'] + '[' + str(right_signal['L'][0]) + ':' + str(right_signal['L'][1]) + "].") elif (flag_right_signal == 0): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line " + str(pos) + ": signal \"" + right_signal + "\" not declared.") elif ((flag_signal_slice_length == 0) and (port_map_assignment_signals[1][0] == "name_right_item")): if (flag_right_signal == 1): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line " + str(pos) + ": signal \"" + right_signal['N'] + "\" is not compatible with the item assignment.") elif (flag_right_signal == 0): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line " + str(pos) + ": signal \"" + right_signal + "\" not declared.") # Checking signals compatibility in the port map assignment #---------------------------------------------------------------------------------------------------------------------------------- ## Evaluating generic signals' widths g = generic_comp.__doc__ if (g.find("list") == 0): if (generic_comp[0] == True): i = generic_comp_val.find('(') generic_comp_val = generic_comp_val[i + 1:] generic_comp_val = generic_comp_val.replace('>', '') generic_comp_val = generic_comp_val.replace(',', ';') exec(generic_comp_val) print("rignth_signal:", right_signal) print("left_signal:", left_signal) sig_doc = right_signal.__doc__ if (sig_doc.find("dict") == 0): if ((right_signal['T'] != 'b') and (right_signal['T'] != 'arrb')): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + right_signal['N'] + "\" has different type from signal \"" + left_signal['N'] + "\" in component \"" + comp_name + "\". 
Only binary signals allowed in port assignments.") elif ((sig_doc.find("str") == 0) and (right_signal == "\"open\"")): if ((left_signal['D'] != 'o') and (left_signal['D'] != 'io')): raise _MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": input signal \"" + left_signal['N'] + "\" in component \"" + comp_name + "\" cannot be assigned with \"open\" keyword.") elif ((sig_doc.find("str") == 0) and (right_signal != "\"open\"")): if (left_signal['D'] != 'i'): raise _MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": output signal \"" + left_signal['N'] + "\" in component \"" + comp_name + "\" cannot be assigned a constant value.") if (sig_doc.find("dict") == 0): if ((left_signal['D'] == 'i') and (right_signal['D'] == 'o')): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + left_signal['N'] + "\" in component \"" + comp_name + "\" cannot be assigned with an output signal.") if ((left_signal['D'] == 'o') and (right_signal['D'] == 'i')): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + left_signal['N'] + "\" in component \"" + comp_name + "\" cannot be assigned with an input signal.") L = left_signal['L'].__doc__ if (L.find("list") == 0): L0 = left_signal['L'][0].__doc__ L1 = left_signal['L'][1].__doc__ if (L.find("int") == 0): left_signal_len = left_signal['L'] elif (L.find("list") == 0): if ((L0.find("int") == 0) and (L1.find("int") == 0)): left_signal_len = abs(left_signal['L'][0] - left_signal['L'][1]) + 1 elif ((L0.find("str") == 0) and (L1.find("int") == 0)): left_signal_len = abs(eval(left_signal['L'][0]) - left_signal['L'][1]) + 1 elif ((L0.find("int") == 0) and (L1.find("str") == 0)): left_signal_len = abs(left_signal['L'][0] - eval(left_signal['L'][1])) + 1 elif ((L0.find("str") == 0) and (L1.find("str") == 0)): left_signal_len = abs(eval(left_signal['L'][0]) - eval(left_signal['L'][1])) + 1 if 
(sig_doc.find("dict") == 0): L = right_signal['L'].__doc__ if (L.find("int") == 0): right_signal_len = right_signal['L'] elif (L.find("list") == 0): right_signal_len = abs(right_signal['L'][0] - right_signal['L'][1]) + 1 elif ((sig_doc.find("str") == 0) and (right_signal != "open")): right_signal_len = len(right_signal) - 2 L = left_signal['L'].__doc__ if (L.find("list") == 0): L0 = left_signal['L'][0].__doc__ L1 = left_signal['L'][1].__doc__ #if ((L0.find("int") == 0) and (L1.find("int") == 0)): if(right_signal != "\"open\""): if (right_signal_len != left_signal_len): if (port_map_assignment_signals[1][0] == "name_right_binary_slice"): if ((L0.find("int") == 0) and (L1.find("int") == 0)): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + right_signal['N'] + '[' + str(right_signal['L'][0]) + ':' + str(right_signal['L'][1]) + ']' + "\" has different length from signal \"" + left_signal['N'] + '[' + str(left_signal['L'][0]) + ':' + str(left_signal['L'][1]) + ']' + "\" in component \"" + comp_name + "\".") elif ((L0.find("str") == 0) and (L1.find("int") == 0)): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + right_signal['N'] + '[' + str(right_signal['L'][0]) + ':' + str(right_signal['L'][1]) + ']' + "\" has different length from signal \"" + left_signal['N'] + '[' + str(eval(left_signal['L'][0])) + ':' + str(left_signal['L'][1]) + ']' + "\" in component \"" + comp_name + "\".") elif ((L0.find("int") == 0) and (L1.find("str") == 0)): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + right_signal['N'] + '[' + str(right_signal['L'][0]) + ':' + str(right_signal['L'][1]) + ']' + "\" has different length from signal \"" + left_signal['N'] + '[' + str(left_signal['L'][0]) + ':' + str(eval(left_signal['L'][1])) + ']' + "\" in component \"" + comp_name + "\".") elif ((L0.find("str") == 0) and (L1.find("str") == 0)): 
raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + right_signal['N'] + '[' + str(right_signal['L'][0]) + ':' + str(right_signal['L'][1]) + ']' + "\" has different length from signal \"" + left_signal['N'] + '[' + str(eval(left_signal['L'][0])) + ':' + str(eval(left_signal['L'][1])) + ']' + "\" in component \"" + comp_name + "\".") elif (port_map_assignment_signals[1][0] == "name_right_item"): if ((L0.find("int") == 0) and (L1.find("int") == 0)): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + right_signal['N'] + '[' + str(right_signal['L'][0]) + ':' + str(right_signal['L'][1]) + ']' + "\" has different length from signal \"" + left_signal['N'] + '[' + str(left_signal['L'][0]) + ':' + str(left_signal['L'][1]) + ']' + "\" in component \"" + comp_name + "\".") elif ((L0.find("str") == 0) and (L1.find("int") == 0)): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + right_signal['N'] + '[' + str(right_signal['L'][0]) + ':' + str(right_signal['L'][1]) + ']' + "\" has different length from signal \"" + left_signal['N'] + '[' + str(eval(left_signal['L'][0])) + ':' + str(left_signal['L'][1]) + ']' + "\" in component \"" + comp_name + "\".") elif ((L0.find("int") == 0) and (L1.find("str") == 0)): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + right_signal['N'] + '[' + str(right_signal['L'][0]) + ':' + str(right_signal['L'][1]) + ']' + "\" has different length from signal \"" + left_signal['N'] + '[' + str(left_signal['L'][0]) + ':' + str(eval(left_signal['L'][1])) + ']' + "\" in component \"" + comp_name + "\".") elif ((L0.find("str") == 0) and (L1.find("str") == 0)): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + right_signal['N'] + '[' + str(right_signal['L'][0]) + ':' + str(right_signal['L'][1]) + ']' + 
"\" has different length from signal \"" + left_signal['N'] + '[' + str(eval(left_signal['L'][0])) + ':' + str(eval(left_signal['L'][1])) + ']' + "\" in component \"" + comp_name + "\".") else: if (sig_doc.find("dict") == 0): if ((L0.find("int") == 0) and (L1.find("int") == 0)): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + right_signal['N'] + '[' + str(right_signal['L'][0]) + ':' + str(right_signal['L'][1]) + ']' + "\" has different length from signal \"" + left_signal['N'] + '[' + str(left_signal['L'][0]) + ':' + str(left_signal['L'][1]) + ']' + "\" in component \"" + comp_name + "\".") elif ((L0.find("str") == 0) and (L1.find("int") == 0)): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + right_signal['N'] + '[' + str(right_signal['L'][0]) + ':' + str(right_signal['L'][1]) + ']' + "\" has different length from signal \"" + left_signal['N'] + '[' + str(eval(left_signal['L'][0])) + ':' + str(left_signal['L'][1]) + ']' + "\" in component \"" + comp_name + "\".") elif ((L0.find("int") == 0) and (L1.find("str") == 0)): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + right_signal['N'] + '[' + str(right_signal['L'][0]) + ':' + str(right_signal['L'][1]) + ']' + "\" has different length from signal \"" + left_signal['N'] + '[' + str(left_signal['L'][0]) + ':' + str(eval(left_signal['L'][1])) + ']' + "\" in component \"" + comp_name + "\".") elif ((L0.find("str") == 0) and (L1.find("str") == 0)): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": signal \"" + right_signal['N'] + '[' + str(right_signal['L'][0]) + ':' + str(right_signal['L'][1]) + ']' + "\" has different length from signal \"" + left_signal['N'] + '[' + str(eval(left_signal['L'][0])) + ':' + str(eval(left_signal['L'][1])) + ']' + "\" in component \"" + comp_name + "\".") elif (sig_doc.find("str") == 0): if 
(L.find("list") == 0): if ((L0.find("int") == 0) and (L1.find("int") == 0)): raise _MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": binary constant " + right_signal + " has different length from signal \"" + left_signal['N'] + '[' + str(eval(left_signal['L'][0])) + ':' + str(left_signal['L'][1]) + ']' + "\" in component \"" + comp_name + "\".") elif ((L0.find("str") == 0) and (L1.find("int") == 0)): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": binary constant " + right_signal + " has different length from signal \"" + left_signal['N'] + '[' + str(eval(left_signal['L'][0])) + ':' + str(left_signal['L'][1]) + ']' + "\" in component \"" + comp_name + "\".") elif ((L0.find("int") == 0) and (L1.find("str") == 0)): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": binary constant " + right_signal + " has different length from signal \"" + left_signal['N'] + '[' + str(left_signal['L'][0]) + ':' + str(eval(left_signal['L'][1])) + ']' + "\" in component \"" + comp_name + "\".") elif ((L0.find("str") == 0) and (L1.find("str") == 0)): raise funcs._MyExceptions.MyExceptions("File :\"" + func +".py\": Line: " + str(pos) + ": binary constant " + right_signal + " has different length from signal \"" + left_signal['N'] + '[' + str(eval(left_signal['L'][0])) + ':' + str(eval(left_signal['L'][1])) + ']' + "\" in component \"" + comp_name + "\".")
78.800654
442
0.463941
2,647
24,113
4.015111
0.081602
0.12213
0.048645
0.090327
0.77691
0.733346
0.710576
0.673034
0.669646
0.642266
0
0.018696
0.303488
24,113
305
443
79.059016
0.614111
0.163688
0
0.539683
0
0.021164
0.334813
0.085224
0
0
0
0
0
1
0.005291
false
0
0.010582
0
0.015873
0.026455
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
c6e4a8699e9a8b38d680a9982ab0f64eeb4722dd
671
py
Python
src/contexts/kms/computed_data/domain/repositories/ComputedDataRepository.py
parada3desu/foxy-key-broker
fc95de9e9bfd61c506a9a18aa64c5c9cbeac8a9c
[ "Apache-2.0" ]
null
null
null
src/contexts/kms/computed_data/domain/repositories/ComputedDataRepository.py
parada3desu/foxy-key-broker
fc95de9e9bfd61c506a9a18aa64c5c9cbeac8a9c
[ "Apache-2.0" ]
null
null
null
src/contexts/kms/computed_data/domain/repositories/ComputedDataRepository.py
parada3desu/foxy-key-broker
fc95de9e9bfd61c506a9a18aa64c5c9cbeac8a9c
[ "Apache-2.0" ]
null
null
null
from src.contexts.kms.computed_data.domain.entities.ComputedData import ComputedData from src.contexts.kms.computed_data.domain.entities.ComputedDataInput import ComputedDataInput from src.contexts.kms.computed_data.domain.entities.ComputedDataType import ComputedDataType from src.contexts.kms.cryptokeys.domain.entities.CryptoKey import CryptoKey from src.contexts.shared.domain.Interface import Interface class ComputedDataRepository(Interface): async def find_one_by_crypto_key_and_input(self, key: CryptoKey, input: ComputedDataInput, type: ComputedDataType) -> ComputedData: raise NotImplementedError()
51.615385
94
0.789866
73
671
7.136986
0.424658
0.067179
0.143954
0.138196
0.253359
0.253359
0.253359
0.253359
0
0
0
0
0.14456
671
12
95
55.916667
0.907666
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.555556
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
3
c6f76ecd3eac07ccd431ca3b66aa276d641c32a0
84
py
Python
python3.6/src/furnace.py
ProjectFurnace/module-templates
e6abae68f7262806ab3918ea407f1bdffbf3add4
[ "Apache-2.0" ]
null
null
null
python3.6/src/furnace.py
ProjectFurnace/module-templates
e6abae68f7262806ab3918ea407f1bdffbf3add4
[ "Apache-2.0" ]
null
null
null
python3.6/src/furnace.py
ProjectFurnace/module-templates
e6abae68f7262806ab3918ea407f1bdffbf3add4
[ "Apache-2.0" ]
null
null
null
async def processEvent(event): # Do event processing here ... return event
16.8
34
0.678571
10
84
5.7
0.8
0
0
0
0
0
0
0
0
0
0
0
0.238095
84
4
35
21
0.890625
0.333333
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
05a0a8daee8233c28c1583c28072da372169b844
91
py
Python
x_3_1.py
ofl/kuku2
7247fb1862d917d23258ebe7a93dca5939433225
[ "MIT" ]
null
null
null
x_3_1.py
ofl/kuku2
7247fb1862d917d23258ebe7a93dca5939433225
[ "MIT" ]
1
2021-11-13T08:03:04.000Z
2021-11-13T08:03:04.000Z
x_3_1.py
ofl/kuku2
7247fb1862d917d23258ebe7a93dca5939433225
[ "MIT" ]
null
null
null
# x_3_1 # # mathモジュールからインポートした円周率を使って半径5の円の面積を計算してください import math print(math.pi) r = 5
9.1
44
0.769231
11
91
6.181818
0.909091
0
0
0
0
0
0
0
0
0
0
0.051948
0.153846
91
9
45
10.111111
0.831169
0.527473
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
05a0acaf0ca3e8c3a04e52292720a2e31e6ddb44
940
py
Python
pyorm/db/models/managers.py
TonyFlury/pyorm
6d811fa32d3ba4c4a013fbb8f627277fa9d20b64
[ "MIT" ]
null
null
null
pyorm/db/models/managers.py
TonyFlury/pyorm
6d811fa32d3ba4c4a013fbb8f627277fa9d20b64
[ "MIT" ]
null
null
null
pyorm/db/models/managers.py
TonyFlury/pyorm
6d811fa32d3ba4c4a013fbb8f627277fa9d20b64
[ "MIT" ]
null
null
null
#!/usr/bin/env python # coding=utf-8 """ # pyORM : Implementation of managers.py Summary : <summary of module/class being implemented> Use Case : As a <actor> I want <outcome> So that <justification> Testable Statements : Can I <Boolean statement> .... """ __version__ = "0.1" __author__ = 'Tony Flury : anthony.flury@btinternet.com' __created__ = '26 Aug 2017' class Manager: def __init__(self, name='', model=None): self._name = name self._model = model @property def model(self): return self._model @property def name(self): return self._name @name.setter def name(self, new_name): if self.name: raise AttributeError('Cannot change name attribute once set') self._name = new_name # Todo Add all relevant methods to the Manager - including filters etc #Todo write ForiegnKey, One to One and Many to Many Managers
21.363636
78
0.647872
122
940
4.803279
0.655738
0.068259
0.040956
0
0
0
0
0
0
0
0
0.012857
0.255319
940
44
79
21.363636
0.824286
0.418085
0
0.111111
0
0
0.171322
0.052142
0
0
0
0.022727
0
1
0.222222
false
0
0
0.111111
0.388889
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
1
0
0
0
3
05e482ac16fc97e856bc955fd606ef3cd58f3810
848
py
Python
app/contact/routes.py
zSelimReborn/TopFlix
236e113dd1edac2ece914cb6622562c3fafa3376
[ "Apache-2.0" ]
null
null
null
app/contact/routes.py
zSelimReborn/TopFlix
236e113dd1edac2ece914cb6622562c3fafa3376
[ "Apache-2.0" ]
3
2020-05-18T16:34:44.000Z
2020-05-18T16:34:45.000Z
app/contact/routes.py
zSelimReborn/TopFlix
236e113dd1edac2ece914cb6622562c3fafa3376
[ "Apache-2.0" ]
null
null
null
from flask import request, escape, render_template, redirect, flash, url_for, jsonify, current_app, g from app.contact import bp from flask_login import current_user, login_required from app.contact.forms import ContactForm from app.contact.email import send_contact_email from app.auth.forms import LoginForm, RegisterForm, RequestPasswordForm @bp.before_request def inject_user_forms(): g.login_form = LoginForm() g.register_form = RegisterForm() g.reset_form = RequestPasswordForm() @bp.route("/", methods=["GET", "POST"]) def new_contact(): contact_form = ContactForm() if contact_form.validate_on_submit(): send_contact_email(contact_form) flash("Messaggio inviato correttamente") return redirect(url_for("main.homepage")) return render_template("contact/index.html", form=contact_form)
36.869565
101
0.759434
110
848
5.627273
0.463636
0.045234
0.067851
0
0
0
0
0
0
0
0
0
0.143868
848
23
102
36.869565
0.852617
0
0
0
0
0
0.08245
0
0
0
0
0
0
1
0.105263
false
0.105263
0.315789
0
0.526316
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
3
05f19a639859e179dbe0fc04512e43cd736ac355
173
py
Python
pwdtk/auth_backends_settings.py
galech/django-pwdtk
0f780e92ceb0014240e3bf8aaf431cb4eb464112
[ "MIT" ]
1
2020-02-26T11:10:45.000Z
2020-02-26T11:10:45.000Z
pwdtk/auth_backends_settings.py
galech/django-pwdtk
0f780e92ceb0014240e3bf8aaf431cb4eb464112
[ "MIT" ]
11
2019-06-14T13:19:48.000Z
2021-10-02T00:32:22.000Z
pwdtk/auth_backends_settings.py
galech/django-pwdtk
0f780e92ceb0014240e3bf8aaf431cb4eb464112
[ "MIT" ]
5
2019-02-18T17:52:11.000Z
2020-11-25T09:41:06.000Z
import logging from pwdtk.settings import * # noqa: F401,F403 logger = logging.getLogger() logger.warning("This module is obosolete. Please use pwdtk.settings instead")
21.625
77
0.768786
23
173
5.782609
0.782609
0.195489
0
0
0
0
0
0
0
0
0
0.040268
0.138728
173
7
78
24.714286
0.852349
0.086705
0
0
0
0
0.378205
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
05f6d67ea278f4cc03a6fdd1bcd5c73460a19bf6
192
py
Python
janitriapp/serializers.py
kumarisneha/daily_news
dc067f7474cac94f6df351133efdfffb41c52627
[ "MIT" ]
null
null
null
janitriapp/serializers.py
kumarisneha/daily_news
dc067f7474cac94f6df351133efdfffb41c52627
[ "MIT" ]
null
null
null
janitriapp/serializers.py
kumarisneha/daily_news
dc067f7474cac94f6df351133efdfffb41c52627
[ "MIT" ]
null
null
null
from django.core import serializers from django.contrib.auth.models import User from janitriapp.models import UserInterest, NewsWebsite json_serializer = serializers.get_serializer("json")()
32
55
0.838542
24
192
6.625
0.625
0.125786
0
0
0
0
0
0
0
0
0
0
0.088542
192
6
56
32
0.908571
0
0
0
0
0
0.020725
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
af2f70f709c643b8c178fc84adcc85163a96ad60
564
py
Python
venv/Lib/site-packages/PyOpenGL-3.0.1/OpenGL/raw/GL/ARB/shadow_ambient.py
temelkirci/Motion_Editor
a8b8d4c4d2dcc9be28385600f56066cef92a38ad
[ "MIT" ]
1
2022-03-02T17:07:20.000Z
2022-03-02T17:07:20.000Z
venv/Lib/site-packages/PyOpenGL-3.0.1/OpenGL/raw/GL/ARB/shadow_ambient.py
temelkirci/RealTime_6DOF_Motion_Editor
a8b8d4c4d2dcc9be28385600f56066cef92a38ad
[ "MIT" ]
null
null
null
venv/Lib/site-packages/PyOpenGL-3.0.1/OpenGL/raw/GL/ARB/shadow_ambient.py
temelkirci/RealTime_6DOF_Motion_Editor
a8b8d4c4d2dcc9be28385600f56066cef92a38ad
[ "MIT" ]
null
null
null
'''OpenGL extension ARB.shadow_ambient Automatically generated by the get_gl_extensions script, do not edit! ''' from OpenGL import platform, constants, constant, arrays from OpenGL import extensions from OpenGL.GL import glget import ctypes EXTENSION_NAME = 'GL_ARB_shadow_ambient' _DEPRECATED = False GL_TEXTURE_COMPARE_FAIL_VALUE_ARB = constant.Constant( 'GL_TEXTURE_COMPARE_FAIL_VALUE_ARB', 0x80BF ) def glInitShadowAmbientARB(): '''Return boolean indicating whether this extension is available''' return extensions.hasGLExtension( EXTENSION_NAME )
33.176471
100
0.817376
72
564
6.138889
0.583333
0.067873
0.072398
0.090498
0.126697
0.126697
0
0
0
0
0
0.006036
0.118794
564
16
101
35.25
0.8833
0.297872
0
0
1
0
0.140625
0.140625
0
0
0.015625
0
0
1
0.111111
false
0
0.444444
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
af3e0d0e8752871b63a95a87b284f235513e8fa7
192
py
Python
packages/PIPS/validation/C_syntax/float02.py
DVSR1966/par4all
86b33ca9da736e832b568c5637a2381f360f1996
[ "MIT" ]
51
2015-01-31T01:51:39.000Z
2022-02-18T02:01:50.000Z
packages/PIPS/validation/C_syntax/float02.py
DVSR1966/par4all
86b33ca9da736e832b568c5637a2381f360f1996
[ "MIT" ]
7
2017-05-29T09:29:00.000Z
2019-03-11T16:01:39.000Z
packages/PIPS/validation/C_syntax/float02.py
DVSR1966/par4all
86b33ca9da736e832b568c5637a2381f360f1996
[ "MIT" ]
12
2015-03-26T08:05:38.000Z
2022-02-18T02:01:51.000Z
from __future__ import with_statement from pyps import workspace wname = "float02" with workspace(wname+".c",name=wname,deleteOnCLose=True, deleteOnCreate=True) as w: w.fun.main.display()
32
83
0.78125
27
192
5.37037
0.703704
0.193103
0
0
0
0
0
0
0
0
0
0.011696
0.109375
192
5
84
38.4
0.836257
0
0
0
0
0
0.046875
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
af3fe4beeada52a1013dbc3fc606a1a73e6bdd99
208
py
Python
MiniTwitter/MiniTwitter/serializer.py
camumbembe/mini_twitter
af9c77813d94ec833fcfd36e3c77bb835b2a703b
[ "MIT" ]
null
null
null
MiniTwitter/MiniTwitter/serializer.py
camumbembe/mini_twitter
af9c77813d94ec833fcfd36e3c77bb835b2a703b
[ "MIT" ]
null
null
null
MiniTwitter/MiniTwitter/serializer.py
camumbembe/mini_twitter
af9c77813d94ec833fcfd36e3c77bb835b2a703b
[ "MIT" ]
null
null
null
from rest_framework import serializers from .models import Tweet class TweetModelSerializer(serializers.ModelSerializer): class Meta: model = Tweet fiels = ('author', 'content', 'likes')
26
56
0.716346
21
208
7.047619
0.761905
0
0
0
0
0
0
0
0
0
0
0
0.197115
208
8
57
26
0.886228
0
0
0
0
0
0.086124
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
af54085069d99c7b426c58db6586a7846ed829b6
757
py
Python
ModelServices/eventTriggerOutputDeviceMappingServices.py
tuanldchainos/HcPullData
65f89cfdcae135781aad4b3edf210c0ecd2d6a1c
[ "Apache-2.0" ]
null
null
null
ModelServices/eventTriggerOutputDeviceMappingServices.py
tuanldchainos/HcPullData
65f89cfdcae135781aad4b3edf210c0ecd2d6a1c
[ "Apache-2.0" ]
null
null
null
ModelServices/eventTriggerOutputDeviceMappingServices.py
tuanldchainos/HcPullData
65f89cfdcae135781aad4b3edf210c0ecd2d6a1c
[ "Apache-2.0" ]
null
null
null
from Repository.eventTriggerOutputDeviceMappingRepo import eventTriggerOutputDeviceMappingRepo from sqlalchemy import Table from sqlalchemy.engine.base import Connection from sqlalchemy.sql.expression import BinaryExpression class eventTriggerOutputDeviceMappingServices(): __eventTriggerOutputDeviceMappingRepo: eventTriggerOutputDeviceMappingRepo def __init__(self, eventTriggerOutputDeviceMappingTable: Table, context: Connection): self.__eventTriggerOutputDeviceMappingRepo = eventTriggerOutputDeviceMappingRepo(eventTriggerOutputDeviceMappingTable, context=context) def AddManyEventTriggerOutputDeviceMappingWithCustomData(self, l: list): self.__eventTriggerOutputDeviceMappingRepo.InsertManyWithCustomData(l)
58.230769
143
0.857332
47
757
13.595745
0.489362
0.065728
0
0
0
0
0
0
0
0
0
0
0.101717
757
13
144
58.230769
0.939706
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.8
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
af5caba52e0f2f55e436148261af246282a6fd90
23
py
Python
spinegeneric/__init__.py
renelabounek/spine-generic
e86eaf5e5a6f912dd348cfb0ea5bd266dc38ea4d
[ "MIT" ]
6
2020-08-26T15:12:55.000Z
2022-03-23T16:52:18.000Z
spinegeneric/__init__.py
renelabounek/spine-generic
e86eaf5e5a6f912dd348cfb0ea5bd266dc38ea4d
[ "MIT" ]
153
2020-07-01T21:04:15.000Z
2022-01-04T19:39:45.000Z
spinegeneric/__init__.py
renelabounek/spine-generic
e86eaf5e5a6f912dd348cfb0ea5bd266dc38ea4d
[ "MIT" ]
5
2019-05-01T15:37:10.000Z
2020-06-06T03:51:39.000Z
__version__ = '2.5dev'
11.5
22
0.695652
3
23
4
1
0
0
0
0
0
0
0
0
0
0
0.1
0.130435
23
1
23
23
0.5
0
0
0
0
0
0.26087
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
af608b4b05e9eb7d667f6973b2d1d24da65b5e19
253
py
Python
src/pylo/__init__.py
olympus112/pylo2
cfbe29d1c2f8eead0193ee2d024090555407c528
[ "MIT" ]
80
2020-10-20T14:25:28.000Z
2022-02-27T14:29:24.000Z
src/pylo/__init__.py
olympus112/pylo2
cfbe29d1c2f8eead0193ee2d024090555407c528
[ "MIT" ]
8
2020-10-20T14:16:55.000Z
2021-03-19T13:51:54.000Z
src/pylo/__init__.py
olympus112/pylo2
cfbe29d1c2f8eead0193ee2d024090555407c528
[ "MIT" ]
7
2020-10-21T21:01:31.000Z
2021-09-29T09:57:14.000Z
# from .engines.language import Constant, Variable, Functor, Structure, Predicate, List, Atom, Negation, Conj, Clause, list_func, c_var, c_pred, c_fresh_var, c_const, c_functor, c_literal, c_symbol # from pylo.engines.prolog.prologsolver import Prolog
63.25
197
0.794466
38
253
5.052632
0.657895
0.041667
0
0
0
0
0
0
0
0
0
0
0.110672
253
3
198
84.333333
0.853333
0.976285
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
af7e53c396436f85a3d9df567bcae59dbef83b6f
8,288
py
Python
racelines/Austin-1000-4-2020-11-16-145405.py
hyunsukgo/deepracer
3a5e6ebc5dcda6a4d2166b090bf9dd2e946dc0ce
[ "MIT-0" ]
null
null
null
racelines/Austin-1000-4-2020-11-16-145405.py
hyunsukgo/deepracer
3a5e6ebc5dcda6a4d2166b090bf9dd2e946dc0ce
[ "MIT-0" ]
null
null
null
racelines/Austin-1000-4-2020-11-16-145405.py
hyunsukgo/deepracer
3a5e6ebc5dcda6a4d2166b090bf9dd2e946dc0ce
[ "MIT-0" ]
null
null
null
array([[ -8.71756106, -1.36180276], [ -8.58324975, -1.6198254 ], [ -8.44660752, -1.87329902], [ -8.30694854, -2.12042891], [ -8.16366778, -2.35941504], [ -8.01626074, -2.58846783], [ -7.8643173 , -2.80576865], [ -7.70752251, -3.0094502 ], [ -7.5458139 , -3.19806094], [ -7.37912158, -3.3697834 ], [ -7.20747945, -3.5226388 ], [ -7.03113284, -3.65485777], [ -6.85017635, -3.76269362], [ -6.66523309, -3.84275185], [ -6.47736681, -3.89099441], [ -6.28828709, -3.90157094], [ -6.10156897, -3.86340302], [ -5.91871331, -3.79305295], [ -5.74014533, -3.69605418], [ -5.56572991, -3.57762694], [ -5.39517681, -3.44210881], [ -5.22781241, -3.29456196], [ -5.06232383, -3.14085048], [ -4.89257735, -2.98799629], [ -4.71966422, -2.83672204], [ -4.54372346, -2.68713228], [ -4.36476778, -2.53940585], [ -4.18276114, -2.39377984], [ -3.99760462, -2.25058361], [ -3.80926438, -2.11016077], [ -3.61765447, -1.97299377], [ -3.4227503 , -1.83963827], [ -3.22466935, -1.71063201], [ -3.02363515, -1.5864963 ], [ -2.8199305 , -1.46774788], [ -2.61386232, -1.3549046 ], [ -2.40573771, -1.24848664], [ -2.19584837, -1.14901525], [ -1.98446111, -1.05701006], [ -1.77181225, -0.97298507], [ -1.55810414, -0.89744287], [ -1.34350217, -0.83086662], [ -1.12813134, -0.77370943], [ -0.91207182, -0.72638078], [ -0.69535387, -0.68923045], [ -0.47795283, -0.66253066], [ -0.25978553, -0.64645761], [ -0.04070946, -0.64107399], [ 0.17947379, -0.64631382], [ 0.40100956, -0.661971 ], [ 0.62417469, -0.68769217], [ 0.84925505, -0.72297438], [ 1.0765168 , -0.7671692 ], [ 1.30617386, -0.81949558], [ 1.53834874, -0.87904872], [ 1.77300538, -0.94474994], [ 2.01004375, -1.0156523 ], [ 2.24941382, -1.09111049], [ 2.49090963, -1.17034172], [ 2.76505053, -1.25699281], [ 3.0392737 , -1.33956764], [ 3.31357819, -1.4173004 ], [ 3.58794949, -1.48951235], [ 3.86235753, -1.55528368], [ 4.13674424, -1.61362995], [ 4.41101533, -1.66379521], [ 4.68503054, -1.70493339], [ 4.95859183, -1.73617318], [ 5.23143608, -1.75675047], [ 5.50323138, 
-1.76605581], [ 5.77357759, -1.7636462 ], [ 6.04201219, -1.74925505], [ 6.30802249, -1.72280316], [ 6.57106704, -1.6844134 ], [ 6.83061188, -1.63443731], [ 7.0861612 , -1.57343982], [ 7.33719115, -1.50196243], [ 7.58299863, -1.42023179], [ 7.8226528 , -1.32820378], [ 8.0545371 , -1.22514719], [ 8.27658784, -1.11037577], [ 8.48611424, -0.9831958 ], [ 8.67910996, -0.84268891], [ 8.85098102, -0.68873567], [ 8.99611087, -0.52182284], [ 9.10907438, -0.34357458], [ 9.18417432, -0.15630253], [ 9.21305752, 0.03692533], [ 9.1840698 , 0.23008171], [ 9.11910658, 0.4192829 ], [ 9.02164578, 0.60281683], [ 8.8945003 , 0.77935859], [ 8.73948257, 0.94759291], [ 8.5591441 , 1.10658568], [ 8.35621373, 1.25571578], [ 8.13392856, 1.39493299], [ 7.89600098, 1.5249387 ], [ 7.64494533, 1.64617321], [ 7.38361784, 1.75967206], [ 7.11449512, 1.86661923], [ 6.83961129, 1.96821002], [ 6.56059338, 2.06558415], [ 6.27873646, 2.15980551], [ 5.99507103, 2.25185709], [ 5.71041846, 2.34264296], [ 5.42539621, 2.43296897], [ 5.14049084, 2.52356403], [ 4.85570433, 2.61443171], [ 4.57103584, 2.70556915], [ 4.28648169, 2.79696731], [ 4.00217813, 2.88852238], [ 3.72231814, 2.97718081], [ 3.44870505, 3.06108881], [ 3.18310961, 3.13857246], [ 2.92666246, 3.20839957], [ 2.68000575, 3.26968441], [ 2.44342382, 3.32179983], [ 2.21693264, 3.36431422], [ 2.00034546, 3.39694375], [ 1.79331822, 3.41952112], [ 1.59535102, 3.4320146 ], [ 1.40578511, 3.43455695], [ 1.22408879, 3.42711031], [ 1.04965328, 3.40969902], [ 0.88172434, 3.38251449], [ 0.71940866, 3.34591804], [ 0.5619256 , 3.29981197], [ 0.40770804, 3.24544617], [ 0.25548391, 3.18343735], [ 0.10399591, 3.11449499], [ -0.04775527, 3.03962421], [ -0.20018042, 2.96056489], [ -0.35331487, 2.8785744 ], [ -0.52508461, 2.78543704], [ -0.69712913, 2.70164087], [ -0.86954503, 2.63519574], [ -1.04209219, 2.59240169], [ -1.21405958, 2.57909091], [ -1.38377538, 2.60407937], [ -1.55062259, 2.65787841], [ -1.71439338, 2.73536957], [ -1.87529031, 2.83169338], [ 
-2.03395581, 2.94135403], [ -2.19140835, 3.05794591], [ -2.33805266, 3.16255329], [ -2.48451156, 3.25896796], [ -2.63059232, 3.3426935 ], [ -2.77602606, 3.41054738], [ -2.92043777, 3.46019213], [ -3.06330851, 3.48968652], [ -3.20374051, 3.4957004 ], [ -3.34029351, 3.47415945], [ -3.47007084, 3.41768448], [ -3.59300929, 3.33351807], [ -3.70923634, 3.22566695], [ -3.81889624, 3.09672268], [ -3.92268025, 2.95011367], [ -4.02185397, 2.79022243], [ -4.1178521 , 2.62159416], [ -4.2161323 , 2.46452926], [ -4.31781117, 2.31796144], [ -4.42318035, 2.1831457 ], [ -4.5322453 , 2.06073257], [ -4.64501631, 1.95155647], [ -4.76140997, 1.85631353], [ -4.88126733, 1.77560052], [ -5.00435734, 1.70987926], [ -5.13042301, 1.65962035], [ -5.25938944, 1.62667518], [ -5.39072417, 1.61040595], [ -5.52382922, 1.60897593], [ -5.65849722, 1.62377451], [ -5.79444211, 1.65715234], [ -5.93107153, 1.71528106], [ -6.06746368, 1.79081424], [ -6.20317769, 1.88240466], [ -6.33788905, 1.98864946], [ -6.47136342, 2.10834012], [ -6.60346438, 2.24020588], [ -6.73412053, 2.3831744 ], [ -6.86333984, 2.53617923], [ -6.99113472, 2.69858842], [ -7.12064178, 2.87523527], [ -7.25444618, 3.04299389], [ -7.39243765, 3.20138631], [ -7.53467379, 3.34977808], [ -7.68110851, 3.48788772], [ -7.83182097, 3.61515988], [ -7.98682564, 3.73117341], [ -8.14612629, 3.83548086], [ -8.30966415, 3.92772849], [ -8.47726222, 4.00781042], [ -8.64863214, 4.07585135], [ -8.82344578, 4.13196495], [ -9.0013035 , 4.17638389], [ -9.18202552, 4.20814242], [ -9.36528871, 4.22605346], [ -9.55064213, 4.22773377], [ -9.73708983, 4.20936143], [ -9.92226831, 4.16594795], [-10.10086343, 4.09095817], [-10.26193798, 3.97820376], [-10.38552981, 3.82399663], [-10.47199275, 3.64300549], [-10.52918735, 3.44482848], [-10.5584774 , 3.23235077], [-10.56124879, 3.00791184], [-10.53904816, 2.77357185], [-10.49439476, 2.53143451], [-10.42938497, 2.2831314 ], [-10.34681037, 2.03021889], [-10.24958903, 1.77398657], [-10.14059192, 1.51544409], 
[-10.02259337, 1.25535648], [ -9.89818344, 0.99427857], [ -9.76968504, 0.73258465], [ -9.63916162, 0.47047985], [ -9.50822 , 0.20830133], [ -9.37711487, -0.05380021], [ -9.24579077, -0.31579848], [ -9.11420368, -0.5776724 ], [ -8.98238041, -0.83943435], [ -8.85033536, -1.10109084], [ -8.71756106, -1.36180276]])
37
37
0.483349
897
8,288
4.465998
0.510591
0.004493
0.004993
0.008987
0
0
0
0
0
0
0
0.715999
0.325772
8,288
224
37
37
0.000895
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
afa62b132b8f8365bf855ed6c6ef45dba43bacb2
12,718
py
Python
06 - Capstone Project/Week 3 Interactive Visual Analytics and Dashboard/interactive_visual_analytics_with_folium.py
marcoshsq/Stocks_Market_Data_Analysis
a48ab868d8693f226cc2e843836b6012be9642e5
[ "MIT" ]
null
null
null
06 - Capstone Project/Week 3 Interactive Visual Analytics and Dashboard/interactive_visual_analytics_with_folium.py
marcoshsq/Stocks_Market_Data_Analysis
a48ab868d8693f226cc2e843836b6012be9642e5
[ "MIT" ]
null
null
null
06 - Capstone Project/Week 3 Interactive Visual Analytics and Dashboard/interactive_visual_analytics_with_folium.py
marcoshsq/Stocks_Market_Data_Analysis
a48ab868d8693f226cc2e843836b6012be9642e5
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """Interactive Visual Analytics with Folium.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1SbB9ACtJSXfYbu9T17cbbknWO4izrnL2 # **Launch Sites Locations Analysis with Folium** Estimated time needed: **40** minutes The launch success rate may depend on many factors such as payload mass, orbit type, and so on. It may also depend on the location and proximities of a launch site, i.e., the initial position of rocket trajectories. Finding an optimal location for building a launch site certainly involves many factors and hopefully we could discover some of the factors by analyzing the existing launch site locations. In the previous exploratory data analysis labs, you have visualized the SpaceX launch dataset using `matplotlib` and `seaborn` and discovered some preliminary correlations between the launch site and success rates. In this lab, you will be performing more interactive visual analytics using `Folium`. ## Objectives This lab contains the following tasks: * **TASK 1:** Mark all launch sites on a map * **TASK 2:** Mark the success/failed launches for each site on the map * **TASK 3:** Calculate the distances between a launch site to its proximities After completed the above tasks, you should be able to find some geographical patterns about launch sites. 
Let's first import required Python packages for this lab: """ !pip3 install folium !pip3 install wget import folium import wget import pandas as pd # Import folium MarkerCluster plugin from folium.plugins import MarkerCluster # Import folium MousePosition plugin from folium.plugins import MousePosition # Import folium DivIcon plugin from folium.features import DivIcon """## Task 1: Mark all launch sites on a map First, let's try to add each site's location on a map using site's latitude and longitude coordinates The following dataset with the name `spacex_launch_geo.csv` is an augmented dataset with latitude and longitude added for each site. """ # Download and read the `spacex_launch_geo.csv` spacex_csv_file = wget.download('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DS0321EN-SkillsNetwork/datasets/spacex_launch_geo.csv') spacex_df=pd.read_csv(spacex_csv_file) """Now, you can take a look at what are the coordinates for each site. """ # Select relevant sub-columns: `Launch Site`, `Lat(Latitude)`, `Long(Longitude)`, `class` spacex_df = spacex_df[['Launch Site', 'Lat', 'Long', 'class']] launch_sites_df = spacex_df.groupby(['Launch Site'], as_index=False).first() launch_sites_df = launch_sites_df[['Launch Site', 'Lat', 'Long', 'class']] launch_sites_df launch_sites_df["Lat"][0] """Above coordinates are just plain numbers that can not give you any intuitive insights about where are those launch sites. If you are very good at geography, you can interpret those numbers directly in your mind. If not, that's fine too. Let's visualize those locations by pinning them on a map. We first need to create a folium `Map` object, with an initial center location to be NASA Johnson Space Center at Houston, Texas. 
""" # Start location is NASA Johnson Space Center nasa_coordinate = [29.559684888503615, -95.0830971930759] site_map = folium.Map(location=nasa_coordinate, zoom_start=10) """We could use `folium.Circle` to add a highlighted circle area with a text label on a specific coordinate. For example, """ # Create a blue circle at NASA Johnson Space Center's coordinate with a popup label showing its name circle = folium.Circle(nasa_coordinate, radius=1000, color='#d35400', fill=True).add_child(folium.Popup('NASA Johnson Space Center')) # Create a blue circle at NASA Johnson Space Center's coordinate with a icon showing its name marker = folium.map.Marker( nasa_coordinate, # Create an icon as a text label icon=DivIcon( icon_size=(20,20), icon_anchor=(0,0), html='<div style="font-size: 12; color:#d35400;"><b>%s</b></div>' % 'NASA JSC', ) ) site_map.add_child(circle) site_map.add_child(marker) """and you should find a small yellow circle near the city of Houston and you can zoom-in to see a larger circle. Now, let's add a circle for each launch site in data frame `launch_sites` *TODO:* Create and add `folium.Circle` and `folium.Marker` for each launch site on the site map """ # Initial the map site_map = folium.Map(location=nasa_coordinate, zoom_start=5) # For each launch site, add a Circle object based on its coordinate (Lat, Long) values. 
In addition, add Launch site name as a popup label for i in range (len(launch_sites_df.index)): coordinate = [launch_sites_df["Lat"][i], launch_sites_df["Long"][i]] circle = folium.Circle(coordinate, radius=100, color='#d35400', fill=True).add_child(folium.Popup(launch_sites_df["Launch Site"][i])) marker = folium.map.Marker( coordinate, icon=DivIcon( icon_size=(20,20), icon_anchor=(0,0), html='<div style="font-size: 12; color:#d35400;"><b>%s</b></div>' % launch_sites_df["Launch Site"][i], ) ) site_map.add_child(circle) site_map.add_child(marker) site_map """The generated map with marked launch sites should look similar to the following: <center> <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DS0321EN-SkillsNetwork/labs/module_3/images/launch_site_markers.png" /> </center> Now, you can explore the map by zoom-in/out the marked areas , and try to answer the following questions: * Are all launch sites in proximity to the Equator line? * Are all launch sites in very close proximity to the coast? Also please try to explain your findings. # Task 2: Mark the success/failed launches for each site on the map Next, let's try to enhance the map by adding the launch outcomes for each site, and see which sites have high success rates. Recall that data frame spacex_df has detailed launch records, and the `class` column indicates if this launch was successful or not """ spacex_df.tail(10) """Next, let's create markers for all launch records. If a launch was successful `(class=1)`, then we use a green marker and if a launch was failed, we use a red marker `(class=0)` Note that a launch only happens in one of the four launch sites, which means many launch records will have the exact same coordinate. Marker clusters can be a good way to simplify a map containing many markers having the same coordinate. 
Let's first create a `MarkerCluster` object """ marker_cluster = MarkerCluster() """*TODO:* Create a new column in `launch_sites` dataframe called `marker_color` to store the marker colors based on the `class` value """ launch_sites_df def func(item): if item == 1: return 'green' else: return 'red' launch_sites_df["marker_color"] = launch_sites_df["class"].apply(func) # Apply a function to check the value of `class` column # If class=1, marker_color value will be green # If class=0, marker_color value will be red launch_sites_df # Function to assign color to launch outcome def assign_marker_color(launch_outcome): if launch_outcome == 1: return 'green' else: return 'red' spacex_df['marker_color'] = spacex_df['class'].apply(assign_marker_color) spacex_df.tail(10) """*TODO:* For each launch result in `spacex_df` data frame, add a `folium.Marker` to `marker_cluster` """ # Function to assign color to launch outcome def assign_marker_color(launch_outcome): if launch_outcome == 1: return 'green' else: return 'red' spacex_df['marker_color'] = spacex_df['class'].apply(assign_marker_color) spacex_df.tail(10) """Your updated map may look like the following screenshots: <center> <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DS0321EN-SkillsNetwork/labs/module_3/images/launch_site_marker_cluster.png" /> </center> <center> <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DS0321EN-SkillsNetwork/labs/module_3/images/launch_site_marker_cluster_zoomed.png" /> </center> From the color-labeled markers in marker clusters, you should be able to easily identify which launch sites have relatively high success rates. # TASK 3: Calculate the distances between a launch site to its proximities Next, we need to explore and analyze the proximities of launch sites. Let's first add a `MousePosition` on the map to get coordinate for a mouse over a point on the map. 
As such, while you are exploring the map, you can easily find the coordinates of any points of interests (such as railway) """ # Add Mouse Position to get the coordinate (Lat, Long) for a mouse over on the map formatter = "function(num) {return L.Util.formatNum(num, 5);};" mouse_position = MousePosition( position='topright', separator=' Long: ', empty_string='NaN', lng_first=False, num_digits=20, prefix='Lat:', lat_formatter=formatter, lng_formatter=formatter, ) site_map.add_child(mouse_position) site_map """Now zoom in to a launch site and explore its proximity to see if you can easily find any railway, highway, coastline, etc. Move your mouse to these points and mark down their coordinates (shown on the top-left) in order to the distance to the launch site. You can calculate the distance between two points on the map based on their `Lat` and `Long` values using the following method: """ from math import sin, cos, sqrt, atan2, radians def calculate_distance(lat1, lon1, lat2, lon2): # approximate radius of earth in km R = 6373.0 lat1 = radians(lat1) lon1 = radians(lon1) lat2 = radians(lat2) lon2 = radians(lon2) dlon = lon2 - lon1 dlat = lat2 - lat1 a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2 c = 2 * atan2(sqrt(a), sqrt(1 - a)) distance = R * c return distance """*TODO:* Mark down a point on the closest railway using MousePosition and calculate the distance between the railway point to the launch site. 
""" # distance_railway = calculate_distance(lat1, lon1, lat2, lon2) lat1=34.632834 lon1=-120.610746 lat2=34.63494 lon2 = -120.62429 distance_railway = calculate_distance(lat1, lon1, lat2, lon2) """*TODO:* After obtained its coordinate, create a `folium.Marker` to show the distance """ # create and add a folium.Marker on your selected closest raiwaly point on the map # show the distance to the launch site using the icon property coordinate = [34.63494,-120.62429] icon_ = folium.DivIcon(html=str(round(distance_railway, 2)) + " km") marker = folium.map.Marker( coordinate, icon=icon_ ) marker.add_to(site_map) site_map """*TODO:* Draw a `PolyLine` between a launch site to the selected """ # Create a `folium.PolyLine` object using the railway point coordinate and launch site coordinate railway = [34.63494,-120.62429] launch = [34.632834, -120.610746] line = folium.PolyLine([railway, launch]) site_map.add_child(line) """Your updated map with distance line should look like the following screenshot: <center> <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DS0321EN-SkillsNetwork/labs/module_3/images/launch_site_marker_distance.png" /> </center> *TODO:* Similarly, you can draw a line betwee a launch site to its closest city, coastline, highway, etc. """ # Create a marker with distance to a closest city, coastline, highway, etc. # Draw a line between the marker to the launch site """After you plot distance lines to the proximities, you can answer the following questions easily: * Are launch sites in close proximity to railways? * Are launch sites in close proximity to highways? * Are launch sites in close proximity to coastline? * Do launch sites keep certain distance away from cities? Also please try to explain your findings. # Next Steps: Now you have discovered many interesting insights related to the launch sites' location using folium, in a very interactive way. 
Next, you will need to build a dashboard using Ploty Dash on detailed launch records. ## Authors [Yan Luo](https://www.linkedin.com/in/yan-luo-96288783/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01) ### Other Contributors Joseph Santarcangelo ## Change Log | Date (YYYY-MM-DD) | Version | Changed By | Change Description | | ----------------- | ------- | ---------- | --------------------------- | | 2021-05-26 | 1.0 | Yan | Created the initial version | # (づ。◕‿‿◕。)づ(づ。◕‿‿◕。)づ(づ。◕‿‿◕。)づ """
38.307229
403
0.737616
1,967
12,718
4.705643
0.244535
0.038029
0.019663
0.009723
0.292459
0.253781
0.22299
0.205272
0.18669
0.162273
0
0.029858
0.165199
12,718
332
404
38.307229
0.840256
0.113304
0
0.354545
1
0.027273
0.136897
0.021643
0
0
0
0.021084
0
0
null
null
0
0.063636
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
1
0
1
0
0
0
0
0
0
0
0
3
bb604c967967807eaaa76a088a22a28e4c20c7d1
303
py
Python
Project/App/views.py
cs-fullstack-2019-spring/django-fields-widgets-cw-rdunavant
043332540c44d3e2f705330700e7df0156d8f77a
[ "Apache-2.0" ]
null
null
null
Project/App/views.py
cs-fullstack-2019-spring/django-fields-widgets-cw-rdunavant
043332540c44d3e2f705330700e7df0156d8f77a
[ "Apache-2.0" ]
null
null
null
Project/App/views.py
cs-fullstack-2019-spring/django-fields-widgets-cw-rdunavant
043332540c44d3e2f705330700e7df0156d8f77a
[ "Apache-2.0" ]
null
null
null
from django.shortcuts import render from django.http import HttpResponse from .forms import ApplicationForm # Create your views here. def index(request): if(request.method=="POST"): form=ApplicationForm(request.POST) return render(request, "App/index.html", {"form": ApplicationForm()})
33.666667
73
0.745875
37
303
6.108108
0.621622
0.088496
0
0
0
0
0
0
0
0
0
0
0.138614
303
9
73
33.666667
0.8659
0.075908
0
0
0
0
0.078853
0
0
0
0
0
0
1
0.142857
false
0
0.428571
0
0.714286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
bb8463b9adff6d93277680b2c08f9fa20dffcf5a
373
py
Python
OOP String Conversion/best_practice.py
shaunryan/PythonReference
a4d1ba3e4f4279523463fdf7457effc2861d9144
[ "MIT" ]
null
null
null
OOP String Conversion/best_practice.py
shaunryan/PythonReference
a4d1ba3e4f4279523463fdf7457effc2861d9144
[ "MIT" ]
null
null
null
OOP String Conversion/best_practice.py
shaunryan/PythonReference
a4d1ba3e4f4279523463fdf7457effc2861d9144
[ "MIT" ]
null
null
null
#always put a repr in place to explicitly differentiate str from repr class Car: def __init__(self, color, mileage): self.color = color self.mileage = mileage def __repr__(self): return '{self.__class__.__name__}({self.color}, {self.mileage})'.format(self=self) def __str__(self): return 'a {self.color} car'.format(self=self)
31.083333
90
0.662198
50
373
4.54
0.42
0.15859
0.140969
0
0
0
0
0
0
0
0
0
0.217158
373
12
91
31.083333
0.777397
0.182306
0
0
0
0
0.239344
0.127869
0
0
0
0
0
1
0.375
false
0
0
0.25
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
bb8be9ab96b3163a06c8aa68b73eace11a381606
112
py
Python
sensehat/2_text_scroll_180.py
brookshire/pypicamcapper
13f6f4df7c65fc57cd2cacf4bbf9097b3b565fed
[ "MIT" ]
null
null
null
sensehat/2_text_scroll_180.py
brookshire/pypicamcapper
13f6f4df7c65fc57cd2cacf4bbf9097b3b565fed
[ "MIT" ]
null
null
null
sensehat/2_text_scroll_180.py
brookshire/pypicamcapper
13f6f4df7c65fc57cd2cacf4bbf9097b3b565fed
[ "MIT" ]
null
null
null
from sense_hat import SenseHat sense = SenseHat() sense.set_rotation(180) sense.show_message("IoT Sensor Pack")
22.4
37
0.803571
17
112
5.117647
0.764706
0.298851
0
0
0
0
0
0
0
0
0
0.029703
0.098214
112
4
38
28
0.831683
0
0
0
0
0
0.133929
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
bbb00575ca8ef3b74b0d15508c540dbe6fe62cdd
323
py
Python
linux/poolboy/portpool.py
petergyorgy/virtue
3b4dbfbdf9f5e121d9c9887d675fdd3065ecdd3b
[ "BSD-3-Clause" ]
null
null
null
linux/poolboy/portpool.py
petergyorgy/virtue
3b4dbfbdf9f5e121d9c9887d675fdd3065ecdd3b
[ "BSD-3-Clause" ]
null
null
null
linux/poolboy/portpool.py
petergyorgy/virtue
3b4dbfbdf9f5e121d9c9887d675fdd3065ecdd3b
[ "BSD-3-Clause" ]
null
null
null
from multiprocessing import Lock ppoollock = Lock() ppool = set(range(30000, 60000)) import random #todo exceptions def getport(): global ppool with ppoollock: p = random.sample(ppool,1)[0] ppool.remove(p) return p def putport(port): global ppool with ppoollock: try: ppool.add(port) except: pass
12.92
32
0.702786
45
323
5.044444
0.644444
0.096916
0.132159
0.211454
0
0
0
0
0
0
0
0.046154
0.195046
323
24
33
13.458333
0.826923
0.04644
0
0.235294
0
0
0
0
0
0
0
0.041667
0
1
0.117647
false
0.058824
0.117647
0
0.294118
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
1
0
0
0
0
0
3
bbc72fa390b5841c367593f9c5bc8aa6702d0949
1,424
py
Python
src/eodc_openeo_bindings/map_logic_processes.py
eodcgmbh/eodc-openeo-bindings
4e80eba036771a0c81359e1ac66862f1eead407b
[ "MIT" ]
null
null
null
src/eodc_openeo_bindings/map_logic_processes.py
eodcgmbh/eodc-openeo-bindings
4e80eba036771a0c81359e1ac66862f1eead407b
[ "MIT" ]
7
2020-02-18T17:12:31.000Z
2020-09-24T07:19:04.000Z
src/eodc_openeo_bindings/map_logic_processes.py
eodcgmbh/eodc-openeo-bindings
4e80eba036771a0c81359e1ac66862f1eead407b
[ "MIT" ]
null
null
null
""" """ from eodc_openeo_bindings.map_utils import map_default, set_extra_values, get_process_params def map_and(process): """ """ param_dict = {'y': 'bool'} return map_default(process, 'and_', 'reduce', param_dict) def map_or(process): """ """ param_dict = {'y': 'bool'} return map_default(process, 'or_', 'reduce', param_dict) def map_xor(process): """ """ param_dict = {'y': 'bool'} return map_default(process, 'xor_', 'reduce', param_dict) def map_not(process): """ """ param_dict = {'y': 'bool'} return map_default(process, 'not_', 'apply', param_dict) def map_if(process): """ """ param_dict = get_process_params(process['arguments'], {'ignore_nodata': 'bool'}) return map_default(process, 'if_', 'reduce', param_dict) def map_any(process): """ """ process_params1 = set_extra_values(process['arguments']) process_params2 = get_process_params(process['arguments'], {'ignore_nodata': 'bool'}) return map_default(process, 'any_', 'reduce', {**process_params1, **process_params2}) def map_all(process): """ """ process_params1 = set_extra_values(process['arguments']) process_params2 = get_process_params(process['arguments'], {'ignore_nodata': 'bool'}) return map_default(process, 'all_', 'reduce', {**process_params1, **process_params2})
18.25641
92
0.627107
163
1,424
5.110429
0.208589
0.108043
0.109244
0.168067
0.805522
0.623049
0.623049
0.623049
0.623049
0.411765
0
0.007061
0.204354
1,424
77
93
18.493506
0.728155
0
0
0.333333
0
0
0.137387
0
0
0
0
0
0
1
0.291667
false
0
0.041667
0
0.625
0
0
0
0
null
0
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
bbdfe0ab928a81b89c65ace66876be8bd2c6c25d
196
py
Python
math_in_programming/02_set/02-07.py
fumiyanll23/algo-method
d86ea1d399cbc5a1db0ae49d0c82e41042f661ab
[ "MIT" ]
null
null
null
math_in_programming/02_set/02-07.py
fumiyanll23/algo-method
d86ea1d399cbc5a1db0ae49d0c82e41042f661ab
[ "MIT" ]
null
null
null
math_in_programming/02_set/02-07.py
fumiyanll23/algo-method
d86ea1d399cbc5a1db0ae49d0c82e41042f661ab
[ "MIT" ]
null
null
null
# input N, X, Y = map(int, input().split()) As = [*map(int, input().split())] Bs = [*map(int, input().split())] # compute # output print(sum(i not in As and i not in Bs for i in range(1, N+1)))
19.6
62
0.581633
38
196
3
0.526316
0.157895
0.289474
0.421053
0
0
0
0
0
0
0
0.0125
0.183673
196
9
63
21.777778
0.7
0.102041
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.25
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
bbef00db03099fb25013647af3ffa454a5b5b14c
76
py
Python
Samples/Hosting/Scenarios/register_user_commands.py
TwoUnderscorez/dlr
60dfacb9852ec022dd076c152e286b116553c905
[ "Apache-2.0" ]
307
2015-01-03T19:57:57.000Z
2022-03-30T21:22:59.000Z
Samples/Hosting/Scenarios/register_user_commands.py
TwoUnderscorez/dlr
60dfacb9852ec022dd076c152e286b116553c905
[ "Apache-2.0" ]
72
2015-09-28T16:23:24.000Z
2022-03-14T00:47:04.000Z
Samples/Hosting/Scenarios/register_user_commands.py
TwoUnderscorez/dlr
60dfacb9852ec022dd076c152e286b116553c905
[ "Apache-2.0" ]
85
2015-01-03T19:58:01.000Z
2021-12-23T15:47:11.000Z
import App def foo(): print 'hello world' App.UserCommands['foo'] = foo
12.666667
29
0.671053
12
76
4.333333
0.75
0
0
0
0
0
0
0
0
0
0
0
0.171053
76
6
29
12.666667
0.809524
0
0
0
0
0
0.184211
0
0
0
0
0
0
0
null
null
0
0.25
null
null
0.25
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
a51fd86bc13a1292cd1349fcea48ed496bd9ce77
1,879
py
Python
src/python/utility/shared.py
andyjost/Sprite
7ecd6fc7d48d7f62da644e48c12c7b882e1a2929
[ "MIT" ]
1
2022-03-16T16:37:11.000Z
2022-03-16T16:37:11.000Z
src/python/utility/shared.py
andyjost/Sprite
7ecd6fc7d48d7f62da644e48c12c7b882e1a2929
[ "MIT" ]
null
null
null
src/python/utility/shared.py
andyjost/Sprite
7ecd6fc7d48d7f62da644e48c12c7b882e1a2929
[ "MIT" ]
null
null
null
from copy import copy import collections, six, sys class Shared(object): ''' Manages an object with copy-on-write semantics. To access the contained object for reading or writing, use the ``read`` and ``write`` methods, resp. If the object has multiple references, then writing will trigger a copy. ''' def __init__(self, ty, obj=None): self.ty = ty if obj is None: self.obj = ty() assert self.unique else: self.obj = obj def __copy__(self): return Shared(self.ty, self.obj) # sharing copy @property def read(self): return self.obj @property def write(self): if not self.unique: self.obj = self.ty(self.obj) # copy for write assert self.unique return self.obj @property def refcnt(self): return sys.getrefcount(self.obj) - 1 @property def unique(self): return self.refcnt == 1 def __str__(self): return str(self.read) def __repr__(self): return str(self) return 'Shared(refcnt=%s, %s)' % (self.refcnt, self.obj) # Read-only container methods, for convenience. def __contains__(self, key): return key in self.obj def __len__(self): return len(self.obj) def __getitem__(self, key): return self.obj[key] def __iter__(self): return iter(self.obj) class DefaultDict(collections.defaultdict): '''Like defaultdict but recursively copies values.''' def __copy__(self): return DefaultDict(self.default_factory, {k: copy(v) for k,v in six.iteritems(self)}) copy = __copy__ def compose(typefunction, ty): ''' Composes a type function and type. The type function is a type, such as Shared or DefaultDict, that takes another type as its only argument. The returned object has object-copy semantics. ''' def factory(obj=None): if obj is None: return typefunction(ty) else: return copy(obj) return factory
27.231884
89
0.678552
271
1,879
4.553506
0.328413
0.073744
0.031605
0.017828
0.038898
0
0
0
0
0
0
0.00136
0.217137
1,879
68
90
27.632353
0.837525
0.284726
0
0.27451
0
0
0.016104
0
0
0
0
0
0.039216
1
0.294118
false
0
0.039216
0.196078
0.705882
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
a5462f8f8d322138f5f9a856ce16895061ed0c71
140
py
Python
sopy/spoiler/forms.py
sopython/sopython-site
2fc6907125b30b307261e339de70ecbd10a9df63
[ "BSD-3-Clause" ]
81
2015-02-17T17:07:27.000Z
2021-08-15T17:46:13.000Z
sopy/spoiler/forms.py
sopython/sopython-site
2fc6907125b30b307261e339de70ecbd10a9df63
[ "BSD-3-Clause" ]
81
2015-02-17T17:04:16.000Z
2021-02-21T03:52:55.000Z
sopy/spoiler/forms.py
sopython/sopython-site
2fc6907125b30b307261e339de70ecbd10a9df63
[ "BSD-3-Clause" ]
29
2015-01-18T18:28:06.000Z
2022-02-05T03:11:04.000Z
from flask_wtf import FlaskForm from wtforms import TextAreaField class SpoilerForm(FlaskForm): message = TextAreaField(validators=[])
23.333333
42
0.807143
15
140
7.466667
0.733333
0
0
0
0
0
0
0
0
0
0
0
0.128571
140
5
43
28
0.918033
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
a558934a614f268538e3195a97e806bb9c97d908
71
py
Python
4/44.py
Seaoftrees/Session2019
86d61f190979ea9be205a3bbde1deac85de26997
[ "MIT" ]
null
null
null
4/44.py
Seaoftrees/Session2019
86d61f190979ea9be205a3bbde1deac85de26997
[ "MIT" ]
null
null
null
4/44.py
Seaoftrees/Session2019
86d61f190979ea9be205a3bbde1deac85de26997
[ "MIT" ]
null
null
null
i = 1 while i<10: print("お皿が" + str(i) + "枚....") print("1まいたりなぁ〜い")
17.75
34
0.507042
14
71
2.642857
0.785714
0
0
0
0
0
0
0
0
0
0
0.068966
0.183099
71
4
35
17.75
0.551724
0
0
0
0
0
0.236111
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
a57104d025c16f4d3a30a6a8cf626efd62c9d3ce
101
py
Python
math/TrianguleQuest2.py
silvioedu/HackerRank-Python-Practice
e31ebe49d431c0a23fed0cd67a6984e2b0b7a260
[ "MIT" ]
null
null
null
math/TrianguleQuest2.py
silvioedu/HackerRank-Python-Practice
e31ebe49d431c0a23fed0cd67a6984e2b0b7a260
[ "MIT" ]
null
null
null
math/TrianguleQuest2.py
silvioedu/HackerRank-Python-Practice
e31ebe49d431c0a23fed0cd67a6984e2b0b7a260
[ "MIT" ]
null
null
null
if __name__ == '__main__': for i in range(1,int(input())+1): print(int((10**i-1)/9)**2)
33.666667
38
0.524752
18
101
2.5
0.777778
0
0
0
0
0
0
0
0
0
0
0.088608
0.217822
101
3
39
33.666667
0.481013
0
0
0
0
0
0.08
0
0
0
0
0
0
1
0
false
0
0
0
0
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
a58d5627b9fb26ac8618b5f394aa9ea5de56b1cf
407
py
Python
setup.py
dkawalecc/virtual_copernicus_ng
7b25131f305b59e61e2fe3136de1259c02832109
[ "MIT" ]
null
null
null
setup.py
dkawalecc/virtual_copernicus_ng
7b25131f305b59e61e2fe3136de1259c02832109
[ "MIT" ]
null
null
null
setup.py
dkawalecc/virtual_copernicus_ng
7b25131f305b59e61e2fe3136de1259c02832109
[ "MIT" ]
null
null
null
from setuptools import setup setup( name='virtual_copernicus_ng', version='1.0', packages=['virtual_copernicus_ng'], package_data={'virtual_copernicus_ng': ['images_copernicus/*.png']}, include_package_data=True, install_requires=[ 'gpiozero == 1.5.1', 'numpy == 1.19.4', 'Pillow == 8.0.1', 'scipy == 1.5.4', 'sounddevice == 0.4.1', ], )
23.941176
72
0.58231
50
407
4.52
0.58
0.225664
0.252212
0
0
0
0
0
0
0
0
0.058252
0.240786
407
16
73
25.4375
0.673139
0
0
0
0
0
0.41769
0.211302
0
0
0
0
0
1
0
true
0
0.066667
0
0.066667
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
a59d5b93f8267b4aefa6532d5c7af6496a38d140
3,183
py
Python
src/streamer.py
jmwerner/Jitter
2c804e7233680491174e960917b672d74b2d79ac
[ "MIT" ]
null
null
null
src/streamer.py
jmwerner/Jitter
2c804e7233680491174e960917b672d74b2d79ac
[ "MIT" ]
null
null
null
src/streamer.py
jmwerner/Jitter
2c804e7233680491174e960917b672d74b2d79ac
[ "MIT" ]
null
null
null
# Python streaming script heavily based on streaming example # from twython documentation https://github.com/ryanmcgrath/twython from twython import TwythonStreamer import sys class MyGeoStreamer(TwythonStreamer): def set_iterator(self, maximum_tweets): self.iterator=1 self.max_tweets = maximum_tweets def on_success(self, data): if 'geo' in data: if data['lang'] == "en": if data['geo']: print data['text'].encode('utf-8'), "\t", data['created_at'], "\t", data['geo'], "\t" self.iterator = self.iterator + 1 if self.iterator > self.max_tweets: self.disconnect() def on_error(self, status_code, data): print status_code self.disconnect() class MyGeoFilterStreamer(TwythonStreamer): def set_iterator(self, maximum_tweets): self.iterator=1 self.max_tweets = maximum_tweets def on_success(self, data): if 'text' in data: if data['lang'] == "en": if data['geo']: print data['text'].encode('utf-8'), "\t", data['created_at'], "\t", data['geo'], "\t" self.iterator = self.iterator + 1 if self.iterator > self.max_tweets: self.disconnect() def on_error(self, status_code, data): print status_code self.disconnect() class MyStreamer(TwythonStreamer): def set_iterator(self, maximum_tweets): self.iterator=1 self.max_tweets = maximum_tweets def on_success(self, data): if 'text' in data: if data['lang'] == "en": print data['text'].encode('utf-8'), "\t", data['created_at'], "\t", data['geo'], "\t" self.iterator = self.iterator + 1 if self.iterator > self.max_tweets: self.disconnect() def on_error(self, status_code, data): print status_code self.disconnect() class MyStreamer_user(TwythonStreamer): def on_success(self, data): if 'text' in data: print "##########\nTweet: ", data['text'].encode('utf-8'), "\n" print "Tweeter: ", data['user']['name'], "(", data['user']['screen_name'], ")\n" print "Time: ", data['created_at'], "\n##########\n\n" def on_error(self, status_code, data): print status_code self.disconnect() if sys.argv[1] == "geo": geostream = MyGeoStreamer(sys.argv[2], sys.argv[3], 
sys.argv[4], sys.argv[5]) geostream.set_iterator(int(sys.argv[6])) #geostream.statuses.sample() #Worldwide random sampling geostream.statuses.filter(locations='-130,26,-60,50') #Rough USA coordinates #For future development elif sys.argv[1] == "geofilter": gfstream = MyGeoFilterStreamer(sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]) gfstream.set_iterator(int(sys.argv[6])) gfstream.statuses.filter(track=sys.argv[7]) #Worldwide elif sys.argv[1] == "stream": stream = MyStreamer(sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]) stream.set_iterator(int(sys.argv[6])) #stream.statuses.sample() #Worldwide random sampling stream.statuses.filter(locations='-130,26,-60,50') elif sys.argv[1] == "streamfilter": stream = MyStreamer(sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]) stream.set_iterator(int(sys.argv[6])) stream.statuses.filter(track=sys.argv[7]) #Worldwide elif sys.argv[1] == "user": stream = MyStreamer_user(sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]) stream.user()
30.314286
91
0.676092
462
3,183
4.5671
0.188312
0.102844
0.036967
0.026066
0.75545
0.71564
0.694787
0.664455
0.664455
0.649289
0
0.021643
0.143575
3,183
104
92
30.605769
0.752384
0.090795
0
0.662162
0
0
0.098162
0
0
0
0
0
0
0
null
null
0
0.027027
null
null
0.135135
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
a59ddea6f4bcfaf4a697db3e4a3af0dda7bf84a2
11,640
py
Python
experiments/ec2/plots/plot_stable.py
Henriknu/consensus-unstable-throughput
0c2963fd91097c4c43546eb1b3af17b9b72a7c08
[ "MIT" ]
1
2021-07-30T19:11:13.000Z
2021-07-30T19:11:13.000Z
experiments/ec2/plots/plot_stable.py
Henriknu/consensus-unstable-throughput
0c2963fd91097c4c43546eb1b3af17b9b72a7c08
[ "MIT" ]
null
null
null
experiments/ec2/plots/plot_stable.py
Henriknu/consensus-unstable-throughput
0c2963fd91097c4c43546eb1b3af17b9b72a7c08
[ "MIT" ]
null
null
null
import matplotlib.pyplot as plt from data import MEASUREMENTS_STABLE_WAN, MEASUREMENTS_BARE_LATENCY_LAN_ABFT, MEASUREMENTS_BARE_LATENCY_LAN_BEAT_BEAT, MEASUREMENTS_BARE_LATENCY_LAN_BEAT_HB, MEASUREMENTS_BARE_LATENCY_WAN_ABFT, MEASUREMENTS_BARE_LATENCY_WAN_DUMBO_HB, MEASUREMENTS_BARE_LATENCY_WAN_DUMBO_DUMBO1, MEASUREMENTS_BARE_LATENCY_WAN_DUMBO_DUMBO2, MEASUREMENTS_WAN_THROUGHPUT_ABFT, MEASUREMENTS_WAN_THROUGHPUT_HB, MEASUREMENTS_WAN_THROUGHPUT_DUMBO1, MEASUREMENTS_WAN_THROUGHPUT_DUMBO2, MEASUREMENTS_UNSTABLE_DELAY, MEASUREMENTS_UNSTABLE_PACKET_LOSS # STABLE_LAN STABLE_WAN UNSTABLE_DELAY UNSTABLE_PACKET_LOSS SHOULD_PLOT_FOR = "STABLE_WAN" def plot_related_latency_LAN(): labels = ['N=4', 'N=7', 'N=10', 'N=13', 'N=16'] colors = { "HB": "red", "BEAT0": "green", "ABFT": "blue" } x = [4, 7, 10, 13, 16] width = 0.5 f = plt.figure(1, figsize=(7, 5)) plt.clf() ax = f.add_subplot(1, 1, 1) for N, _, latency in MEASUREMENTS_BARE_LATENCY_LAN_BEAT_HB: bar = ax.bar(N - 1.25 * width, latency, width, label='Honeybadger', color=colors["HB"]) ax.bar_label(bar, fmt='%.2f', padding=3) for N, _, latency in MEASUREMENTS_BARE_LATENCY_LAN_BEAT_BEAT: bar = ax.bar(N, latency, width, label='BEAT0', color=colors["BEAT0"]) ax.bar_label(bar, fmt='%.2f', padding=3) for N, _, latency in MEASUREMENTS_BARE_LATENCY_LAN_ABFT[0:2]: bar = ax.bar(N + 1.25 * width, latency, width, label='ABFT', color=colors["ABFT"]) ax.bar_label(bar, fmt='%.2f', padding=3) for N, _, latency in MEASUREMENTS_BARE_LATENCY_LAN_ABFT[2:]: bar = ax.bar(N, latency, width, label='ABFT', color=colors["ABFT"]) ax.bar_label(bar, fmt='%.2f', padding=3) # Handle duplicate legends handles, _labels = plt.gca().get_legend_handles_labels() by_label = dict(zip(_labels, handles)) plt.legend(by_label.values(), by_label.keys(), loc='best') plt.ylabel('Latency (Seconds) ') plt.xlabel('Number of nodes') plt.xticks(x, labels) plt.ylim([0, 2]) plt.tight_layout() plt.savefig(f'pdfs/plot_latency_LAN.pdf', format='pdf', dpi=1000) def 
plot_related_latency_WAN(): labels = ['N=32', 'N=64', 'N=100'] colors = { "HB": "red", "Dumbo1": "green", "Dumbo2": "purple", "ABFT": "blue" } x = [32, 64, 100] width = 2 f = plt.figure(1, figsize=(7, 5)) plt.clf() ax = f.add_subplot(1, 1, 1) for N, _, latency in MEASUREMENTS_BARE_LATENCY_WAN_DUMBO_HB: bar = ax.bar(N - 2.5 * width, latency, width, label='Honeybadger', color=colors["HB"]) ax.bar_label(bar, padding=3) for N, _, latency in MEASUREMENTS_BARE_LATENCY_WAN_DUMBO_DUMBO1: bar = ax.bar(N - 0.75 * width, latency, width, label='Dumbo1', color=colors["Dumbo1"]) ax.bar_label(bar, padding=3) for N, _, latency in MEASUREMENTS_BARE_LATENCY_WAN_DUMBO_DUMBO2: bar = ax.bar(N + 0.75 * width, latency, width, label='Dumbo2', color=colors["Dumbo2"]) ax.bar_label(bar, padding=3) for N, _, latency in MEASUREMENTS_BARE_LATENCY_WAN_ABFT: bar = ax.bar(N + 2.5 * width, latency, width, label='ABFT', color=colors["ABFT"]) ax.bar_label(bar, fmt='%.2f', padding=3) # Handle duplicate legends handles, _labels = plt.gca().get_legend_handles_labels() by_label = dict(zip(_labels, handles)) plt.legend(by_label.values(), by_label.keys(), loc='best') ax.set_yscale("log") plt.ylim([1, 10**2.9]) plt.ylabel('Latency (Seconds) ') plt.xlabel('Number of nodes') plt.xticks(x, labels) plt.tight_layout() plt.savefig(f'pdfs/plot_latency_WAN.pdf', format='pdf', dpi=1000) def plot_related_throughput_WAN(): labels = ['N=8', 'N=32', 'N=64', 'N=100'] colors = { "HB": "red", "Dumbo1": "green", "Dumbo2": "purple", "ABFT": "blue" } x = [8, 32, 64, 100] width = 4 f = plt.figure(1, figsize=(7, 5)) plt.clf() ax = f.add_subplot(1, 1, 1) for N, _, latency in MEASUREMENTS_WAN_THROUGHPUT_HB: bar = ax.bar(N - 2.5 * width, latency, width, label='Honeybadger', color=colors["HB"]) ax.bar_label(bar, padding=3) for N, _, latency in MEASUREMENTS_WAN_THROUGHPUT_DUMBO1: bar = ax.bar(N - 0.75 * width, latency, width, label='Dumbo1', color=colors["Dumbo1"]) ax.bar_label(bar, padding=3) for N, _, latency in 
MEASUREMENTS_WAN_THROUGHPUT_DUMBO2: bar = ax.bar(N + 0.75 * width, latency, width, label='Dumbo2', color=colors["Dumbo2"]) ax.bar_label(bar, padding=3) for N, _, latency in [MEASUREMENTS_WAN_THROUGHPUT_ABFT[0]]: bar = ax.bar(N, latency, width, label='ABFT', color=colors["ABFT"]) ax.bar_label(bar, padding=3) for N, _, latency in MEASUREMENTS_WAN_THROUGHPUT_ABFT[1:]: bar = ax.bar(N + 2.5 * width, latency, width, label='ABFT', color=colors["ABFT"]) ax.bar_label(bar, padding=3) # Handle duplicate legends handles, _labels = plt.gca().get_legend_handles_labels() by_label = dict(zip(_labels, handles)) plt.legend(by_label.values(), by_label.keys(), loc='best') plt.ylim([0, 50000]) plt.ylabel('Throughput (Tx per second)') plt.xlabel('Number of nodes') plt.xticks(x, labels) plt.tight_layout() plt.savefig(f'pdfs/plot_throughput_WAN.pdf', format='pdf', dpi=1000) def plot_stable(): plot_latency(MEASUREMENTS_STABLE_WAN, "STABLE_WAN") plot_throughput(MEASUREMENTS_STABLE_WAN, "STABLE_WAN") plot_v_latency_throughput(MEASUREMENTS_STABLE_WAN, "STABLE_WAN") plot_cpu(MEASUREMENTS_STABLE_WAN, "STABLE_WAN") plot_mem(MEASUREMENTS_STABLE_WAN, "STABLE_WAN") plot_net(MEASUREMENTS_STABLE_WAN, "STABLE_WAN") def plot_latency(data=None, suffix=None): if not data: data = get_data() if not suffix: suffix = SHOULD_PLOT_FOR f = plt.figure(1, figsize=(7, 5)) plt.clf() ax = f.add_subplot(1, 1, 1) for N, t, entries, _ in data: batch = [] latencies = [] for ToverN, latency, _, _, _ in entries: batch.append(ToverN * N) latencies.append(latency) ax.plot(batch, latencies, label='%d/%d' % (N, t)) ax.set_xscale("log") ax.set_yscale("log") plt.ylim([10**0.2, 10**2.6]) plt.xlim([10**2.2, 3 * 10**6]) plt.legend(title='Nodes / Tolerance', loc='best') plt.ylabel('Latency (Seconds) ') plt.xlabel('Batch size (Number of Tx) in log scale') plt.tight_layout() plt.savefig(f'pdfs/plot_latency_{suffix}.pdf', format='pdf', dpi=1000) def plot_throughput(data=None, suffix=None): if not data: data = get_data() if not suffix: 
suffix = SHOULD_PLOT_FOR f = plt.figure(1, figsize=(7, 5)) plt.clf() ax = f.add_subplot(1, 1, 1) for N, t, entries, style in data: batch = [] throughput = [] for ToverN, latency, _, _, _ in entries: batch.append(N*ToverN) throughput.append(ToverN*(N-t) / latency) ax.plot(batch, throughput, style, label='%d/%d' % (N, t)) print(N, throughput) ax.set_xscale("log") ax.set_yscale("log") # plt.ylim([10**2.1, 10**4.8]) # plt.xlim([10**3.8, 10**6.4]) plt.legend(title='Nodes / Tolerance', loc='best') plt.ylabel('Throughput (Tx per second) in log scale') plt.xlabel('Batch size (Number of Tx) in log scale') plt.savefig(f'pdfs/plot_throughput_{suffix}.pdf', format='pdf', dpi=1000) def plot_v_latency_throughput(data=None, suffix=None): if not data: data = get_data() if not suffix: suffix = SHOULD_PLOT_FOR f = plt.figure(1, figsize=(7, 5)) plt.clf() ax = f.add_subplot(1, 1, 1) for N, t, entries, style in data: throughput = [] latencies = [] for ToverN, latency, _, _, _ in entries: throughput.append(ToverN*(N-t) / latency) latencies.append(latency) ax.plot(throughput, latencies, style, label='%d/%d' % (N, t)) ax.set_xscale("log") ax.set_yscale("log") plt.legend(title='Nodes / Tolerance', loc='best') plt.ylabel('Latency (Seconds) in log scale') plt.xlabel('Throughput (Tx per second) in log scale') plt.tight_layout() plt.savefig( f'pdfs/plot_latency_throughput_{suffix}.pdf', format='pdf', dpi=1000) def plot_cpu(data=None, suffix=None): if not data: data = get_data() if not suffix: suffix = SHOULD_PLOT_FOR f = plt.figure(1, figsize=(7, 5)) plt.clf() ax = f.add_subplot(1, 1, 1) for N, t, entries, style in data: batches = [] cpu_usage = [] for ToverN, _, cpu, _, _ in entries: batches.append(N*ToverN) cpu_usage.append(cpu) ax.plot(batches, cpu_usage, style, label='%d/%d' % (N, t)) ax.set_xscale("log") plt.ylim([0, 100]) plt.legend(title='Nodes / Tolerance', loc='best') plt.ylabel('CPU utilization (Percentage)') plt.xlabel('Batch size (Number of Tx) in log scale') plt.tight_layout() 
plt.savefig( f'pdfs/plot_res_cpu_{suffix}.pdf', format='pdf', dpi=1000) def plot_mem(data=None, suffix=None): if not data: data = get_data() if not suffix: suffix = SHOULD_PLOT_FOR f = plt.figure(1, figsize=(7, 5)) plt.clf() ax = f.add_subplot(1, 1, 1) for N, t, entries, style in data: batches = [] mem_usage = [] for ToverN, _, _, mem, _ in entries: batches.append(N*ToverN) mem_usage.append(mem) ax.plot(batches, mem_usage, style, label='%d/%d' % (N, t)) ax.set_xscale("log") plt.ylim([10**6, 4 * 10**9]) plt.legend(title='Nodes / Tolerance', loc='best') plt.ylabel('Memory utilization (Bytes)') plt.xlabel('Throughput (Tx per second) in log scale') plt.tight_layout() plt.savefig( f'pdfs/plot_res_mem_{suffix}.pdf', format='pdf', dpi=1000) def plot_net(data=None, suffix=None): if not data: data = get_data() if not suffix: suffix = SHOULD_PLOT_FOR f = plt.figure(1, figsize=(7, 5)) plt.clf() ax = f.add_subplot(1, 1, 1) for N, t, entries, style in data: batches = [] net_usage = [] for ToverN, _, _, _, net in entries: batches.append(ToverN*(N-t)) net_usage.append(net) ax.plot(batches, net_usage, style, label='%d/%d' % (N, t)) ax.set_xscale("log") ax.set_yscale("log") plt.legend(title='Nodes / Tolerance', loc='best') plt.ylabel('Outbound network traffic (Bytes)') plt.xlabel('Throughput (Tx per second) in log scale') plt.tight_layout() plt.savefig( f'pdfs/plot_res_net_{suffix}.pdf', format='pdf', dpi=1000) def get_data(): if SHOULD_PLOT_FOR == "STABLE_LAN": return MEASUREMENTS_STABLE_LAN elif SHOULD_PLOT_FOR == "STABLE_WAN": return MEASUREMENTS_STABLE_WAN elif SHOULD_PLOT_FOR == "UNSTABLE_DELAY": return MEASUREMENTS_UNSTABLE_DELAY elif SHOULD_PLOT_FOR == "UNSTABLE_PACKET_LOSS": return MEASUREMENTS_UNSTABLE_PACKET_LOSS else: print("Data collection not found") None if __name__ == '__main__': from IPython import embed embed()
31.544715
523
0.609536
1,629
11,640
4.151013
0.092695
0.019225
0.05102
0.024993
0.818693
0.780834
0.701715
0.668146
0.634871
0.593168
0
0.030524
0.245704
11,640
368
524
31.630435
0.739636
0.016323
0
0.618375
0
0
0.128201
0.02377
0
0
0
0
0
1
0.038869
false
0
0.010601
0
0.063604
0.007067
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
a5baf0d3d99139b79248186991d3df3e2e40f265
7,176
py
Python
src/stem.py
imapsingh/stemmer
75c572a695f3ced4dfba7a3faf3882a879c72906
[ "MIT" ]
null
null
null
src/stem.py
imapsingh/stemmer
75c572a695f3ced4dfba7a3faf3882a879c72906
[ "MIT" ]
null
null
null
src/stem.py
imapsingh/stemmer
75c572a695f3ced4dfba7a3faf3882a879c72906
[ "MIT" ]
null
null
null
class PorterStemmer: def __init__(self): self.vowels = ('a', 'e', 'i', 'o', 'u') def is_consonant(self, s: str, i: int): return not self.is_vowel(s, i) def is_vowel(self, s: str, i: int): if s[i].lower() in self.vowels: return True elif s[i].lower() == 'y': if self.is_consonant(s, i-1): return True else: return False def find_m(self, s): i = 0 m = 0 while i < len(s): if self.is_vowel(s, i) and self.is_consonant(s, i+1): m += 1 i += 2 else: i += 1 return m def contains_vowel(self, s): for v in self.vowels: if v in s: return True for i in range(len(s)): if s[i] == 'y': if self.is_vowel(s, i): return True return False def step1a(self, s): if s[-4:] == 'sses': s = s[:-4] + 'ss' elif s[-3:] == "ies": s = s[:-3] + "i" elif s[-2:] == "ss": pass elif s[-1] == "s": s = s[:-1] return s def step1b(self, s): if s[-3:] == 'eed': m = self.find_m(s[:-3]) if m > 0: s = s[:-1] elif s[-2:] == 'ed': if self.contains_vowel(s[:-2]): s = s[:-2] elif s[-3:] == 'ing': if self.contains_vowel(s[:-3]): s = s[:-3] return s def step2(self, s): if s[-7:] == 'ational': m = self.find_m(s[:-7]) if m > 0: s = s[:-5]+"e" elif s[-6:] == 'tional': m = self.find_m(s[:-6]) if m > 0: s = s[:-2] elif s[-4:] == 'enci': m = self.find_m(s[:-4]) if m > 0: s = s[:-1]+"e" elif s[-4:] == 'anci': m = self.find_m(s[:-4]) if m > 0: s = s[:-1]+"e" elif s[-4:] == 'izer': m = self.find_m(s[:-4]) if m > 0: s = s[:-1] elif s[-4:] == 'abli': m = self.find_m(s[:-4]) if m > 0: s = s[:-1]+"e" elif s[-4:] == 'alli': m = self.find_m(s[:-1]) if m > 0: s = s[:-2] elif s[-5:] == 'entli': m = self.find_m(s[:-5]) if m > 0: s = s[:-2] elif s[-3:] == 'eli': m = self.find_m(s[:-3]) if m > 0: s = s[:-2] elif s[-5:] == 'ousli': m = self.find_m(s[:-5]) if m > 0: s = s[:-2] elif s[-7:] == 'ization': m = self.find_m(s[:-7]) if m > 0: s = s[:-5]+"e" elif s[-5:] == 'ation': m = self.find_m(s[:-5]) if m > 0: s = s[:-3]+"e" elif s[-4:] == 'ator': m = self.find_m(s[:-4]) if m > 0: s = s[:-2]+"e" elif s[-5:] == 'alism': m = self.find_m(s[:-5]) 
if m > 0: s = s[:-3] elif s[-7:] == 'iveness': m = self.find_m(s[:-7]) if m > 0: s = s[:-4] elif s[-7:] == 'fulness': m = self.find_m(s[:-7]) if m > 0: s = s[:-4] elif s[-7:] == 'ousness': m = self.find_m(s[:-7]) if m > 0: s = s[:-4] elif s[-5:] == 'aliti': m = self.find_m(s[:-5]) if m > 0: s = s[:-3] elif s[-5:] == 'iviti': m = self.find_m(s[:-5]) if m > 0: s = s[:-3]+"e" elif s[-6:] == 'bliti': m = self.find_m(s[:-6]) if m > 0: s = s[:-3]+"e" return s def step3(self, s): if s[-5:] == 'icate': m = self.find_m(s[:-5]) if m > 0: s = s[:-3] elif s[-5:] == 'ative': m = self.find_m(s[:-5]) if m > 0: s = s[:-5] elif s[-5:] == 'alize': m = self.find_m(s[:-5]) if m > 0: s = s[:-3] elif s[-5:] == 'iciti': m = self.find_m(s[:-5]) if m > 0: s = s[:-3] elif s[-4:] == 'ical': m = self.find_m(s[:-4]) if m > 0: s = s[:-2] elif s[-3:] == 'ful': m = self.find_m(s[:-3]) if m > 0: s = s[:-3] elif s[-4:] == 'ness': m = self.find_m(s[:-4]) if m > 0: s = s[:-4] def step4(self, s): if s[-2:] == 'al': m = self.find_m(s[:-2]) if m > 1: s = s[:-2] elif s[-4:] == 'ance': m = self.find_m(s[:-4]) if m > 1: s = s[:-4] elif s[-4:] == 'ence': m = self.find_m(s[:-4]) if m > 1: s = s[:-4] elif s[-2:] == 'er': m = self.find_m(s[:-2]) if m > 1: s = s[:-2] elif s[-2:] == 'ic': m = self.find_m(s[:-2]) if m > 1: s = s[:-2] elif s[-4:] == 'able': m = self.find_m(s[:-4]) if m > 1: s = s[:-4] elif s[-4:] == 'ible': m = self.find_m(s[:-4]) if m > 1: s = s[:-4] elif s[-4:] == 'ant': m = self.find_m(s[:-3]) if m > 1: s = s[:-3] elif s[-5:] == 'ement': m = self.find_m(s[:-5]) if m > 1: s = s[:-5] elif s[-4:] == 'ment': m = self.find_m(s[:-4]) if m > 1: s = s[:-4] elif s[-3:] == 'ent': m = self.find_m(s[:-3]) if m > 1: s = s[:-3] elif s[-3:] == 'ion': m = self.find_m(s[:-3]) if m > 1 and (s[-4]== "s" or s[-4]=="t"): s = s[:-3] elif s[-2:] == 'ou': m = self.find_m(s[:-2]) if m > 1: s = s[:-2] elif s[-3:] == 'ism': m = self.find_m(s[:-3]) if m > 1: s = s[:-3] elif s[-3:] == 'ate': m = self.find_m(s[:-3]) if m > 
1: s = s[:-3] elif s[-3:] == 'iti': m = self.find_m(s[:-3]) if m > 1: s = s[:-3] elif s[-3:] == 'ous': m = self.find_m(s[:-3]) if m > 1: s = s[:-3] elif s[-3:] == 'ive': m = self.find_m(s[:-3]) if m > 1: s = s[:-3] elif s[-3:] == 'ize': m = self.find_m(s[:-3]) if m > 1: s = s[:-3] def __call__(self, s: str): s = self.step1a(s) s = self.step1b(s) return s
27.181818
65
0.29097
996
7,176
2.03012
0.105422
0.053412
0.209199
0.232443
0.640455
0.586053
0.546489
0.5455
0.529674
0.504451
0
0.061876
0.511288
7,176
263
66
27.285171
0.514685
0
0
0.621951
0
0
0.032474
0
0
0
0
0
0
1
0.044715
false
0.004065
0
0.004065
0.097561
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
36feafbde0cb1410e0c30ad0dd2ecb48a8ee4f71
243
py
Python
slbo/policies/__init__.py
LinZichuan/AdMRL
50a22d4d480e99125cc91cc65dfcc0df4a883ac6
[ "MIT" ]
27
2020-06-17T11:40:17.000Z
2021-11-16T07:39:33.000Z
slbo/policies/__init__.py
LinZichuan/AdMRL
50a22d4d480e99125cc91cc65dfcc0df4a883ac6
[ "MIT" ]
3
2020-06-19T07:01:48.000Z
2020-06-19T07:14:57.000Z
slbo/policies/__init__.py
LinZichuan/AdMRL
50a22d4d480e99125cc91cc65dfcc0df4a883ac6
[ "MIT" ]
5
2020-11-19T01:11:24.000Z
2021-12-24T09:03:56.000Z
import abc from typing import Union import lunzi.nn as nn class BasePolicy(abc.ABC): @abc.abstractmethod def get_actions(self, states): pass BaseNNPolicy = Union[BasePolicy, nn.Module] # should be Intersection, see PEP544
18.692308
81
0.728395
33
243
5.333333
0.727273
0.068182
0
0
0
0
0
0
0
0
0
0.015385
0.197531
243
12
82
20.25
0.887179
0.139918
0
0
0
0
0
0
0
0
0
0
0
1
0.125
false
0.125
0.375
0
0.625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
3
3c015adc1dabb48fb294a3beaa89a23254967f4b
92
py
Python
tests/views/test_app.py
DanielGrams/gsevp
e94034f7b64de76f38754b56455e83092378261f
[ "MIT" ]
1
2021-06-01T14:49:18.000Z
2021-06-01T14:49:18.000Z
tests/views/test_app.py
DanielGrams/gsevp
e94034f7b64de76f38754b56455e83092378261f
[ "MIT" ]
286
2020-12-04T14:13:00.000Z
2022-03-09T19:05:16.000Z
tests/views/test_app.py
DanielGrams/gsevpt
a92f71694388e227e65ed1b24446246ee688d00e
[ "MIT" ]
null
null
null
def test_index(client): response = client.get("/") assert b"oveda" in response.data
23
36
0.673913
13
92
4.692308
0.846154
0
0
0
0
0
0
0
0
0
0
0
0.184783
92
3
37
30.666667
0.813333
0
0
0
0
0
0.065217
0
0
0
0
0
0.333333
1
0.333333
false
0
0
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
3c01b3361aadea444e055fa983f43d2e63de2259
102
py
Python
src/settings.py
sqoshi/masked-face-recognizer
40f66d776b203a1875200647b62d623f696d88a4
[ "MIT" ]
null
null
null
src/settings.py
sqoshi/masked-face-recognizer
40f66d776b203a1875200647b62d623f696d88a4
[ "MIT" ]
11
2021-10-20T20:01:02.000Z
2021-12-19T19:56:42.000Z
src/settings.py
sqoshi/masked-face-recognizer
40f66d776b203a1875200647b62d623f696d88a4
[ "MIT" ]
null
null
null
import os from pathlib import Path output = Path(os.path.abspath(__file__)).parent.parent / "output"
20.4
65
0.764706
15
102
4.933333
0.6
0
0
0
0
0
0
0
0
0
0
0
0.117647
102
4
66
25.5
0.822222
0
0
0
0
0
0.058824
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
3c1d75cd4074bd987f285324e6067f2ccfd5cf02
938
py
Python
mayi/test_case/page_obj/landlord_activity_page.py
18701016443/mayi
192c70c49a8e9e072b9d0d0136f02c653c589410
[ "MIT" ]
null
null
null
mayi/test_case/page_obj/landlord_activity_page.py
18701016443/mayi
192c70c49a8e9e072b9d0d0136f02c653c589410
[ "MIT" ]
null
null
null
mayi/test_case/page_obj/landlord_activity_page.py
18701016443/mayi
192c70c49a8e9e072b9d0d0136f02c653c589410
[ "MIT" ]
null
null
null
#!/usr/bin/env python # encoding: utf-8 """ @version: python.3.6 @author: zhangjiaheng @software: PyCharm @time: 2017/9/21 20:58 """ from .base import Pyse class LandlordActivity(Pyse): '''活动设置''' # url = "/" #活动页面文案 def text(self): text= self.get_text("xpath=>/html/body/div[14]/div[5]/div/div[1]/div[1]/p") return text #活动好处 def active_good(self): self.click("class=>active_good") #活动好处弹窗关闭按钮 def img_close(self): self.click("xpath=>/html/body/div[14]/div[5]/div/div[1]/div[4]/div[2]/img") #活动规则 def regular_desc(self): self.click("class=>regular_desc") #活动规则弹窗文案 def regular_desc_text(self): text = self.get_text("xpath=>/html/body/div[14]/div[5]/div/div[1]/div[3]/div[2]") return text #活动规则弹窗关闭按钮 def regular_desc_close(self): self.click("xpath=>/html/body/div[14]/div[5]/div/div[1]/div[3]/div[2]/img")
21.318182
89
0.604478
144
938
3.861111
0.388889
0.035971
0.093525
0.115108
0.395683
0.395683
0.395683
0.395683
0.395683
0.395683
0
0.049399
0.201493
938
43
90
21.813953
0.692924
0.189765
0
0.125
0
0.25
0.36413
0.313859
0
0
0
0
0
1
0.375
false
0
0.0625
0
0.625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
3
3c32bb68816a61040a9ed789f1931ba1cfec42a3
13,439
py
Python
kubernetes/client/models/com_coreos_monitoring_v1_prometheus_spec_alerting_alertmanagers.py
mariusgheorghies/python
68ac7e168963d8b5a81dc493b1973d29e903a15b
[ "Apache-2.0" ]
null
null
null
kubernetes/client/models/com_coreos_monitoring_v1_prometheus_spec_alerting_alertmanagers.py
mariusgheorghies/python
68ac7e168963d8b5a81dc493b1973d29e903a15b
[ "Apache-2.0" ]
null
null
null
kubernetes/client/models/com_coreos_monitoring_v1_prometheus_spec_alerting_alertmanagers.py
mariusgheorghies/python
68ac7e168963d8b5a81dc493b1973d29e903a15b
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ Kubernetes No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 The version of the OpenAPI document: v1.20.7 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from kubernetes.client.configuration import Configuration class ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'authorization': 'ComCoreosMonitoringV1PrometheusSpecAlertingAuthorization', 'bearer_token_file': 'str', 'name': 'str', 'namespace': 'str', 'path_prefix': 'str', 'port': 'object', 'scheme': 'str', 'timeout': 'str', 'tls_config': 'ComCoreosMonitoringV1PrometheusSpecAlertingTlsConfig' } attribute_map = { 'api_version': 'apiVersion', 'authorization': 'authorization', 'bearer_token_file': 'bearerTokenFile', 'name': 'name', 'namespace': 'namespace', 'path_prefix': 'pathPrefix', 'port': 'port', 'scheme': 'scheme', 'timeout': 'timeout', 'tls_config': 'tlsConfig' } def __init__(self, api_version=None, authorization=None, bearer_token_file=None, name=None, namespace=None, path_prefix=None, port=None, scheme=None, timeout=None, tls_config=None, local_vars_configuration=None): # noqa: E501 """ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._authorization = None self._bearer_token_file = None self._name = None self._namespace = None self._path_prefix = None self._port = None self._scheme = 
None self._timeout = None self._tls_config = None self.discriminator = None if api_version is not None: self.api_version = api_version if authorization is not None: self.authorization = authorization if bearer_token_file is not None: self.bearer_token_file = bearer_token_file self.name = name self.namespace = namespace if path_prefix is not None: self.path_prefix = path_prefix self.port = port if scheme is not None: self.scheme = scheme if timeout is not None: self.timeout = timeout if tls_config is not None: self.tls_config = tls_config @property def api_version(self): """Gets the api_version of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 Version of the Alertmanager API that Prometheus uses to send alerts. It can be \"v1\" or \"v2\". # noqa: E501 :return: The api_version of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """Sets the api_version of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. Version of the Alertmanager API that Prometheus uses to send alerts. It can be \"v1\" or \"v2\". # noqa: E501 :param api_version: The api_version of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :type: str """ self._api_version = api_version @property def authorization(self): """Gets the authorization of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :return: The authorization of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :rtype: ComCoreosMonitoringV1PrometheusSpecAlertingAuthorization """ return self._authorization @authorization.setter def authorization(self, authorization): """Sets the authorization of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. :param authorization: The authorization of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. 
# noqa: E501 :type: ComCoreosMonitoringV1PrometheusSpecAlertingAuthorization """ self._authorization = authorization @property def bearer_token_file(self): """Gets the bearer_token_file of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 BearerTokenFile to read from filesystem to use when authenticating to Alertmanager. # noqa: E501 :return: The bearer_token_file of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :rtype: str """ return self._bearer_token_file @bearer_token_file.setter def bearer_token_file(self, bearer_token_file): """Sets the bearer_token_file of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. BearerTokenFile to read from filesystem to use when authenticating to Alertmanager. # noqa: E501 :param bearer_token_file: The bearer_token_file of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :type: str """ self._bearer_token_file = bearer_token_file @property def name(self): """Gets the name of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 Name of Endpoints object in Namespace. # noqa: E501 :return: The name of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. Name of Endpoints object in Namespace. # noqa: E501 :param name: The name of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501 raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501 self._name = name @property def namespace(self): """Gets the namespace of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 Namespace of Endpoints object. 
# noqa: E501 :return: The namespace of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :rtype: str """ return self._namespace @namespace.setter def namespace(self, namespace): """Sets the namespace of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. Namespace of Endpoints object. # noqa: E501 :param namespace: The namespace of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and namespace is None: # noqa: E501 raise ValueError("Invalid value for `namespace`, must not be `None`") # noqa: E501 self._namespace = namespace @property def path_prefix(self): """Gets the path_prefix of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 Prefix for the HTTP path alerts are pushed to. # noqa: E501 :return: The path_prefix of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :rtype: str """ return self._path_prefix @path_prefix.setter def path_prefix(self, path_prefix): """Sets the path_prefix of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. Prefix for the HTTP path alerts are pushed to. # noqa: E501 :param path_prefix: The path_prefix of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :type: str """ self._path_prefix = path_prefix @property def port(self): """Gets the port of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 Port the Alertmanager API is exposed on. # noqa: E501 :return: The port of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :rtype: object """ return self._port @port.setter def port(self, port): """Sets the port of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. Port the Alertmanager API is exposed on. # noqa: E501 :param port: The port of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. 
# noqa: E501 :type: object """ if self.local_vars_configuration.client_side_validation and port is None: # noqa: E501 raise ValueError("Invalid value for `port`, must not be `None`") # noqa: E501 self._port = port @property def scheme(self): """Gets the scheme of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 Scheme to use when firing alerts. # noqa: E501 :return: The scheme of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :rtype: str """ return self._scheme @scheme.setter def scheme(self, scheme): """Sets the scheme of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. Scheme to use when firing alerts. # noqa: E501 :param scheme: The scheme of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :type: str """ self._scheme = scheme @property def timeout(self): """Gets the timeout of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 Timeout is a per-target Alertmanager timeout when pushing alerts. # noqa: E501 :return: The timeout of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :rtype: str """ return self._timeout @timeout.setter def timeout(self, timeout): """Sets the timeout of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. Timeout is a per-target Alertmanager timeout when pushing alerts. # noqa: E501 :param timeout: The timeout of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :type: str """ self._timeout = timeout @property def tls_config(self): """Gets the tls_config of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :return: The tls_config of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. 
# noqa: E501 :rtype: ComCoreosMonitoringV1PrometheusSpecAlertingTlsConfig """ return self._tls_config @tls_config.setter def tls_config(self, tls_config): """Sets the tls_config of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. :param tls_config: The tls_config of this ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers. # noqa: E501 :type: ComCoreosMonitoringV1PrometheusSpecAlertingTlsConfig """ self._tls_config = tls_config def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ComCoreosMonitoringV1PrometheusSpecAlertingAlertmanagers): return True return self.to_dict() != other.to_dict()
35.933155
230
0.668725
1,319
13,439
6.674754
0.122062
0.049977
0.28169
0.224898
0.608928
0.528283
0.492049
0.336211
0.220241
0.098137
0
0.023044
0.257311
13,439
373
231
36.029491
0.859032
0.469975
0
0.08805
1
0
0.093362
0.017878
0
0
0
0
0
1
0.163522
false
0
0.025157
0
0.314465
0.012579
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
3c46ea7c5414e10f167d0a74b287db644fc72197
725
py
Python
NaijaBet_Api/bookmakers/nairabet.py
jayteealao/NaijaBet-Api
da949e175f7c16c5b846e33062d0a84547c0e441
[ "MIT" ]
1
2022-03-01T23:19:59.000Z
2022-03-01T23:19:59.000Z
NaijaBet_Api/bookmakers/nairabet.py
jayteealao/NaijaBet_Api
da949e175f7c16c5b846e33062d0a84547c0e441
[ "MIT" ]
null
null
null
NaijaBet_Api/bookmakers/nairabet.py
jayteealao/NaijaBet_Api
da949e175f7c16c5b846e33062d0a84547c0e441
[ "MIT" ]
null
null
null
from NaijaBet_Api.utils.normalizer import nairabet_match_normalizer
from NaijaBet_Api.utils import jsonpaths
from NaijaBet_Api.bookmakers.BaseClass import BookmakerBaseClass


class Nairabet(BookmakerBaseClass):
    """Provides access to https://nairabet.com 's odds data.

    Endpoint-querying behaviour comes from BookmakerBaseClass; this class
    supplies the site-specific constants and the normalizer that converts
    raw Nairabet JSON into the package's common odds format, at both a
    competition and a match level.

    Note: the original file had a stray placeholder string ("[summary]")
    between the imports and the class -- a no-op expression, removed here.

    Attributes:
        session: holds a requests session object for the class as a
            static variable (provided by BookmakerBaseClass).
    """

    # Site-specific constants consumed by BookmakerBaseClass.
    _site = 'nairabet'
    _url = "https://nairabet.com"
    _headers = {}

    def normalizer(self, args):
        """Validate raw Nairabet match data and map it to the common format.

        :param args: raw odds/match data as returned by the Nairabet API.
        :return: data normalized to the package's common match schema.
        """
        # Validate/extract first, then map to the shared schema.
        return nairabet_match_normalizer(jsonpaths.nairabet_validator(args))
27.884615
84
0.731034
90
725
5.766667
0.588889
0.069364
0.086705
0.077071
0
0
0
0
0
0
0
0
0.194483
725
25
85
29
0.888699
0.373793
0
0
0
0
0.069307
0
0
0
0
0
0
1
0.111111
false
0
0.333333
0.111111
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
3
3c577cc8e71014cdfabcb92ff3c60cf539f045ca
1,510
py
Python
blog/models.py
rpalo/classroom-blog
8ab74866e80c0fab3f1abfcd566515fd1a4e7baa
[ "MIT" ]
null
null
null
blog/models.py
rpalo/classroom-blog
8ab74866e80c0fab3f1abfcd566515fd1a4e7baa
[ "MIT" ]
null
null
null
blog/models.py
rpalo/classroom-blog
8ab74866e80c0fab3f1abfcd566515fd1a4e7baa
[ "MIT" ]
null
null
null
from django.db import models
from django.contrib.auth.models import User


class Classroom(models.Model):
    """A class run by one teacher; blogs are attached to classrooms."""
    teacher = models.ForeignKey(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return "/classrooms/{}".format(self.pk)


class UserProfile(models.Model):
    """Per-user profile recording which classrooms the user belongs to."""
    user = models.OneToOneField(User, related_name="profile", on_delete=models.CASCADE)
    classrooms = models.ManyToManyField(Classroom)

    def __str__(self):
        return "{} Profile".format(self.user.username)

    def is_enrolled(self):
        """Return True if the user is enrolled in at least one classroom.

        Bug fix: the original read ``self.classroom`` (singular), an
        attribute that does not exist on this model -- the field is the
        many-to-many ``classrooms`` -- so the method always raised
        AttributeError.  ``exists()`` answers the enrolment question with
        a single cheap query.
        """
        return self.classrooms.exists()


class Blog(models.Model):
    """A user's blog, optionally attached to a classroom."""
    user = models.ForeignKey(User, related_name="blogs", on_delete=models.CASCADE)
    title = models.CharField(max_length=100)
    # SET_NULL so deleting a classroom orphans, rather than deletes, blogs.
    classroom = models.ForeignKey(Classroom, related_name="blogs",
                                  on_delete=models.SET_NULL, null=True)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return "/blogs/{}".format(self.pk)


class Post(models.Model):
    """A single dated post within a blog."""
    blog = models.ForeignKey(Blog, related_name="posts", on_delete=models.CASCADE)
    title = models.CharField(max_length=100)
    date_created = models.DateField(auto_now_add=True)
    body = models.TextField()

    def author(self):
        """Return the first name of the blog owner (the post's author)."""
        user = self.blog.user
        return user.first_name

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return "/posts/{}".format(self.pk)
27.454545
104
0.696026
194
1,510
5.21134
0.304124
0.07913
0.069238
0.083086
0.32641
0.279921
0.207715
0.207715
0.207715
0.207715
0
0.007341
0.188079
1,510
54
105
27.962963
0.817292
0
0
0.305556
0
0
0.042412
0
0
0
0
0
0
1
0.25
false
0
0.055556
0.222222
0.972222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
3c6741255d688a1ec5b8ff11ddcb534248f05bed
2,708
py
Python
ana/ab_old.py
hanswenzel/opticks
b75b5929b6cf36a5eedeffb3031af2920f75f9f0
[ "Apache-2.0" ]
11
2020-07-05T02:39:32.000Z
2022-03-20T18:52:44.000Z
ana/ab_old.py
hanswenzel/opticks
b75b5929b6cf36a5eedeffb3031af2920f75f9f0
[ "Apache-2.0" ]
null
null
null
ana/ab_old.py
hanswenzel/opticks
b75b5929b6cf36a5eedeffb3031af2920f75f9f0
[ "Apache-2.0" ]
4
2020-09-03T20:36:32.000Z
2022-01-19T07:42:21.000Z
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

class MXD(object):
    """Maximum-deviation check over one named dict of numeric deviations.

    Reads ``getattr(ab, key)`` (a dict of label -> numeric deviation) and
    reports a non-zero return code when the largest value exceeds the
    error-level cut.
    """
    def __init__(self, ab, key, cut, erc, shortname):
        """
        :param ab: object whose attribute *key* yields the deviation dict
        :param key: property name which returns a dict with numerical values
        :param cut: warn/error/fatal maximum permissable deviations,
            exceeding error level yields non-zero RC
        :param erc: integer return code if any of the values exceeds the cut

        RC passed from python to C++ via system calls are truncated
        beyond 0xff see: SSysTest
        """
        self.ab = ab
        self.key = key
        self.cut = cut
        self.erc = erc
        self.shortname = shortname

    # The deviation dict, looked up lazily on every access.
    mxd = property(lambda self: getattr(self.ab, self.key))

    def _get_mx(self):
        # Largest deviation observed; 999. sentinel when the dict is empty
        # (an empty dict is treated as maximally suspicious, not as passing).
        mxd = self.mxd
        return max(mxd.values()) if len(mxd) > 0 else 999.

    mx = property(_get_mx)

    def _get_rc(self):
        # Non-zero return code only past the error-level cut (cut[1]).
        return self.erc if self.mx > self.cut[1] else 0

    rc = property(_get_rc)

    def __repr__(self):
        mxd = self.mxd
        pres_ = lambda d: " ".join(map(lambda kv: "%10s : %8.3g " % (kv[0], kv[1]), d.items()))
        return "\n".join(["%s .rc %d .mx %7.3f .cut %7.3f/%7.3f/%7.3f %s " % (
            self.shortname,
            self.rc,
            self.mx,
            self.cut[0], self.cut[1], self.cut[2],
            pres_(mxd)
        )])


class RC(object):
    """Aggregate return code over the chi2, ratio-deviation and
    per-photon-deviation checks of an AB comparison."""
    def __init__(self, ab):
        self.ab = ab
        self.c2p = MXD(ab, "c2p",  ab.ok.c2max,  77, "ab.rc.c2p")
        self.rdv = MXD(ab, "rmxs", ab.ok.rdvmax, 88, "ab.rc.rdv")
        self.pdv = MXD(ab, "pmxs", ab.ok.pdvmax, 99, "ab.rc.pdv")

    def _get_rcs(self):
        # Bug fix: the original returned map(...) which on Python 3 is a
        # single-use iterator and made _get_rc's ``self.rcs + [0]`` raise
        # TypeError.  Materialize a list so it is reusable and concatenable.
        return [_.rc for _ in [self.c2p, self.rdv, self.pdv]]

    rcs = property(_get_rcs)

    def _get_rc(self):
        # The [0] guarantees max() has something to chew on.
        return max(self.rcs + [0])

    rc = property(_get_rc)

    def __repr__(self):
        return "\n".join([
            "ab.rc .rc %3d %r " % (self.rc, self.rcs),
            repr(self.c2p),
            repr(self.rdv),
            repr(self.pdv),
            "."
        ])
33.02439
173
0.584195
394
2,708
3.923858
0.395939
0.03881
0.016818
0.020699
0.082794
0.034929
0.034929
0.034929
0
0
0
0.023896
0.289143
2,708
81
174
33.432099
0.779221
0.370015
0
0.25641
0
0.025641
0.082665
0
0
0
0
0
0
1
0.205128
false
0
0
0.102564
0.538462
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
3c702d158004e84a8def211d6901cce9989f6692
140
py
Python
Code/Miscellaneous/PypeRExample.py
tchakravarty/PythonExamples
a20a866f0f1dcf6ca429e5114baac1e40cf1da42
[ "Apache-2.0" ]
null
null
null
Code/Miscellaneous/PypeRExample.py
tchakravarty/PythonExamples
a20a866f0f1dcf6ca429e5114baac1e40cf1da42
[ "Apache-2.0" ]
null
null
null
Code/Miscellaneous/PypeRExample.py
tchakravarty/PythonExamples
a20a866f0f1dcf6ca429e5114baac1e40cf1da42
[ "Apache-2.0" ]
1
2018-11-23T17:21:05.000Z
2018-11-23T17:21:05.000Z
from pyper import R


def foo(r):
    """Build a 20-row matrix in R via *r* and print the sum of its entries.

    :param r: a PypeR R session (``pyper.R`` instance); calling it executes
        the given R expression string in the attached R process.
    """
    r("a <- NULL")
    for i in range(20):
        # Append one row: 1..1e6 scaled by the row index i.
        r("a <- rbind(a, seq(1000000) * 1.0 * %d)" % i)
    # Bug fix: ``print r(...)`` was Python 2 statement syntax and is a
    # SyntaxError on Python 3; use the function form.
    print(r("sum(a)"))
23.333333
51
0.528571
29
140
2.551724
0.724138
0.054054
0
0
0
0
0
0
0
0
0
0.101852
0.228571
140
6
52
23.333333
0.583333
0
0
0
0
0
0.375887
0
0
0
0
0
0
0
null
null
0
0.166667
null
null
0.166667
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
b1c3bc0a1c09a1a6406c4deab276769d28beac9a
205
py
Python
pyautogui/src/credentials.py
Vaansh/Reddit-to-Instagam-Automation
b5506239698475a15d6fc99e9fe5e34bf3950bea
[ "MIT" ]
4
2020-09-04T19:30:48.000Z
2021-01-05T05:04:54.000Z
using Instabot/credentials.py
Vaansh/Reddit-to-Instagram-Automation-No-Resize
ebe70374493f710fc311bdc1f10a0c513734fff1
[ "MIT" ]
1
2021-02-24T01:59:45.000Z
2021-02-24T01:59:45.000Z
using Instabot/credentials.py
Vaansh/Reddit-to-Instagram-Automation-No-Resize
ebe70374493f710fc311bdc1f10a0c513734fff1
[ "MIT" ]
1
2020-12-28T02:43:36.000Z
2020-12-28T02:43:36.000Z
import praw # Reddit developer credentials reddit = praw.Reddit(client_id="", client_secret="", username="", password="", user_agent="") # Instagram password and username IGusername = "" IGpassword = ""
22.777778
93
0.721951
22
205
6.590909
0.727273
0.137931
0
0
0
0
0
0
0
0
0
0
0.126829
205
8
94
25.625
0.810056
0.292683
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0.5
0.25
0
0.25
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
3