hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9726eafab32c15410feb2dca6780b3667286e901 | 1,295 | py | Python | tests/preprocessing/test_util.py | svenruf/trackintel | 8b3482f0a4f44cf5f5de366198a39ca54ac04e75 | [
"MIT"
] | null | null | null | tests/preprocessing/test_util.py | svenruf/trackintel | 8b3482f0a4f44cf5f5de366198a39ca54ac04e75 | [
"MIT"
] | null | null | null | tests/preprocessing/test_util.py | svenruf/trackintel | 8b3482f0a4f44cf5f5de366198a39ca54ac04e75 | [
"MIT"
] | null | null | null | import datetime
import pytest
from trackintel.preprocessing.util import calc_temp_overlap
@pytest.fixture
def time_1():
return datetime.datetime(year=1, month=1, day=1, hour=0, minute=0, second=0)
@pytest.fixture
def one_hour():
return datetime.timedelta(hours=1)
class TestCalc_temp_overlap():
def test_same_interval(self, time_1, one_hour):
"""Two equal intervals should have 100 % overlap"""
ratio = calc_temp_overlap(time_1, time_1 + one_hour, time_1, time_1 + one_hour)
assert ratio == 1
def test_1_in_2(self, time_1, one_hour):
"""If interval 1 is fully covered by interval 2 the overlap should be 100 %"""
ratio = calc_temp_overlap(time_1, time_1 + one_hour, time_1, time_1 + 2 * one_hour)
assert ratio == 1
def test_2_in_1(self, time_1, one_hour):
"""If interval 1 is only covered half by interval 2 the overlap should be 50 %"""
ratio = calc_temp_overlap(time_1, time_1 + 2 * one_hour, time_1, time_1 + one_hour)
assert ratio == 0.5
def test_no_overlap(self, time_1, one_hour):
"""If the two intervals do not overlap the ratio should be 0"""
ratio = calc_temp_overlap(time_1, time_1 + one_hour, time_1 + one_hour, time_1 + 2 * one_hour)
assert ratio == 0
| 34.078947 | 102 | 0.681853 | 214 | 1,295 | 3.859813 | 0.257009 | 0.127119 | 0.096852 | 0.145278 | 0.530266 | 0.508475 | 0.478208 | 0.335351 | 0.299031 | 0.228814 | 0 | 0.052579 | 0.221622 | 1,295 | 37 | 103 | 35 | 0.766865 | 0.194595 | 0 | 0.181818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.181818 | 1 | 0.272727 | false | 0 | 0.136364 | 0.090909 | 0.545455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
972738928f80909c41adae3de306c1deaa0741e7 | 869 | py | Python | nla_semparse/nla_semparse/nla_language.py | ramild/allennlp-guide | 4cff916e7bc4629184bc70594e213ef56e14ec70 | [
"MIT"
] | 71 | 2020-06-06T03:12:44.000Z | 2022-03-12T20:21:48.000Z | nla_semparse/nla_semparse/nla_language.py | ramild/allennlp-guide | 4cff916e7bc4629184bc70594e213ef56e14ec70 | [
"MIT"
] | 50 | 2020-06-18T14:19:15.000Z | 2022-03-28T07:04:16.000Z | nla_semparse/nla_semparse/nla_language.py | ramild/allennlp-guide | 4cff916e7bc4629184bc70594e213ef56e14ec70 | [
"MIT"
] | 37 | 2020-06-05T19:08:44.000Z | 2022-03-17T08:23:41.000Z | from allennlp_semparse import DomainLanguage, predicate
class NlaLanguage(DomainLanguage):
def __init__(self):
super().__init__(
start_types={int},
allowed_constants={
"0": 0,
"1": 1,
"2": 2,
"3": 3,
"4": 4,
"5": 5,
"6": 6,
"7": 7,
"8": 8,
"9": 9,
},
)
@predicate
def add(self, num1: int, num2: int) -> int:
return num1 + num2
@predicate
def subtract(self, num1: int, num2: int) -> int:
return num1 - num2
@predicate
def multiply(self, num1: int, num2: int) -> int:
return num1 * num2
@predicate
def divide(self, num1: int, num2: int) -> int:
return num1 // num2 if num2 != 0 else 0
| 23.486486 | 55 | 0.441887 | 92 | 869 | 4.054348 | 0.391304 | 0.128686 | 0.117962 | 0.160858 | 0.47185 | 0.47185 | 0.47185 | 0.47185 | 0.47185 | 0.378016 | 0 | 0.07943 | 0.434983 | 869 | 36 | 56 | 24.138889 | 0.680244 | 0 | 0 | 0.133333 | 0 | 0 | 0.011507 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.033333 | 0.133333 | 0.366667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 2 |
9730ded7aff5753f7cf4dd267bc552b4a4f1c209 | 1,534 | py | Python | matchzoo/data_generator/dynamic_data_generator.py | freedombenLiu/MatchZoo | b1ba96ac8b84e70952f5787f62272ceef8cea106 | [
"Apache-2.0"
] | 2 | 2019-10-04T16:51:36.000Z | 2021-06-09T04:43:35.000Z | matchzoo/data_generator/dynamic_data_generator.py | ThanhChinhBK/MatchZoo | f77403044bca4ff0a84738921180724a54fd42f9 | [
"Apache-2.0"
] | null | null | null | matchzoo/data_generator/dynamic_data_generator.py | ThanhChinhBK/MatchZoo | f77403044bca4ff0a84738921180724a54fd42f9 | [
"Apache-2.0"
] | null | null | null | """Dynamic data generator with transform function inside."""
import numpy as np
from matchzoo.data_generator import DataGenerator
class DynamicDataGenerator(DataGenerator):
"""
Data generator with preprocess unit inside.
Examples:
>>> import matchzoo as mz
>>> raw_data = mz.datasets.toy.load_data()
>>> data_generator = DynamicDataGenerator(len, data_pack=raw_data,
... batch_size=1, shuffle=False)
>>> len(data_generator)
100
>>> x, y = data_generator[0]
>>> type(x)
<class 'dict'>
>>> x.keys()
dict_keys(['id_left', 'text_left', 'id_right', 'text_right'])
>>> type(x['id_left'])
<class 'numpy.ndarray'>
>>> type(x['id_right'])
<class 'numpy.ndarray'>
>>> type(x['text_left'])
<class 'numpy.ndarray'>
>>> type(x['text_right'])
<class 'numpy.ndarray'>
>>> type(y)
<class 'numpy.ndarray'>
"""
def __init__(self, func, *args, **kwargs):
""":class:`DynamicDataGenerator` constructor."""
super().__init__(*args, **kwargs)
self._func = func
def _get_batch_of_transformed_samples(self, indices: np.array):
"""
Get a batch of samples based on their ids.
:param indices: A list of instance ids.
:return: A batch of transformed samples.
"""
return self._data_pack[indices].apply_on_text(
self._func, verbose=0).unpack()
| 30.68 | 78 | 0.567797 | 170 | 1,534 | 4.905882 | 0.405882 | 0.093525 | 0.101918 | 0.100719 | 0.135492 | 0.098321 | 0 | 0 | 0 | 0 | 0 | 0.00552 | 0.291395 | 1,534 | 49 | 79 | 31.306122 | 0.76173 | 0.623859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.222222 | false | 0 | 0.222222 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
97331be74487f5701fd3082af845c9f2cd54d805 | 167 | py | Python | src/user-config.py | aryamanarora/wdp | 331de5c76baf849790d7c4fb445d71599db99ae6 | [
"MIT"
] | null | null | null | src/user-config.py | aryamanarora/wdp | 331de5c76baf849790d7c4fb445d71599db99ae6 | [
"MIT"
] | null | null | null | src/user-config.py | aryamanarora/wdp | 331de5c76baf849790d7c4fb445d71599db99ae6 | [
"MIT"
] | null | null | null | family = 'wiktionary'
mylang = 'en'
usernames['wiktionary']['en'] = u'AryamanA' # change to your username
console_encoding = 'utf-8'
minthrottle = 0
maxthrottle = 1 | 18.555556 | 69 | 0.706587 | 21 | 167 | 5.571429 | 0.904762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021127 | 0.149701 | 167 | 9 | 70 | 18.555556 | 0.802817 | 0.137725 | 0 | 0 | 0 | 0 | 0.258741 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
97625124976081b5c5a18735b086161919e65334 | 3,265 | py | Python | tests/conftest.py | ni/nitsm-python | c6ae0abbae5c71198a9516fb2449b046fe6201dc | [
"MIT"
] | 4 | 2021-08-21T06:21:45.000Z | 2021-12-27T05:27:43.000Z | tests/conftest.py | ni/nitsm-python | c6ae0abbae5c71198a9516fb2449b046fe6201dc | [
"MIT"
] | 51 | 2021-07-28T14:48:04.000Z | 2022-03-25T02:35:40.000Z | tests/conftest.py | ni/nitsm-python | c6ae0abbae5c71198a9516fb2449b046fe6201dc | [
"MIT"
] | 2 | 2021-06-23T19:53:17.000Z | 2022-03-27T20:10:27.000Z | import enum
import os.path
import pytest
import win32com.client
import win32com.client.selecttlb
import pythoncom
import nitsm.codemoduleapi
try:
_standalone_tsm_context_tlb = win32com.client.selecttlb.FindTlbsWithDescription(
"NI TestStand Semiconductor Module Standalone Semiconductor Module Context"
)[0]
except IndexError:
raise RuntimeError(
"The TSM Standalone Semiconductor Module Context component is not installed. "
"Contact one of the repository owners to determine how to obtain this "
"non-public component."
)
@pytest.fixture
def _published_data_reader_factory(request):
# get absolute path of the pin map file which is assumed to be relative to the test module
pin_map_path = request.node.get_closest_marker("pin_map").args[0]
module_directory = os.path.dirname(request.module.__file__)
pin_map_path = os.path.join(module_directory, pin_map_path)
published_data_reader_factory = win32com.client.Dispatch(
"NationalInstruments.TestStand.SemiconductorModule.Restricted.PublishedDataReaderFactory"
)
return published_data_reader_factory.NewSemiconductorModuleContext(pin_map_path)
@pytest.fixture
def standalone_tsm_context_com_object(_published_data_reader_factory):
return _published_data_reader_factory[0]
@pytest.fixture
def standalone_tsm_context(standalone_tsm_context_com_object):
return nitsm.codemoduleapi.SemiconductorModuleContext(standalone_tsm_context_com_object)
class PublishedDataType(enum.Enum):
Double = 0
Boolean = 1
String = 2
class PublishedData:
def __init__(self, published_data_com_obj):
self._published_data = win32com.client.CastTo(
published_data_com_obj, "IPublishedData", _standalone_tsm_context_tlb
)
self._published_data._oleobj_ = self._published_data._oleobj_.QueryInterface(
self._published_data.CLSID, pythoncom.IID_IDispatch
)
@property
def boolean_value(self) -> bool:
return self._published_data.BooleanValue
@property
def double_value(self) -> float:
return self._published_data.DoubleValue
@property
def pin(self) -> str:
return self._published_data.Pin
@property
def published_data_id(self) -> str:
return self._published_data.PublishedDataId
@property
def site_number(self) -> int:
return self._published_data.SiteNumber
@property
def string_value(self) -> str:
return self._published_data.StringValue
@property
def type(self) -> PublishedDataType:
return PublishedDataType(self._published_data.Type)
class PublishedDataReader:
def __init__(self, published_data_reader_com_obj):
self._published_data_reader = win32com.client.CastTo(
published_data_reader_com_obj, "IPublishedDataReader", _standalone_tsm_context_tlb
)
def get_and_clear_published_data(self):
published_data = self._published_data_reader.GetAndClearPublishedData()
return [PublishedData(published_data_point) for published_data_point in published_data]
@pytest.fixture
def published_data_reader(_published_data_reader_factory):
return PublishedDataReader(_published_data_reader_factory[1])
| 32.009804 | 97 | 0.759877 | 379 | 3,265 | 6.171504 | 0.311346 | 0.172296 | 0.116289 | 0.077811 | 0.276186 | 0.099188 | 0 | 0 | 0 | 0 | 0 | 0.007047 | 0.174273 | 3,265 | 101 | 98 | 32.326733 | 0.860534 | 0.026953 | 0 | 0.144737 | 0 | 0 | 0.115591 | 0.027402 | 0 | 0 | 0 | 0 | 0 | 1 | 0.184211 | false | 0 | 0.092105 | 0.131579 | 0.513158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 2 |
97931078e2833c3d28ea86bb9e8e254f59e82ea4 | 1,323 | py | Python | packages/core/minos-microservice-networks/minos/networks/brokers/__init__.py | sorasful/minos-python | 1189330eebf6444627a2af6b29f347670f95a4dd | [
"MIT"
] | null | null | null | packages/core/minos-microservice-networks/minos/networks/brokers/__init__.py | sorasful/minos-python | 1189330eebf6444627a2af6b29f347670f95a4dd | [
"MIT"
] | null | null | null | packages/core/minos-microservice-networks/minos/networks/brokers/__init__.py | sorasful/minos-python | 1189330eebf6444627a2af6b29f347670f95a4dd | [
"MIT"
] | null | null | null | from .clients import (
BrokerClient,
)
from .collections import (
BrokerQueue,
InMemoryBrokerQueue,
PostgreSqlBrokerQueue,
)
from .dispatchers import (
BrokerDispatcher,
BrokerRequest,
BrokerResponse,
BrokerResponseException,
)
from .handlers import (
BrokerHandler,
BrokerHandlerService,
)
from .messages import (
REQUEST_HEADERS_CONTEXT_VAR,
REQUEST_REPLY_TOPIC_CONTEXT_VAR,
BrokerMessage,
BrokerMessageV1,
BrokerMessageV1Payload,
BrokerMessageV1Status,
BrokerMessageV1Strategy,
)
from .pools import (
BrokerClientPool,
)
from .publishers import (
BrokerPublisher,
BrokerPublisherQueue,
InMemoryBrokerPublisher,
InMemoryBrokerPublisherQueue,
PostgreSqlBrokerPublisherQueue,
PostgreSqlBrokerPublisherQueueQueryFactory,
QueuedBrokerPublisher,
)
from .subscribers import (
BrokerSubscriber,
BrokerSubscriberBuilder,
BrokerSubscriberQueue,
BrokerSubscriberQueueBuilder,
InMemoryBrokerSubscriber,
InMemoryBrokerSubscriberBuilder,
InMemoryBrokerSubscriberQueue,
InMemoryBrokerSubscriberQueueBuilder,
PostgreSqlBrokerSubscriberQueue,
PostgreSqlBrokerSubscriberQueueBuilder,
PostgreSqlBrokerSubscriberQueueQueryFactory,
QueuedBrokerSubscriber,
QueuedBrokerSubscriberBuilder,
)
| 24.054545 | 48 | 0.779289 | 69 | 1,323 | 14.84058 | 0.753623 | 0.019531 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00366 | 0.173847 | 1,323 | 54 | 49 | 24.5 | 0.933211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.148148 | 0 | 0.148148 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
9799236da3399b0ece5c30019c02133bfcd6ee25 | 2,743 | py | Python | python-the-hard-way/18-names-variables-code-functions.py | Valka7a/python-playground | f08d4374f2cec2e8b1afec3753854b1ec10ff480 | [
"MIT"
] | null | null | null | python-the-hard-way/18-names-variables-code-functions.py | Valka7a/python-playground | f08d4374f2cec2e8b1afec3753854b1ec10ff480 | [
"MIT"
] | null | null | null | python-the-hard-way/18-names-variables-code-functions.py | Valka7a/python-playground | f08d4374f2cec2e8b1afec3753854b1ec10ff480 | [
"MIT"
] | null | null | null | # Exercise 18: Names, Variables, Code, Functions
# Functions do three things:
# 1. They name pieces of code the way variables name stings and numbers.
# 2. They take arguments the way your scripts take argv.
# 3. Using 1 and 2 they let you make your own "mini-scripts" or "tiny commands."
# This one is like your scripts with argv
def print_two(*args):
arg1, arg2 = args
print "arg1: %r, arg2: %r" % (arg1, arg2)
# Ok, that *args is actually pointless, we can just do this
def print_two_again(arg1, arg2):
print "arg1: %r, arg2: %r" % (arg1, arg2)
# This just takes one argument
def print_one(arg1):
print "arg1: %r" % arg1
# This one takes no arguments
def print_none():
print "I got nothin'."
print_two("Zed","Shaw")
print_two_again("Zed","Shaw")
print_one("First!")
print_none()
# Describe first function print_two
# 1. First we tell Python we want to make a function using
# def for "define".
# 2. On the same line as def we give the function a name. In
# this case we just called it "print_two" but it could also be
# "peanuts". It doesn't matter, except that your function
# should have a short name that says what it does.
# 3. Then we tell it we want *args (asterisk args) which is a
# lot like your argv parameter but for functions. This has to
# go inside () parentheses to work.
# 4. Then we end this line with a : colon, and start indenting.
# 5. After the colon all the lines that are indented four spaces
# will become attached to this name, print_two. Our first
# indented line is one that unpacks the arguments the same as
# with your scripts.
# 6. To Demonstrate how it works we print these arguments out,
# just like we would in a script.
# Study Drills
# Create a function checklist for later exercises. Write these
# checks on an index card and keep it by you while you complete
# the rest of these exercises or until you feel you do not need
# the index card anymore:
# 1. Did you start your function definition with def?
# 2. Does your function name have only characters and _
# (underscore) characters?
# 3. Did you put an open parenthesis ( right after the
# function name?
# 4. Did you put your arguments after the parenthesis (
# separated by commas?
# 5. Did you make each argument unique (meaning no
# duplicated names)?
# 6. Did you put a close parenthesis and a colon ): after
# the arguments?
# 7. Did you indent all lines of code you want in the
# function four spaces? No more, no less.
# 8. Did you "end" your function by going back to
# writing with no indent (dedenting we call it)?
# Drill
def checklist(first, second, third):
print "First: %r, Second: %r, Third: %r" % (first, second, third)
checklist("Live","Play","Die")
| 22.300813 | 80 | 0.707255 | 461 | 2,743 | 4.177874 | 0.40564 | 0.029076 | 0.015576 | 0.014538 | 0.023884 | 0.023884 | 0.023884 | 0 | 0 | 0 | 0 | 0.016598 | 0.20926 | 2,743 | 122 | 81 | 22.483607 | 0.871369 | 0.772512 | 0 | 0.125 | 0 | 0 | 0.228302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.8125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 2 |
979ad1431f8eca0ee6a85e0c7dd6d02f95e590a3 | 1,480 | py | Python | tests/test_middleware.py | alekam/django-http-exceptions | 7ae86223d7f2fc84577f21df3bcb72a63c23e2bf | [
"Apache-2.0"
] | null | null | null | tests/test_middleware.py | alekam/django-http-exceptions | 7ae86223d7f2fc84577f21df3bcb72a63c23e2bf | [
"Apache-2.0"
] | null | null | null | tests/test_middleware.py | alekam/django-http-exceptions | 7ae86223d7f2fc84577f21df3bcb72a63c23e2bf | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
from django.test.testcases import TestCase
from django.test.utils import override_settings
from django.urls.base import reverse
class TestsMiddleware(TestCase):
def test_success_view(self):
resp = self.client.get(reverse('success_view'))
self.assertEquals(resp.status_code, 200)
self.assertEquals(resp['Content-Type'], 'application/json')
data = resp.json()
self.assertEquals(data['status'], True)
self.assertNotIn('exception', data)
def test_fail_view(self):
resp = self.client.get(reverse('fail_view'))
self.assertEquals(resp.status_code, 500)
self.assertEquals(resp['Content-Type'], 'application/json')
data = resp.json()
self.assertEquals(data['status'], False)
self.assertIn('exception', data)
self.assertEquals(data[u'exception'], 'ZeroDivisionError')
self.assertIn('message', data)
self.assertNotIn('traceback', data)
@override_settings(DEBUG=True)
def test_fail_debug_view(self):
resp = self.client.get(reverse('fail_view'))
self.assertEquals(resp.status_code, 500)
self.assertEquals(resp['Content-Type'], 'application/json')
data = resp.json()
self.assertEquals(data['status'], False)
self.assertIn('exception', data)
self.assertEquals(data[u'exception'], 'ZeroDivisionError')
self.assertIn('message', data)
self.assertIn('traceback', data)
| 37 | 67 | 0.666892 | 169 | 1,480 | 5.751479 | 0.266272 | 0.18107 | 0.123457 | 0.049383 | 0.691358 | 0.691358 | 0.656379 | 0.623457 | 0.623457 | 0.623457 | 0 | 0.008418 | 0.197297 | 1,480 | 39 | 68 | 37.948718 | 0.809764 | 0.008784 | 0 | 0.5625 | 0 | 0 | 0.16587 | 0 | 0 | 0 | 0 | 0 | 0.5625 | 1 | 0.09375 | false | 0 | 0.09375 | 0 | 0.21875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
979e7800496d7a6fead6fe6d12414a0bfbd2fd62 | 1,611 | py | Python | gui/block_views/block_view_factory.py | Wastack/blocky | 1f05b142d5a62649205bdd97813e75fd60eadd40 | [
"MIT"
] | null | null | null | gui/block_views/block_view_factory.py | Wastack/blocky | 1f05b142d5a62649205bdd97813e75fd60eadd40 | [
"MIT"
] | 5 | 2021-04-24T11:36:48.000Z | 2022-02-19T12:20:50.000Z | gui/block_views/block_view_factory.py | Wastack/blocky | 1f05b142d5a62649205bdd97813e75fd60eadd40 | [
"MIT"
] | null | null | null | import logging
from typing import Type
from game.blocks.block import AbstractBlock
from game.blocks.impl.duck_pool import DuckPoolBlock
from game.blocks.impl.empty_block import EmptyBlock
from game.blocks.impl.melting_ice import MeltingIceBlock
from game.blocks.impl.player import Player
from game.blocks.impl.rock import RockBlock
from game.blocks.impl.boulder import Boulder
from gui.block_views.block import BlockView
from gui.block_views.duck_pool import DuckPoolBlockView
from gui.block_views.empty_block import EmptyBlockView
from gui.block_views.melting_ice import MeltingIceBlockView
from gui.block_views.player import PlayerBlockView
from gui.block_views.rock_block import RockBlockView
from gui.block_views.boulder import BoulderView
registered_block_views = {
EmptyBlockView : EmptyBlock,
RockBlockView: RockBlock,
PlayerBlockView: Player,
MeltingIceBlockView: MeltingIceBlock,
DuckPoolBlockView: DuckPoolBlock,
BoulderView: Boulder,
}
def to_block(block_view_type: Type[BlockView]) -> AbstractBlock:
return registered_block_views[block_view_type]()
def from_block(block: AbstractBlock, canvas) -> BlockView:
for view_type, block_type in registered_block_views.items():
if block_type != type(block):
continue
candidate = view_type.from_block(canvas, block)
if candidate is None:
raise ValueError("Factory failed. Block view does not support block.")
return candidate
logging.error(f"No registered block view is found for block of type: {type(block)}")
raise ValueError("block view factory failed")
| 37.465116 | 88 | 0.787089 | 208 | 1,611 | 5.947115 | 0.274038 | 0.080841 | 0.079224 | 0.0962 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148355 | 1,611 | 42 | 89 | 38.357143 | 0.901604 | 0 | 0 | 0 | 0 | 0 | 0.087523 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.444444 | 0.027778 | 0.555556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
97b6c44d2062abf016020d84821ec53b3dc0ac1d | 1,740 | py | Python | src/pyraug/trainers/training_config.py | clementchadebec/pyraug | d1b36c060fe56427ed158ecb38cdbc6cc3bc0f74 | [
"Apache-2.0"
] | 53 | 2021-09-05T09:04:38.000Z | 2022-03-22T05:34:23.000Z | src/pyraug/trainers/training_config.py | clementchadebec/pyraug | d1b36c060fe56427ed158ecb38cdbc6cc3bc0f74 | [
"Apache-2.0"
] | 2 | 2021-09-28T08:10:37.000Z | 2021-12-13T10:32:54.000Z | src/pyraug/trainers/training_config.py | clementchadebec/pyraug | d1b36c060fe56427ed158ecb38cdbc6cc3bc0f74 | [
"Apache-2.0"
] | 6 | 2021-09-05T01:03:48.000Z | 2021-10-13T01:03:08.000Z | from typing import Union
from pydantic.dataclasses import dataclass
from pyraug.config import BaseConfig
@dataclass
class TrainingConfig(BaseConfig):
"""
:class:`~pyraug.trainers.training_config.TrainingConfig` is the class in which all the training arguments
are stored.
This instance is then provided to a :class:`~pyraug.trainers.Trainer` instance which performs
a model training.
Parameters:
output_dir (str): The directory where model checkpoints, configs and final
model will be stored. Default: None.
batch_size (int): The number of training samples per batch. Default 50
max_epochs (int): The maximal number of epochs for training. Default: 10000
learning_rate (int): The learning rate applied to the `Optimizer`. Default: 1e-3
train_early_stopping (int): The maximal number of epochs authorized without train loss
improvement. If None no early stopping is performed. Default: 50
eval_early_stopping (int): The maximal number of epochs authorized without eval loss
improvement. If None no early stopping is performed. Default: None
steps_saving (int): A model checkpoint will be saved every `steps_saving` epoch
seed (int): The random seed for reprodicibility
no_cuda (bool): Disable `cuda` training. Default: False
verbose (bool): Allow verbosity
"""
output_dir: str = None
batch_size: int = 50
max_epochs: int = 10000
learning_rate: float = 1e-4
train_early_stopping: Union[int, None] = 50
eval_early_stopping: Union[int, None] = None
steps_saving: Union[int, None] = None
seed: int = 8
no_cuda: bool = False
verbose: bool = True
| 33.461538 | 109 | 0.700575 | 232 | 1,740 | 5.159483 | 0.409483 | 0.030075 | 0.032581 | 0.047619 | 0.249791 | 0.20802 | 0.185464 | 0.185464 | 0.185464 | 0.185464 | 0 | 0.017254 | 0.233908 | 1,740 | 51 | 110 | 34.117647 | 0.88072 | 0.675862 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.2 | 0 | 0.933333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
97b75a6ed9e40dba55b3f2530d0b54ee643ccc95 | 10,841 | py | Python | azure-devops/azext_devops/dev/boards/iteration.py | cryptotheoryum/azure-devops-cli-extension | ac9fc127bda9bc78781caab5e932f2357882d3c2 | [
"MIT"
] | null | null | null | azure-devops/azext_devops/dev/boards/iteration.py | cryptotheoryum/azure-devops-cli-extension | ac9fc127bda9bc78781caab5e932f2357882d3c2 | [
"MIT"
] | null | null | null | azure-devops/azext_devops/dev/boards/iteration.py | cryptotheoryum/azure-devops-cli-extension | ac9fc127bda9bc78781caab5e932f2357882d3c2 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.util import CLIError
from azext_devops.devops_sdk.v5_0.work_item_tracking.models import WorkItemClassificationNode
from azext_devops.devops_sdk.v5_0.work.models import (TeamContext,
TeamSettingsIteration)
from azext_devops.dev.common.arguments import convert_date_only_string_to_iso8601
from azext_devops.dev.common.services import (resolve_instance_and_project,
get_work_item_tracking_client,
get_work_client)
_STRUCTURE_GROUP_ITERATION = 'iterations'
def get_project_iterations(depth=1, path=None, organization=None, project=None, detect=None):
"""List iterations for a project.
:param depth: Depth of child nodes to be fetched.
:type depth: int
"""
organization, project = resolve_instance_and_project(detect=detect,
organization=organization,
project=project)
client = get_work_item_tracking_client(organization)
list_of_iterations = client.get_classification_node(project=project,
structure_group=_STRUCTURE_GROUP_ITERATION,
depth=depth, path=path)
return list_of_iterations
def update_project_iteration(path, child_id=None, name=None, start_date=None,
                             finish_date=None, organization=None, project=None, detect=None):
    """Move iteration or update iteration details like name AND/OR start-date and finish-date.

    :param path: Absolute path of the iteration node to update.
    :type: str
    :param name: New name of the iteration.
    :type: str
    :param child_id: Move an existing iteration and add as child node for this iteration.
    :type: int
    :param start_date: New start date (date-only string); on a node without dates it must
        be supplied together with finish_date.
    :param finish_date: New finish date (date-only string); same pairing rule as start_date.
    """
    if start_date is None and finish_date is None and name is None and child_id is None:
        raise CLIError('At least one of --start-date , --finish-date , --child-id or --name arguments is required.')
    organization, project = resolve_instance_and_project(detect=detect,
                                                         organization=organization,
                                                         project=project)
    client = get_work_item_tracking_client(organization)
    if child_id:
        # Re-parent an existing iteration: posting a node that carries only its
        # id under `path` moves it there.  The response is intentionally
        # discarded (the original bound it to a variable that was immediately
        # overwritten below).
        move_classification_node_object = WorkItemClassificationNode()
        move_classification_node_object.id = child_id
        client.create_or_update_classification_node(project=project,
                                                    posted_node=move_classification_node_object,
                                                    structure_group=_STRUCTURE_GROUP_ITERATION,
                                                    path=path)
    classification_node_object = client.get_classification_node(project=project,
                                                                structure_group=_STRUCTURE_GROUP_ITERATION,
                                                                path=path)
    # When the node has no attributes yet, dates may only be set as a pair.
    if classification_node_object.attributes is None and \
            ((start_date and not finish_date) or (not start_date and finish_date)):
        raise CLIError('You must specify both start and finish dates or neither date')
    if classification_node_object.attributes is None:
        classification_node_object.attributes = {}
    if start_date:
        start_date = convert_date_only_string_to_iso8601(value=start_date, argument='start_date')
        classification_node_object.attributes['startDate'] = start_date
    if finish_date:
        finish_date = convert_date_only_string_to_iso8601(value=finish_date, argument='finish_date')
        classification_node_object.attributes['finishDate'] = finish_date
    if name is not None:
        classification_node_object.name = name
    return client.update_classification_node(project=project,
                                             posted_node=classification_node_object,
                                             structure_group=_STRUCTURE_GROUP_ITERATION,
                                             path=path)
def delete_project_iteration(path, organization=None, project=None, detect=None):
    """Delete iteration.

    :param path: Absolute path of the iteration node to delete.
    """
    organization, project = resolve_instance_and_project(
        detect=detect, organization=organization, project=project)
    client = get_work_item_tracking_client(organization)
    return client.delete_classification_node(
        project=project,
        structure_group=_STRUCTURE_GROUP_ITERATION,
        path=path)
def get_project_iteration(id, organization=None, project=None, detect=None):  # pylint: disable=redefined-builtin
    """Show iteration details for a project.

    :param id: Iteration ID.
    :type id: int
    """
    # The API expects a list of integer ids; convert first so a bad id fails
    # fast, before any organization/project resolution.
    node_ids = [int(id)]
    organization, project = resolve_instance_and_project(
        detect=detect, organization=organization, project=project)
    client = get_work_item_tracking_client(organization)
    return client.get_classification_nodes(project=project, ids=node_ids)
def create_project_iteration(name, path=None, start_date=None, finish_date=None,
                             organization=None, project=None, detect=None):
    """Create iteration.

    :param name: Name of the iteration.
    :type: str
    :param path: Path under which to create the iteration (project root when omitted).
    :param start_date: Start date (date-only string); must be given together with finish_date.
    :param finish_date: Finish date (date-only string); must be given together with start_date.
    """
    # Fixed: removed a leftover `import pdb; pdb.set_trace()` debugging trap
    # that froze every invocation of this command.
    if start_date is None and finish_date is None and name is None:
        raise CLIError('At least one of --start-date , --finish-date or --name arguments is required.')
    organization, project = resolve_instance_and_project(detect=detect,
                                                         organization=organization,
                                                         project=project)
    client = get_work_item_tracking_client(organization)
    classification_node_object = WorkItemClassificationNode()
    # Dates may only be supplied as a pair.
    if (start_date and not finish_date) or (not start_date and finish_date):
        raise CLIError('You must specify both start and finish dates or neither date')
    if classification_node_object.attributes is None:
        classification_node_object.attributes = {}
    if start_date:
        start_date = convert_date_only_string_to_iso8601(value=start_date, argument='start_date')
        classification_node_object.attributes['startDate'] = start_date
    if finish_date:
        finish_date = convert_date_only_string_to_iso8601(value=finish_date, argument='finish_date')
        classification_node_object.attributes['finishDate'] = finish_date
    if name is not None:
        classification_node_object.name = name
    return client.create_or_update_classification_node(project=project,
                                                       posted_node=classification_node_object,
                                                       structure_group=_STRUCTURE_GROUP_ITERATION,
                                                       path=path)
def get_team_iterations(team, timeframe=None, organization=None, project=None, detect=None):
    """List iterations for a team.

    :param team: The name or id of the team.
    :type team: str
    :param timeframe: A filter for which iterations are returned based on relative time.
        Only Current is supported currently.
    :type: str
    :rtype: :class:`<WebApiTeam> <v5_0.core.models.WebApiTeam>`
    """
    organization, project = resolve_instance_and_project(
        detect=detect, organization=organization, project=project)
    work_client = get_work_client(organization)
    context = TeamContext(project=project, team=team)
    return work_client.get_team_iterations(team_context=context, timeframe=timeframe)
def get_team_iteration(id, team, organization=None, project=None, detect=None):  # pylint: disable=redefined-builtin
    """ Get iteration details for a team.

    :param id: Identifier of the iteration.
    :type: str
    :param team: Name or ID of the team.
    :type: str
    """
    organization, project = resolve_instance_and_project(
        detect=detect, organization=organization, project=project)
    work_client = get_work_client(organization)
    context = TeamContext(project=project, team=team)
    return work_client.get_team_iteration(team_context=context, id=id)
def delete_team_iteration(id, team, organization=None, project=None, detect=None):  # pylint: disable=redefined-builtin
    """ Remove iteration from a team.

    :param id: Identifier of the iteration.
    :type: str
    :param team: Name or ID of the team.
    :type: str
    """
    organization, project = resolve_instance_and_project(
        detect=detect, organization=organization, project=project)
    work_client = get_work_client(organization)
    context = TeamContext(project=project, team=team)
    return work_client.delete_team_iteration(team_context=context, id=id)
def post_team_iteration(id, team, organization=None, project=None, detect=None):  # pylint: disable=redefined-builtin
    """Add iteration to a team.

    :param id: Identifier of the iteration.
    :type: str
    :param team: Name or ID of the team.
    :type: str
    """
    organization, project = resolve_instance_and_project(
        detect=detect, organization=organization, project=project)
    work_client = get_work_client(organization)
    context = TeamContext(project=project, team=team)
    iteration = TeamSettingsIteration(id=id)
    return work_client.post_team_iteration(iteration=iteration, team_context=context)
| 53.935323 | 119 | 0.617102 | 1,122 | 10,841 | 5.702317 | 0.131016 | 0.067521 | 0.067521 | 0.039075 | 0.750234 | 0.716318 | 0.697249 | 0.687402 | 0.677712 | 0.677712 | 0 | 0.00357 | 0.302463 | 10,841 | 200 | 120 | 54.205 | 0.842502 | 0.151001 | 0 | 0.618321 | 0 | 0.007634 | 0.041903 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068702 | false | 0 | 0.045802 | 0 | 0.183206 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
97ccb55e8a7a80cc164488089098bcbe7bd34bb5 | 1,802 | py | Python | sdv/docker/sdvstate/internal/display_report.py | opnfv/cirv-sdv | 31fb310d3fd1c9c1f12cfe0c654870e24f5efab6 | [
"Apache-2.0"
] | 2 | 2021-09-16T06:31:45.000Z | 2022-03-09T19:59:55.000Z | sdv/docker/sdvstate/internal/display_report.py | opnfv/cirv-sdv | 31fb310d3fd1c9c1f12cfe0c654870e24f5efab6 | [
"Apache-2.0"
] | null | null | null | sdv/docker/sdvstate/internal/display_report.py | opnfv/cirv-sdv | 31fb310d3fd1c9c1f12cfe0c654870e24f5efab6 | [
"Apache-2.0"
] | 2 | 2021-05-11T14:41:01.000Z | 2021-05-14T05:59:38.000Z | # Copyright 2020 University Of Delhi.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Display Report
"""
import logging
from datetime import datetime as dt
def display_report(report):
    """Log the final validation report.

    :param report: dict with keys 'installer', 'criteria', 'start_date' and
        'stop_date' ('%Y-%m-%d %H:%M:%S' strings), and 'details' holding
        'pass'/'fail' lists of check names.
    """
    installer = report['installer']
    result = report['criteria']
    start_time = dt.strptime(report['start_date'], '%Y-%m-%d %H:%M:%S')
    stop_time = dt.strptime(report['stop_date'], '%Y-%m-%d %H:%M:%S')
    duration = (stop_time - start_time).total_seconds()
    logger = logging.getLogger(__name__)
    logger.info('')
    logger.info('')
    logger.info('========================================')
    logger.info('')
    logger.info(f' Installer: {installer}')
    logger.info(f' Duration: {duration}')
    logger.info(f' Result: {result}')
    logger.info('')
    logger.info('')
    # Fixed: these two headings had pointless f-string prefixes (no
    # placeholders, flake8 F541); message text is unchanged.
    logger.info(' CHECKS PASSED:')
    logger.info(' =============')
    for case_name in report['details']['pass']:
        logger.info(f' {case_name}')
    logger.info('')
    logger.info('')
    logger.info(' CHECKS FAILED:')
    logger.info(' =============')
    for case_name in report['details']['fail']:
        logger.info(f' {case_name}')
    logger.info('')
    logger.info('========================================')
    logger.info('')
    logger.info('')
| 31.068966 | 74 | 0.613762 | 229 | 1,802 | 4.759825 | 0.441048 | 0.192661 | 0.161468 | 0.201835 | 0.274312 | 0.274312 | 0.274312 | 0.255046 | 0.13945 | 0.089908 | 0 | 0.005457 | 0.186459 | 1,802 | 57 | 75 | 31.614035 | 0.738063 | 0.329079 | 0 | 0.5 | 0 | 0 | 0.275745 | 0.068085 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0.0625 | 0.0625 | 0 | 0.09375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 2 |
8ad829d019343b42c86560e53d7ba243a326e901 | 235 | py | Python | saveggs.py | ov357/gg3 | 512e9dd7445bd8208cc30729a222ac0e1a16a5ad | [
"MIT"
] | null | null | null | saveggs.py | ov357/gg3 | 512e9dd7445bd8208cc30729a222ac0e1a16a5ad | [
"MIT"
] | null | null | null | saveggs.py | ov357/gg3 | 512e9dd7445bd8208cc30729a222ac0e1a16a5ad | [
"MIT"
] | null | null | null | # purpose : save data tp different destinations
# 1. txt file
# 2. mysql Db
# 3. mongo Db
# 4. redis db
# 5. from a structured text tile (1.) can be derived any storage in others DB
def save_txt(data=None):
    """Save the given records to a plain text file (stub, not implemented yet).

    :param data: list of records to persist; defaults to an empty list.
        Fixed: the original used a mutable default argument (``data=[]``),
        which is shared across calls.
    """
    if data is None:
        data = []


# FIXME(review): the original file ended with a nameless ``def ():`` — a
# syntax error that prevented the module from importing at all.  Kept here
# as a comment until the intended storage backend (mysql/mongo/redis per the
# notes above) gets a real name:
# def save_<backend>(): ...
| 19.583333 | 78 | 0.634043 | 39 | 235 | 3.794872 | 0.794872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034682 | 0.26383 | 235 | 11 | 79 | 21.363636 | 0.820809 | 0.719149 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.333333 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 2 |
8addaee452739763657d0fd7290670e8583fe55d | 843 | py | Python | docs/examples/schema_unenforced_constraints.py | jasujm/pydantic | cc1cb4826c74ac5b651ef2d80c3478428a9950ca | [
"MIT"
] | 6 | 2021-08-11T11:37:59.000Z | 2021-11-12T01:33:11.000Z | docs/examples/schema_unenforced_constraints.py | jasujm/pydantic | cc1cb4826c74ac5b651ef2d80c3478428a9950ca | [
"MIT"
] | 197 | 2020-08-31T06:20:39.000Z | 2022-03-29T10:04:22.000Z | docs/examples/schema_unenforced_constraints.py | jasujm/pydantic | cc1cb4826c74ac5b651ef2d80c3478428a9950ca | [
"MIT"
] | 2 | 2021-11-23T16:28:21.000Z | 2021-11-23T16:28:33.000Z | from pydantic import BaseModel, Field, PositiveInt
try:
# this won't work since PositiveInt takes precedence over the
# constraints defined in Field meaning they're ignored
class Model(BaseModel):
foo: PositiveInt = Field(..., lt=10)
except ValueError as e:
print(e)
# but you can set the schema attribute directly:
# (Note: here exclusiveMaximum will not be enforce)
class Model(BaseModel):
foo: PositiveInt = Field(..., exclusiveMaximum=10)
print(Model.schema())
# if you find yourself needing this, an alternative is to declare
# the constraints in Field (or you could use conint())
# here both constraints will be enforced:
class Model(BaseModel):
# Here both constraints will be applied and the schema
# will be generated correctly
foo: int = Field(..., gt=0, lt=10)
print(Model.schema())
| 27.193548 | 65 | 0.716489 | 117 | 843 | 5.162393 | 0.589744 | 0.049669 | 0.094371 | 0.072848 | 0.208609 | 0.125828 | 0 | 0 | 0 | 0 | 0 | 0.010309 | 0.194543 | 843 | 30 | 66 | 28.1 | 0.879234 | 0.530249 | 0 | 0.416667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.083333 | 0 | 0.583333 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
8ae152e48dca5109d41d1d51681d99c7dc6def3a | 778 | py | Python | lectures/L7/test_roots.py | JasmineeeeeTONG/CS207_coursework | 666239ee5f8bd7cbe04725a52870191a3d40d8c2 | [
"MIT"
] | null | null | null | lectures/L7/test_roots.py | JasmineeeeeTONG/CS207_coursework | 666239ee5f8bd7cbe04725a52870191a3d40d8c2 | [
"MIT"
] | null | null | null | lectures/L7/test_roots.py | JasmineeeeeTONG/CS207_coursework | 666239ee5f8bd7cbe04725a52870191a3d40d8c2 | [
"MIT"
] | null | null | null | import roots
def test_quadroots_result():
    """x**2 + x - 12 factors as (x - 3)(x + 4): roots 3 and -4."""
    expected = ((3 + 0j), (-4 + 0j))
    assert roots.quad_roots(1.0, 1.0, -12.0) == expected
def test_quadroots_types():
    """quad_roots must raise TypeError for non-numeric coefficients."""
    try:
        roots.quad_roots("", "green", "hi")
    except TypeError as err:
        assert(type(err) == TypeError)
    else:
        # Fixed: the original silently passed when no exception was raised.
        assert False, "TypeError was not raised"
def test_quadroots_zerocoeff():
    """quad_roots must reject a zero quadratic coefficient with ValueError."""
    try:
        roots.quad_roots(a=0.0)
    except ValueError as err:
        assert(type(err) == ValueError)
    else:
        # Fixed: the original silently passed when no exception was raised.
        assert False, "ValueError was not raised"
def test_linearoots_result():
    """2x - 3 = 0 has the single root x = 1.5."""
    expected_root = 1.5
    assert roots.linear_roots(2.0, -3.0) == expected_root
def test_linearroots_types():
    """linear_roots must raise TypeError for non-numeric coefficients."""
    try:
        roots.linear_roots("ocean", 6.0)
    except TypeError as err:
        assert(type(err) == TypeError)
    else:
        # Fixed: the original silently passed when no exception was raised.
        assert False, "TypeError was not raised"
def test_linearroots_zerocoeff():
    """linear_roots must reject a zero linear coefficient with ValueError."""
    try:
        roots.linear_roots(a=0.0)
    except ValueError as err:
        assert(type(err) == ValueError)
    else:
        # Fixed: the original silently passed when no exception was raised.
        assert False, "ValueError was not raised"
8aeb2dbeffb758e969b18b024f9ee0e784ccff04 | 270 | py | Python | Enumerate.py | viraldii/PracticeBin | 1506c3928acf7f1686522375279076be8d867890 | [
"Apache-2.0"
] | null | null | null | Enumerate.py | viraldii/PracticeBin | 1506c3928acf7f1686522375279076be8d867890 | [
"Apache-2.0"
] | 1 | 2020-01-23T02:08:35.000Z | 2020-01-23T02:08:35.000Z | Enumerate.py | viraldii/PracticeBin | 1506c3928acf7f1686522375279076be8d867890 | [
"Apache-2.0"
] | null | null | null |
#By enumerating with two variables
for i, char in enumerate('enumerate'):
    print(i, char)  # fixed: the original printed the literal 1 instead of the index i

# enumerate accepts any iterable; range needs no list() wrapper
for i, c in enumerate(range(100)):
    print(i, c)
    if c == 50:
        # The f-string formats the index i into the message.
        print(f'Index of 50 is: {i}')
8af40f3b5303c67538d47862d4ea89fbafe5a10c | 275 | py | Python | Contest/Diverta2019-2/b/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | Contest/Diverta2019-2/b/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | Contest/Diverta2019-2/b/main.py | mpses/AtCoder | 9c101fcc0a1394754fcf2385af54b05c30a5ae2a | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
# Input via fd 0: first line holds n alone; the next n lines hold "x y" pairs.
(n,), *z = [[*map(int, o.split())] for o in open(0)]
if n == 1:
    print(1)
    exit()
from itertools import*
from collections import*
# Count every ordered pairwise difference vector between points; the answer
# printed is n minus the highest multiplicity among those vectors.
print(n - Counter((z[i][0] - z[j][0], z[i][1] - z[j][1]) for i, j in permutations(range(n), 2)).most_common()[0][1])
8afadaffb6ff5453ff6ae417e73f1332e3530dcd | 464 | py | Python | opacus/optimizers/utils.py | RQuispeC/opacus | 5c83d59fc169e93667946204f7a6859827a38ace | [
"Apache-2.0"
] | null | null | null | opacus/optimizers/utils.py | RQuispeC/opacus | 5c83d59fc169e93667946204f7a6859827a38ace | [
"Apache-2.0"
] | null | null | null | opacus/optimizers/utils.py | RQuispeC/opacus | 5c83d59fc169e93667946204f7a6859827a38ace | [
"Apache-2.0"
] | null | null | null | from typing import List
import torch.nn as nn
from torch.optim import Optimizer
def params(optimizer: Optimizer) -> List[nn.Parameter]:
    """
    Return all parameters controlled by the optimizer

    Args:
        optimizer: optimizer

    Returns:
        Flat list of parameters from all ``param_groups``
    """
    return [
        p
        for param_group in optimizer.param_groups
        for p in param_group["params"]
        if p.requires_grad
    ]
| 23.2 | 68 | 0.670259 | 62 | 464 | 4.935484 | 0.516129 | 0.117647 | 0.091503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.247845 | 464 | 19 | 69 | 24.421053 | 0.876791 | 0.310345 | 0 | 0 | 0 | 0 | 0.020833 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.375 | 0 | 0.625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
c1038aed1bd58ceaa917168c946c267ceee969b9 | 161 | py | Python | test/login.py | zhangrule/info | 567b71b04d1126703901ad8c128fa61c580ea833 | [
"MIT"
] | null | null | null | test/login.py | zhangrule/info | 567b71b04d1126703901ad8c128fa61c580ea833 | [
"MIT"
] | null | null | null | test/login.py | zhangrule/info | 567b71b04d1126703901ad8c128fa61c580ea833 | [
"MIT"
] | null | null | null | 分支合并dev
change agin zhangsan
num5 = 500
change zhangsan
changge master
pay function
num4 = 400
num3 = 300
write code
num1 = 100
num2 = 200
num3 = 300
| 6.708333 | 20 | 0.708075 | 24 | 161 | 4.75 | 0.833333 | 0.122807 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.2 | 0.254658 | 161 | 23 | 21 | 7 | 0.75 | 0 | 0 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
c10c209d8eefa006712f9ef0f96331b9b5940208 | 328 | py | Python | Leetcode/0235. Lowest Common Ancestor of a Binary Search Tree.py | luckyrabbit85/Python | ed134fd70b4a7b84b183b87b85ad5190f54c9526 | [
"MIT"
] | 1 | 2021-07-15T18:40:26.000Z | 2021-07-15T18:40:26.000Z | Leetcode/0235. Lowest Common Ancestor of a Binary Search Tree.py | luckyrabbit85/Python | ed134fd70b4a7b84b183b87b85ad5190f54c9526 | [
"MIT"
] | null | null | null | Leetcode/0235. Lowest Common Ancestor of a Binary Search Tree.py | luckyrabbit85/Python | ed134fd70b4a7b84b183b87b85ad5190f54c9526 | [
"MIT"
] | null | null | null | class Solution:
def lowestCommonAncestor(self, root, p, q):
curr = root
while curr:
if p.val > curr.val and q.val > curr.val:
curr = curr.right
elif p.val < curr.val and q.val < curr.val:
curr = curr.left
else:
return curr
| 27.333333 | 55 | 0.481707 | 41 | 328 | 3.853659 | 0.439024 | 0.265823 | 0.253165 | 0.139241 | 0.417722 | 0.417722 | 0.417722 | 0.417722 | 0.417722 | 0.417722 | 0 | 0 | 0.432927 | 328 | 11 | 56 | 29.818182 | 0.849462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
c1156e62e10fcfa44cd8ab625507ebd8dcbb9c3b | 1,174 | py | Python | src/graphics/font.py | JacobLondon/pyngine | 3a87347dfd23f86708f27259126cddc43db3fa4b | [
"MIT"
] | null | null | null | src/graphics/font.py | JacobLondon/pyngine | 3a87347dfd23f86708f27259126cddc43db3fa4b | [
"MIT"
] | null | null | null | src/graphics/font.py | JacobLondon/pyngine | 3a87347dfd23f86708f27259126cddc43db3fa4b | [
"MIT"
] | null | null | null | import pygame
class Font(object):
    """Manage a named font at several point sizes, plus user-defined entries.

    Built-in sizes 'small', 'standard' and 'large' are created up front;
    extra fonts can be registered later via item assignment.
    """

    def __init__(self, font, scale):
        pygame.font.init()
        # Font family name and the screen-width / grid-count ratio used to
        # scale point sizes.
        self.name = font
        self.scale = scale
        # Registry of fonts keyed by a human-readable size name.
        self.set = {}
        self.set['small'] = self.named_font(10)
        self.set['standard'] = self.named_font(20)
        self.set['large'] = self.named_font(40)

    def __setitem__(self, key, val):
        """Register a user-defined font under *key*."""
        self.set[key] = val

    def __getitem__(self, key):
        """Look up a previously registered font by name."""
        return self.set[key]

    def named_font(self, point):
        """Build a font of this instance's family at *point*, scaled."""
        return Font.create(self.name, point * self.scale)

    @staticmethod
    def create(font_name, point):
        """Create a pygame system font of the given family and point size."""
        return pygame.font.SysFont(font_name, int(point))
| 27.302326 | 81 | 0.584327 | 154 | 1,174 | 4.337662 | 0.409091 | 0.062874 | 0.058383 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007344 | 0.304089 | 1,174 | 42 | 82 | 27.952381 | 0.810282 | 0.351789 | 0 | 0 | 0 | 0 | 0.02459 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.263158 | false | 0 | 0.052632 | 0 | 0.526316 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
c119892859d82d6da7dbee6b2a8b3a8efb28c7eb | 2,418 | py | Python | backend/api/views/import_export.py | skaghzz/doccano | 6b370028d414652fd1e141dda53beb3ebc318247 | [
"MIT"
] | 3,989 | 2019-12-01T22:34:26.000Z | 2022-03-31T23:21:50.000Z | backend/api/views/import_export.py | FDlucifer/doccano | 34a8d83347235a2dd2d63149962a41b0229bb98d | [
"MIT"
] | 986 | 2019-12-02T06:31:29.000Z | 2022-03-31T16:38:59.000Z | backend/api/views/import_export.py | FDlucifer/doccano | 34a8d83347235a2dd2d63149962a41b0229bb98d | [
"MIT"
] | 993 | 2019-12-02T17:25:42.000Z | 2022-03-31T10:05:07.000Z | from django.conf import settings
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
class Features(APIView):
    """Report which optional features are enabled on this deployment."""

    permission_classes = (IsAuthenticated,)

    def get(self, request, *args, **kwargs):
        # Cloud upload is available iff a libcloud provider is configured.
        cloud_upload_enabled = bool(settings.CLOUD_BROWSER_APACHE_LIBCLOUD_PROVIDER)
        return Response({'cloud_upload': cloud_upload_enabled})
# class CloudUploadAPI(APIView):
# permission_classes = TextUploadAPI.permission_classes
#
# def get(self, request, *args, **kwargs):
# try:
# project_id = request.query_params['project_id']
# file_format = request.query_params['upload_format']
# cloud_container = request.query_params['container']
# cloud_object = request.query_params['object']
# except KeyError as ex:
# raise ValidationError('query parameter {} is missing'.format(ex))
#
# try:
# cloud_file = self.get_cloud_object_as_io(cloud_container, cloud_object)
# except ContainerDoesNotExistError:
# raise ValidationError('cloud container {} does not exist'.format(cloud_container))
# except ObjectDoesNotExistError:
# raise ValidationError('cloud object {} does not exist'.format(cloud_object))
#
# TextUploadAPI.save_file(
# user=request.user,
# file=cloud_file,
# file_format=file_format,
# project_id=project_id,
# )
#
# next_url = request.query_params.get('next')
#
# if next_url == 'about:blank':
# return Response(data='', content_type='text/plain', status=status.HTTP_201_CREATED)
#
# if next_url:
# return redirect(next_url)
#
# return Response(status=status.HTTP_201_CREATED)
#
# @classmethod
# def get_cloud_object_as_io(cls, container_name, object_name):
# provider = settings.CLOUD_BROWSER_APACHE_LIBCLOUD_PROVIDER.lower()
# account = settings.CLOUD_BROWSER_APACHE_LIBCLOUD_ACCOUNT
# key = settings.CLOUD_BROWSER_APACHE_LIBCLOUD_SECRET_KEY
#
# driver = get_driver(DriverType.STORAGE, provider)
# client = driver(account, key)
#
# cloud_container = client.get_container(container_name)
# cloud_object = cloud_container.get_object(object_name)
#
# return iterable_to_io(cloud_object.as_stream())
| 37.2 | 97 | 0.667907 | 259 | 2,418 | 5.938224 | 0.335907 | 0.057217 | 0.058518 | 0.06762 | 0.221066 | 0.089727 | 0 | 0 | 0 | 0 | 0 | 0.00324 | 0.234078 | 2,418 | 64 | 98 | 37.78125 | 0.827214 | 0.788255 | 0 | 0 | 0 | 0 | 0.025974 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.4 | 0.1 | 0.8 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
c122407f74bca18ed3a4dd9fdc8afa5df861b808 | 302 | py | Python | server/python/download_tower_data.py | saru-d2/partnet_anno_system | 8e151e7bf882f3812027fad56cce2aef0db1fb56 | [
"MIT"
] | null | null | null | server/python/download_tower_data.py | saru-d2/partnet_anno_system | 8e151e7bf882f3812027fad56cce2aef0db1fb56 | [
"MIT"
] | null | null | null | server/python/download_tower_data.py | saru-d2/partnet_anno_system | 8e151e7bf882f3812027fad56cce2aef0db1fb56 | [
"MIT"
] | 1 | 2021-12-14T13:39:30.000Z | 2021-12-14T13:39:30.000Z | import numpy as np
import json
import sys
import os
from subprocess import call
from jio_filestore.io.azure_filestore import ObjectStoreFactory
from .configs import file_store_config
# Build a file-store client from the configured credentials.
fs = ObjectStoreFactory(file_store_config)
# connect to database -- NOTE(review): no database connection is made here;
# this comment looks stale or describes future work.
# list all items in file store
items = fs.list()
#
c128a2d014a27ade154f82857c2526f48e93002c | 279 | py | Python | tests/unit/document/test_namedscore.py | fastflair/docarray | 0bbdbc816b2f4a3b399779f6816875fbc1dfe862 | [
"Apache-2.0"
] | 591 | 2022-01-09T14:39:59.000Z | 2022-03-31T13:19:39.000Z | tests/unit/document/test_namedscore.py | fastflair/docarray | 0bbdbc816b2f4a3b399779f6816875fbc1dfe862 | [
"Apache-2.0"
] | 210 | 2022-01-10T07:59:29.000Z | 2022-03-31T14:49:18.000Z | tests/unit/document/test_namedscore.py | fastflair/docarray | 0bbdbc816b2f4a3b399779f6816875fbc1dfe862 | [
"Apache-2.0"
] | 40 | 2022-01-09T14:52:20.000Z | 2022-03-31T07:59:45.000Z | import pytest
from docarray.score import NamedScore
@pytest.mark.parametrize(
    'init_args', [None, dict(value=123, description='hello'), NamedScore()]
)
@pytest.mark.parametrize('copy', [True, False])
def test_construct_ns(init_args, copy):
    """NamedScore must accept None, a plain dict, or another NamedScore as
    its first argument, with or without copying, without raising."""
    NamedScore(init_args, copy)
| 23.25 | 75 | 0.741935 | 36 | 279 | 5.611111 | 0.638889 | 0.118812 | 0.19802 | 0.306931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012146 | 0.114695 | 279 | 11 | 76 | 25.363636 | 0.805668 | 0 | 0 | 0 | 0 | 0 | 0.064516 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
c13b548451d08206d749e0cf96c9708e4317ba96 | 1,677 | py | Python | test_geo.py | dm937/dm937-and-rem76-PartIA-Flood-Warning-System | 6c91174eb43fd20af0ff35650c26b761eef61af1 | [
"MIT"
] | null | null | null | test_geo.py | dm937/dm937-and-rem76-PartIA-Flood-Warning-System | 6c91174eb43fd20af0ff35650c26b761eef61af1 | [
"MIT"
] | null | null | null | test_geo.py | dm937/dm937-and-rem76-PartIA-Flood-Warning-System | 6c91174eb43fd20af0ff35650c26b761eef61af1 | [
"MIT"
] | null | null | null | from distutils.command import build
from floodsystem.geo import stations_by_distance, stations_within_radius, rivers_by_station_number, rivers_with_station, stations_by_river
from floodsystem.stationdata import build_station_list
AllStations = build_station_list()
def test_call_stations_by_distance():
    """Smoke test: sorting stations by distance completes without raising."""
    cambridge = (52.2053, 0.1218)
    stations_by_distance(AllStations, cambridge)
def test_correct_lengths_stations_by_distance():
    """Sorting by distance must neither add nor drop stations."""
    cambridge = (52.2053, 0.1218)
    assert len(build_station_list()) == len(stations_by_distance(AllStations, cambridge))
    # Special coordinate values must not break the computation
    assert len(AllStations) == len(stations_by_distance(AllStations, (0, 0)))
    assert len(AllStations) == len(stations_by_distance(AllStations, (-90, 90)))
    assert len(AllStations) == len(stations_by_distance(AllStations, (90, -90)))
def test_stations_within_radius():
    """Radius filtering sanity checks around Cambridge city centre."""
    cambridge = (52.2053, 0.1218)
    everything = stations_within_radius(AllStations, cambridge, 10000)
    nearby = stations_within_radius(AllStations, cambridge, 10)
    tiny = stations_within_radius(AllStations, cambridge, 1)
    assert len(everything) == len(AllStations)
    assert len(nearby) > 0
    assert len(tiny) < len(nearby)
def test_rivers_by_station_number():
    """Top-N river list is non-empty, descending by count, and well typed."""
    top_rivers = rivers_by_station_number(AllStations, 10)
    assert len(top_rivers) >= 1
    assert top_rivers[1][1] >= top_rivers[4][1]
    assert type(top_rivers) == list
    assert type(top_rivers[2]) == tuple
def test_rivers_with_station():
    """A real station list yields rivers; an empty list yields none."""
    populated = rivers_with_station(AllStations)
    empty = rivers_with_station([])
    assert len(populated) > 0
    assert len(empty) == 0
def test_stations_with_river():
    """A real station list groups into rivers; an empty list gives nothing."""
    grouped = stations_by_river(AllStations)
    empty = stations_by_river([])
    assert len(grouped) > 0
    assert len(empty) == 0
| 47.914286 | 139 | 0.756708 | 236 | 1,677 | 5.063559 | 0.20339 | 0.090377 | 0.120502 | 0.090377 | 0.501255 | 0.373222 | 0.373222 | 0.373222 | 0.262762 | 0.262762 | 0 | 0.067531 | 0.12582 | 1,677 | 34 | 140 | 49.323529 | 0.747613 | 0.028623 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.555556 | 1 | 0.222222 | false | 0 | 0.111111 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
c15976f9dfb10ad059519280d7da6a524874120c | 542 | py | Python | DappurMake/core/helper.py | DapperX/DappurMake | 48a9559e891890a3b797fdf8f51cc17d6daf56d3 | [
"BSD-2-Clause"
] | null | null | null | DappurMake/core/helper.py | DapperX/DappurMake | 48a9559e891890a3b797fdf8f51cc17d6daf56d3 | [
"BSD-2-Clause"
] | null | null | null | DappurMake/core/helper.py | DapperX/DappurMake | 48a9559e891890a3b797fdf8f51cc17d6daf56d3 | [
"BSD-2-Clause"
] | null | null | null | from .. import core
print(dir(core))
def warn(s):
    """Print *s* to stdout prefixed with a "[Warning] " tag."""
    print("[Warning] ", s)
class decorator:
    """Namespace for reusable function decorators."""

    @staticmethod
    def ensure_instance(cls_):
        """Decorator factory: coerce the wrapped method's first positional
        argument into the class named by *cls_* when it is not already one.

        NOTE(review): the class name is resolved with ``eval`` at call time
        in this module's scope -- never pass untrusted strings here.
        """
        def wrapper(func):
            def inner(self, x, *args, **kwargs):
                # Lazy lookup so the target class may be defined after
                # the decorator is applied.
                target = eval(cls_)
                if not isinstance(x, target):
                    x = target(x)
                return func(self, x, *args, **kwargs)
            return inner
        return wrapper
def wrapper(func, *args, **kwargs):
    """Freeze *func* with the given arguments; calling the returned
    zero-argument thunk invokes ``func(*args, **kwargs)``."""
    def inner():
        return func(*args, **kwargs)
    return inner
def ufset_find(f, u):
    """Union-find root lookup: follow parent links in *f* until a
    self-parented node is reached.

    Fixed: the recursive call previously dropped the *f* argument
    (``ufset_find(f[u])``), raising TypeError for any non-root *u*.
    """
    return u if f[u] == u else ufset_find(f, f[u])
def nothing(*args, **kwargs):
    """No-op placeholder callback: accepts anything, returns None."""
    return None
c15c7702960114045389d43cb3a5d1de46060bf5 | 1,190 | py | Python | wk4_hw/ex4_pexpect.py | philuu12/PYTHON_4_NTWK_ENGRS | ac0126ed687a5201031a6295d0094a536547cb92 | [
"Apache-2.0"
] | 1 | 2016-03-01T14:39:17.000Z | 2016-03-01T14:39:17.000Z | wk4_hw/ex4_pexpect.py | philuu12/PYTHON_4_NTWK_ENGRS | ac0126ed687a5201031a6295d0094a536547cb92 | [
"Apache-2.0"
] | null | null | null | wk4_hw/ex4_pexpect.py | philuu12/PYTHON_4_NTWK_ENGRS | ac0126ed687a5201031a6295d0094a536547cb92 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Assignment 4:
Use Pexpect to change the logging buffer size (logging buffered <size>) on pynet-rtr2. Verify this change by examining the output of 'show run'.
"""
import pexpect
import re
from getpass import getpass
def main():
    """SSH to pynet-rtr2 via pexpect, bump the logging buffer to 25000,
    verify the change in the running config, then restore 20000.

    NOTE: Python 2 syntax (print statements). Credentials are prompted
    interactively via getpass; host/port are hard-coded below.
    """
    ip_addr = '50.76.53.27'
    username = 'pyclass'
    port = 8022
    password = getpass()
    # Spawn a child process
    ssh_conn = pexpect.spawn('ssh -l {} {} -p {}'.format(username, ip_addr, port))
    # Set time out
    ssh_conn.timeout = 3
    ssh_conn.expect('ssword:')
    ssh_conn.sendline(password)
    ssh_conn.expect("#")
    # Set logging buffered to 25000
    ssh_conn.sendline("configure terminal")
    ssh_conn.sendline("logging buffered 25000")
    # Search for pattern "logging buffered 25000" in 'show runn' output
    # NOTE(review): "show running-config" is sent while still in
    # configure-terminal mode — confirm the device accepts it there
    # (classic IOS would need "do show run" or an "end" first).
    pattern = re.compile(r'logging buffered 25000', re.MULTILINE)
    ssh_conn.sendline("show running-config")
    ssh_conn.expect(pattern)
    print "The found matching pattern is:"
    print ssh_conn.after
    # Restoring the setting back to 20000
    ssh_conn.sendline("configure terminal")
    ssh_conn.sendline("logging buffered 20000")
# Entry point: run the exercise only when executed as a script.
if __name__ == "__main__":
    main()
| 25.869565 | 144 | 0.683193 | 162 | 1,190 | 4.882716 | 0.518519 | 0.106195 | 0.11378 | 0.060683 | 0.156764 | 0.156764 | 0.156764 | 0.156764 | 0.156764 | 0.156764 | 0 | 0.04772 | 0.207563 | 1,190 | 45 | 145 | 26.444444 | 0.791092 | 0.157143 | 0 | 0.083333 | 0 | 0 | 0.244284 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.125 | 0.125 | null | null | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 2 |
c16a4cc7b348d4eafede0cd48cb9313a66c1e1ca | 312 | py | Python | jobboardscraper/organizations/views.py | dillonko/jobboardscraper | 21d662c3bad2e054c3b43f648e5b276339ee1d32 | [
"BSD-3-Clause"
] | null | null | null | jobboardscraper/organizations/views.py | dillonko/jobboardscraper | 21d662c3bad2e054c3b43f648e5b276339ee1d32 | [
"BSD-3-Clause"
] | 5 | 2020-12-09T20:45:28.000Z | 2021-12-14T10:38:50.000Z | jobboardscraper/organizations/views.py | dillonko/jobboardscraper | 21d662c3bad2e054c3b43f648e5b276339ee1d32 | [
"BSD-3-Clause"
] | null | null | null | from django.views.generic import DetailView, ListView
from pure_pagination.mixins import PaginationMixin
from .models import Organization
class OrganizationDetailView(DetailView):
    """Render a single Organization selected by pk from the URL."""
    model = Organization
class OrganizationListView(PaginationMixin, ListView):
    """Paginated list of Organizations, 50 per page."""
    model = Organization
    paginate_by = 50
| 20.8 | 54 | 0.807692 | 31 | 312 | 8.064516 | 0.645161 | 0.136 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007491 | 0.144231 | 312 | 14 | 55 | 22.285714 | 0.928839 | 0 | 0 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.375 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
c1745b638a3ad821606c6fe480d0b1ef0d53d22f | 2,514 | py | Python | tests/unit/test_urls.py | vCra/Programdom | 23d734409102917734e67d233768b494904a5cef | [
"MIT"
] | 1 | 2021-06-14T01:08:55.000Z | 2021-06-14T01:08:55.000Z | tests/unit/test_urls.py | vCra/Programdom | 23d734409102917734e67d233768b494904a5cef | [
"MIT"
] | 10 | 2019-01-28T16:03:12.000Z | 2019-01-28T16:09:48.000Z | tests/unit/test_urls.py | vCra/Programdom | 23d734409102917734e67d233768b494904a5cef | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.urls import reverse, resolve
class TestURLs(TestCase):
    """Round-trip every named URL: reverse() it, then check that
    resolve() maps the generated path back to the same view name.

    Refactor: the fifteen copy-pasted method bodies are collapsed into
    one private helper; each test method keeps its original name.
    """

    def _check_round_trip(self, view_name, **kwargs):
        # Shared helper: reverse the name, resolve the path, compare.
        path = reverse(view_name, kwargs=kwargs or None)
        assert resolve(path).view_name == view_name

    # Workshop URLs
    def test_workshop_auth_url(self):
        self._check_round_trip("workshop_auth")

    def test_workshop_list_url(self):
        self._check_round_trip("workshop_list")

    def test_workshop_new_url(self):
        self._check_round_trip("workshop_new")

    def test_workshop_detail_url(self):
        self._check_round_trip("workshop_detail", pk=1)

    def test_workshop_student_waiting_url(self):
        self._check_round_trip("workshop_student_waiting", pk=1)

    def test_workshop_present_url(self):
        self._check_round_trip("workshop_present", pk=1)

    def test_workshop_problems_url(self):
        self._check_round_trip("workshop_problems", pk=1)

    # Problem URLs
    def test_problem_list_url(self):
        self._check_round_trip("problem_list")

    def test_problem_new_url(self):
        # URL name is "problem_create" although the test says "new".
        self._check_round_trip("problem_create")

    def test_problem_detail_url(self):
        self._check_round_trip("problem_detail", pk=1)

    def test_problem_delete_url(self):
        self._check_round_trip("problem_delete", pk=1)

    def test_problem_student_url(self):
        self._check_round_trip("problem_student", pk=1)

    def test_problem_test_new_url(self):
        self._check_round_trip("problem_test_new", pk=1)

    def test_problem_test_update_url(self):
        self._check_round_trip("problem_test_update", pk=1, tc_pk=1)

    def test_problem_test_delete_url(self):
        self._check_round_trip("problem_test_delete", pk=1, tc_pk=1)
| 35.408451 | 75 | 0.675815 | 321 | 2,514 | 4.968847 | 0.105919 | 0.065831 | 0.103448 | 0.169279 | 0.660815 | 0.485266 | 0.280878 | 0.280878 | 0.280878 | 0.062696 | 0 | 0.00597 | 0.200477 | 2,514 | 70 | 76 | 35.914286 | 0.787562 | 0.010342 | 0 | 0 | 0 | 0 | 0.199678 | 0.019324 | 0 | 0 | 0 | 0 | 0.3125 | 1 | 0.3125 | false | 0 | 0.041667 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
c17ab9292db114acd6cfc1d8b109742a1c360eeb | 542 | py | Python | purchases_storage/source/database/methods/users_methods.py | icYFTL/RTULAB_Service | a16d0fc2ac9ac103f0a14e90824caded7156bf11 | [
"Apache-2.0"
] | null | null | null | purchases_storage/source/database/methods/users_methods.py | icYFTL/RTULAB_Service | a16d0fc2ac9ac103f0a14e90824caded7156bf11 | [
"Apache-2.0"
] | null | null | null | purchases_storage/source/database/methods/users_methods.py | icYFTL/RTULAB_Service | a16d0fc2ac9ac103f0a14e90824caded7156bf11 | [
"Apache-2.0"
] | null | null | null | from ..models import User
from .. import Session
class UsersMethods:
    """CRUD helpers for User rows on a dedicated SQLAlchemy session."""

    def __init__(self):
        # Private session (name-mangled to _UsersMethods__session).
        self.__session = Session()

    def add_user(self, user: User) -> User:
        """Persist *user*, commit, and return it."""
        session = self.__session
        session.add(user)
        session.commit()
        return user

    def get_user(self, **kwargs) -> User:
        """Return the first User matching the filter kwargs, or None."""
        matches = list(self.__session.query(User).filter_by(**kwargs))
        if not matches:
            return None
        return matches[0]

    def remove_user(self, user: User) -> None:
        """Delete *user* and commit."""
        session = self.__session
        session.delete(user)
        session.commit()
| 24.636364 | 76 | 0.629151 | 70 | 542 | 4.585714 | 0.4 | 0.205607 | 0.140187 | 0.099688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002469 | 0.252768 | 542 | 21 | 77 | 25.809524 | 0.790123 | 0 | 0 | 0.133333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.266667 | false | 0 | 0.133333 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
c17eef0234888415ecfe23673ab7bc88d0c6c1fd | 663 | py | Python | fastecdsa/tests/test_asn1.py | olalonde/fastecdsa | bb650642421fb6d2bf70929ef846ad55f184ac6e | [
"Unlicense"
] | 1 | 2020-01-29T07:18:31.000Z | 2020-01-29T07:18:31.000Z | fastecdsa/tests/test_asn1.py | olalonde/fastecdsa | bb650642421fb6d2bf70929ef846ad55f184ac6e | [
"Unlicense"
] | null | null | null | fastecdsa/tests/test_asn1.py | olalonde/fastecdsa | bb650642421fb6d2bf70929ef846ad55f184ac6e | [
"Unlicense"
] | 1 | 2020-05-28T09:28:40.000Z | 2020-05-28T09:28:40.000Z | from os import remove
from unittest import TestCase
from ..curve import P256
from ..keys import export_key, import_key, gen_keypair
class TestAsn1(TestCase):
    def test_generate_and_parse_pem(self):
        """Round-trip a P256 keypair through PEM export/import.

        Fix: each key file is registered with addCleanup immediately
        after it is written, so the files are removed even when an
        assertion fails (previously a failure leaked p256.key/p256.pub
        into the working directory).
        """
        d, Q = gen_keypair(P256)
        export_key(d, curve=P256, filepath='p256.key')
        self.addCleanup(remove, 'p256.key')
        export_key(Q, curve=P256, filepath='p256.pub')
        self.addCleanup(remove, 'p256.pub')

        # Private key file restores both the scalar and the public point.
        parsed_d, parsed_Q = import_key('p256.key')
        self.assertEqual(parsed_d, d)
        self.assertEqual(parsed_Q, Q)

        # Public key file yields no private scalar.
        parsed_d, parsed_Q = import_key('p256.pub')
        self.assertIsNone(parsed_d)
        self.assertEqual(parsed_Q, Q)
c17f201d4546c262fe99fe1483c742dd30726f31 | 265 | py | Python | methods/abstract_method.py | tinagu945/DeepGMM | e73adc41ea915582159911d7f690283deee70a7d | [
"MIT"
] | 23 | 2019-07-25T05:28:28.000Z | 2022-03-27T13:35:10.000Z | DeepGMM/methods/abstract_method.py | edbakhitov/boostIV | ffbf105a72297146d8ccb1916722e35e8d3bc8a4 | [
"MIT"
] | 2 | 2020-10-19T19:56:33.000Z | 2021-08-25T16:11:49.000Z | DeepGMM/methods/abstract_method.py | edbakhitov/boostIV | ffbf105a72297146d8ccb1916722e35e8d3bc8a4 | [
"MIT"
] | 10 | 2020-12-05T17:12:49.000Z | 2022-01-10T23:42:37.000Z | __author__ = 'awbennett'
class AbstractMethod(object):
def __init__(self):
pass
def fit(self, x_train, z_train, y_train, x_dev, z_dev, y_dev):
raise NotImplementedError()
def predict(self, x_test):
raise NotImplementedError()
| 22.083333 | 66 | 0.667925 | 33 | 265 | 4.909091 | 0.575758 | 0.061728 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.233962 | 265 | 11 | 67 | 24.090909 | 0.79803 | 0 | 0 | 0.25 | 0 | 0 | 0.033962 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.375 | false | 0.125 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 2 |
c1832fbebde82534c72370bca566505203355fad | 250 | py | Python | build_flags.py | haimgel/hrv_controller | 55413f72578b9fc9a48ce06bdc88038207999e68 | [
"MIT"
] | null | null | null | build_flags.py | haimgel/hrv_controller | 55413f72578b9fc9a48ce06bdc88038207999e68 | [
"MIT"
] | null | null | null | build_flags.py | haimgel/hrv_controller | 55413f72578b9fc9a48ce06bdc88038207999e68 | [
"MIT"
] | 1 | 2021-04-01T22:01:55.000Z | 2021-04-01T22:01:55.000Z | import subprocess
Import("env")
gitref = subprocess.run(['git', 'rev-parse', '--short=8', 'HEAD'], stdout=subprocess.PIPE).stdout.decode('utf-8').strip()
print("Building gitref: ", gitref)
env.Append(CPPDEFINES=[('PIO_SRC_REV', f'\\"{gitref}\\"')])
| 35.714286 | 121 | 0.672 | 33 | 250 | 5.030303 | 0.69697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008584 | 0.068 | 250 | 6 | 122 | 41.666667 | 0.703863 | 0 | 0 | 0 | 0 | 0 | 0.3 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.4 | 0 | 0.4 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 2 |
c1842c45a50591f0ced92a862c8afb23b1839f2d | 496 | py | Python | tests/test_utils/views.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/test_utils/views.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/test_utils/views.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.template import Context, Template
from .models import Person
def get_person(request, pk):
    """Return the matching Person's name in a plain response; 404 if absent."""
    found = get_object_or_404(Person, pk=pk)
    return HttpResponse(found.name)
def no_template_used(request):
    """Render a response from an inline Template (no template file)."""
    inline = Template("This is a string-based template")
    return HttpResponse(inline.render(Context({})))
def empty_response(request):
    """Return a 200 response with a deliberately empty body."""
    return HttpResponse('')
| 24.8 | 59 | 0.739919 | 64 | 496 | 5.578125 | 0.453125 | 0.084034 | 0.061625 | 0.078431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01467 | 0.175403 | 496 | 19 | 60 | 26.105263 | 0.858191 | 0 | 0 | 0 | 0 | 0 | 0.06499 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.333333 | 0.083333 | 0.833333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 2 |
c19ea6fdc224c4d489d16907a82e6093f491cd53 | 1,336 | py | Python | gameevents/db_create.py | danilovbarbosa/sg-gameevents | 839de019fb6026c5a9d3178d3109cb2948f19f09 | [
"Apache-2.0"
] | null | null | null | gameevents/db_create.py | danilovbarbosa/sg-gameevents | 839de019fb6026c5a9d3178d3109cb2948f19f09 | [
"Apache-2.0"
] | null | null | null | gameevents/db_create.py | danilovbarbosa/sg-gameevents | 839de019fb6026c5a9d3178d3109cb2948f19f09 | [
"Apache-2.0"
] | 1 | 2018-09-28T00:03:29.000Z | 2018-09-28T00:03:29.000Z | '''
Created on 15 Oct 2015
@author: mbrandaoca
'''
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from gameevents_app import db, create_app
import os.path
import sys
# One-shot bootstrap script: create all tables and seed an administrator
# client with a randomly generated API key.
app = create_app()
with app.app_context():
    db.create_all()
    #Add the admin user
    from gameevents_app.models.client import Client
    #Generate random password
    # NOTE(review): `random.choice` is not cryptographically secure;
    # consider the `secrets` module for credential generation.
    from random import choice
    import string
    chars = string.ascii_letters + string.digits
    length = 16
    randompass = ''.join(choice(chars) for _ in range(length))
    admin = Client('administrator', randompass, "admin")
    db.session.add(admin)
    try:
        db.session.commit()
        sys.stdout.write("Created administrator client: %s, with random apikey %s \n" % (admin.clientid, randompass) )
    except Exception as e:
        # Bug fix: write() requires a string; passing the exception
        # object raised TypeError and masked the original failure.
        sys.stdout.write(str(e))
        db.session.rollback()
        db.session.flush() # for resetting non-commited .add()

#
# if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
#     api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
#     api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
# else:
#     api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))
c1ae71d018819b93a4ff0e36af2c3f5e9a087889 | 1,052 | py | Python | webhooksocket/models.py | ToxicFrazzles/django-webhooksocket | 4ce5bb4e66052ec419c32dc7e281c16a69a7a2a3 | [
"MIT"
] | null | null | null | webhooksocket/models.py | ToxicFrazzles/django-webhooksocket | 4ce5bb4e66052ec419c32dc7e281c16a69a7a2a3 | [
"MIT"
] | null | null | null | webhooksocket/models.py | ToxicFrazzles/django-webhooksocket | 4ce5bb4e66052ec419c32dc7e281c16a69a7a2a3 | [
"MIT"
] | null | null | null | from django.db import models
from django.urls import reverse
import secrets
def random_ident():
    """Return a 64-character random alphanumeric identifier."""
    alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
    return "".join(secrets.choice(alphabet) for _ in range(64))
class Bridge(models.Model):
    """Pairs a unique webhook endpoint with a unique websocket endpoint.

    Both identifier fields default to a fresh 64-character random token
    (see random_ident) and are indexed for fast URL lookups.
    """
    # Human-readable label for the bridge.
    name = models.CharField(max_length=60)
    hook_ident = models.CharField(
        verbose_name="Webhook Unique Identifier",
        max_length=64, unique=True,
        db_index=True, default=random_ident
    )
    socket_ident = models.CharField(
        verbose_name="Websocket Unique Identifier",
        max_length=64, unique=True,
        db_index=True, default=random_ident
    )
    description = models.CharField(max_length=1024, default="", blank=True)

    def hook_url(self):
        """URL path for posting webhooks to this bridge."""
        return reverse('webhooksocket:hooks', kwargs={
            "ident": self.hook_ident
        })

    def socket_url(self):
        """URL path for the websocket side of this bridge."""
        return reverse('webhooksocket:sockets', kwargs={
            "ident": self.socket_ident
        })

    def __str__(self):
        return self.name
| 28.432432 | 81 | 0.676806 | 118 | 1,052 | 5.855932 | 0.415254 | 0.086831 | 0.052098 | 0.069465 | 0.376266 | 0.191028 | 0.191028 | 0.191028 | 0.191028 | 0.191028 | 0 | 0.026961 | 0.224335 | 1,052 | 36 | 82 | 29.222222 | 0.819853 | 0 | 0 | 0.206897 | 0 | 0 | 0.155894 | 0.078897 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0 | 0.103448 | 0.103448 | 0.551724 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 2 |
c1b104d5122f290eca8d7a13f720b4d5373a0f92 | 246 | py | Python | BasicPythonPrograms/pythonExe6.py | Pushkar745/PythonProgramming | ea60e97b70d46fb63ef203913c8b3f9570232dd3 | [
"Apache-2.0"
] | null | null | null | BasicPythonPrograms/pythonExe6.py | Pushkar745/PythonProgramming | ea60e97b70d46fb63ef203913c8b3f9570232dd3 | [
"Apache-2.0"
] | null | null | null | BasicPythonPrograms/pythonExe6.py | Pushkar745/PythonProgramming | ea60e97b70d46fb63ef203913c8b3f9570232dd3 | [
"Apache-2.0"
] | null | null | null | def findDivisible(numberList):
print("Given list is ",numberList)
print("Divisible by 5 in a list")
for num in numberList:
if(num%5==0):
print(num)
# Demo run with a sample list.
numberList=[10,55,21,26,55]
findDivisible(numberList)
c1b7fc1ccd9560f64c999ad55ca20770aa634007 | 235 | py | Python | miyu_bot/bot/aliases/event.py | sigonasr2/miyu-bot | 4f28a62b6f3657e19b8b569a60745261b0ddc2f6 | [
"MIT"
] | 11 | 2021-01-29T07:16:11.000Z | 2022-02-25T01:40:22.000Z | miyu_bot/bot/aliases/event.py | sigonasr2/miyu-bot | 4f28a62b6f3657e19b8b569a60745261b0ddc2f6 | [
"MIT"
] | null | null | null | miyu_bot/bot/aliases/event.py | sigonasr2/miyu-bot | 4f28a62b6f3657e19b8b569a60745261b0ddc2f6 | [
"MIT"
] | 3 | 2021-02-13T02:06:39.000Z | 2021-05-08T18:43:12.000Z | event_aliases = {
'halloween 2020': 1,
'candy': 2,
'swimsuits 2020': 3,
'maids': 5,
'christmas 2020': 6,
'countdown': 7,
'monster hunter pt1': 9,
'mh1': 9,
'monster hunter pt2': 10,
'mh2': 10,
}
| 18.076923 | 29 | 0.514894 | 29 | 235 | 4.137931 | 0.793103 | 0.216667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.170732 | 0.302128 | 235 | 12 | 30 | 19.583333 | 0.560976 | 0 | 0 | 0 | 0 | 0 | 0.438298 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
c1c1e3a0ef467be115f98449ca0bb2a415a28910 | 56 | py | Python | opensfm/commands/__init__.py | YangJae96/KMU_Visual-SLAM | 8fbbcdda58a5b349a344b580d22d13ab69ff53cc | [
"BSD-2-Clause"
] | null | null | null | opensfm/commands/__init__.py | YangJae96/KMU_Visual-SLAM | 8fbbcdda58a5b349a344b580d22d13ab69ff53cc | [
"BSD-2-Clause"
] | 2 | 2021-06-08T21:03:06.000Z | 2022-01-13T02:22:43.000Z | opensfm/commands/__init__.py | YangJae96/KMU_Visual-SLAM | 8fbbcdda58a5b349a344b580d22d13ab69ff53cc | [
"BSD-2-Clause"
] | null | null | null | from . import slam
opensfm_commands = [
slam,
]
| 7 | 21 | 0.607143 | 6 | 56 | 5.5 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.303571 | 56 | 7 | 22 | 8 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
c1d55cf741c8d506522eb5f4eb906401cf32924f | 181 | py | Python | Emmanuel ANENE/Phase 1/Python Basic 1/Day10/task4.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | Emmanuel ANENE/Phase 1/Python Basic 1/Day10/task4.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | Emmanuel ANENE/Phase 1/Python Basic 1/Day10/task4.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | '''
Write a Python program to count the number occurrence of a specific character in a string.
'''
data = input("Enter a long sentence: ")
datas = data[4]
print(data.count(datas)) | 22.625 | 90 | 0.718232 | 29 | 181 | 4.482759 | 0.758621 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006623 | 0.165746 | 181 | 8 | 91 | 22.625 | 0.854305 | 0.497238 | 0 | 0 | 0 | 0 | 0.27381 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
c1df36b140d279c8ae3be1af1c3040b04eff6d14 | 907 | py | Python | numba/__init__.py | aseyboldt/numba | 0c0a8aa7a7cad8b5f0a5383101f5cca5a4a89df3 | [
"BSD-2-Clause"
] | null | null | null | numba/__init__.py | aseyboldt/numba | 0c0a8aa7a7cad8b5f0a5383101f5cca5a4a89df3 | [
"BSD-2-Clause"
] | null | null | null | numba/__init__.py | aseyboldt/numba | 0c0a8aa7a7cad8b5f0a5383101f5cca5a4a89df3 | [
"BSD-2-Clause"
] | null | null | null | import sys
import logging
# NOTE: Be sure to keep the logging level commented out before commiting. See:
# https://github.com/numba/numba/issues/31
# A good work around is to make your tests handle a debug flag, per
# numba.tests.test_support.main().
logging.basicConfig(level=logging.DEBUG,
format="\n\033[1m%(levelname)s -- %(module)s:%(lineno)d:%(funcName)s\033[0m\n%(message)s")
try:
    from . import minivect
except ImportError:
    # Bug fix: logging.error() returns None, so the previous
    # print(logging.error(...)) logged the message and then printed a
    # stray "None" to stdout. Log directly instead.
    logging.error("Did you forget to update submodule minivect?")
    logging.error("Run 'git submodule init' followed by 'git submodule update'")
    raise
from . import _numba_types
from ._numba_types import *
from . import decorators
from .decorators import *
def test():
    """Run the full numba test suite in a subprocess of this interpreter."""
    from subprocess import check_call
    check_call([sys.executable, '-m', 'numba.tests.test_all'])
__all__ = _numba_types.__all__ + decorators.__all__
| 30.233333 | 110 | 0.719956 | 130 | 907 | 4.853846 | 0.584615 | 0.047544 | 0.044374 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013193 | 0.164278 | 907 | 29 | 111 | 31.275862 | 0.819261 | 0.241455 | 0 | 0 | 0 | 0.055556 | 0.300146 | 0.111274 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.5 | 0 | 0.555556 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
de0427e9b7e8f6ff0fbe72ec9f9484d330a4901e | 690 | py | Python | Server/Flask/FlaskBoilerplate/application/controllers.py | dltech-xyz/PythonSkillTree | 88fad516b22811205a49b6438d48a6535e8a5441 | [
"Apache-2.0"
] | 26 | 2016-07-11T00:54:39.000Z | 2022-01-11T13:41:47.000Z | Server/Flask/FlaskBoilerplate/application/controllers.py | w4n9H/PythonSkillTree | 88fad516b22811205a49b6438d48a6535e8a5441 | [
"Apache-2.0"
] | null | null | null | Server/Flask/FlaskBoilerplate/application/controllers.py | w4n9H/PythonSkillTree | 88fad516b22811205a49b6438d48a6535e8a5441 | [
"Apache-2.0"
] | 7 | 2016-07-14T08:02:37.000Z | 2020-06-28T15:27:21.000Z | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: mango
@contact: w4n9@sina.com
@create: 16/7/1
hail hydra!
"""
__author__ = "mango"
__version__ = "0.1"
import os
from flask import Flask, request, Response
from flask import render_template, url_for, redirect, send_from_directory
from flask import send_file, make_response, abort
from application import app
# routing for API endpoints, generated from the models designated as API_MODELS
# from application.core import api_manager
# from application.models import *
# routing for basic pages (pass routing onto the Angular app)
@app.route('/', methods=['GET'])
def show_entries():
    """Serve the front-end entry page (index.html)."""
    return render_template('index.html')
de05a37043b1a2680445c54636fdbb7f9c92b7a6 | 586 | py | Python | medical/wsgi.py | SarangWadode/medstore | 07cb70661a8cba6f8dd090dfbd589bfacb7bf12a | [
"MIT"
] | 2 | 2021-03-24T13:36:39.000Z | 2022-02-10T13:51:59.000Z | medical/wsgi.py | SarangWadode/medstore | 07cb70661a8cba6f8dd090dfbd589bfacb7bf12a | [
"MIT"
] | 44 | 2021-01-05T01:51:38.000Z | 2022-02-10T13:44:26.000Z | medical/wsgi.py | mukeshgurpude/medstore | 498b76acbeb9727e7a61560e4016b3577c2706d2 | [
"MIT"
] | 1 | 2020-10-28T09:26:01.000Z | 2020-10-28T09:26:01.000Z | """
WSGI config for medical project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from django.contrib.staticfiles.handlers import StaticFilesHandler
# Point Django at the project settings before building the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'medical.settings')

# StaticFileHandler is required or else, it will fail to load css, javascript files when debug set to false
application = StaticFilesHandler(get_wsgi_application())
| 30.842105 | 107 | 0.803754 | 82 | 586 | 5.670732 | 0.731707 | 0.043011 | 0.077419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003846 | 0.112628 | 586 | 18 | 108 | 32.555556 | 0.890385 | 0.546075 | 0 | 0 | 0 | 0 | 0.147287 | 0.085271 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.6 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
de0b8f6a6fb4e9fda9432bb2708ecaea5b6d7ee2 | 105 | py | Python | 48.py | nursyah21/project-euler | af819a561c1ca90794d5851121366855f3914c49 | [
"MIT"
] | null | null | null | 48.py | nursyah21/project-euler | af819a561c1ca90794d5851121366855f3914c49 | [
"MIT"
] | null | null | null | 48.py | nursyah21/project-euler | af819a561c1ca90794d5851121366855f3914c49 | [
"MIT"
] | null | null | null |
# Project Euler 48: last ten digits of 1**1 + 2**2 + ... + 1000**1000.
res = sum(i**i for i in range(1, 1001))
res = str(res)
print(res[len(res)-10:])
| 11.666667 | 24 | 0.609524 | 21 | 105 | 3.047619 | 0.619048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 0.161905 | 105 | 8 | 25 | 13.125 | 0.636364 | 0.161905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.2 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
de0e9857b71b7d273245183aeae665f9ace25fc0 | 1,896 | py | Python | autonetkit/compilers/device/quagga.py | sysbot/autonetkit | eb91ee4cb15cc40b81d8d1a23059c1cddde5540f | [
"BSD-3-Clause"
] | 1 | 2015-11-08T07:26:26.000Z | 2015-11-08T07:26:26.000Z | autonetkit/compilers/device/quagga.py | sysbot/autonetkit | eb91ee4cb15cc40b81d8d1a23059c1cddde5540f | [
"BSD-3-Clause"
] | null | null | null | autonetkit/compilers/device/quagga.py | sysbot/autonetkit | eb91ee4cb15cc40b81d8d1a23059c1cddde5540f | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from autonetkit.compilers.device.router_base import RouterCompiler
from autonetkit.nidb import config_stanza
class QuaggaCompiler(RouterCompiler):
    """Quagga-specific router compiler: extends the generic RouterCompiler
    with loopback-zero setup and OSPF/IS-IS attribute population."""
    # Linux-style loopback alias used for loopback zero.
    lo_interface = 'lo:1'
    def compile(self, node):
        """Delegate compilation to the generic router compiler."""
        super(QuaggaCompiler, self).compile(node)
    def interfaces(self, node):
        """Quagga interface compiler: base interfaces plus loopback zero."""
        #TODO: put this on the router base?
        ipv4_node = self.anm['ipv4'].node(node)
        phy_node = self.anm['phy'].node(node)
        super(QuaggaCompiler, self).interfaces(node)
        # OSPF cost
        # Only layer-3 devices get a loopback-zero interface configured.
        if phy_node.is_l3device():
            node.loopback_zero.id = self.lo_interface
            node.loopback_zero.description = 'Loopback'
            node.loopback_zero.ipv4_address = ipv4_node.loopback
            node.loopback_zero.ipv4_subnet = node.loopback_subnet
    def ospf(self, node):
        """Quagga OSPF compiler: base OSPF plus passive eBGP interfaces."""
        super(QuaggaCompiler, self).ospf(node)
        # add eBGP link subnets
        # eBGP-facing interfaces are made passive and their subnets
        # advertised into OSPF area 0.
        node.ospf.passive_interfaces = []
        for interface in node.physical_interfaces:
            if interface.exclude_igp:
                continue # don't configure IGP for this interface
            bgp_int = self.anm['ebgp_v4'].interface(interface)
            if bgp_int.is_bound: # ebgp interface
                node.ospf.passive_interfaces.append(config_stanza(id=interface.id))
                subnet = bgp_int['ipv4'].subnet
                default_ebgp_area = 0
                node.ospf.ospf_links.append(
                    config_stanza(network=subnet,
                        area=default_ebgp_area))
    def isis(self, node):
        """Copy the IS-IS NET and process id from the isis overlay graph."""
        g_isis = self.anm['isis']
        isis_node = g_isis.node(node)
        node.isis.net = isis_node.net
        node.isis.process_id = isis_node.process_id
| 29.625 | 83 | 0.614451 | 224 | 1,896 | 5.026786 | 0.348214 | 0.063943 | 0.056838 | 0.047957 | 0.049734 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008094 | 0.283228 | 1,896 | 63 | 84 | 30.095238 | 0.820456 | 0.132384 | 0 | 0 | 0 | 0 | 0.021014 | 0 | 0 | 0 | 0 | 0.015873 | 0 | 1 | 0.117647 | false | 0.058824 | 0.058824 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 2 |
de13590944b5a28f7d6570a9b64ac087c03f8fa1 | 1,107 | py | Python | morpheusapi/provision_types.py | tadamhicks/morpheus-python | 5689342091e241099c92afbce07de80ed5f1d986 | [
"MIT"
] | null | null | null | morpheusapi/provision_types.py | tadamhicks/morpheus-python | 5689342091e241099c92afbce07de80ed5f1d986 | [
"MIT"
] | null | null | null | morpheusapi/provision_types.py | tadamhicks/morpheus-python | 5689342091e241099c92afbce07de80ed5f1d986 | [
"MIT"
] | 1 | 2022-01-21T19:05:19.000Z | 2022-01-21T19:05:19.000Z | from morpheusapi.morpheus import Morpheus
import json
from urlparse import urljoin
import posixpath
import requests
import six
class Provision_Type(Morpheus):
    """Wrapper for the morpheus `provision-types` API endpoint.

    A Provision_Type is a Morpheus object and inherits its OAuth
    mechanism; every method here targets the provision-types API.
    """

    def __init__(
        self, baseurl,
        username, password
    ):
        Morpheus.__init__(self, baseurl, username, password)
        # All requests carry the bearer token obtained during the
        # Morpheus OAuth handshake.
        self.headers = {"Authorization": "BEARER " + self.access_token}
        self.endpoint = posixpath.join('api', 'provision-types')

    def get_all(self, id=None):
        """GET every provision type, or a single one when `id` is truthy."""
        if id:
            if not isinstance(id, six.string_types):
                id = str(id)
            path = posixpath.join(self.endpoint, id)
        else:
            path = self.endpoint
        provision_types_url = urljoin(self.baseurl, path)
        response = requests.get(provision_types_url, headers=self.headers)
        return response.json()
| 24.6 | 71 | 0.631436 | 121 | 1,107 | 5.619835 | 0.438017 | 0.123529 | 0.075 | 0.067647 | 0.194118 | 0.102941 | 0 | 0 | 0 | 0 | 0 | 0 | 0.28636 | 1,107 | 44 | 72 | 25.159091 | 0.860759 | 0 | 0 | 0.137931 | 0 | 0 | 0.041712 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.068966 | 0.206897 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 2 |
de15320099d543ba1af968112e31b6d05f5c424c | 1,562 | py | Python | codes/utils/timer.py | CJWBW/HCFlow | ec6fd388783930a20a2f3d6c4b7396ff728fa6ad | [
"Apache-2.0"
] | 123 | 2021-08-03T10:02:40.000Z | 2022-03-22T04:32:49.000Z | codes/utils/timer.py | CJWBW/HCFlow | ec6fd388783930a20a2f3d6c4b7396ff728fa6ad | [
"Apache-2.0"
] | 12 | 2021-08-19T05:37:30.000Z | 2022-02-18T07:51:45.000Z | codes/utils/timer.py | CJWBW/HCFlow | ec6fd388783930a20a2f3d6c4b7396ff728fa6ad | [
"Apache-2.0"
] | 19 | 2021-08-19T10:14:45.000Z | 2022-03-08T04:49:49.000Z | import time
class ScopeTimer:
    """Context manager that prints how long its ``with`` block took.

    Exposes ``start``, ``end`` and ``interval`` (seconds) after exit.
    """

    def __init__(self, name):
        # Label printed alongside the measured duration.
        self.name = name

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.interval = self.end - self.start
        message = "{} {:.3E}".format(self.name, self.interval)
        print(message)
class Timer:
    """Accumulates tick timestamps and reports interval statistics."""

    def __init__(self):
        self.times = []

    def tick(self):
        """Record the current wall-clock timestamp."""
        self.times.append(time.time())

    def get_average_and_reset(self):
        """Average interval between recorded ticks, or -1 if fewer than two.

        Keeps only the newest timestamp so the next average starts from it.
        """
        recorded = self.times
        if len(recorded) < 2:
            return -1
        span = recorded[-1] - recorded[0]
        average = span / (len(recorded) - 1)
        self.times = [recorded[-1]]
        return average

    def get_last_iteration(self):
        """Interval between the two most recent ticks, or 0 if fewer than two."""
        if len(self.times) < 2:
            return 0
        return self.times[-1] - self.times[-2]
class TickTock:
    """Measures explicit tick()...tock() spans and averages them."""

    def __init__(self):
        self.time_pairs = []
        self.current_time = None

    def tick(self):
        """Open a new measurement span."""
        self.current_time = time.time()

    def tock(self):
        """Close the span opened by tick() and store the [start, end] pair."""
        assert self.current_time is not None, self.current_time
        pair = [self.current_time, time.time()]
        self.time_pairs.append(pair)
        self.current_time = None

    def get_average_and_reset(self):
        """Mean span duration, or -1 when nothing was recorded. Clears pairs."""
        pairs = self.time_pairs
        if not pairs:
            return -1
        total = sum(end - begin for begin, end in pairs)
        self.time_pairs = []
        return total / len(pairs)

    def get_last_iteration(self):
        """Duration of the most recent span, or -1 when none recorded."""
        if not self.time_pairs:
            return -1
        begin, end = self.time_pairs[-1]
        return end - begin
| 24.793651 | 70 | 0.569782 | 210 | 1,562 | 4.028571 | 0.228571 | 0.117021 | 0.122931 | 0.061466 | 0.416076 | 0.245863 | 0.245863 | 0.217494 | 0.217494 | 0.096927 | 0 | 0.021062 | 0.300896 | 1,562 | 62 | 71 | 25.193548 | 0.753663 | 0 | 0 | 0.446809 | 0 | 0 | 0.005762 | 0 | 0 | 0 | 0 | 0 | 0.021277 | 1 | 0.255319 | false | 0 | 0.021277 | 0 | 0.531915 | 0.021277 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
a9a9425c31bfcbd696f44d3c6c3b8f888d6bbd33 | 1,804 | py | Python | test/test_predict_heplx.py | HephyAnalysisSW/DeepJet | c06a618f776a92e84f56a0b0a313c962777d5387 | [
"Apache-2.0"
] | null | null | null | test/test_predict_heplx.py | HephyAnalysisSW/DeepJet | c06a618f776a92e84f56a0b0a313c962777d5387 | [
"Apache-2.0"
] | null | null | null | test/test_predict_heplx.py | HephyAnalysisSW/DeepJet | c06a618f776a92e84f56a0b0a313c962777d5387 | [
"Apache-2.0"
] | 2 | 2018-11-11T18:02:58.000Z | 2021-02-02T08:19:30.000Z | import keras.backend as K
# NOTE(review): Python 2 script (print statements, the `file()` builtin,
# list-returning `map`) that rebuilds a Keras model from pickled JSON and
# weights and runs one prediction. The commented-out blocks below are kept
# as-is: they document earlier debugging sessions (tfdbg wiring, candidate
# model checkpoints on /afs).
#from tensorflow.python import debug as tf_debug
#sess = K.get_session()
#sess = tf_debug.LocalCLIDebugWrapperSession(sess)
#K.set_session(sess)
#sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
#files = [
#'/afs/hephy.at/data/gmoertl01/DeepLepton/trainings/muons/20181013/DYVsQCD_ptRelSorted_MuonTraining/KERAS_model.h5',
#'/afs/hephy.at/data/gmoertl01/DeepLepton/trainings/muons/20181014/DYVsQCD_ptRelSorted_MuonTraining/KERAS_model.h5',
#'/afs/hephy.at/data/gmoertl01/DeepLepton/trainings/muons/20181015/DYVsQCD_ptRelSorted_MuonTraining/KERAS_model.h5',
#'/afs/hephy.at/data/gmoertl01/DeepLepton/trainings/muons/20181016/DYVsQCD_ptRelSorted_MuonTraining/KERAS_model.h5',
#'/afs/hephy.at/data/gmoertl01/DeepLepton/trainings/muons/20181017/DYVsQCD_ptRelSorted_MuonTraining/KERAS_model.h5',
#'/afs/hephy.at/data/gmoertl01/DeepLepton/trainings/muons/20181021/DYVsQCD_ptRelSorted_MuonTraining/KERAS_model.h5',
#]
from keras.models import load_model
#mymodel = load_model("/afs/hephy.at/data/gmoertl01/DeepLepton/trainings/muons/20181013/DYVsQCD_ptRelSorted_MuonTraining/KERAS_model.h5")
# new training
#mymodel = load_model("/afs/hephy.at/data/gmoertl01/DeepLepton/trainings/muons/TestTraining/KERAS_model.h5")
#for f in files:
# m = load_model(f)
# print f
# print m.get_weights()
from keras.models import model_from_json
import pickle
import numpy as np
# SECURITY: pickle.load executes arbitrary code from the file — only run
# this against model.pkl / weights.pkl files from a trusted source.
mymodel = model_from_json( pickle.load(file('model.pkl')))
weights = pickle.load(file('weights.pkl'))
#_weights = [np.nan_to_num(weights[0])] + weights[1:]
# Sanitize every weight tensor (NaN/inf -> finite) before loading.
_weights = map( np.nan_to_num, weights)
mymodel.set_weights(_weights)
#
from multi_l_features import features
##from new_training_features import features
print "Make prediction"
prediction = mymodel.predict( features )
print prediction
| 44 | 137 | 0.810421 | 255 | 1,804 | 5.513725 | 0.278431 | 0.045519 | 0.056899 | 0.079659 | 0.534851 | 0.510669 | 0.480797 | 0.480797 | 0.480797 | 0.480797 | 0 | 0.048607 | 0.064856 | 1,804 | 40 | 138 | 45.1 | 0.784825 | 0.733925 | 0 | 0 | 0 | 0 | 0.076754 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.461538 | null | null | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 2 |
a9c5d8476e4b02df7f0e13a5ce03b6f45cdaf2a9 | 849 | py | Python | grano/model/common.py | ANCIR/grano | cee2ec1974df5df2bc6ed5e214f6bd5d201397a4 | [
"MIT"
] | 30 | 2018-08-23T15:42:17.000Z | 2021-11-16T13:11:36.000Z | grano/model/common.py | ANCIR/grano | cee2ec1974df5df2bc6ed5e214f6bd5d201397a4 | [
"MIT"
] | null | null | null | grano/model/common.py | ANCIR/grano | cee2ec1974df5df2bc6ed5e214f6bd5d201397a4 | [
"MIT"
] | 5 | 2019-05-30T11:36:53.000Z | 2021-08-11T16:17:14.000Z | from datetime import datetime
from grano.core import db
from grano.model.util import make_token
class _CoreBase(object):
    """Shared audit columns and query helpers for grano models."""

    # Timestamps are maintained automatically by SQLAlchemy on insert/update.
    created_at = db.Column(db.DateTime, default=datetime.utcnow)
    updated_at = db.Column(db.DateTime, default=datetime.utcnow,
                           onupdate=datetime.utcnow)

    @classmethod
    def by_id(cls, id):
        """Return the row with the given primary key, or None."""
        return db.session.query(cls).filter_by(id=id).first()

    @classmethod
    def all(cls):
        """Return a query over all rows of this model."""
        return db.session.query(cls)
class IntBase(_CoreBase):
    """Base for models keyed by an auto-incrementing integer id."""

    id = db.Column(db.Integer, primary_key=True)

    def __repr__(self):
        return '<{0}({1})>'.format(self.__class__.__name__, self.id)
class UUIDBase(_CoreBase):
    """Base for models keyed by a generated unicode token."""

    id = db.Column(db.Unicode, default=make_token, primary_key=True)

    def __repr__(self):
        return '<{0}({1})>'.format(self.__class__.__name__, self.id)
| 24.970588 | 68 | 0.658422 | 114 | 849 | 4.596491 | 0.377193 | 0.061069 | 0.076336 | 0.045802 | 0.431298 | 0.354962 | 0.354962 | 0.354962 | 0.198473 | 0.198473 | 0 | 0 | 0.21437 | 849 | 33 | 69 | 25.727273 | 0.785607 | 0 | 0 | 0.272727 | 0 | 0 | 0.018846 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.136364 | 0.136364 | 0.818182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 2 |
a9d6f351882f754828c1e5d9c656765304c9ac75 | 2,168 | py | Python | image_processor.py | mjpaul1/dd2419_detector_baseline | 2490926b45c1bb944173c404a82a499e8bd23c3f | [
"MIT"
] | null | null | null | image_processor.py | mjpaul1/dd2419_detector_baseline | 2490926b45c1bb944173c404a82a499e8bd23c3f | [
"MIT"
] | null | null | null | image_processor.py | mjpaul1/dd2419_detector_baseline | 2490926b45c1bb944173c404a82a499e8bd23c3f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import utils
from torchvision import models
import os
from PIL import Image
import matplotlib.pyplot as plt
import torch
from detector import Detector
import torchvision.transforms.functional as TF
class ImageProcessor():
    """Runs a trained Detector over PIL images and overlays the results."""

    def __init__(self, model_path, ann_path):
        # Inference happens on CPU.
        self.device = torch.device('cpu')
        # Build the network, then restore the trained weights from model_path.
        self.detector = Detector().to(self.device)
        self.detector = utils.load_model(self.detector, model_path, self.device)
        # Category dictionary parsed from the annotation file.
        self.category_dict = utils.get_category_dict(ann_path)

    def detect_and_classify(self, image, threshold):
        """Run detection and classification on a single PIL image.

        Returns:
            List[List[Dict]]
            List containing a list of detected bounding boxes in each image.
            Each dictionary contains the following keys:
                - "x": Top-left corner column
                - "y": Top-left corner row
                - "width": Width of bounding box in pixel
                - "height": Height of bounding box in pixel
                - "category": Category (not implemented yet!)
        """
        # The detector wants a batched tensor, so wrap the single image in
        # a list and torch.stack it into a batch of one.
        batch = [TF.to_tensor(image)]
        if batch:
            batch = torch.stack(batch)
            batch = batch.to(self.device)
        with torch.no_grad():
            raw_output = self.detector(batch).cpu()
            boxes = self.detector.decode_output(raw_output, threshold)
        return boxes

    def overlay_image(self, pil_image, bbs):
        """Return pil_image with bounding boxes and category labels drawn on."""
        return utils.add_bounding_boxes_pil(pil_image, bbs, self.category_dict)
| 31.42029 | 80 | 0.63607 | 277 | 2,168 | 4.841155 | 0.375451 | 0.096943 | 0.089485 | 0.089485 | 0.152125 | 0.099925 | 0.099925 | 0.099925 | 0.099925 | 0.099925 | 0 | 0 | 0.288745 | 2,168 | 68 | 81 | 31.882353 | 0.86965 | 0.403137 | 0 | 0 | 0 | 0 | 0.002573 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.307692 | 0 | 0.538462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
a9d77beb032a578ed9a789e0e697fc853929b529 | 151 | py | Python | forumsweats/commands/forum.py | duckiecousingaming/forum-sweats | 6addf3159e59902a6905c25240bf54f8a7a00e76 | [
"MIT"
] | 10 | 2020-10-15T18:08:53.000Z | 2021-12-11T13:15:05.000Z | forumsweats/commands/forum.py | duckiecousingaming/forum-sweats | 6addf3159e59902a6905c25240bf54f8a7a00e76 | [
"MIT"
] | 59 | 2020-10-06T23:19:25.000Z | 2022-03-06T14:16:31.000Z | forumsweats/commands/forum.py | duckiecousingaming/forum-sweats | 6addf3159e59902a6905c25240bf54f8a7a00e76 | [
"MIT"
] | 13 | 2020-10-19T20:46:47.000Z | 2022-03-05T20:17:40.000Z | name = 'forum'
# Alternative names the dispatcher accepts for this command.
aliases = ('forums', 'f')
# NOTE(review): presumably controls whether missing arguments are padded
# with None by the command loader — confirm against the dispatcher.
pad_none = False


async def run(message):
    """Reply with the usage help for the forum commands."""
    await message.send('Forum commands: **!forums user (username)**')
| 18.875 | 66 | 0.668874 | 20 | 151 | 5 | 0.85 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.145695 | 151 | 7 | 67 | 21.571429 | 0.775194 | 0 | 0 | 0 | 0 | 0 | 0.364238 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
a9d7d3f3ff3f224b99d1e3fcd86e45d271f2a501 | 4,757 | py | Python | tensorflow_riemopt/manifolds/product.py | vishalbelsare/tensorflow-riemopt | 4814b0c4a8bc74bf98f34ba99440f24ea21ab93b | [
"MIT"
] | 33 | 2021-07-12T14:03:55.000Z | 2022-03-30T11:44:11.000Z | tensorflow_riemopt/manifolds/product.py | vishalbelsare/tensorflow-riemopt | 4814b0c4a8bc74bf98f34ba99440f24ea21ab93b | [
"MIT"
] | 2 | 2021-04-15T21:56:04.000Z | 2021-05-08T17:36:22.000Z | tensorflow_riemopt/manifolds/product.py | vishalbelsare/tensorflow-riemopt | 4814b0c4a8bc74bf98f34ba99440f24ea21ab93b | [
"MIT"
] | 5 | 2020-11-06T05:22:07.000Z | 2021-06-15T03:21:58.000Z | """Cartesian product of manifolds."""
import tensorflow as tf
from functools import reduce
from operator import mul
from tensorflow_riemopt.manifolds.manifold import Manifold
class Product(Manifold):
    """Product space of manifolds.

    Points are stored flattened along the last axis: each factor manifold
    occupies a contiguous slice whose length is the product of its shape.
    """

    name = "Product"
    ndims = 1

    def __init__(self, *manifolds):
        """Initialize a product of manifolds.

        Args:
            *manifolds: an iterable of (`manifold`, `shape`) tuples, where
              `manifold` is an instance of `Manifold` and `shape` is a tuple of
              `manifold` dimensions

        Example:
            >>> from tensorflow_riemopt import manifolds
            >>> S = manifolds.Sphere()
            >>> torus = manifolds.Product((S, (2,)), (S, (2,)))
            >>> St = manifolds.EuclideanStiefel()
            >>> St_2 = manifolds.Product((St, (5, 3)), (St, (5, 3)))
        """
        # _indices[i] is the offset of factor i in the flattened last axis;
        # _indices[-1] is the total flattened dimensionality.
        self._indices = [0]
        self._manifolds = []
        self._shapes = []
        for i, (m, shape) in enumerate(manifolds):
            if not isinstance(m, Manifold):
                raise ValueError(
                    "{} should be an instance of Manifold".format(m)
                )
            if not m.check_shape(shape):
                raise ValueError(
                    "Invalid shape {} for manifold {}".format(shape, m)
                )
            self._indices.append(self._indices[i] + reduce(mul, shape))
            self._manifolds.append(m)
            self._shapes.append(list(shape))
        super().__init__()

    def __repr__(self):
        names = [
            "{}{}".format(m.name, tuple(shape))
            for m, shape in zip(self._manifolds, self._shapes)
        ]
        return " × ".join(names)

    def _check_shape(self, shape):
        # The flattened last axis must equal the sum of the factor sizes.
        return self._indices[-1] == shape[-1]

    def _check_point_on_manifold(self, x, atol, rtol):
        checks = [
            m.check_point_on_manifold(self._get_slice(x, i), atol, rtol)
            for (i, m) in enumerate(self._manifolds)
        ]
        return reduce(tf.logical_and, checks)

    def _check_vector_on_tangent(self, x, u, atol, rtol):
        checks = [
            m.check_vector_on_tangent(
                self._get_slice(x, i), self._get_slice(u, i), atol, rtol
            )
            for (i, m) in enumerate(self._manifolds)
        ]
        return reduce(tf.logical_and, checks)

    def _get_slice(self, x, idx):
        """Extract factor `idx` from the flattened tensor `x`, reshaped to
        the factor's own dimensions.

        Raises:
            ValueError: if `idx` is out of range.
        """
        if not 0 <= idx < len(self._indices) - 1:
            raise ValueError("Invalid index {}".format(idx))
        # Renamed from `slice` to avoid shadowing the builtin.
        flat = x[..., self._indices[idx] : self._indices[idx + 1]]
        shape = tf.concat([tf.shape(x)[:-1], self._shapes[idx]], axis=-1)
        return tf.reshape(flat, shape)

    def _product_fn(self, fn, *args, **kwargs):
        """Apply method `fn` factor-wise to sliced args and concatenate the
        flattened results along the last axis."""
        results = []
        for (i, m) in enumerate(self._manifolds):
            arg_slices = [self._get_slice(arg, i) for arg in args]
            result = getattr(m, fn)(*arg_slices, **kwargs)
            shape = tf.concat([tf.shape(result)[:-1], [-1]], axis=-1)
            results.append(tf.reshape(result, shape))
        return tf.concat(results, axis=-1)

    def random(self, shape, dtype=tf.float32):
        """Sample a random point with the given (flattened) shape."""
        if not self.check_shape(shape):
            raise ValueError("Invalid shape {}".format(shape))
        shape = list(shape)
        results = []
        for (i, m) in enumerate(self._manifolds):
            result = m.random(shape[:-1] + self._shapes[i], dtype=dtype)
            results.append(tf.reshape(result, shape[:-1] + [-1]))
        return tf.concat(results, axis=-1)

    def dist(self, x, y, keepdims=False):
        """Distance on the product: l2-norm of the per-factor distances."""
        dists = self._product_fn("dist", x, y, keepdims=True)
        # Removed an unused `shape = tf.concat(...)` temporary here.
        sq_dists = tf.reduce_sum(dists * dists, axis=-1, keepdims=keepdims)
        return tf.math.sqrt(sq_dists)

    def inner(self, x, u, v, keepdims=False):
        """Inner product: sum of the per-factor inner products."""
        inners = self._product_fn("inner", x, u, v, keepdims=True)
        return tf.reduce_sum(inners, axis=-1, keepdims=keepdims)

    def proju(self, x, u):
        return self._product_fn("proju", x, u)

    def projx(self, x):
        return self._product_fn("projx", x)

    def exp(self, x, u):
        return self._product_fn("exp", x, u)

    def retr(self, x, u):
        return self._product_fn("retr", x, u)

    def log(self, x, y):
        return self._product_fn("log", x, y)

    def ptransp(self, x, y, v):
        return self._product_fn("ptransp", x, y, v)

    def transp(self, x, y, v):
        return self._product_fn("transp", x, y, v)

    def pairmean(self, x, y):
        # Bug fix: `pairmean` used to be defined twice with identical
        # bodies; the redundant second definition has been removed.
        return self._product_fn("pairmean", x, y)

    def geodesic(self, x, u, t):
        return self._product_fn("geodesic", x, u, t=t)
| 33.737589 | 77 | 0.565062 | 618 | 4,757 | 4.195793 | 0.199029 | 0.028924 | 0.060162 | 0.073274 | 0.337447 | 0.273814 | 0.248361 | 0.138064 | 0.090243 | 0.090243 | 0 | 0.009169 | 0.289258 | 4,757 | 140 | 78 | 33.978571 | 0.757468 | 0.103847 | 0 | 0.185567 | 0 | 0 | 0.04329 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.206186 | false | 0 | 0.041237 | 0.113402 | 0.474227 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 2 |
a9e9c40d1b0d85c781ba0fefce7890d12b0af317 | 447 | py | Python | graphql_utilities/__init__.py | melvinkcx/graphql-utils | 43ced16ff1ed6b5e8de3978429b14c85dc87044f | [
"MIT"
] | 17 | 2020-01-31T13:34:07.000Z | 2022-03-19T21:06:03.000Z | graphql_utilities/__init__.py | melvinkcx/graphql-utils | 43ced16ff1ed6b5e8de3978429b14c85dc87044f | [
"MIT"
] | 6 | 2020-02-19T00:11:33.000Z | 2020-05-24T01:53:08.000Z | graphql_utilities/__init__.py | melvinkcx/graphql-utils | 43ced16ff1ed6b5e8de3978429b14c85dc87044f | [
"MIT"
] | 1 | 2020-03-08T05:53:57.000Z | 2020-03-08T05:53:57.000Z | from .decorators import run_only_once
from .directives import GraphQLCostDirective, schema_with_cost_directive, cost_directive_source_doc
from .execution import ExtendedExecutionContext
from .utilities import build_schema_with_cost
__version__ = "0.4.0"
__all__ = [
"run_only_once",
"ExtendedExecutionContext",
"GraphQLCostDirective",
"schema_with_cost_directive",
"cost_directive_source_doc",
"build_schema_with_cost"
]
| 27.9375 | 99 | 0.803132 | 51 | 447 | 6.45098 | 0.431373 | 0.121581 | 0.170213 | 0.206687 | 0.395137 | 0.395137 | 0.395137 | 0.395137 | 0.395137 | 0 | 0 | 0.007692 | 0.127517 | 447 | 15 | 100 | 29.8 | 0.835897 | 0 | 0 | 0 | 0 | 0 | 0.302013 | 0.217002 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.307692 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 2 |
a9f7986122eb73d770d32e233b56a855192bd0aa | 2,824 | py | Python | lineups.py | antonio36alv/espn-fantasy-discord-bot | 240c1cdca8f91fd6b75ee91fe3b9c87007cf087b | [
"MIT"
] | null | null | null | lineups.py | antonio36alv/espn-fantasy-discord-bot | 240c1cdca8f91fd6b75ee91fe3b9c87007cf087b | [
"MIT"
] | null | null | null | lineups.py | antonio36alv/espn-fantasy-discord-bot | 240c1cdca8f91fd6b75ee91fe3b9c87007cf087b | [
"MIT"
] | null | null | null | import requests
# ESPN fantasy-basketball roster probe: fetch one team's roster for a
# given scoring period and dump the raw JSON response.
leagueID = 582628976
# leagueID = 835952
year = 2021
# Earlier endpoint experiments, kept commented for reference:
# url = f"https://fantasy.espn.com/apis/v3/games/fba/seasons/{str(year)}/segments/0/leagues/{str(leagueID)}"
# url = f"https://fantasy.espn.com/basketball/team?leagueId={str(year)}&teamId=3"
# url = "https://fantasy.espn.com/apis/v3/games/fba/seasons/2021/segments/0/leagues/582628976?rosterForTeamId=3&view=mDraftDetail,mLiveScoring,mMatchupScore,mPendingTransactions,mPositionalRatings,mRoster,mSettings,mTeam,modular,mNav"
# url = f"https://fantasy.espn.com/basketball/team?leagueId={str(leagueID)}&teamId=3&scoringPeriodId=37&statSplit=singleScoringPeriod"
url = f"https://fantasy.espn.com/apis/v3/games/fba/seasons/2021/segments/0/leagues/{str(leagueID)}?forTeamId=3&scoringPeriodId=37&view=mRoster"
# url = f"https://fantasy.espn.com/apis/v3/games/fba/seasons/2020/segments/0/leagues/{leagueID}?rosterForTeamId={1}&view=mDraftDetail&view=mLiveScoring&view=mMatchupScore&view=mPendingTransactions&view=mPositionalRatings&view=mRoster&view=mSettings&view=mTeam&view=modular&view=mNav"
# + \
# "?seasonId=" + str(year)
# find player endpoint
# "https://site.api.espn.com/apis/fantasy/v2/games/fba/news/players?days=30&playerId=4277847"
# SECURITY: session cookies (swid / espn_s2) are hard-coded credentials —
# they should be moved to environment variables or a gitignored config.
r = requests.get(url, cookies = { "swid":"{23A4936D-D6C7-4DE4-81D9-9188358D2F69}",
    "espn_s2":"AECb%2F6kEWSUPzUA1BEQB9DyFnpFwAucrxLZpHQvim4x%2BaUpNiZ2Azgd9EmZFk09B%2BlKACKtqYAl6FWk%2BGicGf4LMMYnHR0Nl9gU1L4jO2iNxIOZSL0%2B4Blgd%2BKqzOkzbsUn1YkzEhflxt%2FD1RNGYHutwoUPocda2XCJCkIYoyS9ZjDMOVg9E10c%2FTUqc7Z76NQPzVMlmvmDc0UCT7FS7tkcLgglu2oSudTM60AREtsKVvVFgRAvprTUl02PRjV1rWgfFDbwdifDGbHOyQ1a%2FMW9S"})
# d = r.json()
# print(url)
# print(r.json)
print(r.json())
# jawn = requests.get("https://fantasy.espn.com/apis/v3/games/fba/seasons/2021/players?scoringPeriodId=0&view=players_wl")
# print("jawn")
# print(jawn.json())
"""
espnAuth:"{"swid":"{23A4936D-D6C7-4DE4-81D9-9188358D2F69}"}"
espn_s2:"AECb%2F6kEWSUPzUA1BEQB9DyFnpFwAucrxLZpHQvim4x%2BaUpNiZ2Az
gd9EmZFk09B%2BlKACKtqYAl6FWk%2BGicGf4LMMYnHR0Nl9gU1L4jO2iN
xIOZSL0%2B4Blgd%2BKqzOkzbsUn1YkzEhflxt%2FD1RNGYHutwoUPocda
2XCJCkIYoyS9ZjDMOVg9E10c%2FTUqc7Z76NQPzVMlmvmDc0UCT7FS7tkc
Lgglu2oSudTM60AREtsKVvVFgRAvprTUl02PRjV1rWgfFDbwdifDGbHOyQ
1a%2FMW9S"
"""
# https://fantasy.espn.com/apis/v3/games/fba/seasons/2021/segments/0/leagues/582628976?rosterForTeamId=1&view=mDraftDetail&view=mLiveScoring&view=mMatchupScore&view=mPendingTransactions&view=mPositionalRatings&view=mRoster&view=mSettings&view=mTeam&view=modular&view=mNav
# look into it for pending claims
# this jawn:
# https://fantasy.espn.com/apis/v3/games/fba/seasons/2021/segments/0/leagues/582628976?forTeamId=1&scoringPeriodId=37&view=mRoster
# Print schedule every morning at 8am
# When scheudle is printed - get the time for the first game of the day
# | 57.632653 | 312 | 0.797805 | 333 | 2,824 | 6.756757 | 0.345345 | 0.031111 | 0.064 | 0.076 | 0.466667 | 0.449778 | 0.449778 | 0.449778 | 0.449778 | 0.449778 | 0 | 0.094375 | 0.061969 | 2,824 | 49 | 313 | 57.632653 | 0.755002 | 0.615793 | 0 | 0 | 0 | 0.142857 | 0.752739 | 0.525822 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
a9fb5e4b6423f7eab85218d17f425d52a2601ef6 | 2,777 | py | Python | webbreaker/webinspectjson.py | one3chens/webbreaker | 2e3e661122bc82f0bc8c9bb105affe700d859035 | [
"MIT"
] | null | null | null | webbreaker/webinspectjson.py | one3chens/webbreaker | 2e3e661122bc82f0bc8c9bb105affe700d859035 | [
"MIT"
] | null | null | null | webbreaker/webinspectjson.py | one3chens/webbreaker | 2e3e661122bc82f0bc8c9bb105affe700d859035 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from webbreaker.webbreakerlogger import Logger
# Module-level payload template; formatted_settings_payload() mutates and
# returns this dict, so callers share a single instance.
json_scan_settings = {
    "settingsName": "",
    "overrides": {
        "scanName": ""
    }
}


def formatted_settings_payload(settings, scan_name, runenv, scan_mode, scan_scope, login_macro, scan_policy,
                               scan_start, start_urls, workflow_macros, allowed_hosts):
    """Build the WebInspect scan-settings JSON payload.

    Args:
        settings: name of the WebInspect settings file to use.
        scan_name: scan name; ignored when runenv is "jenkins" (the Jenkins
            BUILD_TAG environment variable is used instead).
        runenv: runtime environment ("jenkins" or anything else).
        scan_mode: "scan" (audit only), "crawl" (crawl only), any other
            truthy value selects crawl-and-audit.
        scan_scope: "all", "strict", "children" or "ancestors"; any other
            truthy value is logged as an error and leaves the scope empty.
        login_macro, scan_policy: optional overrides, copied verbatim.
        scan_start: "url" or "macro"; any other truthy value is logged as
            an error and leaves the start option empty.
        start_urls, workflow_macros, allowed_hosts: optional overrides.

    Returns:
        dict: the (shared, module-level) json_scan_settings payload.
    """
    global json_scan_settings
    json_scan_settings['settingsName'] = settings
    # Bug fix: rebuild the overrides dict on every call. Previously the
    # module-level dict accumulated keys across calls, so an option set in
    # one invocation (e.g. crawlAuditMode) leaked into later payloads that
    # did not request it.
    overrides = {}
    json_scan_settings['overrides'] = overrides

    # scanName option: in Jenkins the build tag uniquely identifies the scan
    if runenv == "jenkins":
        overrides['scanName'] = os.getenv('BUILD_TAG')
    else:
        overrides['scanName'] = scan_name

    # crawlAuditMode option
    if scan_mode:
        if scan_mode == "scan":
            overrides['crawlAuditMode'] = 'AuditOnly'
        elif scan_mode == "crawl":
            overrides['crawlAuditMode'] = 'CrawlOnly'
        else:
            overrides['crawlAuditMode'] = 'CrawlAndAudit'

    # scanScope option; an unrecognized value is reported and leaves ""
    if scan_scope:
        overrides['scanScope'] = ""
        if scan_scope == "all":
            overrides['scanScope'] = 'Unrestricted'
        elif scan_scope == "strict":
            overrides['scanScope'] = 'Self'
        elif scan_scope == "children":
            overrides['scanScope'] = 'Children'
        elif scan_scope == "ancestors":
            overrides['scanScope'] = 'Ancestors'
        else:
            Logger.app.error("Usage: all, strict, children, or ancestors are options! \n"
                             "The value {} for scan_scope is not available!".format(scan_scope))

    if login_macro:
        overrides['loginMacro'] = login_macro

    if scan_policy:
        overrides['policyId'] = scan_policy

    # startOption; an unrecognized value is reported and leaves ""
    if scan_start:
        overrides['startOption'] = ""
        if scan_start == "url":
            overrides['startOption'] = "Url"
        elif scan_start == "macro":
            overrides['startOption'] = "Macro"
        else:
            Logger.app.error("usage: url or macro are options NOT scan_start: {}!".format(scan_start))

    if start_urls:
        overrides['startUrls'] = start_urls

    if workflow_macros:
        overrides['workflowMacros'] = workflow_macros

    if allowed_hosts:
        overrides['allowedHosts'] = allowed_hosts

    return json_scan_settings
| 36.064935 | 108 | 0.632697 | 288 | 2,777 | 5.802083 | 0.274306 | 0.114901 | 0.229803 | 0.299222 | 0.347696 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000473 | 0.238747 | 2,777 | 76 | 109 | 36.539474 | 0.789972 | 0.047893 | 0 | 0.071429 | 0 | 0 | 0.265252 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017857 | false | 0 | 0.035714 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e713f4fdfd2b30f627820dffa612380a7a204407 | 727 | py | Python | api/source/base/action.py | 1pkg/ReRe | 83f77d2cece0fb5f6d7b86a395fcca7d4e16459f | [
"MIT"
] | 1 | 2019-12-17T10:31:48.000Z | 2019-12-17T10:31:48.000Z | api/source/base/action.py | c-pkg/ReRe | 83f77d2cece0fb5f6d7b86a395fcca7d4e16459f | [
"MIT"
] | null | null | null | api/source/base/action.py | c-pkg/ReRe | 83f77d2cece0fb5f6d7b86a395fcca7d4e16459f | [
"MIT"
] | 1 | 2019-04-29T08:19:36.000Z | 2019-04-29T08:19:36.000Z | from .constant import Constant
class Action:
    """Base class for API actions: a validate -> process -> format pipeline.

    Subclasses override the private hooks; the instance itself is callable.
    """

    WILDCARD_ENDPOINT = False
    CACHE_EXPIRE = None
    CONNECTION_LIMIT = Constant.RAREFIED_CONNECTION_LIMIT

    def __init__(self, application):
        self._application = application

    def __call__(self, request):
        """Run the full pipeline for one request."""
        self._validate(request)
        return self._format(self._process(request))

    def _validate(self, request):
        """Hook: raise on an invalid request. No-op by default."""
        pass

    def _process(self, request):
        """Hook: produce the response payload. Subclasses must override."""
        return NotImplemented

    def _format(self, response=None):
        """Hook: shape the payload for the client. Identity by default."""
        return response

    def _get(self, request, key, default=None):
        """Fetch a JSON body field as a string, or `default` when absent."""
        body = request.json
        if body is None or key not in body:
            return default
        return str(body[key])
| 24.233333 | 62 | 0.661623 | 83 | 727 | 5.542169 | 0.457831 | 0.095652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.261348 | 727 | 29 | 63 | 25.068966 | 0.856611 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.285714 | false | 0.047619 | 0.047619 | 0.095238 | 0.761905 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
e718f2240ea9c0341709f42699de2038b453c7e3 | 175 | py | Python | quilt_test.py | abhiit89/python_basic_notebooks | 942c60584571394c9317fc7e59cb046d68856c1a | [
"Apache-2.0"
] | null | null | null | quilt_test.py | abhiit89/python_basic_notebooks | 942c60584571394c9317fc7e59cb046d68856c1a | [
"Apache-2.0"
] | null | null | null | quilt_test.py | abhiit89/python_basic_notebooks | 942c60584571394c9317fc7e59cb046d68856c1a | [
"Apache-2.0"
] | null | null | null | import quilt3
# Browse a public quilt package from the example S3 registry (network I/O).
p = quilt3.Package.browse('aleksey/hurdat', 's3://quilt-example')
# Show two entries of the package manifest.
print(p["requirements.txt"])
print(p["notebooks"])
# Materialize the QuickStart notebook locally.
p["notebooks"]["QuickStart.ipynb"].fetch()
| 25 | 65 | 0.714286 | 23 | 175 | 5.434783 | 0.73913 | 0.096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018182 | 0.057143 | 175 | 6 | 66 | 29.166667 | 0.739394 | 0 | 0 | 0 | 0 | 0 | 0.468571 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.4 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e71cb0a9c431e56a173812415c1ae734a3e8208d | 267 | py | Python | python/labs/rock-paper-scissors/random-numbers.py | TheOtakuNinja/cssi-labs | 2875a1c7cc86a182e367733c1bf9bb79b8e92a94 | [
"Apache-2.0"
] | null | null | null | python/labs/rock-paper-scissors/random-numbers.py | TheOtakuNinja/cssi-labs | 2875a1c7cc86a182e367733c1bf9bb79b8e92a94 | [
"Apache-2.0"
] | null | null | null | python/labs/rock-paper-scissors/random-numbers.py | TheOtakuNinja/cssi-labs | 2875a1c7cc86a182e367733c1bf9bb79b8e92a94 | [
"Apache-2.0"
] | null | null | null |
import random
print("dice roller")
dice1 = random.randint(1,6)
dice2 = random.randint(1,6)
sum = dice1 + dice2
if dice1 == dice2:
print("move %d spaces" %(sum))
print("roll again")
else:
print("move %d spaces" %(sum))
print("Next player's turn")
| 15.705882 | 34 | 0.629213 | 40 | 267 | 4.2 | 0.55 | 0.154762 | 0.166667 | 0.178571 | 0.285714 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0.046948 | 0.202247 | 267 | 16 | 35 | 16.6875 | 0.741784 | 0 | 0 | 0.181818 | 0 | 0 | 0.25188 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.454545 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 2 |
e72195df063d6b3b46724be70fb075d6d9441a60 | 391 | py | Python | tests/b004.py | cclauss/sentry-flake8 | 862cde9c07ce08a1aca212e59a7d31f286e300a0 | [
"MIT"
] | null | null | null | tests/b004.py | cclauss/sentry-flake8 | 862cde9c07ce08a1aca212e59a7d31f286e300a0 | [
"MIT"
] | null | null | null | tests/b004.py | cclauss/sentry-flake8 | 862cde9c07ce08a1aca212e59a7d31f286e300a0 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import sys
def this_is_a_bug():
    """Lint-test fixture that deliberately triggers check B004.

    `hasattr(o, "__call__")` and `getattr(o, "__call__", False)` are
    unreliable callability tests; `callable(o)` is the correct form.
    These lines are intentional test input for the linter — do not "fix".
    """
    o = object()
    if hasattr(o, "__call__"):
        sys.stdout.write("Ooh, callable! Or is it?\n")
    if getattr(o, "__call__", False):
        sys.stdout.write("Ooh, callable! Or is it?\n")
def this_is_fine():
    """Companion fixture: the idiomatic `callable(o)` form that must NOT
    be flagged by check B004."""
    o = object()
    if callable(o):
        sys.stdout.write("Ooh, this is actually callable.\n")
| 21.722222 | 61 | 0.626598 | 59 | 391 | 3.847458 | 0.440678 | 0.079295 | 0.185022 | 0.22467 | 0.281938 | 0.281938 | 0.281938 | 0.281938 | 0.281938 | 0 | 0 | 0 | 0.227621 | 391 | 17 | 62 | 23 | 0.751656 | 0 | 0 | 0.333333 | 0 | 0 | 0.258312 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e7455f261e6e8e09edd7b6d17d5a77e334a9c3f2 | 1,119 | py | Python | WeatherDash/Forecast_IO_py/forecastiopy/FIOCurrently.py | Rcarballo2222/ENGI-301 | 341f7e76ec842e1e1f449b2206633150abab1b31 | [
"MIT"
] | null | null | null | WeatherDash/Forecast_IO_py/forecastiopy/FIOCurrently.py | Rcarballo2222/ENGI-301 | 341f7e76ec842e1e1f449b2206633150abab1b31 | [
"MIT"
] | null | null | null | WeatherDash/Forecast_IO_py/forecastiopy/FIOCurrently.py | Rcarballo2222/ENGI-301 | 341f7e76ec842e1e1f449b2206633150abab1b31 | [
"MIT"
] | 1 | 2019-04-17T19:27:56.000Z | 2019-04-17T19:27:56.000Z | # -*- coding: utf-8 -*-
"""
This module receives a ForecastIO object and holds the current weather
conditions. It has one class for this purpose.
"""
class FIOCurrently(object):
    """Receives a ForecastIO object and holds the current weather conditions.

    Every key of the currently-conditions dictionary is also set as an
    attribute on the instance for convenient access.
    """

    # Stays None when the ForecastIO object has no currently data.
    currently = None

    def __init__(self, forecast_io):
        """Extract the current conditions from forecast_io, if available.

        Args:
            forecast_io (ForecastIO): The ForecastIO object
        """
        if not forecast_io.has_currently():
            return
        self.currently = forecast_io.get_currently()
        for key in self.currently.keys():
            setattr(self, key, self.currently[key])

    def get(self):
        """Return the current-conditions dictionary, or None when no data
        is available."""
        return self.currently
| 27.975 | 79 | 0.627346 | 126 | 1,119 | 5.492063 | 0.380952 | 0.122832 | 0.086705 | 0.112717 | 0.397399 | 0.182081 | 0.182081 | 0.182081 | 0.182081 | 0.182081 | 0 | 0.001276 | 0.299374 | 1,119 | 39 | 80 | 28.692308 | 0.881378 | 0.549598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.222222 | false | 0 | 0 | 0 | 0.555556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
e7477ac12e45d099175195c3b8feda9faac83d8d | 375 | py | Python | 10 Days of Statistics/Day 5 - Normal Distribution II/main.py | BrunoASNascimento/hacker-rank-resolve | 9983c91d8783f06c358af0f3eb4a1d2b7015e61b | [
"MIT"
] | null | null | null | 10 Days of Statistics/Day 5 - Normal Distribution II/main.py | BrunoASNascimento/hacker-rank-resolve | 9983c91d8783f06c358af0f3eb4a1d2b7015e61b | [
"MIT"
] | null | null | null | 10 Days of Statistics/Day 5 - Normal Distribution II/main.py | BrunoASNascimento/hacker-rank-resolve | 9983c91d8783f06c358af0f3eb4a1d2b7015e61b | [
"MIT"
] | null | null | null | import math
# Read the distribution parameters (mean and standard deviation) from one
# whitespace-separated stdin line.
mean, sd = map(float, input().split())
# Read the two query points, one per line.
x, y = float(input()), float(input())
def normal_distribution(x, mean, sd):
    """Return P(X <= x) for X ~ N(mean, sd**2), expressed as a percentage
    rounded to three decimal places (CDF via the error function)."""
    z = (x - mean) / (sd * math.sqrt(2))
    cdf_percent = 0.5 * 100 * (1 + math.erf(z))
    return round(cdf_percent, 3)
# Percentage probability that the value exceeds x (complement of the CDF).
print(round(100 - normal_distribution(x, mean, sd), 2))
# Percentage probability that the value exceeds y.
print(round(100 - normal_distribution(y, mean, sd), 2))
# Percentage probability that a value from N(70, 10) is at most 60.
print(round(normal_distribution(60, 70, 10), 2))
| 26.785714 | 81 | 0.645333 | 61 | 375 | 3.901639 | 0.442623 | 0.12605 | 0.088235 | 0.193277 | 0.470588 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0721 | 0.149333 | 375 | 13 | 82 | 28.846154 | 0.673981 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0.125 | 0.375 | 0.375 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 2 |
e7488eba9dd8833e6a19a090aafbe8c41810a1d3 | 286 | py | Python | main.py | fayak/UraRand | a65b0cc8eef1144182c682888441bef11bf41a4e | [
"MIT"
] | 1 | 2020-03-30T01:16:55.000Z | 2020-03-30T01:16:55.000Z | main.py | fayak/UraRand | a65b0cc8eef1144182c682888441bef11bf41a4e | [
"MIT"
] | null | null | null | main.py | fayak/UraRand | a65b0cc8eef1144182c682888441bef11bf41a4e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import urarand.config
import urarand.collect
def main():
    """Entry point: load the configuration, then run the entropy collector,
    feeding each collected sample into the entropy generator's tick hook."""
    cfg = urarand.config.get_config()
    # Debugging alternative that just echoes activity:
    # urarand.collect.collect(lambda x: print('*', end='', flush=True))
    urarand.collect.collect(cfg["ent_gen"].tick)
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
| 22 | 71 | 0.685315 | 37 | 286 | 5.027027 | 0.594595 | 0.225806 | 0.225806 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004098 | 0.146853 | 286 | 12 | 72 | 23.833333 | 0.758197 | 0.304196 | 0 | 0 | 0 | 0 | 0.076142 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.285714 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e75e255119b8e0ae5c568c40398592d2ca23ba0d | 9,255 | py | Python | pysnmp/AILUXCONNECT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/AILUXCONNECT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/AILUXCONNECT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module AILUXCONNECT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/AILUXCONNECT-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:00:39 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Auto-generated by pysmi from the AILUXCONNECT-MIB ASN.1 source; do not
# hand-edit the object definitions below. `mibBuilder` is injected by the
# pysnmp MIB loader at import time.

# Resolve imported SMI/ASN.1 symbols from the standard pysnmp MIB modules.
AIIConnType, = mibBuilder.importSymbols("AISYSTEM-MIB", "AIIConnType")
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter32, Bits, Gauge32, enterprises, ObjectIdentity, NotificationType, Counter64, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, TimeTicks, Unsigned32, IpAddress, iso, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Bits", "Gauge32", "enterprises", "ObjectIdentity", "NotificationType", "Counter64", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "TimeTicks", "Unsigned32", "IpAddress", "iso", "MibIdentifier")
DisplayString, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "TruthValue")
# Module identity: Applied Innovation enterprise subtree 1.3.6.1.4.1.539,
# with the LuxConnect module at branch .33.
aii = MibIdentifier((1, 3, 6, 1, 4, 1, 539))
aiLuxConnect = ModuleIdentity((1, 3, 6, 1, 4, 1, 539, 33))
aiLuxConnect.setRevisions(('2001-04-30 17:00',))
if mibBuilder.loadTexts: aiLuxConnect.setLastUpdated('200104301700Z')
if mibBuilder.loadTexts: aiLuxConnect.setOrganization('Applied Innovation Inc.')
# Notifications (traps) for protection switching and GBIC hot-swap events.
aiLCTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 539, 33, 0))
aiLCTrapGtranSwitch = NotificationType((1, 3, 6, 1, 4, 1, 539, 33, 0, 1)).setObjects(("AILUXCONNECT-MIB", "aiLCGtranActiveIndex"), ("AILUXCONNECT-MIB", "aiLCGtranActiveSpan"))
if mibBuilder.loadTexts: aiLCTrapGtranSwitch.setStatus('current')
aiLCTrapGbicInserted = NotificationType((1, 3, 6, 1, 4, 1, 539, 33, 0, 2)).setObjects(("AILUXCONNECT-MIB", "aiLCGbicIndex"), ("AILUXCONNECT-MIB", "aiLCGbicConnectorType"))
if mibBuilder.loadTexts: aiLCTrapGbicInserted.setStatus('current')
aiLCTrapGbicRemoved = NotificationType((1, 3, 6, 1, 4, 1, 539, 33, 0, 3)).setObjects(("AILUXCONNECT-MIB", "aiLCGbicIndex"), ("AILUXCONNECT-MIB", "aiLCGbicConnectorType"))
if mibBuilder.loadTexts: aiLCTrapGbicRemoved.setStatus('current')
# Table of active Gtran interfaces: span selection, utilization, cooler,
# temperature and optical power status columns.
aiLCGtranActiveTable = MibTable((1, 3, 6, 1, 4, 1, 539, 33, 1), )
if mibBuilder.loadTexts: aiLCGtranActiveTable.setStatus('current')
aiLCGtranActiveEntry = MibTableRow((1, 3, 6, 1, 4, 1, 539, 33, 1, 1), ).setIndexNames((0, "AILUXCONNECT-MIB", "aiLCGtranActiveIndex"))
if mibBuilder.loadTexts: aiLCGtranActiveEntry.setStatus('current')
aiLCGtranActiveIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 539, 33, 1, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aiLCGtranActiveIndex.setStatus('current')
aiLCGtranActiveBackupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 539, 33, 1, 1, 2), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aiLCGtranActiveBackupIndex.setStatus('current')
aiLCGtranActiveSpan = MibTableColumn((1, 3, 6, 1, 4, 1, 539, 33, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("work", 1), ("protect", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aiLCGtranActiveSpan.setStatus('current')
aiLCGtranActiveRxUtilization = MibTableColumn((1, 3, 6, 1, 4, 1, 539, 33, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aiLCGtranActiveRxUtilization.setStatus('current')
aiLCGtranActiveTxUtilization = MibTableColumn((1, 3, 6, 1, 4, 1, 539, 33, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aiLCGtranActiveTxUtilization.setStatus('current')
aiLCGtranActiveClockSlave = MibTableColumn((1, 3, 6, 1, 4, 1, 539, 33, 1, 1, 6), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aiLCGtranActiveClockSlave.setStatus('current')
aiLCGtranActiveCoolerStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 539, 33, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aiLCGtranActiveCoolerStatus.setStatus('current')
aiLCGtranActiveTemperature = MibTableColumn((1, 3, 6, 1, 4, 1, 539, 33, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("okay", 1), ("trouble", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aiLCGtranActiveTemperature.setStatus('current')
aiLCGtranActiveRxPower = MibTableColumn((1, 3, 6, 1, 4, 1, 539, 33, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("okay", 1), ("under", 2), ("over", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aiLCGtranActiveRxPower.setStatus('current')
aiLCGtranActiveTxPower = MibTableColumn((1, 3, 6, 1, 4, 1, 539, 33, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("okay", 1), ("under", 2), ("over", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aiLCGtranActiveTxPower.setStatus('current')
# Table of backup (protect) Gtran interfaces, cross-referencing the active one.
aiLCGtranBackupTable = MibTable((1, 3, 6, 1, 4, 1, 539, 33, 2), )
if mibBuilder.loadTexts: aiLCGtranBackupTable.setStatus('current')
aiLCGtranBackupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 539, 33, 2, 1), ).setIndexNames((0, "AILUXCONNECT-MIB", "aiLCGtranBackupIndex"))
if mibBuilder.loadTexts: aiLCGtranBackupEntry.setStatus('current')
aiLCGtranBackupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 539, 33, 2, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aiLCGtranBackupIndex.setStatus('current')
aiLCGtranBackupActiveIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 539, 33, 2, 1, 2), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aiLCGtranBackupActiveIndex.setStatus('current')
# Table of GBIC transceivers: connector type, transmit mode and utilization.
aiLcGbicTable = MibTable((1, 3, 6, 1, 4, 1, 539, 33, 3), )
if mibBuilder.loadTexts: aiLcGbicTable.setStatus('current')
aiLCGbicEntry = MibTableRow((1, 3, 6, 1, 4, 1, 539, 33, 3, 1), ).setIndexNames((0, "AILUXCONNECT-MIB", "aiLCGbicIndex"))
if mibBuilder.loadTexts: aiLCGbicEntry.setStatus('current')
aiLCGbicIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 539, 33, 3, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aiLCGbicIndex.setStatus('current')
aiLCGbicConnectorType = MibTableColumn((1, 3, 6, 1, 4, 1, 539, 33, 3, 1, 2), AIIConnType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aiLCGbicConnectorType.setStatus('current')
aiLCGbicTxMode = MibTableColumn((1, 3, 6, 1, 4, 1, 539, 33, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("down", 1), ("up", 2), ("gtran", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: aiLCGbicTxMode.setStatus('current')
aiLCGbicRxUtilization = MibTableColumn((1, 3, 6, 1, 4, 1, 539, 33, 3, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aiLCGbicRxUtilization.setStatus('current')
aiLCGbicTxUtilization = MibTableColumn((1, 3, 6, 1, 4, 1, 539, 33, 3, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aiLCGbicTxUtilization.setStatus('current')
# Register every defined object under the module's symbol table.
mibBuilder.exportSymbols("AILUXCONNECT-MIB", aiLCGtranActiveBackupIndex=aiLCGtranActiveBackupIndex, aiLCGbicConnectorType=aiLCGbicConnectorType, aiLCGtranActiveEntry=aiLCGtranActiveEntry, aiLCGtranActiveClockSlave=aiLCGtranActiveClockSlave, aiLCTrapGbicInserted=aiLCTrapGbicInserted, aiLCGbicEntry=aiLCGbicEntry, aiLCGtranBackupIndex=aiLCGtranBackupIndex, aiLCGtranActiveTxPower=aiLCGtranActiveTxPower, aiLCTrapGbicRemoved=aiLCTrapGbicRemoved, aiLCGtranBackupEntry=aiLCGtranBackupEntry, aiLCTrapGtranSwitch=aiLCTrapGtranSwitch, aiLuxConnect=aiLuxConnect, aii=aii, aiLCGtranActiveRxUtilization=aiLCGtranActiveRxUtilization, aiLCGtranBackupTable=aiLCGtranBackupTable, aiLCGtranActiveIndex=aiLCGtranActiveIndex, aiLCTrapInfo=aiLCTrapInfo, aiLCGtranActiveTable=aiLCGtranActiveTable, aiLCGtranActiveTxUtilization=aiLCGtranActiveTxUtilization, aiLCGtranBackupActiveIndex=aiLCGtranBackupActiveIndex, PYSNMP_MODULE_ID=aiLuxConnect, aiLCGbicTxUtilization=aiLCGbicTxUtilization, aiLCGtranActiveRxPower=aiLCGtranActiveRxPower, aiLcGbicTable=aiLcGbicTable, aiLCGbicIndex=aiLCGbicIndex, aiLCGtranActiveSpan=aiLCGtranActiveSpan, aiLCGtranActiveTemperature=aiLCGtranActiveTemperature, aiLCGbicRxUtilization=aiLCGbicRxUtilization, aiLCGbicTxMode=aiLCGbicTxMode, aiLCGtranActiveCoolerStatus=aiLCGtranActiveCoolerStatus)
| 123.4 | 1,302 | 0.779579 | 955 | 9,255 | 7.55288 | 0.168586 | 0.008596 | 0.012062 | 0.016082 | 0.436989 | 0.37377 | 0.361847 | 0.361847 | 0.31665 | 0.301816 | 0 | 0.063775 | 0.073258 | 9,255 | 74 | 1,303 | 125.067568 | 0.777195 | 0.035224 | 0 | 0 | 0 | 0 | 0.141592 | 0.009641 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.119403 | 0 | 0.119403 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e760bb7d475f2d8c34b4b6cd607e7c52d81a498c | 593 | py | Python | src/TB3Util/prmimporter.py | csvance/tb3util | 63191c4e9fddec809d3943f9ee402eae0e2d4659 | [
"MIT"
] | null | null | null | src/TB3Util/prmimporter.py | csvance/tb3util | 63191c4e9fddec809d3943f9ee402eae0e2d4659 | [
"MIT"
] | null | null | null | src/TB3Util/prmimporter.py | csvance/tb3util | 63191c4e9fddec809d3943f9ee402eae0e2d4659 | [
"MIT"
] | null | null | null | import os
from tb3bank import TB3Bank
from prmparser import PRMParser
from prmfile import PRMFile
from tb3importer import TB3Importer
class PRMImporter(TB3Importer):
    """Imports a TB3 pattern bank from a directory of numbered PRM files."""

    def import_bank(self):
        """Read TB3Bank.BANK_SIZE PRM pattern files from self.path, parse
        each one, and assemble them into a TB3Bank.

        Returns:
            TB3Bank on success, or None when self.path does not exist.
        """
        if not os.path.exists(self.path):
            print("Invalid import path: %s" % self.path)
            return None
        patterns = []
        for pattern_no in range(1, TB3Bank.BANK_SIZE + 1):
            pattern_path = "%s/%s%s.%s" % (self.path, PRMFile.PREFIX, pattern_no, PRMFile.EXTENSION)
            # Use a context manager so the file handle is closed promptly
            # instead of being leaked until garbage collection.
            with open(pattern_path, "r") as pattern_file:
                pattern_data = pattern_file.read()
            patterns.append(PRMParser(pattern_data).parse())
        return TB3Bank(patterns)
| 24.708333 | 88 | 0.725126 | 82 | 593 | 5.146341 | 0.45122 | 0.056872 | 0.042654 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018145 | 0.163575 | 593 | 24 | 89 | 24.708333 | 0.832661 | 0 | 0 | 0 | 0 | 0 | 0.057239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.5 | null | null | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 2 |
e761b4c32cb6cb477aeefdfd40d18a11c4e9442d | 680 | py | Python | Unit4/HomeWorks/p3.py | yuhao1998/PythonStudy | fa678f0352673a6934b8f5b1689777af531f3675 | [
"Apache-2.0"
] | null | null | null | Unit4/HomeWorks/p3.py | yuhao1998/PythonStudy | fa678f0352673a6934b8f5b1689777af531f3675 | [
"Apache-2.0"
] | null | null | null | Unit4/HomeWorks/p3.py | yuhao1998/PythonStudy | fa678f0352673a6934b8f5b1689777af531f3675 | [
"Apache-2.0"
] | null | null | null | '''
用户登录的三次机会
描述
给用户三次输入用户名和密码的机会,要求如下:
1)如输入第一行输入用户名为‘Kate’,第二行输入密码为‘666666’,输出‘登录成功!’,退出程序;
2)当一共有3次输入用户名或密码不正确输出“3次用户名或者密码均有误!退出程序。”。
'''
count = 0
while count < 3:
name = input()
password = input()
if name == 'Kate'and password == '666666':
print("登录成功!")
break
else:
count += 1
if count == 3:
print("3次用户名或者密码均有误!退出程序。") | 28.333333 | 149 | 0.333824 | 46 | 680 | 8.065217 | 0.608696 | 0.086253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036458 | 0.152941 | 680 | 24 | 150 | 28.333333 | 0.357639 | 0.625 | 0 | 0 | 0 | 0 | 0.133603 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.181818 | 0 | 0 | 0 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 2 |
e764a81bd360d87c8473f7556e4098d4d92d4e3e | 311 | py | Python | neko2020/utils/files.py | tajas20006/neko2020 | 18345db9daf64b2237a1869af9086f9b462a2f44 | [
"MIT"
] | 4 | 2020-04-24T04:03:56.000Z | 2022-02-04T09:34:23.000Z | neko2020/utils/files.py | tajas20006/neko2020 | 18345db9daf64b2237a1869af9086f9b462a2f44 | [
"MIT"
] | 76 | 2020-09-25T07:08:20.000Z | 2022-03-18T12:16:28.000Z | neko2020/utils/files.py | tajas20006/neko2020 | 18345db9daf64b2237a1869af9086f9b462a2f44 | [
"MIT"
] | null | null | null | import os
import random
def get_project_root():
    """Return the project root directory: three levels above this module."""
    root = __file__
    for _ in range(3):
        root = os.path.dirname(root)
    return root
def select_random_directory(basedir):
    """Return the name of one immediate subdirectory of *basedir*, chosen
    uniformly at random (plain files are ignored)."""
    entries = os.listdir(basedir)
    subdirs = [entry for entry in entries
               if os.path.isdir(os.path.join(basedir, entry))]
    return random.choice(subdirs)
| 23.923077 | 79 | 0.736334 | 47 | 311 | 4.702128 | 0.510638 | 0.135747 | 0.176471 | 0.135747 | 0.176471 | 0.176471 | 0 | 0 | 0 | 0 | 0 | 0 | 0.144695 | 311 | 12 | 80 | 25.916667 | 0.830827 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.25 | 0.125 | 0.75 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 2 |
e7766e556388cce56c334145cf76e60ba80b7bfc | 434 | py | Python | tutorial/extending_wrist_dof/wrist_pitch.py | soumith/stretch_body | 998cec78fae67090060d4474c3029f9db45cde85 | [
"RSA-MD"
] | 19 | 2020-07-14T07:00:45.000Z | 2022-01-26T18:28:59.000Z | tutorial/extending_wrist_dof/wrist_pitch.py | soumith/stretch_body | 998cec78fae67090060d4474c3029f9db45cde85 | [
"RSA-MD"
] | 45 | 2020-05-20T03:05:56.000Z | 2022-01-06T22:35:39.000Z | tutorial/extending_wrist_dof/wrist_pitch.py | soumith/stretch_body | 998cec78fae67090060d4474c3029f9db45cde85 | [
"RSA-MD"
] | 13 | 2020-07-14T23:13:42.000Z | 2022-03-05T02:00:57.000Z | from stretch_body.dynamixel_hello_XL430 import DynamixelHelloXL430
from stretch_body.hello_utils import *
class WristPitch(DynamixelHelloXL430):
    """Dynamixel-driven wrist pitch joint with named preset poses."""

    def __init__(self, chain=None):
        DynamixelHelloXL430.__init__(self, 'wrist_pitch', chain)
        # Preset joint targets in radians, keyed by pose name.
        self.poses = {
            'tool_up': deg_to_rad(45),
            'tool_down': deg_to_rad(-45),
        }

    def pose(self, p, v_r=None, a_r=None):
        """Move to preset pose *p* with optional velocity/acceleration."""
        target = self.poses[p]
        self.move_to(target, v_r, a_r)
| 33.384615 | 66 | 0.695853 | 62 | 434 | 4.467742 | 0.516129 | 0.079422 | 0.108303 | 0.072202 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 0.18894 | 434 | 12 | 67 | 36.166667 | 0.741477 | 0 | 0 | 0 | 0 | 0 | 0.062356 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.222222 | false | 0 | 0.222222 | 0 | 0.555556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
e778af7bb427f9d452fec7cec45d6cc3e9aa46fa | 10,151 | py | Python | tests/test_spatial_mesh.py | tnakaicode/ChargedPaticle-LowEnergy | 47b751bcada2af7fc50cef587a48b1a3c12bcbba | [
"MIT"
] | 6 | 2019-04-14T06:19:40.000Z | 2021-09-14T13:46:26.000Z | tests/test_spatial_mesh.py | RickeyEstes/ef_python | 47b751bcada2af7fc50cef587a48b1a3c12bcbba | [
"MIT"
] | 31 | 2018-03-02T12:05:20.000Z | 2019-02-20T09:29:08.000Z | tests/test_spatial_mesh.py | RickeyEstes/ef_python | 47b751bcada2af7fc50cef587a48b1a3c12bcbba | [
"MIT"
] | 10 | 2017-12-21T15:16:55.000Z | 2020-10-31T23:59:50.000Z | import logging
import h5py
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from ef.particle_array import ParticleArray
from ef.spatial_mesh import SpatialMesh, MeshGrid
from ef.config.components import SpatialMeshConf, BoundaryConditionsConf, ParticleSourceConf
class TestDefaultSpatialMesh:
    """pytest suite for ef.spatial_mesh.SpatialMesh: construction from config
    and do_init, boundary-potential checks, argument validation, HDF5
    round-tripping, the dict view, particle charge weighting, and field
    interpolation."""

    # Build a mesh from config objects and verify node coordinates, zeroed
    # charge density / field, uniform boundary potential, and silence on stdout.
    def test_config(self, capsys):
        mesh = SpatialMeshConf((4, 2, 3), (2, 1, 3)).make(BoundaryConditionsConf(3.14))
        assert mesh.mesh.node_coordinates.shape == (3, 3, 2, 3)
        assert mesh.charge_density.shape == (3, 3, 2)
        assert mesh.potential.shape == (3, 3, 2)
        assert mesh.electric_field.shape == (3, 3, 2, 3)
        coords = np.array([[[[0., 0., 0.], [0., 0., 3.]], [[0., 1., 0.], [0., 1., 3.]],
                            [[0., 2., 0.], [0., 2., 3.]]],
                           [[[2., 0., 0.], [2., 0., 3.]], [[2., 1., 0.], [2., 1., 3.]],
                            [[2., 2., 0.], [2., 2., 3.]]],
                           [[[4., 0., 0.], [4., 0., 3.]], [[4., 1., 0.], [4., 1., 3.]],
                            [[4., 2., 0.], [4., 2., 3.]]]])
        assert_array_equal(mesh.mesh.node_coordinates, coords)
        assert_array_equal(mesh.charge_density, np.zeros((3, 3, 2)))
        potential = np.full((3, 3, 2), 3.14)
        assert_array_equal(mesh.potential, potential)
        assert_array_equal(mesh.electric_field, np.zeros((3, 3, 2, 3)))
        out, err = capsys.readouterr()
        assert out == ""
        assert err == ""

    # Steps that do not divide the grid size evenly should be shrunk, with one
    # warning logged per adjusted axis (and nothing printed to stdout/stderr).
    def test_do_init_warnings(self, capsys, caplog):
        mesh = SpatialMesh.do_init((12, 12, 12), (5, 5, 7), BoundaryConditionsConf(0))
        out, err = capsys.readouterr()
        assert out == ""
        assert err == ""
        assert caplog.record_tuples == [
            ('root', logging.WARNING,
             "X step on spatial grid was reduced to 4.000 from 5.000 to fit in a round number of cells."),
            ('root', logging.WARNING,
             "Y step on spatial grid was reduced to 4.000 from 5.000 to fit in a round number of cells."),
            ('root', logging.WARNING,
             "Z step on spatial grid was reduced to 6.000 from 7.000 to fit in a round number of cells."),
        ]

    # Same geometry expectations as test_config, but via do_init directly.
    def test_do_init(self):
        mesh = SpatialMesh.do_init((4, 2, 3), (2, 1, 3), BoundaryConditionsConf(3.14))
        assert mesh.mesh.node_coordinates.shape == (3, 3, 2, 3)
        assert mesh.charge_density.shape == (3, 3, 2)
        assert mesh.potential.shape == (3, 3, 2)
        assert mesh.electric_field.shape == (3, 3, 2, 3)
        coords = np.array([[[[0., 0., 0.], [0., 0., 3.]], [[0., 1., 0.], [0., 1., 3.]],
                            [[0., 2., 0.], [0., 2., 3.]]],
                           [[[2., 0., 0.], [2., 0., 3.]], [[2., 1., 0.], [2., 1., 3.]],
                            [[2., 2., 0.], [2., 2., 3.]]],
                           [[[4., 0., 0.], [4., 0., 3.]], [[4., 1., 0.], [4., 1., 3.]],
                            [[4., 2., 0.], [4., 2., 3.]]]])
        assert_array_equal(mesh.mesh.node_coordinates, coords)
        assert_array_equal(mesh.charge_density, np.zeros((3, 3, 2)))
        potential = np.full((3, 3, 2), 3.14)
        assert_array_equal(mesh.potential, potential)
        assert_array_equal(mesh.electric_field, np.zeros((3, 3, 2, 3)))

    # Per-face boundary potentials should land on the matching mesh faces.
    def test_do_init_potential(self):
        mesh = SpatialMesh.do_init((12, 12, 12), (4, 4, 6),
                                   BoundaryConditionsConf(1, 2, 3, 4, 5, 6))
        potential = np.array([[[5., 1., 6.], [5., 1., 6.], [5., 1., 6.], [5., 1., 6.]],
                              [[5., 3., 6.], [5., 0., 6.], [5., 0., 6.], [5., 4., 6.]],
                              [[5., 3., 6.], [5., 0., 6.], [5., 0., 6.], [5., 4., 6.]],
                              [[5., 2., 6.], [5., 2., 6.], [5., 2., 6.], [5., 2., 6.]]])
        assert_array_equal(mesh.potential, potential)

    # Changing any boundary node breaks the uniform-boundary check; changing
    # an interior node must not.
    def test_is_potential_equal_on_boundaries(self):
        for x, y, z in np.ndindex(4, 4, 3):
            mesh = SpatialMesh.do_init((12, 12, 12), (4, 4, 6), BoundaryConditionsConf(3.14))
            assert mesh.is_potential_equal_on_boundaries()
            mesh.potential[x, y, z] = 2.
            if np.all([x > 0, y > 0, z > 0]) and np.all([x < 3, y < 3, z < 2]):
                assert mesh.is_potential_equal_on_boundaries()
            else:
                assert not mesh.is_potential_equal_on_boundaries()

    # Malformed or out-of-range grid/step arguments must raise ValueError
    # with the exact documented args tuple.
    def test_do_init_ranges(self):
        with pytest.raises(ValueError) as excinfo:
            SpatialMesh.do_init((10, 20), (2, 1, 3), BoundaryConditionsConf(3.14))
        assert excinfo.value.args == ('grid_size must be a flat triple', (10, 20))
        with pytest.raises(ValueError) as excinfo:
            SpatialMesh.do_init(((1, 2), 3), (1, 1, 1), BoundaryConditionsConf(3.14))
        assert excinfo.value.args == ('grid_size must be a flat triple', ((1, 2), 3))
        with pytest.raises(ValueError) as excinfo:
            SpatialMesh.do_init((10, 10, 10), [[2, 1, 3], [4, 5, 6], [7, 8, 9]],
                                BoundaryConditionsConf(3.14))
        assert excinfo.value.args == ('step_size must be a flat triple', [[2, 1, 3], [4, 5, 6], [7, 8, 9]],)
        with pytest.raises(ValueError) as excinfo:
            SpatialMesh.do_init((10, 10, -30), (2, 1, 3), BoundaryConditionsConf(3.14))
        assert excinfo.value.args == ('grid_size must be positive', (10, 10, -30))
        with pytest.raises(ValueError) as excinfo:
            SpatialMesh.do_init((10, 10, 10), (2, -2, 3), BoundaryConditionsConf(3.14))
        assert excinfo.value.args == ('step_size must be positive', (2, -2, 3))
        with pytest.raises(ValueError) as excinfo:
            SpatialMesh.do_init((10, 10, 10), (17, 2, 3), BoundaryConditionsConf(3.14))
        assert excinfo.value.args == ('step_size cannot be bigger than grid_size',)

    # HDF5 save/load round-trips equality; mutating any one field array
    # (potential, charge_density, electric_field) breaks equality.
    def test_init_h5(self, tmpdir):
        fname = tmpdir.join('test_spatialmesh_init.h5')
        mesh1 = SpatialMesh.do_init((10, 20, 30), (2, 1, 3), BoundaryConditionsConf(3.14))
        with h5py.File(fname, mode="w") as h5file:
            mesh1.save_h5(h5file.create_group("/mesh"))
        with h5py.File(fname, mode="r") as h5file:
            mesh2 = SpatialMesh.load_h5(h5file["/mesh"])
        assert mesh1 == mesh2
        mesh2 = SpatialMesh.do_init((10, 20, 30), (2, 1, 3), BoundaryConditionsConf(3.14))
        assert mesh1 == mesh2
        mesh2.potential = np.random.ranf(mesh1.potential.shape)
        assert mesh1 != mesh2
        mesh2 = SpatialMesh.do_init((10, 20, 30), (2, 1, 3), BoundaryConditionsConf(3.14))
        assert mesh1 == mesh2
        mesh2.charge_density = np.random.ranf(mesh1.charge_density.shape)
        assert mesh1 != mesh2
        mesh2 = SpatialMesh.do_init((10, 20, 30), (2, 1, 3), BoundaryConditionsConf(3.14))
        assert mesh1 == mesh2
        mesh2.electric_field = np.random.ranf(mesh1.electric_field.shape)
        assert mesh1 != mesh2
        mesh2 = SpatialMesh.do_init((10, 20, 30), (2, 1, 3), BoundaryConditionsConf(3.14))
        mesh2.potential = np.random.ranf(mesh1.potential.shape)
        mesh2.charge_density = np.random.ranf(mesh1.charge_density.shape)
        mesh2.electric_field = np.random.ranf(mesh1.electric_field.shape)
        assert mesh1 != mesh2
        with h5py.File(fname, mode="w") as h5file:
            mesh2.save_h5(h5file.create_group("/SpatialMesh"))
        with h5py.File(fname, mode="r") as h5file:
            mesh1 = SpatialMesh.load_h5(h5file["/SpatialMesh"])
        assert mesh1 == mesh2

    # The dict view exposes exactly the four expected keys with zeroed arrays.
    def test_dict(self):
        mesh = SpatialMesh.do_init((4, 2, 3), (2, 1, 3), BoundaryConditionsConf())
        d = mesh.dict
        assert d.keys() == {"mesh", "electric_field", "potential", "charge_density"}
        assert d["mesh"] == MeshGrid((4, 2, 3), (3, 3, 2))
        assert_array_equal(d["electric_field"], np.zeros((3, 3, 2, 3)))
        assert_array_equal(d["potential"], np.zeros((3, 3, 2)))
        assert_array_equal(d["charge_density"], np.zeros((3, 3, 2)))

    # Cloud-in-cell charge weighting: interior particles, duplicated
    # particles, a corner particle, and an out-of-bounds particle raising.
    def test_weight_particles_charge_to_mesh(self):
        mesh = SpatialMeshConf((2, 4, 8), (1, 2, 4)).make(BoundaryConditionsConf())
        particle_arrays = [ParticleArray(1, -2, 4, [(1, 1, 3)], [(0, 0, 0)])]
        mesh.weight_particles_charge_to_mesh(particle_arrays)
        assert_array_equal(mesh.charge_density,
                           np.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
                                     [[-0.25 / 8, -0.75 / 8, 0], [-0.25 / 8, -0.75 / 8, 0], [0, 0, 0]],
                                     [[0, 0, 0], [0, 0, 0], [0, 0, 0]]]))
        particle_arrays = [ParticleArray([1, 2], -2, 4, [(1, 1, 3), (1, 1, 3)], np.zeros((2, 3)))]
        mesh.clear_old_density_values()
        mesh.weight_particles_charge_to_mesh(particle_arrays)
        assert_array_equal(mesh.charge_density,
                           np.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
                                     [[-0.25 / 4, -0.75 / 4, 0], [-0.25 / 4, -0.75 / 4, 0], [0, 0, 0]],
                                     [[0, 0, 0], [0, 0, 0], [0, 0, 0]]]))
        mesh.clear_old_density_values()
        particle_arrays = [ParticleArray(1, -2, 4, [(2, 4, 8)], [(0, 0, 0)])]
        mesh.weight_particles_charge_to_mesh(particle_arrays)
        assert_array_equal(mesh.charge_density,
                           np.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
                                     [[0, 0, 0], [0, 0, 0], [0, 0, 0]],
                                     [[0, 0, 0], [0, 0, 0], [0, 0, -0.25]]]))
        particle_arrays = [ParticleArray(1, -2, 4, [(1, 2, 8.1)], [(0, 0, 0)])]
        with pytest.raises(ValueError, match="Position is out of meshgrid bounds"):
            mesh.weight_particles_charge_to_mesh(particle_arrays)

    # Trilinear field interpolation at an off-node position.
    def test_field_at_position(self):
        mesh = SpatialMeshConf((2, 4, 8), (1, 2, 4)).make(BoundaryConditionsConf())
        mesh.electric_field[1:2, 0:2, 0:2] = np.array([[[2, 1, 0], [-3, 1, 0]],
                                                       [[0, -1, 0], [-1, 0, 0]]])
        assert_array_equal(mesh.field_at_position([(1, 1, 3)]), [(-1.25, 0.375, 0)])
| 54.87027 | 108 | 0.529012 | 1,409 | 10,151 | 3.69198 | 0.1022 | 0.03614 | 0.041522 | 0.047674 | 0.779508 | 0.737409 | 0.714917 | 0.665513 | 0.598039 | 0.573818 | 0 | 0.100401 | 0.287656 | 10,151 | 184 | 109 | 55.168478 | 0.619002 | 0 | 0 | 0.542683 | 0 | 0.018293 | 0.063344 | 0.002364 | 0 | 0 | 0 | 0 | 0.304878 | 1 | 0.060976 | false | 0 | 0.04878 | 0 | 0.115854 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e78a9f0341435055deebdfeba5c1bf3f2f848f0f | 753 | gyp | Python | node_modules/bswap/binding.gyp | msaadsadiq/GoogleREST | 4afa60109292c25a12e2866b3501c4cf77a2c85e | [
"MIT"
] | null | null | null | node_modules/bswap/binding.gyp | msaadsadiq/GoogleREST | 4afa60109292c25a12e2866b3501c4cf77a2c85e | [
"MIT"
] | null | null | null | node_modules/bswap/binding.gyp | msaadsadiq/GoogleREST | 4afa60109292c25a12e2866b3501c4cf77a2c85e | [
"MIT"
] | null | null | null | {
"conditions": [
['OS=="win"', {
"variables": {
"has_avx2%": "<!(.\util\cpuinfo.exe 1 5 7)"
}
}]
],
"targets": [
{
"target_name": "bswap",
"sources": [ "src/bswap.cc" ],
"include_dirs" : [
"<!(node -e \"require('nan')\")"
],
"cflags":[
"-march=native"
],
"conditions": [
['OS=="win" and has_avx2==1', {
"msvs_settings": {
"VCCLCompilerTool": {
"EnableEnhancedInstructionSet": 5 # /arch:AVX2
}
}
}, {
"msvs_settings": {
"VCCLCompilerTool": {
"EnableEnhancedInstructionSet": 3 # /arch:AVX
}
}
}]
]
}
]
}
| 20.351351 | 60 | 0.386454 | 50 | 753 | 5.7 | 0.72 | 0.084211 | 0.105263 | 0.392982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020642 | 0.420983 | 753 | 36 | 61 | 20.916667 | 0.633028 | 0.02656 | 0 | 0.305556 | 0 | 0 | 0.415068 | 0.105479 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e78e1b9eadb6745dde918cb9fc504c0f61ab1784 | 631 | py | Python | apps/grid/forms.py | jamespacileo/packaginator | d4b51ae16e0658fade91e1a6c4ce987ee747b053 | [
"MIT"
] | 1 | 2015-11-05T05:09:38.000Z | 2015-11-05T05:09:38.000Z | apps/grid/forms.py | jamespacileo/packaginator | d4b51ae16e0658fade91e1a6c4ce987ee747b053 | [
"MIT"
] | null | null | null | apps/grid/forms.py | jamespacileo/packaginator | d4b51ae16e0658fade91e1a6c4ce987ee747b053 | [
"MIT"
] | null | null | null | from django.forms import ModelForm
from grid.models import Element, Feature, Grid, GridPackage
class GridForm(ModelForm):
    """Form for creating or editing a Grid; slugs are normalized to
    lowercase on validation."""

    def clean_slug(self):
        slug = self.cleaned_data['slug']
        return slug.lower()

    class Meta:
        model = Grid
        fields = ['title', 'slug', 'description']
class ElementForm(ModelForm):
    """ModelForm exposing only the free-text content of a grid Element."""
    class Meta:
        model = Element
        fields = ['text',]
class FeatureForm(ModelForm):
    """ModelForm for a grid Feature: title and description only."""
    class Meta:
        model = Feature
        fields = ['title', 'description',]
class GridPackageForm(ModelForm):
    """ModelForm for attaching a package to a grid; only the package
    relation is editable."""
    class Meta:
        model = GridPackage
        fields = ['package']
e795d0cbe6f6231c526deb01c713eb32255572c9 | 433 | gyp | Python | benchmarks/shbench/shbench.gyp | cksystemsgroup/scalloc-artifact | 118de2d4947c8c6e6d15754bcd5054396533d31f | [
"BSD-2-Clause"
] | 9 | 2015-10-27T15:09:47.000Z | 2019-04-26T13:45:07.000Z | benchmarks/shbench/shbench.gyp | cksystemsgroup/scalloc-artifact | 118de2d4947c8c6e6d15754bcd5054396533d31f | [
"BSD-2-Clause"
] | 1 | 2016-06-18T10:54:23.000Z | 2016-08-18T12:16:59.000Z | benchmarks/shbench/shbench.gyp | cksystemsgroup/scalloc-artifact | 118de2d4947c8c6e6d15754bcd5054396533d31f | [
"BSD-2-Clause"
] | 3 | 2016-09-25T04:37:09.000Z | 2018-03-16T18:34:00.000Z | {
'includes': [ '../common.gypi' ],
'targets': [
{
'target_name': 'shbench',
'product_name': 'shbench',
'type' : 'executable',
'conditions': [
['OS=="linux"', {
'ldflags': [
'-pthread'
]
}],
],
'defines': [
'SYS_MULTI_THREAD'
],
'sources': [
'src/sh6bench.c'
],
'include_dirs': [ 'src' ]
}
]
}
| 16.653846 | 35 | 0.378753 | 28 | 433 | 5.678571 | 0.892857 | 0.138365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003937 | 0.413395 | 433 | 25 | 36 | 17.32 | 0.622047 | 0 | 0 | 0.125 | 0 | 0 | 0.405093 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e796f3dac103098c0c514e14973c1e02c9dc0a2f | 392 | py | Python | scripts/config.py | ci010/libnode | 85da4acb5a0c7bb3c9bee809c226f3623e260365 | [
"MIT"
] | 39 | 2019-08-23T23:34:30.000Z | 2022-03-30T09:10:12.000Z | scripts/config.py | ci010/libnode | 85da4acb5a0c7bb3c9bee809c226f3623e260365 | [
"MIT"
] | 3 | 2020-10-16T01:05:51.000Z | 2021-11-24T00:23:46.000Z | scripts/config.py | ci010/libnode | 85da4acb5a0c7bb3c9bee809c226f3623e260365 | [
"MIT"
] | 11 | 2019-06-17T13:26:41.000Z | 2022-02-18T15:49:59.000Z | assert __name__ != "__main__"
import os

# Build configuration read from the environment at import time.
# Node.js version to build against (required — raises KeyError if unset).
nodeVersion = os.environ['LIBNODE_NODE_VERSION']
# Extra configure flags, whitespace-separated (optional).
configFlags = (os.environ.get('LIBNODE_CONFIG_FLAGS') or '').split()
# True when a 32-bit (x86) build is requested.
x86 = os.environ.get('LIBNODE_X86') == '1'
# Suffix appended to the produced zip archive's basename (optional).
zipBasenameSuffix = os.environ.get('LIBNODE_SMALL_ICU', '') == '1' and '' or os.environ.get('LIBNODE_ZIP_SUFFIX', '')
if os.environ.get('LIBNODE_SMALL_ICU', '') == '1':
    # Build with the reduced ICU data set and mark the artifact name.
    configFlags += ['--with-intl=small-icu']
    zipBasenameSuffix += '-smallicu'
| 30.153846 | 68 | 0.709184 | 49 | 392 | 5.326531 | 0.55102 | 0.172414 | 0.183908 | 0.291188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016997 | 0.09949 | 392 | 12 | 69 | 32.666667 | 0.72238 | 0 | 0 | 0 | 0 | 0 | 0.321429 | 0.053571 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e79addbb56f1cb91e20cdba8d1e7c43a55879770 | 2,039 | py | Python | tests/test_aligned_sents_long_distance_cross.py | BramVanroy/astred | 450e4d071319ea02768db9fd0b2c6e12b037676c | [
"Apache-2.0"
] | 10 | 2020-03-25T10:23:49.000Z | 2021-12-18T02:35:37.000Z | tests/test_aligned_sents_long_distance_cross.py | BramVanroy/astred | 450e4d071319ea02768db9fd0b2c6e12b037676c | [
"Apache-2.0"
] | 2 | 2021-10-07T09:56:55.000Z | 2022-03-01T10:57:24.000Z | tests/test_aligned_sents_long_distance_cross.py | BramVanroy/astred | 450e4d071319ea02768db9fd0b2c6e12b037676c | [
"Apache-2.0"
] | null | null | null | from pytest_cases import parametrize_with_cases
from astred.pairs import IdxPair
from .conftest import TestAlignedSents
@parametrize_with_cases("aligned", cases=TestAlignedSents, glob="*long_distance_cross")
def test_aligned_sents__aligned_cross(aligned):
    """Alignment count and total word cross for the long-distance-cross cases."""
    # NULL-to-NULL alignments included
    assert len(aligned.aligned_words) == 5
    assert aligned.word_cross == 5
@parametrize_with_cases("aligned", cases=TestAlignedSents, glob="*long_distance_cross")
def test_aligned_sents__words_cross(aligned):
    """Per-word cross values; the NULL word at index 0 has no cross."""
    # NULL-to-NULL alignments included; src and tgt are symmetric here.
    expected = [None, 3, 2, 2, 3]
    for side in (aligned.src, aligned.tgt):
        for idx, want in enumerate(expected):
            if want is None:
                assert side[idx].cross is None
            else:
                assert side[idx].cross == want
@parametrize_with_cases("aligned", cases=TestAlignedSents, glob="*long_distance_cross")
def test_aligned_sents__words_avg_cross(aligned):
    """Per-word average cross values.

    Same numbers as the plain cross test because there are no m-to-n
    alignments; avg_cross is a float, though. NULL-to-NULL included.
    """
    expected = [None, 3.0, 2.0, 2.0, 3.0]
    for side in (aligned.src, aligned.tgt):
        for idx, want in enumerate(expected):
            if want is None:
                assert side[idx].avg_cross is None
            else:
                assert side[idx].avg_cross == want
@parametrize_with_cases("aligned", cases=TestAlignedSents, glob="*long_distance_cross")
def test_aligned_sents__seq(aligned):
    """Sequence-level spans, alignments and cross (NULL-NULL is its own group)."""
    assert len(aligned.aligned_seq_spans) == 4
    expected_pairs = [(0, 0), (1, 3), (2, 2), (3, 1)]
    assert aligned.seq_aligns == [IdxPair(src, tgt) for src, tgt in expected_pairs]
    assert aligned.seq_cross == 3
| 33.983333 | 87 | 0.712114 | 308 | 2,039 | 4.532468 | 0.188312 | 0.214183 | 0.114613 | 0.077364 | 0.702006 | 0.582378 | 0.581662 | 0.36533 | 0.323782 | 0.259312 | 0 | 0.033195 | 0.172634 | 2,039 | 59 | 88 | 34.559322 | 0.794309 | 0.115743 | 0 | 0.097561 | 0 | 0 | 0.060134 | 0 | 0 | 0 | 0 | 0 | 0.609756 | 1 | 0.097561 | false | 0 | 0.073171 | 0 | 0.170732 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e79f307c1a256ddf14e1098f9e4e9028bac9bcda | 895 | py | Python | fixture/orm.py | rwajs75/python_mantis_szkolenie | 94df5d45366786d84868979d193ac9e127d1ffb9 | [
"Apache-2.0"
] | null | null | null | fixture/orm.py | rwajs75/python_mantis_szkolenie | 94df5d45366786d84868979d193ac9e127d1ffb9 | [
"Apache-2.0"
] | null | null | null | fixture/orm.py | rwajs75/python_mantis_szkolenie | 94df5d45366786d84868979d193ac9e127d1ffb9 | [
"Apache-2.0"
] | null | null | null | __autor__ = 'Roman'
from pony.orm import *
from datetime import datetime
from model.project import Project
from pymysql.converters import decoders
class ORMFixture:
    """Read-only Pony ORM fixture over the Mantis project table."""

    db = Database()

    class ORMProject(db.Entity):
        # Maps mantis_project_table rows; only id and name are needed.
        _table_ = 'mantis_project_table'
        id = PrimaryKey(int, column='id')
        name = Optional(str, column='name')

    def __init__(self, host, name, user, password):
        """Bind to the MySQL database and generate the entity mapping."""
        self.db.bind('mysql', host=host, database=name, user=user, password=password, conv=decoders)
        self.db.generate_mapping()
        sql_debug(True)  # echo generated SQL for debugging

    def convert_projects_to_model(self, projects):
        """Convert ORM project rows into model.project.Project objects."""
        return [Project(id=str(row.id), name=row.name) for row in projects]

    @db_session
    def get_projects_list(self):
        """Fetch every project and return it as a list of model objects."""
        return self.convert_projects_to_model(select(p for p in ORMFixture.ORMProject))
| 27.121212 | 100 | 0.686034 | 114 | 895 | 5.184211 | 0.464912 | 0.076142 | 0.05753 | 0.07445 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.211173 | 895 | 32 | 101 | 27.96875 | 0.83711 | 0 | 0 | 0 | 0 | 0 | 0.040223 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0.090909 | 0.181818 | 0.090909 | 0.636364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 2 |
e7a06818f86e8dac8d97fd50d21fa5ca0d11197b | 7,755 | py | Python | generate_examples.py | RashidSH/coinmath | c5fbcfa238482719ea6a59fe3b314c68bee1bcc2 | [
"MIT"
] | null | null | null | generate_examples.py | RashidSH/coinmath | c5fbcfa238482719ea6a59fe3b314c68bee1bcc2 | [
"MIT"
] | null | null | null | generate_examples.py | RashidSH/coinmath | c5fbcfa238482719ea6a59fe3b314c68bee1bcc2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import math
import random
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont
def generate_example(level):
    """Render one generated exercise for *level* onto the frame image.

    Returns (answer, s, buf) where answer/s come from gen() and buf is a
    BytesIO holding the rendered PNG, rewound to position 0.
    """
    answer, s, draw_data = gen(level)

    image = Image.new('RGBA', (700, 540))
    frame = Image.open('assets/frame.png')
    image.paste(frame)
    draw = ImageDraw.Draw(image)
    font = ImageFont.truetype('assets/Arcon-Regular.otf', 84)
    black = (0, 0, 0)

    if level in (11, 12):
        # Fraction levels: draw two fraction bars, the operator, and the
        # four numerator/denominator values around the vertical center.
        mid_y = image.height // 2
        draw.line((170, mid_y, 270, mid_y), black, 5)
        draw.line((430, mid_y, 530, mid_y), black, 5)
        draw.text((330, mid_y - 60), draw_data[0], black, font=font)
        # (base x, y offset) per value; two-digit values shift 15px left.
        positions = [(200, -110), (200, 10), (460, -110), (460, 10)]
        for idx, (base_x, dy) in enumerate(positions, start=1):
            value = draw_data[idx]
            x = base_x if value < 10 else base_x - 15
            draw.text((x, mid_y + dy), str(value), black, font=font)
    else:
        # Plain expression levels: center the (possibly multiline) text.
        text = s.replace(' ', '')
        width, _height = draw.multiline_textsize(text, font=font)
        offset = 170 if level == 8 or level == 9 else 210
        draw.multiline_text((350 - width // 2, offset), text, black,
                            font=font, align='center')

    buf = BytesIO()
    image.save(buf, format='png')
    buf.seek(0)
    return answer, s, buf
def gen(level):
    """Generate one random arithmetic exercise for the given level.

    Returns a tuple (answer, s, draw_data):
      - answer: expected result — int for most levels (computed with eval
        at the bottom), int x for equation levels 8/9, str for fraction
        levels 11/12
      - s: the human-readable exercise text, ending in '=?'
      - draw_data: (op, m1, d1, m2, d2) for fraction levels 11/12 so the
        renderer can draw the two fractions; None for all other levels

    Raises ValueError for unknown levels.
    """
    draw_data = None
    # Level 1: addition/subtraction up to 20; operands swapped so the
    # subtraction result is never negative.
    if level == 1:
        m1 = random.randint(1, 20)
        m2 = random.randint(1, 20)
        op = random.randint(1, 2)
        if op == 1:
            op_str = '+'
        else:
            op_str = '-'
            if m1 < m2:
                m1, m2 = m2, m1
        s = f'{m1} {op_str} {m2}'
    # Level 2: multiplication or exact division (dividend pre-multiplied).
    elif level == 2:
        m1 = random.randint(2, 10)
        m2 = random.randint(2, 10)
        op = random.randint(1, 2)
        if op == 1:
            op_str = '*'
        else:
            op_str = '/'
            m1 *= m2
        s = f'{m1} {op_str} {m2}'
    # Level 3: any of the four operations with larger operands.
    elif level == 3:
        m1 = random.randint(8, 30)
        m2 = random.randint(8, 30)
        op = random.randint(1, 4)
        if op == 1:
            op_str = '+'
        elif op == 2:
            op_str = '-'
            if m1 < m2:
                m1, m2 = m2, m1
        elif op == 3:
            op_str = '*'
        else:
            op_str = '/'
            m1 *= m2
        s = f'{m1} {op_str} {m2}'
    # Level 4: three operands with two random +,-,* operators.
    elif level == 4:
        ops = ('+', '-', '*')
        m1 = random.randint(5, 25)
        m2 = random.randint(5, 25)
        m3 = random.randint(5, 25)
        op1 = random.choice(ops)
        op2 = random.choice(ops)
        s = f'{m1} {op1} {m2} {op2} {m3}'
    # Level 5: like level 4, but one randomly chosen operand becomes an
    # exact division term 'k*m4/m4'.
    elif level == 5:
        ops = ('+', '-', '*')
        m1 = random.randint(4, 20)
        m2 = random.randint(4, 20)
        m3 = random.randint(4, 20)
        m4 = random.randint(2, 5)
        p4 = random.randint(1, 3)
        op1 = random.choice(ops)
        op2 = random.choice(ops)
        s4 = '/' + str(m4)
        if p4 == 1:
            m1 *= m4
            m1 = str(m1) + s4
        elif p4 == 2:
            m2 *= m4
            m2 = str(m2) + s4
        else:
            m3 *= m4
            m3 = str(m3) + s4
        s = f'{m1} {op1} {m2} {op2} {m3}'
    # Level 6: parenthesized sum/difference times a factor, in either order.
    elif level == 6:
        ops = ('+', '-')
        m1 = random.randint(4, 20)
        m2 = random.randint(4, 20)
        m3 = random.randint(2, 11)
        p = random.randint(1, 2)
        op1 = random.choice(ops)
        s1 = f'({m1} {op1} {m2})'
        if p == 1:
            s = f'{s1} * {m3}'
        else:
            s = f'{m3} * {s1}'
    # Level 7: like level 6, but one of the three operands is squared.
    elif level == 7:
        ops = ('+', '-')
        m1 = random.randint(4, 11)
        m2 = random.randint(4, 11)
        m3 = random.randint(2, 11)
        p1 = random.randint(1, 2)
        p2 = random.randint(1, 3)
        op1 = random.choice(ops)
        if p2 == 1:
            m1 = str(m1) + '**2'
        elif p2 == 2:
            m2 = str(m2) + '**2'
        else:
            m3 = str(m3) + '**2'
        s1 = f'({m1} {op1} {m2})'
        if p1 == 1:
            s = f'{s1} * {m3}'
        else:
            s = f'{m3} * {s1}'
    # Level 8: linear equation 'm1x +/- m2 = value'; the player solves for x.
    # eval runs only on this internally generated string, never user input.
    elif level == 8:
        ops = ('+', '-')
        m1 = random.randint(2, 10)
        m2 = random.randint(2, 10)
        x = random.randint(2, 10)
        p = random.randint(1, 2)
        op1 = random.choice(ops)
        if p == 1:
            s1 = f'{m1}x {op1} {m2}'
        else:
            s1 = f'{m1} {op1} {m2}x'
        s = f"{s1}={eval(s1.replace('x', '*' + str(x)))}\nx=?"
        answer = x
    # Level 9: like level 8 with all four operators; the division case is
    # adjusted (x scaled by m2) so the right-hand side stays integral.
    elif level == 9:
        ops = ('+', '-', '*', '/')
        m1 = random.randint(3, 15)
        m2 = random.randint(3, 15)
        x = random.randint(3, 15)
        p = random.randint(1, 2)
        op1 = random.choice(ops)
        if op1 == '/':
            if p == 2:
                p = 1
                m2 //= 2
                x //= 2
            x *= m2
        if p == 1:
            s1 = f'{m1}x {op1} {m2}'
        else:
            s1 = f'{m1} {op1} {m2}x'
        s = f"{s1}={int(eval(s1.replace('x', '*' + str(x))))}\nx=?"
        answer = x
    # Level 10: like level 7 with a power of 2 or 3 on one operand.
    # NOTE(review): all three p2 branches assign m1 (built from m1, m2 or
    # m3 respectively), whereas the analogous level-7 code assigns
    # m1/m2/m3 — possibly a bug; confirm intent.
    elif level == 10:
        ops = ('+', '-', '*')
        m1 = random.randint(3, 10)
        m2 = random.randint(3, 10)
        m3 = random.randint(2, 10)
        m4 = random.randint(2, 3)
        p1 = random.randint(1, 2)
        p2 = random.randint(1, 3)
        op1 = random.choice(ops)
        if p2 == 1:
            m1 = f'{m1 // 2}**{m4}'
        elif p2 == 2:
            m1 = f'{m2 // 2}**{m4}'
        else:
            m1 = f'{m3 // 2}**{m4}'
        s1 = f'({m1} {op1} {m2})'
        if p1 == 1:
            s = f'{s1} * {m3}'
        else:
            s = f'{m3} * {s1}'
    # Level 11: fraction addition/subtraction. The second fraction is
    # scaled by mult so both reduce to denominator d1; answer is the
    # gcd-reduced result as a string ('n' or 'n/d').
    elif level == 11:
        ops = ('+', '-')
        m1 = random.randint(1, 10)
        m2 = random.randint(1, 10)
        d1 = random.randint(2, 20)
        mult = random.randint(1, 3)
        p = random.randint(1, 2)
        op = random.choice(ops)
        if op == '+':
            res1, res2 = m1 + m2, d1
        else:
            res1, res2 = m1 - m2, d1
        k = math.gcd(res1, res2)
        if res2 // k == 1:
            answer = str(res1 // k)
        else:
            answer = f'{res1 // k}/{res2 // k}'
        m2 *= mult
        d2 = d1 * mult
        # Randomize the operand order only for the commutative '+' case.
        if p == 2 and op == '+':
            m1, m2, d1, d2 = m2, m1, d2, d1
        s = f'{m1}/{d1} {op} {m2}/{d2}=?'
        draw_data = (op, m1, d1, m2, d2)
    # Level 12: fraction multiplication/division, gcd-reduced answer.
    elif level == 12:
        ops = ('*', '/')
        m1 = random.randint(1, 10)
        m2 = random.randint(1, 10)
        d1 = random.randint(2, 10)
        d2 = random.randint(2, 10)
        mult = random.randint(1, 3)
        p = random.randint(1, 4)
        op = random.choice(ops)
        if p == 1:
            m1 *= mult
        elif p == 2:
            m2 *= mult
        elif p == 3:
            d1 *= mult
        else:
            d2 *= mult
        if op == '*':
            res1 = m1 * m2
            res2 = d1 * d2
        else:
            # Division: multiply by the reciprocal of the second fraction.
            res1 = m1 * d2
            res2 = d1 * m2
        k = math.gcd(res1, res2)
        if res2 // k == 1:
            answer = str(res1 // k)
        else:
            answer = f'{res1 // k}/{res2 // k}'
        # Randomize the operand order only for the commutative '*' case.
        if p == 2 and op == '*':
            m1, m2, d1, d2 = m2, m1, d2, d1
        s = f'({m1}/{d1}) {op} ({m2}/{d2})=?'
        draw_data = (op, m1, d1, m2, d2)
    else:
        raise ValueError('Этот уровень не существует.')
    # For the plain-expression levels the answer is computed here and the
    # text is prettified (unicode exponents) with '=?' appended; levels
    # 8/9/11/12 already set answer and a complete s above.
    if level not in (8, 9, 11, 12):
        answer = int(eval(s))
        s = s.replace('**2', '²').replace('**3', '³') + '=?'
    return answer, s, draw_data
| 24.386792 | 99 | 0.412379 | 1,041 | 7,755 | 3.045149 | 0.122959 | 0.209148 | 0.092744 | 0.051104 | 0.54858 | 0.470032 | 0.45142 | 0.433438 | 0.399685 | 0.342587 | 0 | 0.123168 | 0.410574 | 7,755 | 317 | 100 | 24.463722 | 0.570335 | 0.002708 | 0 | 0.502024 | 1 | 0.004049 | 0.086653 | 0.010347 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008097 | false | 0 | 0.016194 | 0 | 0.032389 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e7a18461f78d2c1a2e2ff37128e8d8783d4b002e | 2,981 | py | Python | pyteal/ast/if_test.py | thadguidry/pyteal | 529ee6e96e1acb300bb30982c886bbbdb6430b19 | [
"MIT"
] | null | null | null | pyteal/ast/if_test.py | thadguidry/pyteal | 529ee6e96e1acb300bb30982c886bbbdb6430b19 | [
"MIT"
] | null | null | null | pyteal/ast/if_test.py | thadguidry/pyteal | 529ee6e96e1acb300bb30982c886bbbdb6430b19 | [
"MIT"
] | 1 | 2021-05-26T02:41:37.000Z | 2021-05-26T02:41:37.000Z | import pytest
from .. import *
# this is not necessary but mypy complains if it's not included
from .. import CompileOptions
options = CompileOptions()
def test_if_int():
    """If() over uint64 branches yields a uint64-typed conditional CFG."""
    cond, on_true, on_false = Int(0), Int(1), Int(2)
    expr = If(cond, on_true, on_false)
    assert expr.type_of() == TealType.uint64

    # Build the expected control-flow graph by hand.
    cond_start, _ = cond.__teal__(options)
    true_block, _ = on_true.__teal__(options)
    false_block, _ = on_false.__teal__(options)
    branch = TealConditionalBlock([])
    branch.setTrueBlock(true_block)
    branch.setFalseBlock(false_block)
    cond_start.setNextBlock(branch)
    join = TealSimpleBlock([])
    true_block.setNextBlock(join)
    false_block.setNextBlock(join)

    actual, _ = expr.__teal__(options)
    assert actual == cond_start
def test_if_bytes():
    """If() over bytes branches yields a bytes-typed conditional CFG."""
    cond, on_true, on_false = Int(1), Txn.sender(), Txn.receiver()
    expr = If(cond, on_true, on_false)
    assert expr.type_of() == TealType.bytes

    # Build the expected control-flow graph by hand.
    cond_start, _ = cond.__teal__(options)
    true_block, _ = on_true.__teal__(options)
    false_block, _ = on_false.__teal__(options)
    branch = TealConditionalBlock([])
    branch.setTrueBlock(true_block)
    branch.setFalseBlock(false_block)
    cond_start.setNextBlock(branch)
    join = TealSimpleBlock([])
    true_block.setNextBlock(join)
    false_block.setNextBlock(join)

    actual, _ = expr.__teal__(options)
    assert actual == cond_start
def test_if_none():
    """If() over none-typed branches: branch starts feed the conditional,
    branch ends feed the join block."""
    cond = Int(0)
    on_true = Pop(Txn.sender())
    on_false = Pop(Txn.receiver())
    expr = If(cond, on_true, on_false)
    assert expr.type_of() == TealType.none

    # Build the expected control-flow graph by hand.
    cond_start, _ = cond.__teal__(options)
    true_start, true_end = on_true.__teal__(options)
    false_start, false_end = on_false.__teal__(options)
    branch = TealConditionalBlock([])
    branch.setTrueBlock(true_start)
    branch.setFalseBlock(false_start)
    cond_start.setNextBlock(branch)
    join = TealSimpleBlock([])
    true_end.setNextBlock(join)
    false_end.setNextBlock(join)

    actual, _ = expr.__teal__(options)
    assert actual == cond_start
def test_if_single():
    """Single-branch If(): the false path falls straight through to the join."""
    cond = Int(1)
    on_true = Pop(Int(1))
    expr = If(cond, on_true)
    assert expr.type_of() == TealType.none

    # Build the expected control-flow graph by hand.
    cond_start, _ = cond.__teal__(options)
    true_start, true_end = on_true.__teal__(options)
    join = TealSimpleBlock([])
    branch = TealConditionalBlock([])
    branch.setTrueBlock(true_start)
    branch.setFalseBlock(join)
    cond_start.setNextBlock(branch)
    true_end.setNextBlock(join)

    actual, _ = expr.__teal__(options)
    assert actual == cond_start
def test_if_invalid():
    """If() rejects type-mismatched or wrongly-typed arguments."""
    bad_constructions = [
        # branch types disagree (uint64 vs bytes)
        lambda: If(Int(0), Txn.amount(), Txn.sender()),
        # condition must be uint64, not bytes
        lambda: If(Txn.sender(), Int(1), Int(0)),
        # single-branch form requires a none-typed branch
        lambda: If(Int(0), Int(1)),
        lambda: If(Int(0), Txn.sender()),
    ]
    for construct in bad_constructions:
        with pytest.raises(TealTypeError):
            construct()
| 30.111111 | 63 | 0.685676 | 323 | 2,981 | 6.06192 | 0.176471 | 0.08427 | 0.022983 | 0.022472 | 0.784985 | 0.742594 | 0.734423 | 0.644025 | 0.566394 | 0.566394 | 0 | 0.015158 | 0.181147 | 2,981 | 98 | 64 | 30.418367 | 0.786973 | 0.020463 | 0 | 0.68 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106667 | 1 | 0.066667 | false | 0 | 0.04 | 0 | 0.106667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e7af5e5359e19c940c77ef4720f67485a7568c01 | 1,271 | py | Python | src/pyrin/database/audit/hooks.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | src/pyrin/database/audit/hooks.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | src/pyrin/database/audit/hooks.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
database audit hooks module.
"""
import pyrin.database.audit.services as database_audit_services
from pyrin.audit.decorators import audit_hook
from pyrin.audit.hooks import AuditHookBase
@audit_hook()
class AuditHook(AuditHookBase):
    """
    audit hook exposing the database package to the audit subsystem.
    """

    # name under which this package's audit data will be reported.
    audit_name = 'database'

    def inspect(self, **options):
        """
        inspects the status of the database package.

        delegates the actual work to the database audit services and
        returns its (data, succeeded) result unchanged.

        :keyword bool traceback: specifies that on failure report, it must include
                                 the traceback of errors.
                                 defaults to True if not provided.

        :keyword bool raise_error: specifies that it must raise error
                                   if any of registered audits failed
                                   instead of returning a failure response.
                                   defaults to False if not provided.

        :rtype: tuple[dict, bool]
        """
        result = database_audit_services.inspect(**options)
        return result
| 31 | 89 | 0.61133 | 149 | 1,271 | 5.161074 | 0.543624 | 0.06762 | 0.081925 | 0.070221 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001175 | 0.330448 | 1,271 | 40 | 90 | 31.775 | 0.902468 | 0.624705 | 0 | 0 | 0 | 0 | 0.023324 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.375 | 0 | 0.875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
e7b971829edf83181e57c19347ed794186f514fb | 276 | py | Python | setup.py | SamP20/hackspace-flask-theme | ad3b22c6ed1391217acd83d985620d7e8d190880 | [
"MIT"
] | null | null | null | setup.py | SamP20/hackspace-flask-theme | ad3b22c6ed1391217acd83d985620d7e8d190880 | [
"MIT"
] | null | null | null | setup.py | SamP20/hackspace-flask-theme | ad3b22c6ed1391217acd83d985620d7e8d190880 | [
"MIT"
] | null | null | null | from setuptools import setup, find_namespace_packages
setup(
name='bristolhackspace.flask_theme',
packages=find_namespace_packages(include=['bristolhackspace.*']),
include_package_data=True,
zip_safe=False,
install_requires=[
"flask>=2.0",
]
) | 25.090909 | 69 | 0.717391 | 30 | 276 | 6.3 | 0.733333 | 0.137566 | 0.222222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008696 | 0.166667 | 276 | 11 | 70 | 25.090909 | 0.813043 | 0 | 0 | 0 | 0 | 0 | 0.202166 | 0.101083 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e7c442614c575afcfd66094cd7a14ab271070651 | 527 | py | Python | tests/functional/__init__.py | dimkonko/pyramid_api_example | b3bfb8dbe623544af9b147081d3e25c75a191c24 | [
"MIT"
] | null | null | null | tests/functional/__init__.py | dimkonko/pyramid_api_example | b3bfb8dbe623544af9b147081d3e25c75a191c24 | [
"MIT"
] | null | null | null | tests/functional/__init__.py | dimkonko/pyramid_api_example | b3bfb8dbe623544af9b147081d3e25c75a191c24 | [
"MIT"
] | null | null | null | import os
import unittest
from pyramid import testing
from pyramid.paster import get_appsettings
from webtest import TestApp
from pyramid_api_example import main
from pyramid_api_example.path import SERVER_ROOT_PATH
# Load deployment settings from <server root>/etc/local.ini and build the
# WSGI application under test once, at import time, shared by all tests.
app_settings = get_appsettings(
    config_uri=os.path.join(SERVER_ROOT_PATH, 'etc', 'local.ini'))
app = main({}, **app_settings)
class BaseFunctionalTest(unittest.TestCase):
    """Base class for functional tests: sets up pyramid's test environment
    and a WebTest client wrapping the configured WSGI app."""

    def setUp(self):
        # Push a pyramid testing configuration, then wrap the shared app
        # so tests can issue requests via self.app.
        testing.setUp()
        self.app = TestApp(app)

    def tearDown(self):
        # Tear down the pyramid testing configuration pushed in setUp().
        testing.tearDown()
| 21.08 | 66 | 0.745731 | 70 | 527 | 5.428571 | 0.457143 | 0.115789 | 0.073684 | 0.110526 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16888 | 527 | 24 | 67 | 21.958333 | 0.86758 | 0 | 0 | 0 | 0 | 0 | 0.02277 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.4375 | 0 | 0.625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
e7cb3db8c7fc0b39d890fb111fdb76076b8d97bf | 145 | py | Python | backend/model_api/apps.py | lizajor/ReCOVER-COVID-19 | 29d5872d5da0aa3621c520f1edbac510ae99b8df | [
"MIT"
] | 3 | 2022-03-25T01:48:49.000Z | 2022-03-25T01:49:50.000Z | backend/model_api/apps.py | lizajor/ReCOVER-COVID-19 | 29d5872d5da0aa3621c520f1edbac510ae99b8df | [
"MIT"
] | null | null | null | backend/model_api/apps.py | lizajor/ReCOVER-COVID-19 | 29d5872d5da0aa3621c520f1edbac510ae99b8df | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class ModelApiConfig(AppConfig):
name = 'model_api'
default_auto_field = 'django.db.models.AutoField' | 24.166667 | 53 | 0.765517 | 18 | 145 | 6 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.144828 | 145 | 6 | 53 | 24.166667 | 0.870968 | 0 | 0 | 0 | 0 | 0 | 0.239726 | 0.178082 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e7cbe7afbcb0820235ccd4d97c09f14da529a90d | 924 | py | Python | tests/test_data.py | mjbommar/amos3 | 7364b18e514db601b6c7d8208ffab05524c2a608 | [
"MIT"
] | 4 | 2018-07-04T14:14:31.000Z | 2018-12-05T21:15:34.000Z | tests/test_data.py | mjbommar/amos3 | 7364b18e514db601b6c7d8208ffab05524c2a608 | [
"MIT"
] | 9 | 2018-06-27T16:34:50.000Z | 2018-07-05T22:45:37.000Z | tests/test_data.py | mjbommar/amos3 | 7364b18e514db601b6c7d8208ffab05524c2a608 | [
"MIT"
] | null | null | null | # Imports
import os
# Package imports
from nose.tools import assert_true, assert_equal, assert_in
# Project imports
from amos3.data import build_image_database, build_camera_database
def test_build_camera_database():
    """
    Test build_camera_database: it should return a list of per-camera
    dicts, each carrying an "id" key.
    :return:
    """
    cameras = build_camera_database(num_cameras=10)
    assert_equal(type(cameras), list)
    assert_equal(len(cameras), 10)
    first_camera = cameras[0]
    assert_equal(type(first_camera), dict)
    assert_in("id", first_camera)
def test_build_image_database():
    """
    Test build_image_database.

    Downloads imagery for camera 30815 and checks a known image file
    lands on disk (requires network access).
    :return:
    """
    build_image_database([30815])
    assert_true(os.path.exists("data/30815/20150330_163044.jpg"))
def test_build_image_database_parallel():
    """
    Test build_image_database.

    Same as test_build_image_database but fetches three cameras with two
    worker processes/threads; only the first camera's file is checked.
    :return:
    """
    build_image_database([30815, 30816, 30817], workers=2)
    assert_true(os.path.exists("data/30815/20150330_163044.jpg"))
| 23.692308 | 66 | 0.720779 | 123 | 924 | 5.065041 | 0.333333 | 0.11236 | 0.202247 | 0.141252 | 0.484751 | 0.404494 | 0.317817 | 0.317817 | 0.317817 | 0.154093 | 0 | 0.085492 | 0.164502 | 924 | 38 | 67 | 24.315789 | 0.721503 | 0.161255 | 0 | 0.133333 | 0 | 0 | 0.086713 | 0.083916 | 0 | 0 | 0 | 0 | 0.466667 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e7d333dc481dbbba354d441627c108f5adf2bcfb | 873 | py | Python | day02/t01/views.py | SunShuoJia/pyproject | 71f3cada463fd90243b2cdac8c982fb622f9ef9c | [
"Apache-2.0"
] | null | null | null | day02/t01/views.py | SunShuoJia/pyproject | 71f3cada463fd90243b2cdac8c982fb622f9ef9c | [
"Apache-2.0"
] | null | null | null | day02/t01/views.py | SunShuoJia/pyproject | 71f3cada463fd90243b2cdac8c982fb622f9ef9c | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
from .models import FireCart
# Create your views here.
def my_carts(req):
    """List all FireCart rows with speed <= 500, slowest first."""
    # fetch the data
    data = FireCart.objects.filter(speed__lte=500).order_by("speed")
    # build the template context to return
    result ={
        "title":"火cart",
        "carts":data
    }
    return render(req,"trains.html",result)
def search_by_name(req):
# param = req.GET
# kw = param.get("kw")
# 根据参数,搜索数据
# res = FireCart.objects.filter( name__contains=kw )
# 搜索name结尾是kw的数据
# res =FireCart.objects.filter( name__endswith=kw )
# 搜索speed是250或300的数据
# res = FireCart.objects.filter( speed__in=[250,300] )
# 搜索出厂年份是2018的数据
# res = FireCart.objects.filter ( create_date__year=2018 )
# 搜索出厂月份大于等于9的数据
res = FireCart.objects.filter( create_date__month__gte=9 )
return render(req,"trains.html",{"carts":res}) | 32.333333 | 68 | 0.678121 | 109 | 873 | 5.247706 | 0.513761 | 0.157343 | 0.22028 | 0.20979 | 0.304196 | 0.118881 | 0 | 0 | 0 | 0 | 0 | 0.035714 | 0.198167 | 873 | 27 | 69 | 32.333333 | 0.781429 | 0.405498 | 0 | 0 | 0 | 0 | 0.093069 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.230769 | 0 | 0.538462 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
99af4575eeff74c37be28df0dc6b151bc759bbe0 | 158 | py | Python | pydarknetserver/__init__.py | yoyonel/pyDarknetServer | 51c573971a55b1b565ffab9530a4955799a81b06 | [
"MIT"
] | null | null | null | pydarknetserver/__init__.py | yoyonel/pyDarknetServer | 51c573971a55b1b565ffab9530a4955799a81b06 | [
"MIT"
] | 234 | 2019-07-24T05:39:34.000Z | 2022-03-28T11:38:20.000Z | pydarknetserver/__init__.py | yoyonel/pyDarknetServer | 51c573971a55b1b565ffab9530a4955799a81b06 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for pyDarknetServer."""
__author__ = """Lionel Atty"""
__email__ = 'yoyonel@hotmail.com'
__version__ = '0.2.0'
| 19.75 | 44 | 0.651899 | 19 | 158 | 4.789474 | 0.947368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029197 | 0.132911 | 158 | 7 | 45 | 22.571429 | 0.635037 | 0.386076 | 0 | 0 | 0 | 0 | 0.384615 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
99d62a0da385a238fe1b2816a235cfb166355849 | 270 | py | Python | test_ssh.py | PACELab/merit_system | 3398aa4d0ec322017ec91dbde8f276fb2bbe4435 | [
"MIT"
] | null | null | null | test_ssh.py | PACELab/merit_system | 3398aa4d0ec322017ec91dbde8f276fb2bbe4435 | [
"MIT"
] | null | null | null | test_ssh.py | PACELab/merit_system | 3398aa4d0ec322017ec91dbde8f276fb2bbe4435 | [
"MIT"
] | null | null | null | import subprocess, sys, os
# Check SSH connectivity to the VM whose address is given on the command line.
vm = sys.argv[1]
try:
    # Run a trivial remote command ("ls") to verify the connection.
    # Pass the command as an argument list with shell=False: this avoids
    # shell injection through the vm argument. Since no shell expands ~,
    # the key path is expanded explicitly.
    subprocess.check_output(
        ["ssh", "-i", os.path.expanduser("~/graybox.pem"),
         "ubuntu@%s" % vm, "ls"])
except subprocess.CalledProcessError as grepexc:
    # print() calls (the original used Python 2 print statements, which
    # are a syntax error on Python 3).
    print("SSH threw error code", grepexc.returncode, grepexc.output)
    print("\t\tVM IP:", vm)
| 27 | 80 | 0.733333 | 41 | 270 | 4.804878 | 0.756098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004219 | 0.122222 | 270 | 9 | 81 | 30 | 0.827004 | 0 | 0 | 0 | 0 | 0 | 0.241636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.142857 | null | null | 0.285714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
99e881ea11b35b65a062a78e2429e93eb5752476 | 1,851 | py | Python | RecoEcal/EgammaCoreTools/python/EcalSCDynamicDPhiParametersESProducer_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | RecoEcal/EgammaCoreTools/python/EcalSCDynamicDPhiParametersESProducer_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | RecoEcal/EgammaCoreTools/python/EcalSCDynamicDPhiParametersESProducer_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
# ESProducer supplying the dynamic dPhi window parameters used in ECAL
# supercluster building.
ecalSCDynamicDPhiParametersESProducer = cms.ESProducer("EcalSCDynamicDPhiParametersESProducer",
    # Parameters from the analysis by L. Zygala [https://indico.cern.ch/event/949294/contributions/3988389/attachments/2091573/3514649/2020_08_26_Clustering.pdf]
    # dynamic dPhi parameters depending on cluster energy and seed crystal eta
    # NOTE(review): the PSets are ordered by decreasing etaMin; presumably the
    # first set whose eMin/etaMin thresholds are satisfied is selected —
    # confirm against the EcalSCDynamicDPhiParametersESProducer implementation.
    dynamicDPhiParameterSets = cms.VPSet(
        # |eta| >= 2.0 bin
        cms.PSet(
            eMin = cms.double(0.),
            etaMin = cms.double(2.),
            yoffset = cms.double(0.0928887),
            scale = cms.double(1.22321),
            xoffset = cms.double(-0.260256),
            width = cms.double(0.345852),
            saturation = cms.double(0.12),
            cutoff = cms.double(0.3)
        ),
        # 1.75 <= |eta| < 2.0 bin
        cms.PSet(
            eMin = cms.double(0.),
            etaMin = cms.double(1.75),
            yoffset = cms.double(0.05643),
            scale = cms.double(1.60429),
            xoffset = cms.double(-0.642352),
            width = cms.double(0.458106),
            saturation = cms.double(0.12),
            cutoff = cms.double(0.45)
        ),
        # 1.479 <= |eta| < 1.75 bin (endcap boundary starts at 1.479)
        cms.PSet(
            eMin = cms.double(0.),
            etaMin = cms.double(1.479),
            yoffset = cms.double(0.0497038),
            scale = cms.double(0.975707),
            xoffset = cms.double(-0.18149),
            width = cms.double(0.431729),
            saturation = cms.double(0.14),
            cutoff = cms.double(0.55)
        ),
        # |eta| < 1.479 (barrel) bin
        cms.PSet(
            eMin = cms.double(0.),
            etaMin = cms.double(0.),
            yoffset = cms.double(0.0280506),
            scale = cms.double(0.946048),
            xoffset = cms.double(-0.101172),
            width = cms.double(0.432767),
            saturation = cms.double(0.14),
            cutoff = cms.double(0.6)
        )
    )
)
| 37.02 | 161 | 0.545111 | 209 | 1,851 | 4.813397 | 0.382775 | 0.286282 | 0.26839 | 0.055666 | 0.296223 | 0.296223 | 0.296223 | 0.296223 | 0.296223 | 0.073559 | 0 | 0.145032 | 0.32577 | 1,851 | 49 | 162 | 37.77551 | 0.661058 | 0.123177 | 0 | 0.333333 | 0 | 0 | 0.022854 | 0.022854 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.022222 | 0 | 0.022222 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
99edda12568671d7476a03969ac5452b03417fea | 350 | py | Python | tp1/respuesta6.py | ae77em/7526-tp-grupal-2c2018 | 2ed236031c74c87e1ea3c9176c02819c5d5b1738 | [
"MIT"
] | null | null | null | tp1/respuesta6.py | ae77em/7526-tp-grupal-2c2018 | 2ed236031c74c87e1ea3c9176c02819c5d5b1738 | [
"MIT"
] | null | null | null | tp1/respuesta6.py | ae77em/7526-tp-grupal-2c2018 | 2ed236031c74c87e1ea3c9176c02819c5d5b1738 | [
"MIT"
] | null | null | null | # RESPUESTA 6
# Answer 6: histogram of geometric-distribution trials simulating a fair coin.
import plotly.plotly as py  # NOTE(review): plotly.plotly was split out into chart_studio.plotly in plotly>=4 — confirm installed version
import plotly.graph_objs as go
import plotly.tools as tls       # imported but not used in this script
import matplotlib.pyplot as plt  # imported but not used in this script
from funciones import experimento_geometrica
# Run the experiments: p=0.5 models a balanced (fair) coin.
datos_equilibrada = experimento_geometrica(0.5)
data = [go.Histogram(x=datos_equilibrada )]
# Uploads the histogram to the plotly cloud under this filename.
py.plot(data, filename='histograma-moneda-equilibrada')
| 26.923077 | 55 | 0.817143 | 50 | 350 | 5.62 | 0.64 | 0.128114 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009615 | 0.108571 | 350 | 12 | 56 | 29.166667 | 0.891026 | 0.094286 | 0 | 0 | 0 | 0 | 0.092357 | 0.092357 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.625 | 0 | 0.625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
99efe0e1092def4147ee68368c7df1283c5a9958 | 7,996 | py | Python | src/gui/draw_board.py | leonardodelfinodev/chess | 94b0e09642b320776ea941ae01c0e0c7420be18c | [
"MIT"
] | 1 | 2021-12-22T09:58:29.000Z | 2021-12-22T09:58:29.000Z | src/gui/draw_board.py | leonardodelfinodev/chess | 94b0e09642b320776ea941ae01c0e0c7420be18c | [
"MIT"
] | null | null | null | src/gui/draw_board.py | leonardodelfinodev/chess | 94b0e09642b320776ea941ae01c0e0c7420be18c | [
"MIT"
] | null | null | null | import pygame
from src.utils import constants as const
class DrawBoard(object):
    """Renders the chess board, coordinate labels, pieces and highlights
    onto a pygame surface.

    Args:
        state:  game-state object; this class reads state.board (DIM x DIM
                grid of piece codes), state.white_move, state.check,
                state.wk_cell and state.bk_cell.
        screen: pygame surface everything is drawn onto.
        color:  index of the colour theme in const.COLORS; also selects the
                highlight palette (0 -> yellow/green, otherwise blue/yellow).
    """

    def __init__(self, state, screen, color):
        self.state = state
        self.screen = screen
        self.color = color
        # [light, dark] square colours for the chosen theme.
        self.colors = [
            const.COLORS[self.color][0],
            const.COLORS[self.color][1]
        ]

    @staticmethod
    def laod_assets():
        """Load and scale every piece image, the rank digits 1-8 and the
        file letters a-h into the const.IMAGES / const.NUMBERS /
        const.LETTERS caches.

        NOTE(review): the name is a typo for "load_assets"; renaming would
        break existing callers, so it is kept as-is.
        """
        pieces = [
            const.WHITE_ROOK,
            const.WHITE_KNIGHT,
            const.WHITE_BISHOP,
            const.WHITE_QUEEN,
            const.WHITE_KING,
            const.WHITE_PAWN,
            const.BLACK_ROOK,
            const.BLACK_KNIGHT,
            const.BLACK_BISHOP,
            const.BLACK_QUEEN,
            const.BLACK_KING,
            const.BLACK_PAWN
        ]
        for piece in pieces:
            const.IMAGES[piece] = pygame.transform.scale(
                pygame.image.load(
                    "./img/assets/{}.png".format(piece)
                ),
                (const.SQ_SIZE, const.SQ_SIZE)
            )
        # Rank numbers "1".."8" and file letters 'a'..'h' share one loop.
        for i in range(1, 9):
            const.NUMBERS[str(i)] = pygame.transform.scale(
                pygame.image.load(
                    "./img/numbers/{}.png".format(str(i))
                ),
                (const.SQ_SIZE, const.SQ_SIZE)
            )
            const.LETTERS[chr(ord('a') + i - 1)] = pygame.transform.scale(
                pygame.image.load(
                    "./img/letters/{}.png".format(chr(ord('a')+i-1))
                ),
                (const.SQ_SIZE, const.SQ_SIZE)
            )

    def __draw_board(self):
        """Draw the playing squares plus the border strips and coordinate
        labels around the board.

        pygame.draw.rect returns the bounding Rect of what it drew; that
        Rect is reused below as the blit destination for the label images.
        """
        for row in range(const.DIM):
            for col in range(const.DIM):
                # Alternate light/dark squares by row+col parity.
                color = self.colors[((row+col) % 2)]
                if row == 0:
                    # Top border strip above this column.
                    pygame.draw.rect(
                        self.screen,
                        const.EDGES_RGB,
                        pygame.Rect(
                            col * const.SQ_SIZE,
                            row * const.SQ_SIZE,
                            const.SQ_SIZE, const.HALF_SQUARE
                        )
                    )
                if row == const.DIM - 1:
                    if col == const.DIM - 1:
                        # Bottom-right corner patch.
                        pygame.draw.rect(
                            self.screen,
                            const.EDGES_RGB,
                            pygame.Rect(
                                col * const.SQ_SIZE + const.SQ_SIZE,
                                row * const.SQ_SIZE + const.SQ_SIZE,
                                const.SQ_SIZE, const.SQ_SIZE
                            )
                        )
                    # Bottom border strip plus the file letter for this column.
                    self.screen.blit(
                        const.LETTERS[chr(ord('a') + col)],
                        pygame.draw.rect(
                            self.screen,
                            const.EDGES_RGB,
                            pygame.Rect(
                                col * const.SQ_SIZE + const.HALF_SQUARE,
                                row * const.SQ_SIZE + const.SQ_SIZE + const.HALF_SQUARE,
                                const.SQ_SIZE, const.HALF_SQUARE
                            )
                        )
                    )
                if col == 0:
                    if row == const.DIM - 1:
                        # Bottom-left corner patch.
                        pygame.draw.rect(
                            self.screen,
                            const.EDGES_RGB,
                            pygame.Rect(
                                col * const.SQ_SIZE,
                                row * const.SQ_SIZE + const.SQ_SIZE,
                                const.HALF_SQUARE, const.SQ_SIZE
                            )
                        )
                    # Left border strip plus the rank number for this row.
                    self.screen.blit(
                        const.NUMBERS[str(const.DIM - row)],
                        pygame.draw.rect(
                            self.screen,
                            const.EDGES_RGB,
                            pygame.Rect(
                                col * const.SQ_SIZE,
                                row * const.SQ_SIZE + const.HALF_SQUARE,
                                const.HALF_SQUARE, const.SQ_SIZE
                            )
                        )
                    )
                if col == const.DIM - 1:
                    if row == 0:
                        # Top-right corner patch.
                        pygame.draw.rect(
                            self.screen,
                            const.EDGES_RGB,
                            pygame.Rect(
                                (col + 1) * const.SQ_SIZE, row * const.SQ_SIZE,
                                const.SQ_SIZE, const.HALF_SQUARE
                            )
                        )
                    # Right border strip beside this row.
                    pygame.draw.rect(
                        self.screen,
                        const.EDGES_RGB,
                        pygame.Rect(
                            col * const.SQ_SIZE + const.SQ_SIZE + const.HALF_SQUARE,
                            row * const.SQ_SIZE + const.HALF_SQUARE,
                            const.HALF_SQUARE, const.SQ_SIZE
                        )
                    )
                # The playing square itself, offset inward by the border width.
                pygame.draw.rect(
                    self.screen,
                    color,
                    pygame.Rect(
                        col * const.SQ_SIZE + const.HALF_SQUARE,
                        row * const.SQ_SIZE + const.HALF_SQUARE,
                        const.SQ_SIZE, const.SQ_SIZE
                    )
                )

    def __draw_pieces(self):
        """Blit each non-empty cell's piece image onto its square."""
        for row in range(const.DIM):
            for col in range(const.DIM):
                piece = self.state.board[row][col]
                if piece != const.EMPTY_CELL:
                    self.screen.blit(
                        const.IMAGES[piece],
                        pygame.Rect(
                            col * const.SQ_SIZE + const.HALF_SQUARE,
                            row * const.SQ_SIZE + const.HALF_SQUARE,
                            const.SQ_SIZE, const.SQ_SIZE
                        )
                    )

    def __highlight_cells(self, selected_square, valid_moves):
        """Highlight the selected square and every square it can move to.

        Highlights only when the selected piece belongs to the side to
        move (piece code starts with "w" or "b").
        """
        if selected_square != ():
            r, c = selected_square
            if self.state.board[r][c][0] == ("w" if self.state.white_move else "b"):
                surface_1 = pygame.Surface((const.SQ_SIZE, const.SQ_SIZE))
                surface_2 = pygame.Surface((const.SQ_SIZE, const.SQ_SIZE))
                surface_1.set_alpha(200)
                surface_2.set_alpha(120 if self.color == 0 else 160)
                # Selected square: yellow (theme 0) or blue.
                surface_1.fill((253, 216, 53) if self.color == 0 else (1, 87, 155))
                self.screen.blit(surface_1, (c * const.SQ_SIZE + const.HALF_SQUARE, r * const.SQ_SIZE + const.HALF_SQUARE))
                # Destination squares: green (theme 0) or yellow.
                surface_2.fill((100, 221, 23) if self.color == 0 else (253, 216, 53))
                for move in valid_moves:
                    if move.start_row == r and move.start_col == c:
                        self.screen.blit(
                            surface_2,
                            (const.SQ_SIZE * move.end_col + const.HALF_SQUARE,
                             const.SQ_SIZE * move.end_row + const.HALF_SQUARE)
                        )

    def __highlight_king(self):
        """Paint the checked side's king square red when the state is in check."""
        if self.state.check:
            surface = pygame.Surface((const.SQ_SIZE, const.SQ_SIZE))
            surface.set_alpha(200)
            # red overlay
            surface.fill((250, 10, 0))
            r, c = self.state.wk_cell if self.state.white_move else self.state.bk_cell
            self.screen.blit(
                surface,
                (const.SQ_SIZE * c + const.HALF_SQUARE,
                 const.SQ_SIZE * r + const.HALF_SQUARE)
            )

    def draw_game_state(self, valid_moves, selected_square):
        """Render one full frame: board, then highlights, then pieces on top."""
        self.__draw_board()
        if selected_square != () and selected_square[0] != -1 and selected_square[1] != -1:
            self.__highlight_cells(selected_square, valid_moves)
        self.__highlight_king()
        self.__draw_pieces()
| 39.004878 | 123 | 0.414207 | 765 | 7,996 | 4.12549 | 0.141176 | 0.117554 | 0.184728 | 0.1673 | 0.579531 | 0.525032 | 0.469899 | 0.408745 | 0.398289 | 0.352978 | 0 | 0.019681 | 0.497999 | 7,996 | 204 | 124 | 39.196078 | 0.766567 | 0.003752 | 0 | 0.420765 | 0 | 0 | 0.008039 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038251 | false | 0 | 0.010929 | 0 | 0.054645 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
8219c16352a4ae8fac4784b9508b1e95ddd2eaab | 578 | py | Python | test/test_bin_data.py | teristam/pyneurode | 7222704b531fe0a17c67a76cafa78ad666eda041 | [
"MIT"
] | 1 | 2022-02-03T17:07:31.000Z | 2022-02-03T17:07:31.000Z | test/test_bin_data.py | teristam/pyneurode | 7222704b531fe0a17c67a76cafa78ad666eda041 | [
"MIT"
] | null | null | null | test/test_bin_data.py | teristam/pyneurode | 7222704b531fe0a17c67a76cafa78ad666eda041 | [
"MIT"
] | null | null | null | from pyneurode.utils import pos_bin_data, get_binned_mean
import pytest
import numpy as np
@pytest.fixture
def pos():
    """Position samples cycling through values 1, 2, 3 twice."""
    cycle = [1, 2, 3]
    return np.array(cycle * 2)
@pytest.fixture
def x():
    """Integer values 1..6 paired element-wise with the pos samples."""
    return np.arange(1, 7)
def test_pos_bin_data(pos):
    """pos_bin_data with 3 bins maps positions 1,2,3 to bin indices 0,1,2.

    Fix: the original also requested the `x` fixture but never used it,
    forcing pytest to build it for nothing; the unused `bins` return is
    now explicitly discarded.
    """
    binned_idx, _bins = pos_bin_data(pos, 3)
    expected = np.array([0, 1, 2, 0, 1, 2])
    assert np.allclose(binned_idx, expected)
def test_get_binned_mean(pos, x):
    """Per-bin mean of x matches the hand-computed values 2.5, 3.5, 4.5."""
    idx, edges = pos_bin_data(pos, 3)
    result = get_binned_mean(idx, edges, x)
    assert np.allclose(result, np.array([2.5, 3.5, 4.5]))
| 22.230769 | 57 | 0.700692 | 111 | 578 | 3.45045 | 0.288288 | 0.02611 | 0.104439 | 0.101828 | 0.245431 | 0.245431 | 0.16188 | 0.16188 | 0.16188 | 0.16188 | 0 | 0.053061 | 0.152249 | 578 | 25 | 58 | 23.12 | 0.728571 | 0 | 0 | 0.222222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.222222 | false | 0 | 0.166667 | 0.111111 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 2 |
4154298e7a81a7f50f5b9cd2af2853685e2eaded | 1,637 | py | Python | inkpy/inklist.py | facelesspanda/inkpy | f21949b856f19f0b4781fa878760a7d1a03d3623 | [
"MIT"
] | null | null | null | inkpy/inklist.py | facelesspanda/inkpy | f21949b856f19f0b4781fa878760a7d1a03d3623 | [
"MIT"
] | null | null | null | inkpy/inklist.py | facelesspanda/inkpy | f21949b856f19f0b4781fa878760a7d1a03d3623 | [
"MIT"
] | null | null | null | from ._runtime import InkList as RInkList
class InkList:
    """Thin wrapper around the runtime ``RInkList`` implementation.

    Exposes the runtime list's properties and operators while keeping the
    underlying object private in ``self._l``.
    """

    # Constructor
    def __init__(self, *, l=None):
        """Wrap *l*, or a fresh empty RInkList when *l* is None.

        Raises:
            TypeError: if *l* is given but is not an RInkList.
        """
        # BUG FIX: the original signature was ``l=RInkList()`` — a mutable
        # default evaluated once, so every default-constructed InkList
        # shared the same runtime list. Each instance now gets its own.
        if l is None:
            l = RInkList()
        if not isinstance(l, RInkList):
            raise TypeError
        self._l = l

    # Properties
    @property
    def min(self):
        return self._l.min

    @property
    def max(self):
        return self._l.max

    @property
    def all(self):
        return self._l.all

    # Methods
    def copy(self):
        """Return a new InkList wrapping a copy of the underlying list."""
        duplicate = InkList()
        duplicate._l = self._l.copy()
        return duplicate

    # Specials
    def __contains__(self, other):
        return other in self._l

    def __hash__(self):
        return hash(self._l)

    def __str__(self):
        return str(self._l)

    def __len__(self):
        return len(self._l)

    def __invert__(self):
        return ~self._l

    # Comparisons (delegate to the runtime lists; *other* must be an InkList)
    def __lt__(self, other):
        return self._l < other._l

    def __le__(self, other):
        return self._l <= other._l

    def __eq__(self, other):
        return self._l == other._l

    def __ne__(self, other):
        return self._l != other._l

    def __gt__(self, other):
        return self._l > other._l

    def __ge__(self, other):
        return self._l >= other._l

    # Alterations
    def __or__(self, other):
        return self._l | other._l

    def __and__(self, other):
        return self._l & other._l

    def __sub__(self, other):
        return self._l - other._l

    def __ior__(self, other):
        # BUG FIX: the original unwrapped *other* to the runtime list and
        # then read ``other._l`` again (AttributeError), and returned None —
        # Python rebinds the target of ``a |= b`` to this method's result,
        # so the wrapper was replaced by None. Must return self.
        if isinstance(other, InkList):
            other = other._l
        self._l |= other
        return self

    def __isub__(self, other):
        # BUG FIX: must return self; ``a -= b`` rebinds ``a`` to this result
        # (the original returned None, destroying the wrapper).
        if isinstance(other, InkList):
            other = other._l
        self._l -= other
        return self
| 19.963415 | 55 | 0.576054 | 211 | 1,637 | 3.981043 | 0.218009 | 0.136905 | 0.170238 | 0.130952 | 0.440476 | 0.435714 | 0.435714 | 0.404762 | 0.128571 | 0.128571 | 0 | 0 | 0.317043 | 1,637 | 81 | 56 | 20.209877 | 0.751342 | 0.038485 | 0 | 0.096154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.403846 | false | 0 | 0.019231 | 0.326923 | 0.788462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 2 |
415a8b16e01d7953974c1ab48c1e8329dff8ada3 | 289 | py | Python | mundo 2/aula 14/exer64.py | jonatan098/cursopython | 6e4cbaef6229e230fdbc66d80ec1b5a089887b0d | [
"MIT"
] | null | null | null | mundo 2/aula 14/exer64.py | jonatan098/cursopython | 6e4cbaef6229e230fdbc66d80ec1b5a089887b0d | [
"MIT"
] | null | null | null | mundo 2/aula 14/exer64.py | jonatan098/cursopython | 6e4cbaef6229e230fdbc66d80ec1b5a089887b0d | [
"MIT"
] | 1 | 2020-02-22T17:21:05.000Z | 2020-02-22T17:21:05.000Z | #TRATANDO VÁRIOS VALORES
# Handling several values: keep accepting numbers until the user types the
# sentinel value 999, then report how many were entered and their sum.
total = 0
soma = 0
numb = int(input('digite o numero [999 para parar]: '))
while numb != 999:
    total = total + 1
    soma += numb
    numb = int(input('digite o numero [999 para parar]: '))
print(f'voce digitou {total} numeros e a soma entre eles é {soma}')
415c0b6755b52067dfdfbfed381f93aed9b49667 | 2,002 | py | Python | src/codeforces.py | manishbisht/notify | 0e2448d44e97321ae7e44f3af6d098fc457efe6f | [
"MIT"
] | null | null | null | src/codeforces.py | manishbisht/notify | 0e2448d44e97321ae7e44f3af6d098fc457efe6f | [
"MIT"
] | null | null | null | src/codeforces.py | manishbisht/notify | 0e2448d44e97321ae7e44f3af6d098fc457efe6f | [
"MIT"
] | null | null | null | import urllib, json, time
# Legacy Python 2 script (print statements, urllib.urlopen): queries the
# Codeforces contest list twice — once for the next upcoming contest and
# once for a currently running one — and prints a spoken-style summary.

# --- Next upcoming contest -------------------------------------------------
url = "http://codeforces.com/api/contest.list?gym=false"
response = urllib.urlopen(url)
data = json.loads(response.read())
speech_output = "There are no upcoming contest on codeforces."
if data["status"] == "OK":
    result = []
    # Upcoming contests have phase "BEFORE" and come first in the API
    # response; walking until the first non-"BEFORE" entry leaves `result`
    # holding the last "BEFORE" contest seen (the soonest upcoming one).
    for d in data["result"]:
        if d["phase"] != "BEFORE":
            break
        result = d
    if result == []:
        speech_output = "There are no upcoming contest on codeforces."
    else:
        now = result["startTimeSeconds"]
        then = int(time.time())
        # Split the seconds until start into days / hours / minutes / seconds.
        d = divmod(now - then, 86400)
        h = divmod(d[1], 3600)
        m = divmod(h[1], 60)
        s = m[1]
        speech_output = "The next contest on codeforces " + result["name"] + " will start in " \
                        '%d days, %d hours, %d minutes, %d seconds' % (
            d[0], h[0], m[0], s)
print speech_output

# --- Currently running contest ---------------------------------------------
url = "http://codeforces.com/api/contest.list?gym=false"
response = urllib.urlopen(url)
data = json.loads(response.read())
speech_output = "There is no contest running on codeforces."
if data["status"] == "OK":
    result = []
    # Stop at the first "CODING" (running) contest; a "FINISHED" entry
    # before any running one means nothing is live right now.
    for d in data["result"]:
        result = d
        if d["phase"] == "CODING":
            break
        elif d["phase"] == "FINISHED":
            result = []
            break
    if result == []:
        speech_output = "There is no contest running on codeforces."
    else:
        # Contest end = start time + duration.
        then = result["startTimeSeconds"] + result["durationSeconds"]
        now = int(time.time())
        d = divmod(then - now, 86400)
        h = divmod(d[1], 3600)
        m = divmod(h[1], 60)
        s = m[1]
        speech_output = "The contest " + result["name"] + " will end in " \
                        '%d days, %d hours, %d minutes, %d seconds' % (
            d[0], h[0], m[0], s)
print speech_output
| 37.773585 | 124 | 0.486513 | 231 | 2,002 | 4.181818 | 0.285714 | 0.099379 | 0.070393 | 0.041408 | 0.722567 | 0.668737 | 0.668737 | 0.668737 | 0.668737 | 0.530021 | 0 | 0.027222 | 0.376124 | 2,002 | 52 | 125 | 38.5 | 0.746197 | 0 | 0 | 0.76 | 0 | 0 | 0.269231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.02 | null | null | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
4162796b024d8ec9fab1aa58ec80ead0e7a6c5e9 | 788 | py | Python | reddit.py | Get-Luusy/get.luusy | 7a165a05a1231dda6f2ab083e54788f0bd2617cd | [
"MIT"
] | null | null | null | reddit.py | Get-Luusy/get.luusy | 7a165a05a1231dda6f2ab083e54788f0bd2617cd | [
"MIT"
] | null | null | null | reddit.py | Get-Luusy/get.luusy | 7a165a05a1231dda6f2ab083e54788f0bd2617cd | [
"MIT"
] | null | null | null | import praw
import keyboard
import time
from PIL import Image
import math
from glob import glob
import random
import requests
import re
import pprint
import importlib
import datetime
import urllib.request
import string
import sys
from config import config
# Authenticated Reddit API client; credentials come from the local config
# module (script-type OAuth app, identified by user_agent "tesh254").
reddit = praw.Reddit(client_id=config["rd_client"], client_secret=config["rd_secret"],
                     username=config["rd_dev_name"], password=config["rd_dev_pass"], user_agent="tesh254")
def DLimage(url, filepath, filename):
    """Download the image at *url* to ``<filepath><filename>.jpg``.

    NOTE(review): assumes *filepath* already ends with a path separator —
    confirm against callers.
    """
    destination = filepath + filename + ".jpg"
    urllib.request.urlretrieve(url, destination)
def IsImageLink(url):
    """Return the image extension ('png', 'jpg' or 'jpeg') of the first
    image URL found in *url*, or False when none is present.

    The scheme is optional, so protocol-relative links ("//host/x.png")
    also match.
    """
    pattern = re.compile(r'((https:|http:)?\/\/.*\.(png|jpg|jpeg))')
    hits = pattern.findall(url)
    # findall yields (full_match, scheme, extension) tuples; the extension
    # of the first hit is returned (a truthy string).
    return hits[0][2] if hits else False
| 23.176471 | 106 | 0.713198 | 103 | 788 | 5.368932 | 0.543689 | 0.057866 | 0.039783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007704 | 0.176396 | 788 | 33 | 107 | 23.878788 | 0.844376 | 0 | 0 | 0 | 0 | 0 | 0.114358 | 0.049555 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0.035714 | 0.571429 | 0 | 0.714286 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
416be66f15b774bf6ab7defff620cd07bba053e3 | 46,367 | py | Python | env/lib/python3.8/site-packages/apache_beam/portability/api/beam_artifact_api_pb2.py | paulowe/apache-beam-redocumentation | d1b0f345d8e46f9893f56c2bb890edc07be09f2a | [
"MIT"
] | null | null | null | env/lib/python3.8/site-packages/apache_beam/portability/api/beam_artifact_api_pb2.py | paulowe/apache-beam-redocumentation | d1b0f345d8e46f9893f56c2bb890edc07be09f2a | [
"MIT"
] | null | null | null | env/lib/python3.8/site-packages/apache_beam/portability/api/beam_artifact_api_pb2.py | paulowe/apache-beam-redocumentation | d1b0f345d8e46f9893f56c2bb890edc07be09f2a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: beam_artifact_api.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import beam_runner_api_pb2 as beam__runner__api__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='beam_artifact_api.proto',
package='org.apache.beam.model.job_management.v1',
syntax='proto3',
serialized_options=b'\n&org.apache.beam.model.jobmanagement.v1B\013ArtifactApiZRgithub.com/apache/beam/sdks/v2/go/pkg/beam/model/jobmanagement_v1;jobmanagement_v1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x17\x62\x65\x61m_artifact_api.proto\x12\'org.apache.beam.model.job_management.v1\x1a\x15\x62\x65\x61m_runner_api.proto\"|\n\x17ResolveArtifactsRequest\x12I\n\tartifacts\x18\x01 \x03(\x0b\x32\x36.org.apache.beam.model.pipeline.v1.ArtifactInformation\x12\x16\n\x0epreferred_urns\x18\x02 \x03(\t\"h\n\x18ResolveArtifactsResponse\x12L\n\x0creplacements\x18\x01 \x03(\x0b\x32\x36.org.apache.beam.model.pipeline.v1.ArtifactInformation\"^\n\x12GetArtifactRequest\x12H\n\x08\x61rtifact\x18\x01 \x01(\x0b\x32\x36.org.apache.beam.model.pipeline.v1.ArtifactInformation\"#\n\x13GetArtifactResponse\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\"\xd8\x01\n\x16\x41rtifactRequestWrapper\x12]\n\x10resolve_artifact\x18\xe8\x07 \x01(\x0b\x32@.org.apache.beam.model.job_management.v1.ResolveArtifactsRequestH\x00\x12T\n\x0cget_artifact\x18\xe9\x07 \x01(\x0b\x32;.org.apache.beam.model.job_management.v1.GetArtifactRequestH\x00\x42\t\n\x07request\"\x96\x02\n\x17\x41rtifactResponseWrapper\x12\x15\n\rstaging_token\x18\x01 \x01(\t\x12\x0f\n\x07is_last\x18\x02 \x01(\x08\x12g\n\x19resolve_artifact_response\x18\xe8\x07 \x01(\x0b\x32\x41.org.apache.beam.model.job_management.v1.ResolveArtifactsResponseH\x00\x12^\n\x15get_artifact_response\x18\xe9\x07 \x01(\x0b\x32<.org.apache.beam.model.job_management.v1.GetArtifactResponseH\x00\x42\n\n\x08response\"E\n\x10\x41rtifactMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0bpermissions\x18\x02 \x01(\r\x12\x0e\n\x06sha256\x18\x04 \x01(\t\"W\n\x08Manifest\x12K\n\x08\x61rtifact\x18\x01 \x03(\x0b\x32\x39.org.apache.beam.model.job_management.v1.ArtifactMetadata\"\xce\x01\n\rProxyManifest\x12\x43\n\x08manifest\x18\x01 \x01(\x0b\x32\x31.org.apache.beam.model.job_management.v1.Manifest\x12Q\n\x08location\x18\x02 \x03(\x0b\x32?.org.apache.beam.model.job_management.v1.ProxyManifest.Location\x1a%\n\x08Location\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0b\n\x03uri\x18\x02 \x01(\t\"-\n\x12GetManifestRequest\x12\x17\n\x0fretrieval_token\x18\x01 
\x01(\t\"Z\n\x13GetManifestResponse\x12\x43\n\x08manifest\x18\x01 \x01(\x0b\x32\x31.org.apache.beam.model.job_management.v1.Manifest\"A\n\x18LegacyGetArtifactRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0fretrieval_token\x18\x02 \x01(\t\"\x1d\n\rArtifactChunk\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\"\x81\x01\n\x13PutArtifactMetadata\x12\x1d\n\x15staging_session_token\x18\x01 \x01(\t\x12K\n\x08metadata\x18\x02 \x01(\x0b\x32\x39.org.apache.beam.model.job_management.v1.ArtifactMetadata\"\xb9\x01\n\x12PutArtifactRequest\x12P\n\x08metadata\x18\x01 \x01(\x0b\x32<.org.apache.beam.model.job_management.v1.PutArtifactMetadataH\x00\x12\x46\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x36.org.apache.beam.model.job_management.v1.ArtifactChunkH\x00\x42\t\n\x07\x63ontent\"\x15\n\x13PutArtifactResponse\"{\n\x15\x43ommitManifestRequest\x12\x43\n\x08manifest\x18\x01 \x01(\x0b\x32\x31.org.apache.beam.model.job_management.v1.Manifest\x12\x1d\n\x15staging_session_token\x18\x02 \x01(\t\"|\n\x16\x43ommitManifestResponse\x12\x17\n\x0fretrieval_token\x18\x01 
\x01(\t\"I\n\tConstants\x12<\n\x19NO_ARTIFACTS_STAGED_TOKEN\x10\x00\x1a\x1d\xaa\xb4\xfa\xc2\x05\x17__no_artifacts_staged__2\xc1\x02\n\x18\x41rtifactRetrievalService\x12\x97\x01\n\x10ResolveArtifacts\x12@.org.apache.beam.model.job_management.v1.ResolveArtifactsRequest\x1a\x41.org.apache.beam.model.job_management.v1.ResolveArtifactsResponse\x12\x8a\x01\n\x0bGetArtifact\x12;.org.apache.beam.model.job_management.v1.GetArtifactRequest\x1a<.org.apache.beam.model.job_management.v1.GetArtifactResponse0\x01\x32\xc3\x01\n\x16\x41rtifactStagingService\x12\xa8\x01\n\x1fReverseArtifactRetrievalService\x12@.org.apache.beam.model.job_management.v1.ArtifactResponseWrapper\x1a?.org.apache.beam.model.job_management.v1.ArtifactRequestWrapper(\x01\x30\x01\x32\xbf\x02\n\x1cLegacyArtifactStagingService\x12\x8a\x01\n\x0bPutArtifact\x12;.org.apache.beam.model.job_management.v1.PutArtifactRequest\x1a<.org.apache.beam.model.job_management.v1.PutArtifactResponse(\x01\x12\x91\x01\n\x0e\x43ommitManifest\x12>.org.apache.beam.model.job_management.v1.CommitManifestRequest\x1a?.org.apache.beam.model.job_management.v1.CommitManifestResponse2\xb8\x02\n\x1eLegacyArtifactRetrievalService\x12\x88\x01\n\x0bGetManifest\x12;.org.apache.beam.model.job_management.v1.GetManifestRequest\x1a<.org.apache.beam.model.job_management.v1.GetManifestResponse\x12\x8a\x01\n\x0bGetArtifact\x12\x41.org.apache.beam.model.job_management.v1.LegacyGetArtifactRequest\x1a\x36.org.apache.beam.model.job_management.v1.ArtifactChunk0\x01\x42\x89\x01\n&org.apache.beam.model.jobmanagement.v1B\x0b\x41rtifactApiZRgithub.com/apache/beam/sdks/v2/go/pkg/beam/model/jobmanagement_v1;jobmanagement_v1b\x06proto3'
,
dependencies=[beam__runner__api__pb2.DESCRIPTOR,])
# Generated descriptor for the nested CommitManifestResponse.Constants enum.
# NOTE: this module is emitted by protoc from beam_artifact_api.proto
# ("DO NOT EDIT") — change the .proto and regenerate rather than editing
# anything here; serialized_start/end are byte offsets into serialized_pb.
_COMMITMANIFESTRESPONSE_CONSTANTS = _descriptor.EnumDescriptor(
  name='Constants',
  full_name='org.apache.beam.model.job_management.v1.CommitManifestResponse.Constants',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='NO_ARTIFACTS_STAGED_TOKEN', index=0, number=0,
      # Encoded beam_urn option carrying the token "__no_artifacts_staged__".
      serialized_options=b'\252\264\372\302\005\027__no_artifacts_staged__',
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=2081,
  serialized_end=2154,
)
_sym_db.RegisterEnumDescriptor(_COMMITMANIFESTRESPONSE_CONSTANTS)
_RESOLVEARTIFACTSREQUEST = _descriptor.Descriptor(
name='ResolveArtifactsRequest',
full_name='org.apache.beam.model.job_management.v1.ResolveArtifactsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='artifacts', full_name='org.apache.beam.model.job_management.v1.ResolveArtifactsRequest.artifacts', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='preferred_urns', full_name='org.apache.beam.model.job_management.v1.ResolveArtifactsRequest.preferred_urns', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=91,
serialized_end=215,
)
_RESOLVEARTIFACTSRESPONSE = _descriptor.Descriptor(
name='ResolveArtifactsResponse',
full_name='org.apache.beam.model.job_management.v1.ResolveArtifactsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='replacements', full_name='org.apache.beam.model.job_management.v1.ResolveArtifactsResponse.replacements', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=217,
serialized_end=321,
)
_GETARTIFACTREQUEST = _descriptor.Descriptor(
name='GetArtifactRequest',
full_name='org.apache.beam.model.job_management.v1.GetArtifactRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='artifact', full_name='org.apache.beam.model.job_management.v1.GetArtifactRequest.artifact', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=323,
serialized_end=417,
)
_GETARTIFACTRESPONSE = _descriptor.Descriptor(
name='GetArtifactResponse',
full_name='org.apache.beam.model.job_management.v1.GetArtifactResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='data', full_name='org.apache.beam.model.job_management.v1.GetArtifactResponse.data', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=419,
serialized_end=454,
)
_ARTIFACTREQUESTWRAPPER = _descriptor.Descriptor(
name='ArtifactRequestWrapper',
full_name='org.apache.beam.model.job_management.v1.ArtifactRequestWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resolve_artifact', full_name='org.apache.beam.model.job_management.v1.ArtifactRequestWrapper.resolve_artifact', index=0,
number=1000, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='get_artifact', full_name='org.apache.beam.model.job_management.v1.ArtifactRequestWrapper.get_artifact', index=1,
number=1001, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='request', full_name='org.apache.beam.model.job_management.v1.ArtifactRequestWrapper.request',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=457,
serialized_end=673,
)
_ARTIFACTRESPONSEWRAPPER = _descriptor.Descriptor(
name='ArtifactResponseWrapper',
full_name='org.apache.beam.model.job_management.v1.ArtifactResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='staging_token', full_name='org.apache.beam.model.job_management.v1.ArtifactResponseWrapper.staging_token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_last', full_name='org.apache.beam.model.job_management.v1.ArtifactResponseWrapper.is_last', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='resolve_artifact_response', full_name='org.apache.beam.model.job_management.v1.ArtifactResponseWrapper.resolve_artifact_response', index=2,
number=1000, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='get_artifact_response', full_name='org.apache.beam.model.job_management.v1.ArtifactResponseWrapper.get_artifact_response', index=3,
number=1001, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='response', full_name='org.apache.beam.model.job_management.v1.ArtifactResponseWrapper.response',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=676,
serialized_end=954,
)
# ---------------------------------------------------------------------------
# NOTE(review): everything below is protoc-generated (legacy protobuf
# `_descriptor` API).  Do NOT hand-edit — regenerate from
# beam_artifact_api.proto.  Field `type` codes are the standard protobuf
# wire types (9 = string, 11 = message, 12 = bytes, 13 = uint32); label 1 =
# optional/singular, label 3 = repeated.
# ---------------------------------------------------------------------------
# ArtifactMetadata: name (string, #1), permissions (uint32, #2),
# sha256 (string, #4) of a staged artifact.
_ARTIFACTMETADATA = _descriptor.Descriptor(
name='ArtifactMetadata',
full_name='org.apache.beam.model.job_management.v1.ArtifactMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='org.apache.beam.model.job_management.v1.ArtifactMetadata.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='permissions', full_name='org.apache.beam.model.job_management.v1.ArtifactMetadata.permissions', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sha256', full_name='org.apache.beam.model.job_management.v1.ArtifactMetadata.sha256', index=2,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=956,
serialized_end=1025,
)
# Manifest: repeated ArtifactMetadata `artifact` (#1).
_MANIFEST = _descriptor.Descriptor(
name='Manifest',
full_name='org.apache.beam.model.job_management.v1.Manifest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='artifact', full_name='org.apache.beam.model.job_management.v1.Manifest.artifact', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1027,
serialized_end=1114,
)
# ProxyManifest.Location (nested message): name (#1) -> uri (#2) mapping entry.
_PROXYMANIFEST_LOCATION = _descriptor.Descriptor(
name='Location',
full_name='org.apache.beam.model.job_management.v1.ProxyManifest.Location',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='org.apache.beam.model.job_management.v1.ProxyManifest.Location.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='uri', full_name='org.apache.beam.model.job_management.v1.ProxyManifest.Location.uri', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1286,
serialized_end=1323,
)
# ProxyManifest: manifest (#1) plus repeated Location entries (#2);
# Location is registered below as a nested type.
_PROXYMANIFEST = _descriptor.Descriptor(
name='ProxyManifest',
full_name='org.apache.beam.model.job_management.v1.ProxyManifest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='manifest', full_name='org.apache.beam.model.job_management.v1.ProxyManifest.manifest', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='location', full_name='org.apache.beam.model.job_management.v1.ProxyManifest.location', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_PROXYMANIFEST_LOCATION, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1117,
serialized_end=1323,
)
# GetManifestRequest: retrieval_token (string, #1).
_GETMANIFESTREQUEST = _descriptor.Descriptor(
name='GetManifestRequest',
full_name='org.apache.beam.model.job_management.v1.GetManifestRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='retrieval_token', full_name='org.apache.beam.model.job_management.v1.GetManifestRequest.retrieval_token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1325,
serialized_end=1370,
)
# GetManifestResponse: manifest (Manifest, #1).
_GETMANIFESTRESPONSE = _descriptor.Descriptor(
name='GetManifestResponse',
full_name='org.apache.beam.model.job_management.v1.GetManifestResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='manifest', full_name='org.apache.beam.model.job_management.v1.GetManifestResponse.manifest', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1372,
serialized_end=1462,
)
# LegacyGetArtifactRequest: name (#1) and retrieval_token (#2), both strings.
_LEGACYGETARTIFACTREQUEST = _descriptor.Descriptor(
name='LegacyGetArtifactRequest',
full_name='org.apache.beam.model.job_management.v1.LegacyGetArtifactRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='org.apache.beam.model.job_management.v1.LegacyGetArtifactRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='retrieval_token', full_name='org.apache.beam.model.job_management.v1.LegacyGetArtifactRequest.retrieval_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1464,
serialized_end=1529,
)
# ArtifactChunk: data (bytes, #1).
_ARTIFACTCHUNK = _descriptor.Descriptor(
name='ArtifactChunk',
full_name='org.apache.beam.model.job_management.v1.ArtifactChunk',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='data', full_name='org.apache.beam.model.job_management.v1.ArtifactChunk.data', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1531,
serialized_end=1560,
)
# PutArtifactMetadata: staging_session_token (string, #1) +
# metadata (ArtifactMetadata, #2).
_PUTARTIFACTMETADATA = _descriptor.Descriptor(
name='PutArtifactMetadata',
full_name='org.apache.beam.model.job_management.v1.PutArtifactMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='staging_session_token', full_name='org.apache.beam.model.job_management.v1.PutArtifactMetadata.staging_session_token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metadata', full_name='org.apache.beam.model.job_management.v1.PutArtifactMetadata.metadata', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1563,
serialized_end=1692,
)
# PutArtifactRequest: oneof `content` — metadata (#1) or data chunk (#2);
# the oneof membership is wired up after all descriptors are defined.
_PUTARTIFACTREQUEST = _descriptor.Descriptor(
name='PutArtifactRequest',
full_name='org.apache.beam.model.job_management.v1.PutArtifactRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='metadata', full_name='org.apache.beam.model.job_management.v1.PutArtifactRequest.metadata', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='data', full_name='org.apache.beam.model.job_management.v1.PutArtifactRequest.data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='content', full_name='org.apache.beam.model.job_management.v1.PutArtifactRequest.content',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=1695,
serialized_end=1880,
)
# PutArtifactResponse: empty message (ack only).
_PUTARTIFACTRESPONSE = _descriptor.Descriptor(
name='PutArtifactResponse',
full_name='org.apache.beam.model.job_management.v1.PutArtifactResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1882,
serialized_end=1903,
)
# CommitManifestRequest: manifest (#1) + staging_session_token (string, #2).
_COMMITMANIFESTREQUEST = _descriptor.Descriptor(
name='CommitManifestRequest',
full_name='org.apache.beam.model.job_management.v1.CommitManifestRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='manifest', full_name='org.apache.beam.model.job_management.v1.CommitManifestRequest.manifest', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='staging_session_token', full_name='org.apache.beam.model.job_management.v1.CommitManifestRequest.staging_session_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1905,
serialized_end=2028,
)
# CommitManifestResponse: retrieval_token (string, #1) plus the Constants
# enum (_COMMITMANIFESTRESPONSE_CONSTANTS, defined earlier in this file).
_COMMITMANIFESTRESPONSE = _descriptor.Descriptor(
name='CommitManifestResponse',
full_name='org.apache.beam.model.job_management.v1.CommitManifestResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='retrieval_token', full_name='org.apache.beam.model.job_management.v1.CommitManifestResponse.retrieval_token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_COMMITMANIFESTRESPONSE_CONSTANTS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2030,
serialized_end=2154,
)
# Generated wiring pass: resolve message-typed fields to their descriptors
# (including cross-file references into beam_runner_api_pb2), attach oneof
# members to their OneofDescriptors, and set containing types for nested
# messages/enums.  Protoc emits this after all Descriptors exist because the
# references are circular at construction time.  Do not hand-edit.
_RESOLVEARTIFACTSREQUEST.fields_by_name['artifacts'].message_type = beam__runner__api__pb2._ARTIFACTINFORMATION
_RESOLVEARTIFACTSRESPONSE.fields_by_name['replacements'].message_type = beam__runner__api__pb2._ARTIFACTINFORMATION
_GETARTIFACTREQUEST.fields_by_name['artifact'].message_type = beam__runner__api__pb2._ARTIFACTINFORMATION
_ARTIFACTREQUESTWRAPPER.fields_by_name['resolve_artifact'].message_type = _RESOLVEARTIFACTSREQUEST
_ARTIFACTREQUESTWRAPPER.fields_by_name['get_artifact'].message_type = _GETARTIFACTREQUEST
_ARTIFACTREQUESTWRAPPER.oneofs_by_name['request'].fields.append(
_ARTIFACTREQUESTWRAPPER.fields_by_name['resolve_artifact'])
_ARTIFACTREQUESTWRAPPER.fields_by_name['resolve_artifact'].containing_oneof = _ARTIFACTREQUESTWRAPPER.oneofs_by_name['request']
_ARTIFACTREQUESTWRAPPER.oneofs_by_name['request'].fields.append(
_ARTIFACTREQUESTWRAPPER.fields_by_name['get_artifact'])
_ARTIFACTREQUESTWRAPPER.fields_by_name['get_artifact'].containing_oneof = _ARTIFACTREQUESTWRAPPER.oneofs_by_name['request']
_ARTIFACTRESPONSEWRAPPER.fields_by_name['resolve_artifact_response'].message_type = _RESOLVEARTIFACTSRESPONSE
_ARTIFACTRESPONSEWRAPPER.fields_by_name['get_artifact_response'].message_type = _GETARTIFACTRESPONSE
_ARTIFACTRESPONSEWRAPPER.oneofs_by_name['response'].fields.append(
_ARTIFACTRESPONSEWRAPPER.fields_by_name['resolve_artifact_response'])
_ARTIFACTRESPONSEWRAPPER.fields_by_name['resolve_artifact_response'].containing_oneof = _ARTIFACTRESPONSEWRAPPER.oneofs_by_name['response']
_ARTIFACTRESPONSEWRAPPER.oneofs_by_name['response'].fields.append(
_ARTIFACTRESPONSEWRAPPER.fields_by_name['get_artifact_response'])
_ARTIFACTRESPONSEWRAPPER.fields_by_name['get_artifact_response'].containing_oneof = _ARTIFACTRESPONSEWRAPPER.oneofs_by_name['response']
_MANIFEST.fields_by_name['artifact'].message_type = _ARTIFACTMETADATA
_PROXYMANIFEST_LOCATION.containing_type = _PROXYMANIFEST
_PROXYMANIFEST.fields_by_name['manifest'].message_type = _MANIFEST
_PROXYMANIFEST.fields_by_name['location'].message_type = _PROXYMANIFEST_LOCATION
_GETMANIFESTRESPONSE.fields_by_name['manifest'].message_type = _MANIFEST
_PUTARTIFACTMETADATA.fields_by_name['metadata'].message_type = _ARTIFACTMETADATA
_PUTARTIFACTREQUEST.fields_by_name['metadata'].message_type = _PUTARTIFACTMETADATA
_PUTARTIFACTREQUEST.fields_by_name['data'].message_type = _ARTIFACTCHUNK
_PUTARTIFACTREQUEST.oneofs_by_name['content'].fields.append(
_PUTARTIFACTREQUEST.fields_by_name['metadata'])
_PUTARTIFACTREQUEST.fields_by_name['metadata'].containing_oneof = _PUTARTIFACTREQUEST.oneofs_by_name['content']
_PUTARTIFACTREQUEST.oneofs_by_name['content'].fields.append(
_PUTARTIFACTREQUEST.fields_by_name['data'])
_PUTARTIFACTREQUEST.fields_by_name['data'].containing_oneof = _PUTARTIFACTREQUEST.oneofs_by_name['content']
_COMMITMANIFESTREQUEST.fields_by_name['manifest'].message_type = _MANIFEST
_COMMITMANIFESTRESPONSE_CONSTANTS.containing_type = _COMMITMANIFESTRESPONSE
# Register every top-level message descriptor on the FileDescriptor and
# register the file with the default symbol database (generated code).
DESCRIPTOR.message_types_by_name['ResolveArtifactsRequest'] = _RESOLVEARTIFACTSREQUEST
DESCRIPTOR.message_types_by_name['ResolveArtifactsResponse'] = _RESOLVEARTIFACTSRESPONSE
DESCRIPTOR.message_types_by_name['GetArtifactRequest'] = _GETARTIFACTREQUEST
DESCRIPTOR.message_types_by_name['GetArtifactResponse'] = _GETARTIFACTRESPONSE
DESCRIPTOR.message_types_by_name['ArtifactRequestWrapper'] = _ARTIFACTREQUESTWRAPPER
DESCRIPTOR.message_types_by_name['ArtifactResponseWrapper'] = _ARTIFACTRESPONSEWRAPPER
DESCRIPTOR.message_types_by_name['ArtifactMetadata'] = _ARTIFACTMETADATA
DESCRIPTOR.message_types_by_name['Manifest'] = _MANIFEST
DESCRIPTOR.message_types_by_name['ProxyManifest'] = _PROXYMANIFEST
DESCRIPTOR.message_types_by_name['GetManifestRequest'] = _GETMANIFESTREQUEST
DESCRIPTOR.message_types_by_name['GetManifestResponse'] = _GETMANIFESTRESPONSE
DESCRIPTOR.message_types_by_name['LegacyGetArtifactRequest'] = _LEGACYGETARTIFACTREQUEST
DESCRIPTOR.message_types_by_name['ArtifactChunk'] = _ARTIFACTCHUNK
DESCRIPTOR.message_types_by_name['PutArtifactMetadata'] = _PUTARTIFACTMETADATA
DESCRIPTOR.message_types_by_name['PutArtifactRequest'] = _PUTARTIFACTREQUEST
DESCRIPTOR.message_types_by_name['PutArtifactResponse'] = _PUTARTIFACTRESPONSE
DESCRIPTOR.message_types_by_name['CommitManifestRequest'] = _COMMITMANIFESTREQUEST
DESCRIPTOR.message_types_by_name['CommitManifestResponse'] = _COMMITMANIFESTRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete Python message classes from the descriptors via the
# protobuf reflection metaclass, and register each with the symbol database.
# ProxyManifest.Location is created inline as a nested class attribute.
# Generated code — do not hand-edit.
ResolveArtifactsRequest = _reflection.GeneratedProtocolMessageType('ResolveArtifactsRequest', (_message.Message,), {
'DESCRIPTOR' : _RESOLVEARTIFACTSREQUEST,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.ResolveArtifactsRequest)
})
_sym_db.RegisterMessage(ResolveArtifactsRequest)
ResolveArtifactsResponse = _reflection.GeneratedProtocolMessageType('ResolveArtifactsResponse', (_message.Message,), {
'DESCRIPTOR' : _RESOLVEARTIFACTSRESPONSE,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.ResolveArtifactsResponse)
})
_sym_db.RegisterMessage(ResolveArtifactsResponse)
GetArtifactRequest = _reflection.GeneratedProtocolMessageType('GetArtifactRequest', (_message.Message,), {
'DESCRIPTOR' : _GETARTIFACTREQUEST,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.GetArtifactRequest)
})
_sym_db.RegisterMessage(GetArtifactRequest)
GetArtifactResponse = _reflection.GeneratedProtocolMessageType('GetArtifactResponse', (_message.Message,), {
'DESCRIPTOR' : _GETARTIFACTRESPONSE,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.GetArtifactResponse)
})
_sym_db.RegisterMessage(GetArtifactResponse)
ArtifactRequestWrapper = _reflection.GeneratedProtocolMessageType('ArtifactRequestWrapper', (_message.Message,), {
'DESCRIPTOR' : _ARTIFACTREQUESTWRAPPER,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.ArtifactRequestWrapper)
})
_sym_db.RegisterMessage(ArtifactRequestWrapper)
ArtifactResponseWrapper = _reflection.GeneratedProtocolMessageType('ArtifactResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _ARTIFACTRESPONSEWRAPPER,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.ArtifactResponseWrapper)
})
_sym_db.RegisterMessage(ArtifactResponseWrapper)
ArtifactMetadata = _reflection.GeneratedProtocolMessageType('ArtifactMetadata', (_message.Message,), {
'DESCRIPTOR' : _ARTIFACTMETADATA,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.ArtifactMetadata)
})
_sym_db.RegisterMessage(ArtifactMetadata)
Manifest = _reflection.GeneratedProtocolMessageType('Manifest', (_message.Message,), {
'DESCRIPTOR' : _MANIFEST,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.Manifest)
})
_sym_db.RegisterMessage(Manifest)
ProxyManifest = _reflection.GeneratedProtocolMessageType('ProxyManifest', (_message.Message,), {
'Location' : _reflection.GeneratedProtocolMessageType('Location', (_message.Message,), {
'DESCRIPTOR' : _PROXYMANIFEST_LOCATION,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.ProxyManifest.Location)
})
,
'DESCRIPTOR' : _PROXYMANIFEST,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.ProxyManifest)
})
_sym_db.RegisterMessage(ProxyManifest)
_sym_db.RegisterMessage(ProxyManifest.Location)
GetManifestRequest = _reflection.GeneratedProtocolMessageType('GetManifestRequest', (_message.Message,), {
'DESCRIPTOR' : _GETMANIFESTREQUEST,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.GetManifestRequest)
})
_sym_db.RegisterMessage(GetManifestRequest)
GetManifestResponse = _reflection.GeneratedProtocolMessageType('GetManifestResponse', (_message.Message,), {
'DESCRIPTOR' : _GETMANIFESTRESPONSE,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.GetManifestResponse)
})
_sym_db.RegisterMessage(GetManifestResponse)
LegacyGetArtifactRequest = _reflection.GeneratedProtocolMessageType('LegacyGetArtifactRequest', (_message.Message,), {
'DESCRIPTOR' : _LEGACYGETARTIFACTREQUEST,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.LegacyGetArtifactRequest)
})
_sym_db.RegisterMessage(LegacyGetArtifactRequest)
ArtifactChunk = _reflection.GeneratedProtocolMessageType('ArtifactChunk', (_message.Message,), {
'DESCRIPTOR' : _ARTIFACTCHUNK,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.ArtifactChunk)
})
_sym_db.RegisterMessage(ArtifactChunk)
PutArtifactMetadata = _reflection.GeneratedProtocolMessageType('PutArtifactMetadata', (_message.Message,), {
'DESCRIPTOR' : _PUTARTIFACTMETADATA,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.PutArtifactMetadata)
})
_sym_db.RegisterMessage(PutArtifactMetadata)
PutArtifactRequest = _reflection.GeneratedProtocolMessageType('PutArtifactRequest', (_message.Message,), {
'DESCRIPTOR' : _PUTARTIFACTREQUEST,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.PutArtifactRequest)
})
_sym_db.RegisterMessage(PutArtifactRequest)
PutArtifactResponse = _reflection.GeneratedProtocolMessageType('PutArtifactResponse', (_message.Message,), {
'DESCRIPTOR' : _PUTARTIFACTRESPONSE,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.PutArtifactResponse)
})
_sym_db.RegisterMessage(PutArtifactResponse)
CommitManifestRequest = _reflection.GeneratedProtocolMessageType('CommitManifestRequest', (_message.Message,), {
'DESCRIPTOR' : _COMMITMANIFESTREQUEST,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.CommitManifestRequest)
})
_sym_db.RegisterMessage(CommitManifestRequest)
CommitManifestResponse = _reflection.GeneratedProtocolMessageType('CommitManifestResponse', (_message.Message,), {
'DESCRIPTOR' : _COMMITMANIFESTRESPONSE,
'__module__' : 'beam_artifact_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.job_management.v1.CommitManifestResponse)
})
_sym_db.RegisterMessage(CommitManifestResponse)
# Clear cached options, then declare the four gRPC services of the Artifact
# API: ArtifactRetrievalService, ArtifactStagingService and their two Legacy
# counterparts.  Each service is registered with the symbol database and on
# the FileDescriptor.  Generated code — do not hand-edit.
DESCRIPTOR._options = None
_COMMITMANIFESTRESPONSE_CONSTANTS.values_by_name["NO_ARTIFACTS_STAGED_TOKEN"]._options = None
# ArtifactRetrievalService: ResolveArtifacts, GetArtifact.
_ARTIFACTRETRIEVALSERVICE = _descriptor.ServiceDescriptor(
name='ArtifactRetrievalService',
full_name='org.apache.beam.model.job_management.v1.ArtifactRetrievalService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=2157,
serialized_end=2478,
methods=[
_descriptor.MethodDescriptor(
name='ResolveArtifacts',
full_name='org.apache.beam.model.job_management.v1.ArtifactRetrievalService.ResolveArtifacts',
index=0,
containing_service=None,
input_type=_RESOLVEARTIFACTSREQUEST,
output_type=_RESOLVEARTIFACTSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetArtifact',
full_name='org.apache.beam.model.job_management.v1.ArtifactRetrievalService.GetArtifact',
index=1,
containing_service=None,
input_type=_GETARTIFACTREQUEST,
output_type=_GETARTIFACTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_ARTIFACTRETRIEVALSERVICE)
DESCRIPTOR.services_by_name['ArtifactRetrievalService'] = _ARTIFACTRETRIEVALSERVICE
# ArtifactStagingService: ReverseArtifactRetrievalService (note the reversed
# direction — the staging side sends responses and receives requests).
_ARTIFACTSTAGINGSERVICE = _descriptor.ServiceDescriptor(
name='ArtifactStagingService',
full_name='org.apache.beam.model.job_management.v1.ArtifactStagingService',
file=DESCRIPTOR,
index=1,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=2481,
serialized_end=2676,
methods=[
_descriptor.MethodDescriptor(
name='ReverseArtifactRetrievalService',
full_name='org.apache.beam.model.job_management.v1.ArtifactStagingService.ReverseArtifactRetrievalService',
index=0,
containing_service=None,
input_type=_ARTIFACTRESPONSEWRAPPER,
output_type=_ARTIFACTREQUESTWRAPPER,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_ARTIFACTSTAGINGSERVICE)
DESCRIPTOR.services_by_name['ArtifactStagingService'] = _ARTIFACTSTAGINGSERVICE
# LegacyArtifactStagingService: PutArtifact, CommitManifest.
_LEGACYARTIFACTSTAGINGSERVICE = _descriptor.ServiceDescriptor(
name='LegacyArtifactStagingService',
full_name='org.apache.beam.model.job_management.v1.LegacyArtifactStagingService',
file=DESCRIPTOR,
index=2,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=2679,
serialized_end=2998,
methods=[
_descriptor.MethodDescriptor(
name='PutArtifact',
full_name='org.apache.beam.model.job_management.v1.LegacyArtifactStagingService.PutArtifact',
index=0,
containing_service=None,
input_type=_PUTARTIFACTREQUEST,
output_type=_PUTARTIFACTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CommitManifest',
full_name='org.apache.beam.model.job_management.v1.LegacyArtifactStagingService.CommitManifest',
index=1,
containing_service=None,
input_type=_COMMITMANIFESTREQUEST,
output_type=_COMMITMANIFESTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_LEGACYARTIFACTSTAGINGSERVICE)
DESCRIPTOR.services_by_name['LegacyArtifactStagingService'] = _LEGACYARTIFACTSTAGINGSERVICE
# LegacyArtifactRetrievalService: GetManifest, GetArtifact.
_LEGACYARTIFACTRETRIEVALSERVICE = _descriptor.ServiceDescriptor(
name='LegacyArtifactRetrievalService',
full_name='org.apache.beam.model.job_management.v1.LegacyArtifactRetrievalService',
file=DESCRIPTOR,
index=3,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=3001,
serialized_end=3313,
methods=[
_descriptor.MethodDescriptor(
name='GetManifest',
full_name='org.apache.beam.model.job_management.v1.LegacyArtifactRetrievalService.GetManifest',
index=0,
containing_service=None,
input_type=_GETMANIFESTREQUEST,
output_type=_GETMANIFESTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetArtifact',
full_name='org.apache.beam.model.job_management.v1.LegacyArtifactRetrievalService.GetArtifact',
index=1,
containing_service=None,
input_type=_LEGACYGETARTIFACTREQUEST,
output_type=_ARTIFACTCHUNK,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_LEGACYARTIFACTRETRIEVALSERVICE)
DESCRIPTOR.services_by_name['LegacyArtifactRetrievalService'] = _LEGACYARTIFACTRETRIEVALSERVICE
# @@protoc_insertion_point(module_scope)
| 42.813481 | 4,705 | 0.786119 | 5,355 | 46,367 | 6.463492 | 0.068161 | 0.034843 | 0.043944 | 0.060846 | 0.705651 | 0.677193 | 0.665347 | 0.63686 | 0.592367 | 0.579077 | 0 | 0.030509 | 0.097979 | 46,367 | 1,082 | 4,706 | 42.85305 | 0.797054 | 0.044816 | 0 | 0.649284 | 1 | 0.046012 | 0.215996 | 0.175215 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.005112 | 0 | 0.005112 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
418826cde5a1f948d4c38128cdcc64a197f10760 | 1,357 | py | Python | zellortlstreamer/__init__.py | trgosk/zello-channel-api | 732cf5da03caba6a479cd605377943da57b5c257 | [
"MIT"
] | null | null | null | zellortlstreamer/__init__.py | trgosk/zello-channel-api | 732cf5da03caba6a479cd605377943da57b5c257 | [
"MIT"
] | null | null | null | zellortlstreamer/__init__.py | trgosk/zello-channel-api | 732cf5da03caba6a479cd605377943da57b5c257 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from enum import Enum
from bitstream import BitStream
#MAJOR.MINOR.PATCH
__version__ = "1.0.1"
#Buffer
class DataBufferState(Enum):
    """Validity states for the data held in a DataBuffer."""

    BUFFER_ENABLED = 33   # buffered data are valid and may be consumed
    BUFFER_DISABLED = 55  # buffered data must not be consumed
class DataBuffer:
    """Buffer data and control class.

    Wraps a ``bitstream.BitStream`` together with three control attributes:

    state -- whether the data in the buffer are valid for use
             (``DataBufferState.BUFFER_ENABLED`` / ``BUFFER_DISABLED``)
    flush -- used to control whether new data is written to the buffer
    watch -- used to take an action once the buffered size reaches some value
    """

    def __init__(self,
                 buffer=None,
                 state=DataBufferState.BUFFER_ENABLED,
                 flush=False,
                 watch=False):
        # BUG FIX: the previous default ``buffer=BitStream()`` was a mutable
        # default argument — every DataBuffer created without an explicit
        # buffer shared one single BitStream instance (and the module-level
        # ``databuffer`` singleton shared it too).  Use a None sentinel and
        # allocate a fresh stream per instance instead.
        self.buffer = BitStream() if buffer is None else buffer
        self.state = state
        self.flush = flush
        self.watch = watch

    def Enable(self):
        """Mark the buffered data as valid for use."""
        self.state = DataBufferState.BUFFER_ENABLED

    def Disable(self):
        """Mark the buffered data as not valid for use."""
        self.state = DataBufferState.BUFFER_DISABLED

    def isEnabled(self):
        """Return True if the buffer is in the BUFFER_ENABLED state."""
        return self.state == DataBufferState.BUFFER_ENABLED

    def isDisabled(self):
        """Return True if the buffer is in the BUFFER_DISABLED state."""
        return self.state == DataBufferState.BUFFER_DISABLED

    def GetState(self):
        """Return the current DataBufferState."""
        return self.state

    def ResetBuffer(self):
        """Discard all buffered data by replacing the underlying stream."""
        self.buffer = BitStream()

    def GetSizeInBits(self):
        # NOTE(review): this reads the BitStream's internal ``.buffer``
        # attribute, while GetSizeInBytes uses len(self.buffer)/8 — the two
        # presumably should agree; confirm against the ``bitstream`` package.
        return len(self.buffer.buffer)

    def GetSizeInBytes(self):
        """Return the number of whole bytes currently buffered."""
        return int(len(self.buffer) / 8)


# Module-level convenience instance (now gets its own private BitStream).
databuffer = DataBuffer()
| 23.807018 | 71 | 0.646279 | 162 | 1,357 | 5.320988 | 0.382716 | 0.062645 | 0.150812 | 0.139211 | 0.220418 | 0.211137 | 0 | 0 | 0 | 0 | 0 | 0.00813 | 0.274871 | 1,357 | 56 | 72 | 24.232143 | 0.867886 | 0.187915 | 0 | 0 | 0 | 0 | 0.004664 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.272727 | false | 0 | 0.060606 | 0.151515 | 0.606061 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 2 |
418fd13337441f7ea91dca713b9a80bc78c77101 | 2,505 | py | Python | records/schema.py | marcelotokarnia/mountain-catalog | 6753a3369f9d3523bc94decc1a59f57737e03e16 | [
"MIT"
] | 1 | 2018-06-15T22:55:29.000Z | 2018-06-15T22:55:29.000Z | records/schema.py | marcelotokarnia/mountain-catalog | 6753a3369f9d3523bc94decc1a59f57737e03e16 | [
"MIT"
] | 10 | 2020-06-05T18:14:03.000Z | 2022-03-02T02:42:15.000Z | records/schema.py | marcelotokarnia/mountain-catalog | 6753a3369f9d3523bc94decc1a59f57737e03e16 | [
"MIT"
] | null | null | null | import graphene
from mountains.models import Mountain
from django.contrib.gis.measure import D
from django.contrib.gis.db.models.functions import Distance
from django.contrib.gis.geos import GEOSGeometry
from utils.schema_utils import find_operation_field, get_selections
class ActivityType(graphene.ObjectType):
    """ Strava Activity representation """
    # Only the id field is currently exposed in the GraphQL schema.
    id = graphene.ID()
    # NOTE(review): the fields below are planned but not yet enabled — they
    # appear to be copied from a MountainType; remove or adapt before use.
    # distance = graphene.Float(description="Distance from given position on query")
    # position = graphene.Field(PositionType, description="Mountain peak geo point")
    # elevation = graphene.Int(description="Mountain height in meters")
    # name = graphene.String()
    # country = graphene.String()
    # image = graphene.String()
    # province = graphene.String()
    # state = graphene.String()
    # curiosities = graphene.String()
    # region = graphene.String()
    # created_by = graphene.String()
class Query(graphene.ObjectType):
    """Root GraphQL query exposing the ``activities`` field."""
    activities = graphene.List(
        ActivityType,
        description="Query strava activities"
    )

    def resolve_activities(self, info, **args):
        """Resolver for ``activities``.

        NOTE(review): this is a stub — the filtering/sorting logic below is
        commented out (it appears to be copied from a Mountain resolver) and
        the method currently always returns an empty list.
        """
        # distance = args.get('distance')
        # elevation = args.get('elevation')
        # position = args.get('position')
        # sort = args.get('sort')
        # selections = get_selections(
        #     find_operation_field(info.field_asts, 'mountains'),
        #     info.fragments
        # )
        # queryset = Mountain.objects.all()
        # if elevation is not None:
        #     if elevation.min is not None:
        #         queryset = queryset.filter(
        #             elevation__gte=elevation.min
        #         )
        #     if elevation.max is not None:
        #         queryset = queryset.filter(
        #             elevation__lte=elevation.max
        #         )
        # if position is not None:
        #     pnt = position.point
        #     if distance is not None:
        #         if distance.max is not None:
        #             queryset = queryset.filter(
        #                 spot__distance_lte=(pnt, D(km=distance.max))
        #             )
        #         if distance.min is not None:
        #             queryset = queryset.filter(
        #                 spot__distance_gte=(pnt, D(km=distance.min))
        #             )
        #     queryset = queryset.annotate(distance=Distance('spot', pnt))
        results = []
        # for mount in queryset:
        #     results.append(MountainType(mount, selections))
        return results
| 36.838235 | 84 | 0.589621 | 249 | 2,505 | 5.851406 | 0.349398 | 0.07687 | 0.04324 | 0.046671 | 0.122169 | 0.122169 | 0.122169 | 0.059025 | 0 | 0 | 0 | 0 | 0.309381 | 2,505 | 67 | 85 | 37.38806 | 0.842197 | 0.611976 | 0 | 0 | 0 | 0 | 0.024919 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.375 | 0 | 0.75 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
419537199b41a50b7711bddf05560e93793c8d3a | 3,677 | py | Python | utils/myjpeg.py | I2-Multimedia-Lab/Countering-Adversarial-Examples-Using-JPEG-Compression | 5c80091dcf2b80d6d22af8e5e1b103218c36e889 | [
"MIT"
] | 2 | 2020-06-25T00:34:10.000Z | 2020-10-05T01:30:22.000Z | utils/myjpeg.py | I2-Multimedia-Lab/Countering-Adversarial-Examples-Using-JPEG-Compression | 5c80091dcf2b80d6d22af8e5e1b103218c36e889 | [
"MIT"
] | 2 | 2020-09-25T22:40:46.000Z | 2020-09-26T01:15:30.000Z | utils/myjpeg.py | I2-Multimedia-Lab/Countering-Adversarial-Examples-Using-JPEG-Compression | 5c80091dcf2b80d6d22af8e5e1b103218c36e889 | [
"MIT"
] | null | null | null | import numpy as np
import sys
import tensorflow as tf
from PIL import Image
from scipy.fftpack import dct, idct
import cv2
# 8-point DCT-II basis matrix with orthonormal scaling (row 0 entries are
# 1/sqrt(8) ~= 0.3536).  Appears unused in this module — the code below
# relies on scipy's dct/idct instead; kept here for reference.
T = np.array([
    [0.3536, 0.3536, 0.3536, 0.3536, 0.3536, 0.3536, 0.3536, 0.3536],
    [0.4904, 0.4157, 0.2778, 0.0975, -0.0975, -0.2778, -0.4157, -0.4904],
    [0.4619, 0.1913, -0.1913, -0.4619, -0.4619, -0.1913, 0.1913, 0.4619],
    [0.4157, -0.0975, -0.4904, -0.2778, 0.2778, 0.4904, 0.0975, -0.4157],
    [0.3536, -0.3536, -0.3536, 0.3536, 0.3536, -0.3536, -0.3536, 0.3536],
    [0.2778, -0.4904, 0.0975, 0.4157, -0.4157, -0.0975, 0.4904, -0.2778],
    [0.1913, -0.4619, 0.4619, -0.1913, -0.1913, 0.4619, -0.4619, 0.1913],
    [0.0975, -0.2778, 0.4157, -0.4904, 0.4904, -0.4157, 0.2778, -0.0975]
])
""
Jpeg_def_table = np.array([
[16, 11, 10, 16, 24, 40, 51, 61],
[12, 12, 14, 19, 26, 58, 60, 55],
[14, 13, 16, 24, 40, 57, 69, 56],
[14, 17, 22, 29, 51, 87, 80, 62],
[18, 22, 37, 56, 68, 109, 103, 77],
[24, 36, 55, 64, 81, 104, 113, 92],
[49, 64, 78, 87, 103, 121, 120, 101],
[72, 92, 95, 98, 112, 100, 103, 99],
])
'''
num = 8
q_table = np.ones((num, num)) * 25
# q_table[0:4, 0:4] = 20
print(q_table)
'''
def dct2(block):
    """Orthonormal 2-D DCT-II: transform along rows, then along columns."""
    transformed_rows = dct(block.T, norm='ortho')
    return dct(transformed_rows.T, norm='ortho')
def idct2(block):
    """Orthonormal 2-D inverse DCT: invert along rows, then along columns."""
    restored_rows = idct(block.T, norm='ortho')
    return idct(restored_rows.T, norm='ortho')
def myjpeg(input_matrix):
    """Apply a JPEG-style compress/decompress round trip to a batch of images.

    Every 8x8 block of each channel is DCT-transformed, quantized with a
    fixed table, dequantized and inverse-transformed, reproducing the
    information loss of JPEG compression.

    :param input_matrix: array of shape (n, h, w, c) with pixel values in
        [0, 255]; h and w must be multiples of 8.  (Generalized from the
        original hard-coded c == 3; any channel count now works.)
    :return: float32 array of shape (n, h, w, c) with values clipped to [0, 1].
    """
    num = 8

    def _dct2(block):
        # 2-D type-II DCT with orthonormal scaling.
        return dct(dct(block.T, norm='ortho').T, norm='ortho')

    def _idct2(block):
        # 2-D inverse DCT with orthonormal scaling.
        return idct(idct(block.T, norm='ortho').T, norm='ortho')

    # Quantization table: fine steps for low frequencies (top-left), coarse
    # steps for high frequencies.  The original table had 460 at position
    # [7, 4] amid a field of 46s — a typo for 46, fixed here.
    q_table = np.array([[5, 5, 20, 20, 46, 46, 46, 46],
                        [5, 20, 20, 46, 46, 46, 46, 46],
                        [20, 20, 46, 46, 46, 46, 46, 46],
                        [20, 46, 46, 46, 46, 46, 46, 46],
                        [46, 46, 46, 46, 46, 46, 46, 46],
                        [46, 46, 46, 46, 46, 46, 46, 20],
                        [46, 46, 46, 46, 46, 46, 20, 20],
                        [46, 46, 46, 46, 46, 20, 20, 20]])

    n, h, w, c = input_matrix.shape
    # Work in (n, c, h, w) layout, writing each processed block in place
    # instead of repeatedly concatenating arrays.
    out = np.zeros((n, c, h, w))
    for i in range(n):
        for j in range(c):
            channel = input_matrix[i, :, :, j]
            for y in range(0, h, num):
                for x in range(0, w, num):
                    coeffs = _dct2(channel[y:y + num, x:x + num])
                    # Quantize then dequantize: this is where detail is lost.
                    quantized = np.round(np.divide(coeffs, q_table))
                    out[i, j, y:y + num, x:x + num] = _idct2(quantized * q_table)
    # (n, c, h, w) -> (n, h, w, c), rescale to [0, 1] and clamp.
    out = np.transpose(out, (0, 2, 3, 1))
    return np.clip(np.float32(out / 255), 0.0, 1.0)
| 35.355769 | 80 | 0.495785 | 567 | 3,677 | 3.13933 | 0.243386 | 0.092135 | 0.117978 | 0.130337 | 0.319101 | 0.276404 | 0.276404 | 0.266292 | 0.213483 | 0.147191 | 0 | 0.269136 | 0.339135 | 3,677 | 103 | 81 | 35.699029 | 0.463374 | 0.003264 | 0 | 0.047059 | 0 | 0 | 0.005598 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035294 | false | 0 | 0.070588 | 0.023529 | 0.141176 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
41965b0e9c9e74aa594c98a71c4b4fc2a1ea3a8b | 4,321 | py | Python | modelvshuman/datasets/noise_generalisation.py | TizianThieringer/model-vs-human | 17729b8167520f682d93d55c340c27de07bb2681 | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | 158 | 2021-06-04T15:19:58.000Z | 2022-03-30T00:31:28.000Z | modelvshuman/datasets/noise_generalisation.py | TizianThieringer/model-vs-human | 17729b8167520f682d93d55c340c27de07bb2681 | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | 7 | 2021-07-20T03:57:34.000Z | 2022-02-01T11:00:47.000Z | modelvshuman/datasets/noise_generalisation.py | TizianThieringer/model-vs-human | 17729b8167520f682d93d55c340c27de07bb2681 | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | 14 | 2021-06-16T13:33:11.000Z | 2022-03-29T15:04:09.000Z | from dataclasses import dataclass, field
from os.path import join as pjoin
from typing import List
from .registry import register_dataset
from .. import constants as c
from . import decision_mappings, info_mappings
from .dataloaders import PytorchLoader
from ..evaluation import metrics as m
from .base import Dataset
from .experiments import *
__all__ = ["colour", "contrast", "high_pass", "low_pass",
"phase_scrambling", "power_equalisation",
"false_colour", "rotation", "eidolonI",
"eidolonII", "eidolonIII", "uniform_noise"]
@dataclass
class NoiseGeneralisationParams:
    """Parameter bundle shared by all noise-generalisation datasets.

    Every field has a default so a dataset factory only needs to supply
    its experiment list; ``path`` is filled in later by ``_get_dataset``.
    """
    path: str = ""
    experiments: List = field(default_factory=list)
    image_size: int = 224
    metrics: list = field(default_factory=lambda: [m.Accuracy(topk=1)])
    # default_factory instead of a class-level instance: with a shared
    # default instance, every params object would alias the same mapping.
    decision_mapping: object = field(
        default_factory=decision_mappings.ImageNetProbabilitiesTo16ClassesMapping)
    info_mapping: object = field(
        default_factory=info_mappings.InfoMappingWithSessions)
    contains_sessions: bool = True
def _get_dataset(name, params, *args, **kwargs):
    """Fill in the on-disk path for *params* and construct the Dataset."""
    assert params is not None, "Dataset params are missing"
    params.path = pjoin(c.DATASET_DIR, name)
    return Dataset(*args, name=name, params=params, loader=PytorchLoader, **kwargs)
@register_dataset(name="colour")
def colour(*args, **kwargs):
return _get_dataset(name="colour",
params=NoiseGeneralisationParams(experiments=[colour_experiment]),
*args, **kwargs)
@register_dataset(name="contrast")
def contrast(*args, **kwargs):
return _get_dataset(name="contrast",
params=NoiseGeneralisationParams(experiments=[contrast_experiment]),
*args, **kwargs)
@register_dataset(name="high-pass")
def high_pass(*args, **kwargs):
return _get_dataset(name="high-pass",
params=NoiseGeneralisationParams(experiments=[high_pass_experiment]),
*args, **kwargs)
@register_dataset(name="low-pass")
def low_pass(*args, **kwargs):
return _get_dataset(name="low-pass",
params=NoiseGeneralisationParams(experiments=[low_pass_experiment]),
*args, **kwargs)
@register_dataset(name="phase-scrambling")
def phase_scrambling(*args, **kwargs):
return _get_dataset(name="phase-scrambling",
params=NoiseGeneralisationParams(experiments=[phase_scrambling_experiment]),
*args, **kwargs)
@register_dataset(name="power-equalisation")
def power_equalisation(*args, **kwargs):
return _get_dataset(name="power-equalisation",
params=NoiseGeneralisationParams(experiments=[power_equalisation_experiment]),
*args, **kwargs)
@register_dataset(name="false-colour")
def false_colour(*args, **kwargs):
return _get_dataset(name="false-colour",
params=NoiseGeneralisationParams(experiments=[false_colour_experiment]),
*args, **kwargs)
@register_dataset(name="rotation")
def rotation(*args, **kwargs):
return _get_dataset(name="rotation",
params=NoiseGeneralisationParams(experiments=[rotation_experiment]),
*args, **kwargs)
@register_dataset(name="eidolonI")
def eidolonI(*args, **kwargs):
return _get_dataset(name="eidolonI",
params=NoiseGeneralisationParams(experiments=[eidolonI_experiment]),
*args, **kwargs)
@register_dataset(name="eidolonII")
def eidolonII(*args, **kwargs):
return _get_dataset(name="eidolonII",
params=NoiseGeneralisationParams(experiments=[eidolonII_experiment]),
*args, **kwargs)
@register_dataset(name="eidolonIII")
def eidolonIII(*args, **kwargs):
return _get_dataset(name="eidolonIII",
params=NoiseGeneralisationParams(experiments=[eidolonIII_experiment]),
*args, **kwargs)
@register_dataset(name="uniform-noise")
def uniform_noise(*args, **kwargs):
return _get_dataset(name="uniform-noise",
params=NoiseGeneralisationParams(experiments=[uniform_noise_experiment]),
*args, **kwargs)
| 35.130081 | 102 | 0.64823 | 406 | 4,321 | 6.692118 | 0.199507 | 0.105263 | 0.066986 | 0.110416 | 0.315789 | 0.305116 | 0.116305 | 0 | 0 | 0 | 0 | 0.001823 | 0.238371 | 4,321 | 122 | 103 | 35.418033 | 0.823762 | 0 | 0 | 0.131868 | 0 | 0 | 0.092803 | 0 | 0 | 0 | 0 | 0 | 0.010989 | 1 | 0.142857 | false | 0.098901 | 0.10989 | 0.131868 | 0.483516 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 2 |
41a8b26f388922d89d647623e162a4767fb5d786 | 115 | py | Python | docker/optimization/pyOpt/tags/v1.2.0/pyOpt/pyCONMIN/__init__.py | liujiamingustc/phd | 4f815a738abad43531d02ac66f5bd0d9a1def52a | [
"Apache-2.0"
] | 3 | 2021-01-06T03:01:18.000Z | 2022-03-21T03:02:55.000Z | docker/optimization/pyOpt/tags/v1.2.0/pyOpt/pyCONMIN/__init__.py | liujiamingustc/phd | 4f815a738abad43531d02ac66f5bd0d9a1def52a | [
"Apache-2.0"
] | null | null | null | docker/optimization/pyOpt/tags/v1.2.0/pyOpt/pyCONMIN/__init__.py | liujiamingustc/phd | 4f815a738abad43531d02ac66f5bd0d9a1def52a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
try:
from pyCONMIN import CONMIN
__all__ = ['CONMIN']
except:
__all__ = []
#end
| 12.777778 | 31 | 0.617391 | 14 | 115 | 4.5 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.243478 | 115 | 8 | 32 | 14.375 | 0.724138 | 0.2 | 0 | 0 | 0 | 0 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
41b09ca21a191757bcadd1d09af9d01092e1c250 | 1,241 | py | Python | libsaas/services/basecamp/accesses.py | MidtownFellowship/libsaas | 541bb731b996b08ede1d91a235cb82895765c38a | [
"MIT"
] | 155 | 2015-01-27T15:17:59.000Z | 2022-02-20T00:14:08.000Z | libsaas/services/basecamp/accesses.py | MidtownFellowship/libsaas | 541bb731b996b08ede1d91a235cb82895765c38a | [
"MIT"
] | 14 | 2015-01-12T08:22:37.000Z | 2021-06-16T19:49:31.000Z | libsaas/services/basecamp/accesses.py | MidtownFellowship/libsaas | 541bb731b996b08ede1d91a235cb82895765c38a | [
"MIT"
] | 43 | 2015-01-28T22:41:45.000Z | 2021-09-21T04:44:26.000Z | from libsaas import http, parsers
from libsaas.services import base
from .resource import BasecampResource
class AccessResource(BasecampResource):
    """Common base for Basecamp access resources.

    The generic create/update/delete operations all raise
    MethodNotSupported; access management is done through the dedicated
    grant/revoke methods on the subclasses.
    """
    path = 'accesses'
    def create(self, *args, **kwargs):
        raise base.MethodNotSupported()
    def update(self, *args, **kwargs):
        raise base.MethodNotSupported()
    def delete(self, *args, **kwargs):
        raise base.MethodNotSupported()
class Accesses(AccessResource):
    """Collection of accesses; supports granting new ones."""

    @base.apimethod
    def grant(self, obj):
        """
        Create a new resource.

        :var obj: a Python object representing the resource to be created,
            usually in the same format as returned from `get`. Refer to the
            upstream documentation for details.
        """
        self.require_collection()
        url = self.get_url()
        body = self.wrap_object(obj)
        return http.Request('POST', url, body), parsers.parse_empty
class Access(AccessResource):
    """A single access; supports revocation but not direct fetching."""

    def get(self, *args, **kwargs):
        raise base.MethodNotSupported()

    @base.apimethod
    def revoke(self):
        """
        Delete this resource.
        """
        self.require_item()
        req = http.Request('DELETE', self.get_url())
        return req, parsers.parse_empty
| 24.333333 | 77 | 0.641418 | 136 | 1,241 | 5.801471 | 0.441176 | 0.040558 | 0.070976 | 0.096324 | 0.291508 | 0.215463 | 0.111534 | 0 | 0 | 0 | 0 | 0 | 0.257857 | 1,241 | 50 | 78 | 24.82 | 0.856678 | 0.177276 | 0 | 0.32 | 0 | 0 | 0.018947 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.24 | false | 0 | 0.12 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
41bc8a8dd97e9fa436c0c60e3cf2099324d086f2 | 8,671 | py | Python | honeysnap/ftpDecode.py | honeynet/honeysnap | 9b5e9ab6b5557692b78efe788cdaf24404ddf1eb | [
"FSFAP"
] | 7 | 2016-06-30T14:19:27.000Z | 2017-07-12T12:14:53.000Z | honeysnap/ftpDecode.py | honeynet/honeysnap | 9b5e9ab6b5557692b78efe788cdaf24404ddf1eb | [
"FSFAP"
] | null | null | null | honeysnap/ftpDecode.py | honeynet/honeysnap | 9b5e9ab6b5557692b78efe788cdaf24404ddf1eb | [
"FSFAP"
] | 2 | 2017-02-03T19:46:28.000Z | 2018-11-21T18:14:09.000Z | ################################################################################
# (c) 2006, The Honeynet Project
# Author: Jed Haile jed.haile@thelogangroup.biz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
################################################################################
# $Id$
import re
from util import renameFile
from flow import reverse as freverse
import tcpflow
import pcap
from flowIdentify import flowIdentify
from flowDecode import flowDecode
# FTP commands that open a separate data connection (uploads, downloads
# and directory listings); used to spot data-channel activity below.
cmds = ['STOR', 'STOU', 'RETR', 'LIST', 'NLST', 'APPE']
class ftpDecode(flowDecode):
    """Decode FTP control channels and recover transferred files.

    Watches port-21 control streams for PORT (active mode) or PASV
    (passive mode) exchanges, matches them with the corresponding data
    flows via the state manager, and renames/identifies extracted files.
    (Python 2 code: uses the print statement and iterator .next().)
    """
    def __init__(self):
        super(ftpDecode, self).__init__()
        # Timestamp formatting function supplied via the shared options.
        self.tf = self.options['time_convert_fn']
        self.statemgr = None
        # for some reason the data samples I'm using
        # often have UUUUUUPORT, compensate for that in the RE
        # It turns out these are being stuck in the stream due to duplicate ACKS
        self.activeRE = re.compile("^U*PORT", re.M)
        self.passiveRE = re.compile("PASV")
        # Matches the "h1,h2,h3,h4,p1,p2" address tuple in PORT/227 lines.
        self.portIPRE = re.compile("(\d+,){5}\d+")
        self.userRE = re.compile("USER\s(.*)")
        self.passRE = re.compile("PASS\s(.*)")
        # response code 227 is PASV
        # response code 229 is EPASV
        self._227re = re.compile("^227|^229", re.M)
        self.id = flowIdentify()

    def print_summary(self):
        """Print summary info"""
        super(ftpDecode, self).print_summary('\nFTP summary for %s\n\n')

    def decode(self, state, statemgr):
        """Read one flow and, if it is a control channel (dport 21),
        dispatch to the passive or active extractor based on whether a
        PASV command appears in the stream."""
        self.statemgr = statemgr
        state.open(flags="rb", statemgr = self.statemgr)
        d = state.fp.readlines()
        state.close()
        #t, req = self.determineType(d)
        d = "".join(d)
        f = state.flow
        #print '%s.%s-%s.%s' % (f.src, f.sport, f.dst, f.dport)
        if f.dport == 21:
            # ftp control connection
            # use these to figure out filenames for other flows
            m = self.passiveRE.search(d)
            if m is not None:
                self.extractPassive(state, d)
            else:
                # if we didn't find a PASV command, assume active
                self.extractActive(state, d)

    def extractActive(self, state, d):
        """Handle active-mode FTP: walk the control stream for PORT lines
        followed by RETR, compute the client data port, find the matching
        data-channel state and record the download."""
        #print "Active FTP"
        username, password = "Unknown", "Unknown"
        # look for port lines
        m = self.activeRE.search(d)
        if m is None:
            return
        # split data into a list of lines
        lines = d.splitlines()
        iterlines = iter(lines)
        for l in iterlines:
            m = self.userRE.search(l)
            if m:
                username = m.group(1)
                continue
            m = self.passRE.search(l)
            if m:
                password = m.group(1)
                continue
            if l.find("PORT")>=0:
                try:
                    nextl = iterlines.next()
                except StopIteration:
                    return
                if nextl.find("RETR")>=0:
                    # this means the current PORT will be
                    # a data channel for a downaload
                    filename = nextl.split(" ")[1]
                    ip_port = l.split(" ")[1].split(",")
                    #ip = ".".join(ip_port[0:4])
                    # PORT encodes the port as two bytes: hi*256 + lo.
                    port = int(ip_port[4])*256 + int(ip_port[5])
                    # now we know the ip and port of the client
                    # data channel.
                    # find the correct state
                    # it will look like the reverse flow, with a different dport
                    rflow = freverse(state.flow)
                    rflow.dport = port
                    rflow.sport = 20
                    # find the state that carries the data
                    rstate = self.statemgr.find_flow_state(rflow)
                    # rename the data file
                    if rstate is not None:
                        fn = renameFile(rstate, filename)
                        id, m5 = self.id.identify(rstate)
                        output = "%s requested %s from %s (%s, %s) at %s\n\tfile: %s, filetype: %s, md5 sum: %s\n" % (rstate.flow.dst, filename,
                            rstate.flow.src, username, password, self.tf(rstate.ts), fn, id, m5)
                        self.add_flow(rstate.ts, rstate.flow.src, rstate.flow.dst, output)

    def extractPassive(self, state, d):
        """Handle passive-mode FTP: pair 227/229 replies from the server
        stream with data commands from the client stream, then run a new
        stream extractor over the pcap to pull the high-port data flows."""
        #print "Passive FTP"
        username, password = "Unknown", "Unknown"
        # repr(port/256), repr(port%256)
        # first we have to find the reverse flow/state
        # from it we will extract the ip and port info
        rflow = freverse(state.flow)
        rstate = self.statemgr.find_flow_state(rflow)
        if rstate is None:
            # no reverse state, bail
            return
        rstate.open(flags="rb", statemgr=self.statemgr)
        dchannel = rstate.fp.readlines()
        rstate.close()
        lines = d.splitlines()
        iterlines = iter(lines)
        portlines = []
        cmdlines = []
        # find all the lines from the server
        # that open a data port
        # find all the 227 lines in the data channel
        for l in dchannel:
            m = self._227re.search(l)
            if m is not None:
                portlines.append(l)
        # find all the client lines that use
        # a data port
        for l in lines:
            m = self.userRE.search(l)
            if m:
                username = m.group(1)
                continue
            else:
                username = "Unknown"
            m = self.passRE.search(l)
            if m:
                password = m.group(1)
                continue
            else:
                password = "Unknown"
            w = [i for i in cmds if i in l.split()[0]]
            if len(w) == 0:
                # this line doesn't contain a data command
                continue
            cmdlines.append(l)
        # zip the 2 lists together
        # should give [(227 response, Client CMD),...]
        # NOTE(review): assumes replies and commands line up one-to-one;
        # a lost line on either side would mispair them — confirm upstream.
        pairs = zip(portlines, cmdlines)
        for p in pairs:
            if p[1].find("RETR") < 0:
                # not a RETR command
                continue
            m = self.portIPRE.search(p[0])
            if m is not None:
                # the last 2 items in the RE result are the port info
                info = m.group().split(",")
                p256 = int(info[-2])
                p1 = int(info[-1])
                ip = ".".join(info[0:4])
                port = 256*p256 + p1
            else:
                continue
            filename = p[1].split(" ")[1]
            rflow.sport = port
            # passive ftp transactions happen on high ports
            # so the stream extractor has not extracted the data
            # create a new stream extractor to pull the data
            # NOTE(review): loop variable p is rebound here to the pcap
            # handle, shadowing the (portline, cmdline) pair — works only
            # because the pair is not used again below.
            p = pcap.pcap(self.options["tmpf"])
            de = tcpflow.tcpFlow(p)
            filter = "src host %s and src port %d" % (rflow.src, rflow.sport)
            de.setFilter(filter)
            de.setOutdir(self.options["output_data_directory"]+ "/%s/ftp")
            # run the flow extractor
            de.start()
            # now find the correct state
            flows = [f for f in de.states.getFlows() if f.isSrcSport(ip, port)]
            if len(flows) > 0:
                if len(flows) > 1:
                    print "hmmm, got more than 1 flow"
                rflow = flows[0]
                rstate = de.states.find_flow_state(rflow)
                # rename the data file
                if rstate is not None:
                    fn = renameFile(rstate, filename)
                    id, m5 = self.id.identify(rstate)
                    output = "%s requested %s from %s (%s, %s) at %s\n\tfile: %s, filetype: %s, md5 sum: %s\n" % (rstate.flow.dst, filename,
                        rstate.flow.src, username, password, self.tf(rstate.ts), fn, id, m5)
                    self.add_flow(rstate.ts, rstate.flow.src, rstate.flow.dst, output)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.