hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
6004eed2e1c2c6d3dd16ff04ead53384598be124
776
py
Python
adversarialAE/__init__.py
sb1705/AdversarialAutoencoder
7e030da0e1986380b2a7f9d18f146b66be5861d3
[ "MIT" ]
null
null
null
adversarialAE/__init__.py
sb1705/AdversarialAutoencoder
7e030da0e1986380b2a7f9d18f146b66be5861d3
[ "MIT" ]
null
null
null
adversarialAE/__init__.py
sb1705/AdversarialAutoencoder
7e030da0e1986380b2a7f9d18f146b66be5861d3
[ "MIT" ]
null
null
null
from . import aae_celeba from . import nets from . import utils AAE = aae_celeba.AAE #model_generator = nets.model_generator #model_encoder = nets.model_encoder #model_discriminator = nets.model_discriminator retrieve_data = utils.data_utils.retrieve_data data_process = utils.data_utils.data_process dim_ordering_fix = utils.image_utils.dim_ordering_fix dim_ordering_unfix = utils.image_utils.dim_ordering_unfix dim_ordering_shape = utils.image_utils.dim_ordering_shape dim_ordering_shape = utils.image_utils.dim_ordering_shape dim_ordering_input = utils.image_utils.dim_ordering_input dim_ordering_reshape = utils.image_utils.dim_ordering_reshape channel_axis = utils.image_utils.channel_axis resize = utils.resize_imgs.resize bulkResize = utils.resize_imgs.bulkResize
32.333333
61
0.859536
114
776
5.421053
0.22807
0.213592
0.169903
0.174757
0.338188
0.169903
0.169903
0.169903
0.169903
0.169903
0
0
0.079897
776
23
62
33.73913
0.865546
0.152062
0
0.133333
0
0
0
0
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
600f5b7d23689c020516f58097e84aab084e320b
795
py
Python
pyopenproject/business/services/command/user_preferences/find.py
webu/pyopenproject
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
[ "MIT" ]
5
2021-02-25T15:54:28.000Z
2021-04-22T15:43:36.000Z
pyopenproject/business/services/command/user_preferences/find.py
webu/pyopenproject
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
[ "MIT" ]
7
2021-03-15T16:26:23.000Z
2022-03-16T13:45:18.000Z
pyopenproject/business/services/command/user_preferences/find.py
webu/pyopenproject
40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966
[ "MIT" ]
6
2021-06-18T18:59:11.000Z
2022-03-27T04:58:52.000Z
from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.get_request import GetRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.user_preferences.user_preferences_command import UserPreferencesCommand
from pyopenproject.model.user_preferences import UserPreferences


class Find(UserPreferencesCommand):
    """Command that retrieves the current user's preferences.

    Issues a GET against the user-preferences context inherited from
    `UserPreferencesCommand` and wraps the JSON payload in a
    `UserPreferences` model object.
    """

    def __init__(self, connection):
        """Hand the API connection to the base command."""
        super().__init__(connection)

    def execute(self):
        """Fetch and return the `UserPreferences`.

        Raises:
            BusinessError: if the underlying request fails, chained to the
                original `RequestError`.
        """
        try:
            payload = GetRequest(self.connection, f"{self.CONTEXT}").execute()
            return UserPreferences(payload)
        except RequestError as err:
            raise BusinessError("Error finding user preferences") from err
41.842105
116
0.786164
84
795
7.214286
0.47619
0.140264
0.066007
0.09901
0
0
0
0
0
0
0
0
0.148428
795
18
117
44.166667
0.895126
0
0
0
0
0
0.055346
0
0
0
0
0
0
1
0.142857
false
0
0.357143
0
0.642857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
6013d9408adc25915892cb13c3f6ed9bff7cbb9f
5,574
py
Python
plugins/countdown_timer_2.1s.py
longpdo/bitbar-plugins-custom
58cff1571ae4a939f7edac9c42fcd1156e3c8661
[ "MIT" ]
4
2020-07-08T23:47:51.000Z
2021-04-15T12:03:08.000Z
plugins/countdown_timer_2.1s.py
longpdo/bitbar-plugins-custom
58cff1571ae4a939f7edac9c42fcd1156e3c8661
[ "MIT" ]
null
null
null
plugins/countdown_timer_2.1s.py
longpdo/bitbar-plugins-custom
58cff1571ae4a939f7edac9c42fcd1156e3c8661
[ "MIT" ]
3
2020-07-08T23:48:29.000Z
2021-03-17T07:37:02.000Z
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# <bitbar.title>Countdown Timer 2</bitbar.title>
# <bitbar.version>v1.0</bitbar.version>
# <bitbar.author>Federico Ferri</bitbar.author>
# <bitbar.author.github>fferri</bitbar.author.github>
# <bitbar.desc>Simple countdown timer.</bitbar.desc>
# <bitbar.dependencies>python</bitbar.dependencies>
# <bitbar.image>https://raw.githubusercontent.com/fferri/bitbar-countdown-timer/master/screenshot.gif</bitbar.image>
# <bitbar.abouturl>https://github.com/fferri/bitbar-countdown-timer</bitbar.abouturl>

import os
import re
import subprocess
import sys
import time

# Base64-encoded PNG used as the menu-bar template icon when no timer is set.
icon = 'iVBORw0KGgoAAAANSUhEUgAAACQAAAAkCAYAAADhAJiYAAAAAXNSR0IArs4c6QAAAAlwSFlzAAAWJQAAFiUBSVIk8AAAAVlpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6dGlmZj0iaHR0cDovL25zLmFkb2JlLmNvbS90aWZmLzEuMC8iPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaWVudGF0aW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KTMInWQAAAxZJREFUWAntlluITVEYx8+4jbuQ++CFSC655Tau8aA8eFKaJzKvlHfFwzwg1+TVRJNQU5SSUAZNIyNyy4gQcikkJLn9/lnLrHXss9bee588zVe/sy7f9//WOmvvvdYqFLosvAIVYXfQW4n3JPRLiNpGX3NCf6d29Sf77xKszztyt7zCiK6z8kaGLRTWEvEY3JXaTXtoVJkzYD66IQGt3qM74E6oJhDfIddkM1ArpTupWbTr4IXxu5Ox9a/46mEZlMWqyPIT7ADDqWs16p0++b7DXTgNjXAV3oPVqWyBBZDbqlG6CTWxBnji9B+lvhAGQbH1pGMCbIa3YHPtpJ7LZqJqA5vILa/QvyhD1t7E7nJy7cmg9UL1eFbDWbATOuhFZGusJFyPV7nqskn9aL0fSnLI787Vsh+J8ulxZ7bzKCR+kFnpC3o5zVrqyvnR6UtVHWeEEk9Lpfg/SF/q8wS93kPl1SuR2vYRKdGR1Ao/cKTR64DVV+ea/Yq1oaYy3QA+gya0JpXCD+pjtLoJlLI3OJR/SqkAt3+eCf5BOdh1BOrd8ekaMhZ+QQOETJ+/JqS9yrOkU3muibhN+cGLTm4sN3GvKe/BMYidZ03EyPQHPEua0GgToZ05ZtJfghOgk78vHICYvTQBA4sDkyZkX0I9sphZvVblogleERPh13Eks2P9bfHb41+tvfLJVIe1d5WsadLb4QK8gsuwH2Jmbw+6FXiWNKFHJmKGF1m6sQNXC0yCNI9LmabqB9PNIGpjiNAXIGZHo/MFXDP5U9+9bxnB1nzjBVVVJrf+8IBgpONcZ0TPnL5yVfea3IezJrSPrSPXjuIxx5vJKPeoYmesXeuIs1zKQnm1t2kyx0NBId85k0DHwZJQYMSnr1kbqCbzFHKbDsqHoERiI2S16QhugPTfQO0OWSXq+2An1UR9aYqM2j7sISqtbhC6NQatIuj1ndqBtzhd2kCvQyu0gXZtnYNagWqYA9bOUNkE72xHucpVJGoEu1qxUkfJhiyDZ1khN+9EGtplF8MI0AanXF9Aq3ATTkEzdFlZV+APQ77IUZhTv+IAAAAASUVORK5CYII='


def prompt(text='', defaultAnswer='', icon='note', buttons=('Cancel', 'Ok'), defaultButton=1):
    """Show a macOS dialog via osascript and return the typed text.

    Returns None if the user cancels (osascript exits non-zero).
    NOTE: the `icon` parameter deliberately shadows the module-level icon.
    """
    try:
        d = locals()
        d['buttonsStr'] = ', '.join('"%s"' % button for button in buttons)
        # defaultButton may be given as an index into `buttons` or as a label.
        d['defaultButtonStr'] = isinstance(defaultButton, int) and buttons[defaultButton] or defaultButton
        user_input = subprocess.check_output(['osascript', '-l', 'JavaScript', '-e', '''
const app = Application.currentApplication()
app.includeStandardAdditions = true
const response = app.displayDialog("{text}", {{
    defaultAnswer: "{defaultAnswer}",
    withIcon: "{icon}",
    buttons: [{buttonsStr}],
    defaultButton: "{defaultButtonStr}"
}})
response.textReturned
'''.format(**d)]).rstrip()
        # NOTE(review): debug notification left in by the original author.
        notify(user_input, 'debug prompt')
        return user_input
    except subprocess.CalledProcessError:
        pass


def notify(text, title, sound='Glass'):
    """Post a macOS notification with the given text, title and sound."""
    os.system('osascript -e \'display notification "{}" with title "{}" sound name "{}"\''.format(text, title, sound))


def entry(title='---', **kwargs):
    """Print one bitbar menu line; kwargs become |key='value' attributes.

    kwargs whose value is None are omitted.
    """
    args = ' '.join('{}=\'{}\''.format(k, v) for k, v in kwargs.items() if v is not None)
    if args:
        args = '|' + args
    print(title + args)


def parse_time(s):
    """Parse a duration like '30s', '15m', '1h' or '1m30s' into seconds.

    Raises Exception when the string does not match the h/m/s pattern.
    """
    m = re.match(r'^((\d+)h)?((\d+)m)?((\d+)s?)?$', s)
    if m is None:
        raise Exception('invalid time: %s' % s)
    h, m, s = map(int, (m.group(i) or 0 for i in (2, 4, 6)))
    return s + 60 * (m + 60 * h)


def render_time(t):
    """Format seconds as compact 'XhYmZs', dropping leading zero units."""
    t = int(round(t))
    h = t // 3600
    t -= h * 3600
    m = t // 60
    t -= m * 60
    k, v = 'hms', (h, m, t)
    return ''.join('%d%s' % (v[i], k[i]) for i in range(3) if i == 2 or any(v[:i + 1]))


def read_data_file(filename):
    """Read (deadline_epoch, task_or_None) from the state file `filename`.

    BUGFIX: the original implementation opened the global `data_file`
    instead of the `filename` argument.
    """
    with open(filename, 'rt') as f:
        lines = f.readlines()
    t = float(lines[0])
    task = lines[1].rstrip() if len(lines) > 1 else None
    return t, task


def write_data_file(filename, t, task=None):
    """Write deadline epoch `t` (and optional task name) to `filename`.

    BUGFIX: the original implementation opened the global `data_file`
    instead of the `filename` argument.
    """
    with open(filename, 'wt') as f:
        f.write('{:f}{}{}'.format(t, '\n' if task else '', task or ''))


# State file lives next to this plugin, named .<plugin>.countdown.
data_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '.' + os.path.basename(__file__) + '.countdown')

if len(sys.argv) == 1:
    # Normal bitbar refresh: render the menu-bar title and the dropdown menu.
    if os.path.isfile(data_file):
        t, task = read_data_file(data_file)
        remain = int(round(max(0, t - time.time())))
        if remain == 0:
            notify('Times up!', task or 'Times up!')
            os.remove(data_file)
        title = '{}{}{}'.format(task or '', task and ': ' or '', render_time(remain))
        # Turn red in the last 10 seconds, orange in the last minute.
        entry(title, color=('red' if remain <= 10 else 'orange' if remain < 60 else None))
    else:
        entry('|templateImage=\'%s\'' % icon)
    entry('---')
    if os.path.isfile(data_file):
        entry('Cancel timer', bash=__file__, param1='cancel', terminal='false')
    else:
        entry('Set timer...', bash=__file__, param1='set', terminal='false')
elif len(sys.argv) == 2 and sys.argv[1] == 'set':
    # Invoked from the menu: ask for a duration and an optional task name.
    timestr = prompt('Input time (example: 30s, 15m, 1h, 1m30s)', '5m', 'note', ('Cancel', 'Set'), 1)
    task = prompt('Input task name')
    t = time.time() + parse_time(timestr)
    write_data_file(data_file, t, task)
elif len(sys.argv) == 2 and sys.argv[1] == 'cancel':
    os.remove(data_file)
50.216216
1,661
0.705418
562
5,574
6.923488
0.371886
0.026728
0.00771
0.012336
0.039579
0.024672
0.013364
0.013364
0.013364
0
0
0.060059
0.154647
5,574
110
1,662
50.672727
0.765705
0.094187
0
0.075
0
0.0125
0.480849
0.355229
0
1
0
0
0
1
0.0875
false
0.0125
0.0625
0
0.2
0.0125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
2
601c02305bed9be7ed433237734693be81237d7a
53,881
py
Python
pysnmp-with-texts/EFDATA-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
8
2019-05-09T17:04:00.000Z
2021-06-09T06:50:51.000Z
pysnmp-with-texts/EFDATA-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
4
2019-05-31T16:42:59.000Z
2020-01-31T21:57:17.000Z
pysnmp-with-texts/EFDATA-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
10
2019-04-30T05:51:36.000Z
2022-02-16T03:33:41.000Z
# # PySNMP MIB module EFDATA-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/EFDATA-MIB # Produced by pysmi-0.3.4 at Wed May 1 12:59:31 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint") NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance") MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, iso, Counter64, Counter32, Integer32, ModuleIdentity, enterprises, Unsigned32, ObjectIdentity, Gauge32, TimeTicks, MibIdentifier, NotificationType, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "iso", "Counter64", "Counter32", "Integer32", "ModuleIdentity", "enterprises", "Unsigned32", "ObjectIdentity", "Gauge32", "TimeTicks", "MibIdentifier", "NotificationType", "Bits") PhysAddress, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "PhysAddress", "TextualConvention", "DisplayString") efdata = MibIdentifier((1, 3, 6, 1, 4, 1, 6247)) spectracast = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3)) dtmx5000 = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1)) cbGateway = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1)) cbStatistics = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1)) cbStatGeneral = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 1)) cbStatNumBytesTXed = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 1, 1), 
Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbStatNumBytesTXed.setStatus('mandatory') if mibBuilder.loadTexts: cbStatNumBytesTXed.setDescription('Number of bytes transmitted since last statistics reset.') cbStatNumOfPackets = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 1, 2), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbStatNumOfPackets.setStatus('mandatory') if mibBuilder.loadTexts: cbStatNumOfPackets.setDescription('Number of data packets transmitted since last statistics reset.') cbStatAvrPktSize = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 1, 3), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbStatAvrPktSize.setStatus('mandatory') if mibBuilder.loadTexts: cbStatAvrPktSize.setDescription('Average packet size since last statistics reset.') cbStatAvrBytesPerSec = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 1, 4), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbStatAvrBytesPerSec.setStatus('mandatory') if mibBuilder.loadTexts: cbStatAvrBytesPerSec.setDescription('Average speed in bytes per second since last statistics reset.') cbStatNumPacketDiscarded = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 1, 5), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbStatNumPacketDiscarded.setStatus('mandatory') if mibBuilder.loadTexts: cbStatNumPacketDiscarded.setDescription('Number of data packets that were discarded since last statistics reset.') cbStatNumNMSFrames = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 1, 6), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbStatNumNMSFrames.setStatus('mandatory') if mibBuilder.loadTexts: cbStatNumNMSFrames.setDescription('Number of NMS packets received since last statistics reset.') cbCPULoad = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 1, 7), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbCPULoad.setStatus('mandatory') if mibBuilder.loadTexts: cbCPULoad.setDescription('Current CPU Load in 
percents (0-100).') cbMemoryUsage = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 1, 8), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbMemoryUsage.setStatus('mandatory') if mibBuilder.loadTexts: cbMemoryUsage.setDescription('Current Memory Usage in percents (0-100).') cbStatReset = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbTrue", 1), ("cbFalse", 0)))).setMaxAccess("writeonly") if mibBuilder.loadTexts: cbStatReset.setStatus('mandatory') if mibBuilder.loadTexts: cbStatReset.setDescription('Set to cbTrue in order to reset the general statistics values (either in active or non-active mode).') cbStatNumClients = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 1, 10), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbStatNumClients.setStatus('mandatory') if mibBuilder.loadTexts: cbStatNumClients.setDescription('Number of clients currently connected to the Gateway. It is not part of the General Statistics since the cbStatReset does not change its value.') cbStatClient = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 2)) cbClientIP = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 2, 1), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbClientIP.setStatus('mandatory') if mibBuilder.loadTexts: cbClientIP.setDescription('The IP address of the client. The rest of the params in cbStatClient reffers to this IP. In order to get a statistics on a single clients, set cbClientIP to the IP of the desired client and get the results under cbClientStatistics. 
Continuously get operations of the rest of the params will give the updated statistics values without a need to set cbClientIP again and again.') cbClientStatistics = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 2, 2)) cbClNumSeconds = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 2, 2, 1), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbClNumSeconds.setStatus('mandatory') if mibBuilder.loadTexts: cbClNumSeconds.setDescription('The number of seconds since the client statistics are active. The statistics values are reset automaticaly by the gateway (as well as by setting cbClReset) according to the value of cbFreqClientsInfoReset.') cbClNumKBytes = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 2, 2, 2), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbClNumKBytes.setStatus('mandatory') if mibBuilder.loadTexts: cbClNumKBytes.setDescription('Number of bytes transmitted to IP==cbClientIP in the last cbClNumSeconds seconds.') cbClNumPackets = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 2, 2, 3), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbClNumPackets.setStatus('mandatory') if mibBuilder.loadTexts: cbClNumPackets.setDescription('Number of packets transmitted to IP==cbClientIP in the last cbClNumSeconds seconds.') cbClAvrBytesPerSecond = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 2, 2, 4), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbClAvrBytesPerSecond.setStatus('mandatory') if mibBuilder.loadTexts: cbClAvrBytesPerSecond.setDescription('Average transfer rate in bytes per second for this client.') cbClNumPacketsDiscarded = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 2, 2, 5), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbClNumPacketsDiscarded.setStatus('mandatory') if mibBuilder.loadTexts: cbClNumPacketsDiscarded.setDescription('Number of packets discarded to IP==cbClientIP in the last cbClNumSeconds seconds.') cbClStatReset = MibScalar((1, 3, 6, 1, 4, 1, 6247, 
3, 1, 1, 1, 2, 2, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbTrue", 1), ("cbFalse", 0)))).setMaxAccess("writeonly") if mibBuilder.loadTexts: cbClStatReset.setStatus('mandatory') if mibBuilder.loadTexts: cbClStatReset.setDescription('Set ot non-zero - Reset the statistics values for the client cbClientIP.') cbClEncrEnbled = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 2, 2, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbTrue", 1), ("cbFalse", 0)))).setMaxAccess("readonly") if mibBuilder.loadTexts: cbClEncrEnbled.setStatus('mandatory') if mibBuilder.loadTexts: cbClEncrEnbled.setDescription('If this variable is True then the user desire encryption. This value may not changed and it is NOT changed by setting cbClStatReset.') cbStatClTable = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 3)) cbClTable = MibTable((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 3, 1), ) if mibBuilder.loadTexts: cbClTable.setStatus('mandatory') if mibBuilder.loadTexts: cbClTable.setDescription('This table contains updated statistics of all clients known to the gateway.') cbClTableNode = MibTableRow((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 3, 1, 1), ).setIndexNames((0, "EFDATA-MIB", "cbClTableIP")) if mibBuilder.loadTexts: cbClTableNode.setStatus('mandatory') if mibBuilder.loadTexts: cbClTableNode.setDescription('Information about a particular client.') cbClTableIP = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 3, 1, 1, 1), IpAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbClTableIP.setStatus('mandatory') if mibBuilder.loadTexts: cbClTableIP.setDescription('The clients IP.') cbClTableStampTime = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 3, 1, 1, 2), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbClTableStampTime.setStatus('mandatory') if mibBuilder.loadTexts: cbClTableStampTime.setDescription('The clients 
Stamp Time.') cbClTableStartTime = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 3, 1, 1, 3), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbClTableStartTime.setStatus('mandatory') if mibBuilder.loadTexts: cbClTableStartTime.setDescription('The clients Start Time.') cbClTableTotalPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 3, 1, 1, 4), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbClTableTotalPackets.setStatus('mandatory') if mibBuilder.loadTexts: cbClTableTotalPackets.setDescription('Total Packets transmitted to this client.') cbClTableBytesInSec = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 3, 1, 1, 5), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbClTableBytesInSec.setStatus('mandatory') if mibBuilder.loadTexts: cbClTableBytesInSec.setDescription('The clients Rate in Bytes/Sec.') cbClTablePacketsDiscr = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 3, 1, 1, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbClTablePacketsDiscr.setStatus('mandatory') if mibBuilder.loadTexts: cbClTablePacketsDiscr.setDescription('The Total Packets which were discarded to this client') cbClTableKBytesTxed = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 3, 1, 1, 7), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbClTableKBytesTxed.setStatus('mandatory') if mibBuilder.loadTexts: cbClTableKBytesTxed.setDescription('The Total KBytes transmitted to this client.') cbClTableReset = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 1, 3, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("cbNo", 0), ("cbYes", 1)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbClTableReset.setStatus('mandatory') if mibBuilder.loadTexts: cbClTableReset.setDescription('Reset the client statistics.') cbConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2)) cbNetworkParam = MibIdentifier((1, 3, 6, 
1, 4, 1, 6247, 3, 1, 1, 2, 1)) cbNetGatewayMngIP = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 1, 1), IpAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbNetGatewayMngIP.setStatus('mandatory') if mibBuilder.loadTexts: cbNetGatewayMngIP.setDescription('C&M IP Address. Changing this parameter will affect after system reset.') cbNetGatewayMngSubnetMask = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 1, 2), IpAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbNetGatewayMngSubnetMask.setStatus('mandatory') if mibBuilder.loadTexts: cbNetGatewayMngSubnetMask.setDescription('C&M subnet mask. Changing this parameter will affect after system reset.') cbNetDefaultGateway = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 1, 3), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbNetDefaultGateway.setStatus('mandatory') if mibBuilder.loadTexts: cbNetDefaultGateway.setDescription('The default gateway IP Address. The term gateway here, is reffering to another station in the same LAN of the CATV-Gateway. All IP packets that the CATV-Gateway is sending to the LAN (and not over the viedo) and their IP Address do not belong to the CATV-Gateway local ring will be sent to this gateway station unless cbNetDefaultGateway is 0.0.0.0 Changing this parameter will affect after system reset.') cbNetPromiscuous = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbEnabled", 1), ("cbDisabled", 0)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbNetPromiscuous.setStatus('mandatory') if mibBuilder.loadTexts: cbNetPromiscuous.setDescription('Enables/Disables Promiscuous Mode. 
Changing this parameter will affect after system reset.') cbNetUnregisteredUsers = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbEnabled", 1), ("cbDisabled", 0)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbNetUnregisteredUsers.setStatus('mandatory') if mibBuilder.loadTexts: cbNetUnregisteredUsers.setDescription('Enables/Disables Unregistered Users.') cbNetMulticast = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbEnabled", 1), ("cbDisabled", 0)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbNetMulticast.setStatus('mandatory') if mibBuilder.loadTexts: cbNetMulticast.setDescription('Enables/Disables receive Multicast Packets.') cbNetDualNIC = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbEnabled", 1), ("cbDisabled", 0)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbNetDualNIC.setStatus('mandatory') if mibBuilder.loadTexts: cbNetDualNIC.setDescription('Enables/Disables Transportation NIC Changing this parameter will affect after system reset.') cbNetGatewayDataIP = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 1, 8), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbNetGatewayDataIP.setStatus('mandatory') if mibBuilder.loadTexts: cbNetGatewayDataIP.setDescription('Transportation IP Address. Changing this parameter will affect after system reset.') cbNetGatewayDataSubnetMask = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 1, 9), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbNetGatewayDataSubnetMask.setStatus('mandatory') if mibBuilder.loadTexts: cbNetGatewayDataSubnetMask.setDescription('Transportation subnet mask. 
Changing this parameter will affect after system reset.') cbNetTelnet = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbEnabled", 1), ("cbDisabled", 0)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbNetTelnet.setStatus('mandatory') if mibBuilder.loadTexts: cbNetTelnet.setDescription('Enables/Disables the Telnet Server') cbNetFTP = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbEnabled", 1), ("cbDisabled", 0)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbNetFTP.setStatus('mandatory') if mibBuilder.loadTexts: cbNetFTP.setDescription('Enables/Disables the FTP Server') cbDVBOutputParam = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 2)) cbDVBOutputBitRate = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 2, 1), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbDVBOutputBitRate.setStatus('mandatory') if mibBuilder.loadTexts: cbDVBOutputBitRate.setDescription('PLL Frequency') cbDVBPAT = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 2, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbDVBPAT.setStatus('mandatory') if mibBuilder.loadTexts: cbDVBPAT.setDescription('PAT Rate') cbDVBPMT = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 2, 3), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbDVBPMT.setStatus('mandatory') if mibBuilder.loadTexts: cbDVBPMT.setDescription('PMT Rate') cbDVBFraming = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 2, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("cbFraming188", 1), ("cbFraming204", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbDVBFraming.setStatus('mandatory') if mibBuilder.loadTexts: cbDVBFraming.setDescription('188/204 Framing.') 
cbStuffingMode = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 2, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("cbFFStuffing", 0), ("cbAdaptationField", 1)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbStuffingMode.setStatus('mandatory') if mibBuilder.loadTexts: cbStuffingMode.setDescription('Stuffing mode: either FF stuffing or Adaptation field stuffing') cbMpeMode = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 2, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("cbPacked", 0), ("cbNotPacked", 1)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbMpeMode.setStatus('mandatory') if mibBuilder.loadTexts: cbMpeMode.setDescription('MPE mode: Packed MPE mode or Not packed MPE mode.') cbCRCMode = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 2, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("cbZero", 0), ("cbCheckSum", 1), ("cbCRC", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbCRCMode.setStatus('mandatory') if mibBuilder.loadTexts: cbCRCMode.setDescription('CRC type') cbDVBClockPolarity = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 2, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("cbNotInverted", 0), ("cbInverted", 1)))).setMaxAccess("readonly") if mibBuilder.loadTexts: cbDVBClockPolarity.setStatus('mandatory') if mibBuilder.loadTexts: cbDVBClockPolarity.setDescription('DVB Clock Polarity. 
(read only value - may be changed in CFG.INI only).') cbDVBAuxInput = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 2, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbEnabled", 1), ("cbDisabled", 0)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbDVBAuxInput.setStatus('mandatory') if mibBuilder.loadTexts: cbDVBAuxInput.setDescription('Aux Input Enable') cbDVBAuxNullPackets = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 2, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbEnabled", 1), ("cbDisabled", 0)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbDVBAuxNullPackets.setStatus('mandatory') if mibBuilder.loadTexts: cbDVBAuxNullPackets.setDescription('Aux Null Packets') cbDVBAuxInputType = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 2, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbASI", 1), ("cbLVDS", 0)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbDVBAuxInputType.setStatus('mandatory') if mibBuilder.loadTexts: cbDVBAuxInputType.setDescription('Aux Input Type') cbDVBLlcSnap = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 2, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbEnabled", 1), ("cbDisabled", 0)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbDVBLlcSnap.setStatus('mandatory') if mibBuilder.loadTexts: cbDVBLlcSnap.setDescription('Enable LLC-SNAP in MPE') cbGeneralParam = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3)) cbGatewayEnabled = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbTrue", 1), ("cbFalse", 0)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbGatewayEnabled.setStatus('mandatory') if 
mibBuilder.loadTexts: cbGatewayEnabled.setDescription('Enables/Disables all the Gateway operations.') cbGatewaySWReset = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbTrue", 1), ("cbFalse", 0)))).setMaxAccess("writeonly") if mibBuilder.loadTexts: cbGatewaySWReset.setStatus('mandatory') if mibBuilder.loadTexts: cbGatewaySWReset.setDescription('CAUTION: Setting this param to cbTrue cause a S/W reset of the gateway.') cbTraceInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3, 3)) cbTraceMask = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3, 3, 1), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbTraceMask.setStatus('mandatory') if mibBuilder.loadTexts: cbTraceMask.setDescription('Mask to select elements for trace.') cbTraceLevel = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3, 3, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbTraceLevel.setStatus('mandatory') if mibBuilder.loadTexts: cbTraceLevel.setDescription('Trace level for elements specified by cbTraceMask') cbTraceOutputChannel = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3, 3, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("cbTraceToVGA", 1), ("cbTraceToCOM1", 2), ("cbTraceToCOM2", 3)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbTraceOutputChannel.setStatus('mandatory') if mibBuilder.loadTexts: cbTraceOutputChannel.setDescription('Trace output channel.') cbPktEncrypt = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbTrue", 1), ("cbFalse", 0)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbPktEncrypt.setStatus('mandatory') if mibBuilder.loadTexts: cbPktEncrypt.setDescription('Enable/Disable encryption of the the transmitted packets. 
If cbPktEncrypt==cbTrue, packets will be encrypted only if cbClEncrEnable==cbTrue for that client.') cbGatewayDescription = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3, 5), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbGatewayDescription.setStatus('mandatory') if mibBuilder.loadTexts: cbGatewayDescription.setDescription('A general description of this gateway. The description may be changed as needed.') cbSWVersion = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3, 6), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbSWVersion.setStatus('mandatory') if mibBuilder.loadTexts: cbSWVersion.setDescription('TV Gateway Software Version.') cbApplicationFileName = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3, 7), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbApplicationFileName.setStatus('mandatory') if mibBuilder.loadTexts: cbApplicationFileName.setDescription('TV Gateway Application Software File Name. Changing this parameter will affect after system reset.') cbDataMappingMode = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3))).clone(namedValues=NamedValues(("cbDataStreaming", 2), ("cbProtocolEncapsulation", 3)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbDataMappingMode.setStatus('mandatory') if mibBuilder.loadTexts: cbDataMappingMode.setDescription('Data Boradcast Mode - Encodding mode of data from network.') cbMaxAllowableDelay = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3, 9), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbMaxAllowableDelay.setStatus('mandatory') if mibBuilder.loadTexts: cbMaxAllowableDelay.setDescription('The Maximum allowable time (in mSec) which a packet can be delayed in the gateway. 
') cbQualityOfService = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3, 10)) cbQOSMode = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3, 10, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("cbPermissive", 1), ("cbRestrictive", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbQOSMode.setStatus('mandatory') if mibBuilder.loadTexts: cbQOSMode.setDescription('Permissive mode will allow transmit to users obove their maximum rate when when band-width is available. Restrictive mode will not transmit any data to users above their maximum rate even if band-width is available.') cbQOSActive = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3, 10, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("cbFalse", 0), ("cbTrue", 1)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbQOSActive.setStatus('mandatory') if mibBuilder.loadTexts: cbQOSActive.setDescription('Turn on (cbTrue) or off (cbFalse) the Quality of Service mechanism. When Quality of Service is turned off, the minimum CIR promised to users is ignored and data is transffered to users in the order it is received from the Ethernet by the gateway.') cbFlushing = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("cbNo", 0), ("cbYes", 1)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbFlushing.setStatus('mandatory') if mibBuilder.loadTexts: cbFlushing.setDescription('Flushing packets on IDLE') cbFPGAFileName = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 3, 13), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbFPGAFileName.setStatus('mandatory') if mibBuilder.loadTexts: cbFPGAFileName.setDescription("A string that holds the MCS file name loaded to the Gateway's Encoder. 
Changing this parameter will affect after system reset.") cbGroupsTable = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 4)) cbGrTable = MibTable((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 4, 1), ) if mibBuilder.loadTexts: cbGrTable.setStatus('mandatory') if mibBuilder.loadTexts: cbGrTable.setDescription('This table contains the Groups definitions.') cbGroupsTableNode = MibTableRow((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 4, 1, 1), ).setIndexNames((0, "EFDATA-MIB", "cbGrTableIndex")) if mibBuilder.loadTexts: cbGroupsTableNode.setStatus('mandatory') if mibBuilder.loadTexts: cbGroupsTableNode.setDescription('Information about a particular group.') cbGrTableIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 4, 1, 1, 1), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbGrTableIndex.setStatus('mandatory') if mibBuilder.loadTexts: cbGrTableIndex.setDescription('Group Index.') cbGrTablePID = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 4, 1, 1, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbGrTablePID.setStatus('mandatory') if mibBuilder.loadTexts: cbGrTablePID.setDescription('The Group PID.') cbGrTableQosMode = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 4, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("cbIndividual", 0), ("cbGlobal", 1)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbGrTableQosMode.setStatus('mandatory') if mibBuilder.loadTexts: cbGrTableQosMode.setDescription('The Group Qos Mode.') cbGrTableMinRate = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 4, 1, 1, 4), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbGrTableMinRate.setStatus('mandatory') if mibBuilder.loadTexts: cbGrTableMinRate.setDescription('The Group Minimum rate. 
This parameter affects only if QosMode=Global') cbGrTableMaxRate = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 4, 1, 1, 5), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbGrTableMaxRate.setStatus('mandatory') if mibBuilder.loadTexts: cbGrTableMaxRate.setDescription('The Group Maximum rate. This parameter affects only if QosMode=Global') cbConfigSTUTable = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 5)) cbStaticUserTable = MibTable((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 5, 1), ) if mibBuilder.loadTexts: cbStaticUserTable.setStatus('mandatory') if mibBuilder.loadTexts: cbStaticUserTable.setDescription('This table contains the all the static users.') cbStaticUserEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 5, 1, 1), ).setIndexNames((0, "EFDATA-MIB", "cbStaticUserIP")) if mibBuilder.loadTexts: cbStaticUserEntry.setStatus('mandatory') if mibBuilder.loadTexts: cbStaticUserEntry.setDescription('Information about a particular static user.') cbStaticUserIP = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 5, 1, 1, 1), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbStaticUserIP.setStatus('mandatory') if mibBuilder.loadTexts: cbStaticUserIP.setDescription('IP of static user.') cbStaticUserMask = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 5, 1, 1, 2), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbStaticUserMask.setStatus('mandatory') if mibBuilder.loadTexts: cbStaticUserMask.setDescription('The static user mask.') cbStaticUserGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 5, 1, 1, 3), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbStaticUserGroup.setStatus('mandatory') if mibBuilder.loadTexts: cbStaticUserGroup.setDescription("The static user's Group.") cbStaticUserMAC = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 5, 1, 1, 4), PhysAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbStaticUserMAC.setStatus('mandatory') if 
mibBuilder.loadTexts: cbStaticUserMAC.setDescription('The group in which the static user resides.') cbStaticUserMinRate = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 5, 1, 1, 5), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbStaticUserMinRate.setStatus('mandatory') if mibBuilder.loadTexts: cbStaticUserMinRate.setDescription('The static user Minimum rate (CIR).') cbStaticUserMaxRate = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 5, 1, 1, 6), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbStaticUserMaxRate.setStatus('mandatory') if mibBuilder.loadTexts: cbStaticUserMaxRate.setDescription('The static user Maximum rate.') cbConfigMulticastTable = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 6)) cbMulticastTable = MibTable((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 6, 1), ) if mibBuilder.loadTexts: cbMulticastTable.setStatus('mandatory') if mibBuilder.loadTexts: cbMulticastTable.setDescription('This table contains the all the multicasts.') cbMulticastEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 6, 1, 1), ).setIndexNames((0, "EFDATA-MIB", "cbMulticastIP")) if mibBuilder.loadTexts: cbMulticastEntry.setStatus('mandatory') if mibBuilder.loadTexts: cbMulticastEntry.setDescription('Information about a particular multicast.') cbMulticastIP = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 6, 1, 1, 1), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbMulticastIP.setStatus('mandatory') if mibBuilder.loadTexts: cbMulticastIP.setDescription('IP of multicast.') cbMulticastGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 6, 1, 1, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbMulticastGroup.setStatus('mandatory') if mibBuilder.loadTexts: cbMulticastGroup.setDescription("The multicast's Group.") cbMulticastSID = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 6, 1, 1, 3), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: 
cbMulticastSID.setStatus('mandatory') if mibBuilder.loadTexts: cbMulticastSID.setDescription('The group in which the multicast resides.') cbMulticastMinRate = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 6, 1, 1, 4), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbMulticastMinRate.setStatus('mandatory') if mibBuilder.loadTexts: cbMulticastMinRate.setDescription('The multicast Minimum rate (CIR).') cbMulticastMaxRate = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 6, 1, 1, 5), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbMulticastMaxRate.setStatus('mandatory') if mibBuilder.loadTexts: cbMulticastMaxRate.setDescription('The multicast Maximum rate.') cbConfigClTable = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 7)) cbCfgClTable = MibTable((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 7, 1), ) if mibBuilder.loadTexts: cbCfgClTable.setStatus('mandatory') if mibBuilder.loadTexts: cbCfgClTable.setDescription('This table contains updated configuration of all clients known to the gateway.') cbCfgClTableNode = MibTableRow((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 7, 1, 1), ).setIndexNames((0, "EFDATA-MIB", "cbCfgClTableIP")) if mibBuilder.loadTexts: cbCfgClTableNode.setStatus('mandatory') if mibBuilder.loadTexts: cbCfgClTableNode.setDescription('Information about a particular client configuration.') cbCfgClTableIP = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 7, 1, 1, 1), IpAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbCfgClTableIP.setStatus('mandatory') if mibBuilder.loadTexts: cbCfgClTableIP.setDescription('The clients IP.') cbCfgClTableMask = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 7, 1, 1, 2), IpAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbCfgClTableMask.setStatus('mandatory') if mibBuilder.loadTexts: cbCfgClTableMask.setDescription('The clients IP Mask.') cbCfgClTableMAC = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 7, 1, 1, 3), 
PhysAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbCfgClTableMAC.setStatus('mandatory') if mibBuilder.loadTexts: cbCfgClTableMAC.setDescription('The clients MAC Address.') cbCfgClTableGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 7, 1, 1, 4), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbCfgClTableGroup.setStatus('mandatory') if mibBuilder.loadTexts: cbCfgClTableGroup.setDescription('The clients Group.') cbCfgClTableBy = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 7, 1, 1, 5), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbCfgClTableBy.setStatus('mandatory') if mibBuilder.loadTexts: cbCfgClTableBy.setDescription('By whom the client was added.') cbCfgClTableMinRate = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 7, 1, 1, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbCfgClTableMinRate.setStatus('mandatory') if mibBuilder.loadTexts: cbCfgClTableMinRate.setDescription('The clients Minimum rate (CIR).') cbCfgClTableMaxRate = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 7, 1, 1, 7), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: cbCfgClTableMaxRate.setStatus('mandatory') if mibBuilder.loadTexts: cbCfgClTableMaxRate.setDescription('The clients Maximum rate.') cbCfgClTableEncrypt = MibTableColumn((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 7, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("cbFalse", 0), ("cbTrue", 1)))).setMaxAccess("readonly") if mibBuilder.loadTexts: cbCfgClTableEncrypt.setStatus('mandatory') if mibBuilder.loadTexts: cbCfgClTableEncrypt.setDescription('The clients Encryption parameter True/False.') cbTimeDate = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 8)) cbTime = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 8, 1), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbTime.setStatus('mandatory') if mibBuilder.loadTexts: 
cbTime.setDescription('A string in the form HH:MM:SS that represents the gateway idea of the current time. Single digits should be preceeded by 0. Examples: 12:35:27 01:50:00 09:01:59') cbDate = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 8, 2), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbDate.setStatus('mandatory') if mibBuilder.loadTexts: cbDate.setDescription('A string representing the gateway idea of the current date. In order to set a different date, use the following format: <Full Month Name> <1 or 2 Digits of Day of Month>,<4 Digits of Year> Examples: September 1,1998 Januray 12, 2002') cbClientsInfoReset = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 9), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbClientsInfoReset.setStatus('mandatory') if mibBuilder.loadTexts: cbClientsInfoReset.setDescription('This parameter is applicable only for clients that were NOT added by the CCU. The gateway will delete from its lists clients information (statistics and encryption parameters) for each client registered in the system for more then cbTClientsInfoReset seconds. 
cbTClientsInfoReset must be greater then 0.') cbCCUParam = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 10)) cbCCU1 = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 10, 1), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbCCU1.setStatus('mandatory') if mibBuilder.loadTexts: cbCCU1.setDescription('IP of CCU Server #1 (set to 0.0.0.0 to disable CCU #1)') cbCCU2 = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 10, 2), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbCCU2.setStatus('mandatory') if mibBuilder.loadTexts: cbCCU2.setDescription('IP of CCU Server #2 (set to 0.0.0.0 to disable CCU #2)') cbCCU3 = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 10, 3), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbCCU3.setStatus('mandatory') if mibBuilder.loadTexts: cbCCU3.setDescription('IP of CCU Server #3 (set to 0.0.0.0 to disable CCU #3)') cbCCU4 = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 10, 4), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbCCU4.setStatus('mandatory') if mibBuilder.loadTexts: cbCCU4.setDescription('IP of CCU Server #4 (set to 0.0.0.0 to disable CCU #4)') cbCCU5 = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 10, 5), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbCCU5.setStatus('mandatory') if mibBuilder.loadTexts: cbCCU5.setDescription('IP of CCU Server #5 (set to 0.0.0.0 to disable CCU #5)') cbCCU6 = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 10, 6), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbCCU6.setStatus('mandatory') if mibBuilder.loadTexts: cbCCU6.setDescription('IP of CCU Server #6 (set to 0.0.0.0 to disable CCU #6)') cbCCU7 = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 10, 7), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbCCU7.setStatus('mandatory') if mibBuilder.loadTexts: cbCCU7.setDescription('IP of CCU Server #7 (set to 0.0.0.0 to disable CCU #7)') cbCCU8 = MibScalar((1, 3, 6, 1, 4, 1, 6247, 
3, 1, 1, 2, 10, 8), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbCCU8.setStatus('mandatory') if mibBuilder.loadTexts: cbCCU8.setDescription('IP of CCU Server #8 (set to 0.0.0.0 to disable CCU #8)') cbCCU9 = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 10, 9), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbCCU9.setStatus('mandatory') if mibBuilder.loadTexts: cbCCU9.setDescription('IP of CCU Server #9 (set to 0.0.0.0 to disable CCU #9)') cbCCU10 = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 10, 10), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbCCU10.setStatus('mandatory') if mibBuilder.loadTexts: cbCCU10.setDescription('IP of CCU Server #10 (set to 0.0.0.0 to disable CCU #10)') cbHASParam = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 11)) cbHasEnable = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 11, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbEnabled", 1), ("cbDisabled", 0)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbHasEnable.setStatus('mandatory') if mibBuilder.loadTexts: cbHasEnable.setDescription('Enables/Disables High Availability Mode.') cbHasCpu = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 11, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbHasCpu.setStatus('mandatory') if mibBuilder.loadTexts: cbHasCpu.setDescription('Maximum CPU') cbHasMemory = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 2, 11, 3), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbHasMemory.setStatus('mandatory') if mibBuilder.loadTexts: cbHasMemory.setDescription('Maximum Memory Usage') cbDiagnostics = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 3)) cbDiagTestTx = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 3, 1)) cbDiagTestTxParam = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 3, 1, 1)) cbTestTxDestIP = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 3, 1, 1, 1), 
IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbTestTxDestIP.setStatus('mandatory') if mibBuilder.loadTexts: cbTestTxDestIP.setDescription('Test Transfer Packet ID') cbTestTxType = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("cbTestTypeOnePacket", 1), ("cbTestTypeLowSpeedCont", 2), ("cbTestTypeHighSpeedCont", 3)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbTestTxType.setStatus('mandatory') if mibBuilder.loadTexts: cbTestTxType.setDescription('READ/WRITE Test Transfer Type: cbTestTypeOnePacket - one packet, cbTestTypeLowSpeedCont - Low Speed Continuous. cbTestTypeHighSpeedCont - High Speed Continuous.') cbDiagTestTxActive = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbTrue", 1), ("cbFalse", 0)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbDiagTestTxActive.setStatus('mandatory') if mibBuilder.loadTexts: cbDiagTestTxActive.setDescription('Set to 0 in order to stop Test Transfer. Set to non-0 in order to activate it. (in case cbTestTxType = 1, set to 0 and to non-zero in order to re-send the single test packet)') cbSWDownload = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 4)) cbSWServerIP = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 4, 1), IpAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbSWServerIP.setStatus('mandatory') if mibBuilder.loadTexts: cbSWServerIP.setDescription('The TFTP server IP address. The S/W file will be TFTPed from this station. 
Use 0.0.0.0 to load a different local file (without TFTP).') cbAppDownload = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 4, 2)) cbSWSourceFileName = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 4, 2, 1), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbSWSourceFileName.setStatus('mandatory') if mibBuilder.loadTexts: cbSWSourceFileName.setDescription('The software file name and its optional path (relative to the TFTP server root definition) to be downloaded from the server. Example: catvgw.dat') cbSWTargetFileName = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 4, 2, 2), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbSWTargetFileName.setStatus('mandatory') if mibBuilder.loadTexts: cbSWTargetFileName.setDescription('The S/W file name (without path) on the Gateway. Example: ram.abs WARNING: cbApplicationFileName (under cbGeneralParam) is the name of the running S/W. If cbSWTargetFileName is different from cbApplicationFileName, it will be just downloaded to the Gateway and not used until cbApplicationFileName will be changed (in CFG.INI) to be equal to cbSWTargetFileName.') cbSWDownloadStart = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 4, 2, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbTrue", 1), ("cbFalse", 0)))).setMaxAccess("writeonly") if mibBuilder.loadTexts: cbSWDownloadStart.setStatus('mandatory') if mibBuilder.loadTexts: cbSWDownloadStart.setDescription('Set cbSWDownloadStart to cbTrue in order to start the S/W download process. 
Set cbSWDownloadStart to cbFalse to interrupt (and stop) S/W download in progress (when cbSWDownloadStatus = cbDownloadInProgress).') cbSWDownloadStatus = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 4, 2, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("cbIdle", 0), ("cbDownloadInProgress", 1), ("cbERRORTFTPServernotFound", 2), ("cbERRORFileNotFound", 3), ("cbERRORNotASWFile", 4), ("cbERRORBadChecksum", 5), ("cbERRORCommunicationFailed", 6), ("cbDownloadAborted", 7)))).setMaxAccess("readonly") if mibBuilder.loadTexts: cbSWDownloadStatus.setStatus('mandatory') if mibBuilder.loadTexts: cbSWDownloadStatus.setDescription('Status of SW Download: cbIdle - Download has not started yet or has comleted and gateway already restarted with new version (not an error). cbDownloadInProgress - Download is currently in progrees (not an error). cbERRORTFTPServernotFound - Cannot find a TFTP server in the specified IP address - check and correct cbSWServerIP. cbERRORFileNotFound - Cannot find the specified file - check and correct cbSWFileName. cbERRORNotaSWFile - The specified file is not a SW file - check and correct cbSWFileName. cbERRORBadChecksum - Bad checksum - try to download again. cbERRORCommunicationFailed - Communication with server failed - try to download again. cbDownloadAborted - Download aborted by SNMP manager (cbSWDownloadStart was set to cbFalse during download).') cbFPGADownload = MibIdentifier((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 4, 3)) cbFPGASourceFileName = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 4, 3, 1), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbFPGASourceFileName.setStatus('mandatory') if mibBuilder.loadTexts: cbFPGASourceFileName.setDescription('The FPGA file name and its optional path (relative to the TFTP server root definition) to be downloaded from the server. 
Example: FPGA.DAT') cbFPGATargetFileName = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 4, 3, 2), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: cbFPGATargetFileName.setStatus('mandatory') if mibBuilder.loadTexts: cbFPGATargetFileName.setDescription('The FPGA file name (without path) on the Gateway. Example: FPGA.DAT') cbFPGADownloadStart = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 4, 3, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 0))).clone(namedValues=NamedValues(("cbTrue", 1), ("cbFalse", 0)))).setMaxAccess("writeonly") if mibBuilder.loadTexts: cbFPGADownloadStart.setStatus('mandatory') if mibBuilder.loadTexts: cbFPGADownloadStart.setDescription('Set cbFPGADownloadStart to cbTrue in order to start the FPGA download process. Set cbFPGADownloadStart to cbFalse to interrupt (and stop) FPGA download in progress (when cbFPGADownloadStatus = cbDownloadInProgress).') cbFPGADownloadStatus = MibScalar((1, 3, 6, 1, 4, 1, 6247, 3, 1, 1, 4, 3, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("cbIdle", 0), ("cbDownloadInProgress", 1), ("cbERRORTFTPServernotFound", 2), ("cbERRORFileNotFound", 3), ("cbERRORNotASWFile", 4), ("cbERRORBadChecksum", 5), ("cbERRORCommunicationFailed", 6), ("cbDownloadAborted", 7)))).setMaxAccess("readonly") if mibBuilder.loadTexts: cbFPGADownloadStatus.setStatus('mandatory') if mibBuilder.loadTexts: cbFPGADownloadStatus.setDescription('Status of FPGA Download: cbIdle - Download has not started yet or has comleted and gateway already restarted with new version (not an error). cbDownloadInProgress - Download is currently in progrees (not an error). cbERRORTFTPServernotFound - Cannot find a TFTP server in the specified IP address - check and correct cbSWServerIP. cbERRORFileNotFound - Cannot find the specified file - check and correct cbFPGAFileName. 
cbERRORNotaSWFile - The specified file is not a SW file - check and correct cbFPGAFileName. cbERRORBadChecksum - Bad checksum - try to download again. cbERRORCommunicationFailed - Communication with server failed - try to download again. cbDownloadAborted - Download aborted by SNMP manager (cbFPGADownloadStart was set to cbFalse during download).') mibBuilder.exportSymbols("EFDATA-MIB", cbClNumPacketsDiscarded=cbClNumPacketsDiscarded, cbClStatReset=cbClStatReset, cbNetGatewayDataSubnetMask=cbNetGatewayDataSubnetMask, cbStatClient=cbStatClient, cbSWDownloadStart=cbSWDownloadStart, cbFPGASourceFileName=cbFPGASourceFileName, cbStatAvrBytesPerSec=cbStatAvrBytesPerSec, cbStatNumOfPackets=cbStatNumOfPackets, cbQOSMode=cbQOSMode, cbFlushing=cbFlushing, cbStatNumPacketDiscarded=cbStatNumPacketDiscarded, cbClNumKBytes=cbClNumKBytes, cbStatAvrPktSize=cbStatAvrPktSize, cbStaticUserMaxRate=cbStaticUserMaxRate, cbGrTableMaxRate=cbGrTableMaxRate, cbStaticUserEntry=cbStaticUserEntry, cbQualityOfService=cbQualityOfService, cbGroupsTableNode=cbGroupsTableNode, cbCfgClTableMask=cbCfgClTableMask, cbMulticastEntry=cbMulticastEntry, cbFPGADownload=cbFPGADownload, cbDVBFraming=cbDVBFraming, cbClientStatistics=cbClientStatistics, cbCfgClTableMAC=cbCfgClTableMAC, cbCCU2=cbCCU2, cbDataMappingMode=cbDataMappingMode, cbCCU3=cbCCU3, cbGatewaySWReset=cbGatewaySWReset, cbStaticUserIP=cbStaticUserIP, cbCCU4=cbCCU4, cbCfgClTableMinRate=cbCfgClTableMinRate, cbClTableIP=cbClTableIP, cbClientIP=cbClientIP, cbCCU7=cbCCU7, cbDVBOutputBitRate=cbDVBOutputBitRate, cbGroupsTable=cbGroupsTable, cbCfgClTableEncrypt=cbCfgClTableEncrypt, dtmx5000=dtmx5000, cbMulticastGroup=cbMulticastGroup, spectracast=spectracast, cbSWTargetFileName=cbSWTargetFileName, cbMpeMode=cbMpeMode, cbStaticUserTable=cbStaticUserTable, cbAppDownload=cbAppDownload, cbMulticastSID=cbMulticastSID, cbPktEncrypt=cbPktEncrypt, cbCfgClTableBy=cbCfgClTableBy, cbFPGADownloadStart=cbFPGADownloadStart, cbConfigClTable=cbConfigClTable, 
cbClTableStampTime=cbClTableStampTime, cbDVBPMT=cbDVBPMT, cbStatNumBytesTXed=cbStatNumBytesTXed, cbHasEnable=cbHasEnable, cbCCU6=cbCCU6, cbNetGatewayMngIP=cbNetGatewayMngIP, cbCCU10=cbCCU10, cbTestTxDestIP=cbTestTxDestIP, cbTraceOutputChannel=cbTraceOutputChannel, cbStatNumNMSFrames=cbStatNumNMSFrames, cbSWDownloadStatus=cbSWDownloadStatus, cbHasCpu=cbHasCpu, cbClTableStartTime=cbClTableStartTime, cbQOSActive=cbQOSActive, cbConfigMulticastTable=cbConfigMulticastTable, efdata=efdata, cbDate=cbDate, cbDVBOutputParam=cbDVBOutputParam, cbDVBAuxInputType=cbDVBAuxInputType, cbDVBAuxNullPackets=cbDVBAuxNullPackets, cbDVBAuxInput=cbDVBAuxInput, cbNetGatewayDataIP=cbNetGatewayDataIP, cbStatReset=cbStatReset, cbClTableNode=cbClTableNode, cbGrTableQosMode=cbGrTableQosMode, cbNetFTP=cbNetFTP, cbDiagTestTxParam=cbDiagTestTxParam, cbGrTablePID=cbGrTablePID, cbNetTelnet=cbNetTelnet, cbApplicationFileName=cbApplicationFileName, cbDiagnostics=cbDiagnostics, cbMemoryUsage=cbMemoryUsage, cbTimeDate=cbTimeDate, cbClTableBytesInSec=cbClTableBytesInSec, cbCfgClTableGroup=cbCfgClTableGroup, cbGeneralParam=cbGeneralParam, cbStaticUserMinRate=cbStaticUserMinRate, cbClientsInfoReset=cbClientsInfoReset, cbTraceLevel=cbTraceLevel, cbClAvrBytesPerSecond=cbClAvrBytesPerSecond, cbHasMemory=cbHasMemory, cbNetworkParam=cbNetworkParam, cbStaticUserMAC=cbStaticUserMAC, cbStatGeneral=cbStatGeneral, cbMulticastTable=cbMulticastTable, cbConfig=cbConfig, cbDVBClockPolarity=cbDVBClockPolarity, cbFPGADownloadStatus=cbFPGADownloadStatus, cbClTablePacketsDiscr=cbClTablePacketsDiscr, cbClEncrEnbled=cbClEncrEnbled, cbClTableReset=cbClTableReset, cbCCU9=cbCCU9, cbNetPromiscuous=cbNetPromiscuous, cbCfgClTableMaxRate=cbCfgClTableMaxRate, cbMulticastMaxRate=cbMulticastMaxRate, cbClNumSeconds=cbClNumSeconds, cbSWVersion=cbSWVersion, cbGateway=cbGateway, cbDiagTestTx=cbDiagTestTx, cbTraceMask=cbTraceMask, cbTestTxType=cbTestTxType, cbCRCMode=cbCRCMode, cbClTableKBytesTxed=cbClTableKBytesTxed, cbCCU5=cbCCU5, 
cbHASParam=cbHASParam, cbTraceInfo=cbTraceInfo, cbTime=cbTime, cbClNumPackets=cbClNumPackets, cbStatNumClients=cbStatNumClients, cbGatewayEnabled=cbGatewayEnabled, cbDVBPAT=cbDVBPAT, cbNetDefaultGateway=cbNetDefaultGateway, cbMulticastIP=cbMulticastIP, cbStatistics=cbStatistics, cbCPULoad=cbCPULoad, cbCfgClTableIP=cbCfgClTableIP, cbFPGATargetFileName=cbFPGATargetFileName, cbStaticUserMask=cbStaticUserMask, cbCCU1=cbCCU1, cbCfgClTableNode=cbCfgClTableNode, cbSWServerIP=cbSWServerIP, cbClTable=cbClTable, cbStaticUserGroup=cbStaticUserGroup, cbNetGatewayMngSubnetMask=cbNetGatewayMngSubnetMask, cbCCUParam=cbCCUParam, cbCfgClTable=cbCfgClTable, cbConfigSTUTable=cbConfigSTUTable, cbGatewayDescription=cbGatewayDescription, cbNetUnregisteredUsers=cbNetUnregisteredUsers, cbStuffingMode=cbStuffingMode, cbSWDownload=cbSWDownload, cbCCU8=cbCCU8, cbNetDualNIC=cbNetDualNIC, cbNetMulticast=cbNetMulticast, cbDVBLlcSnap=cbDVBLlcSnap, cbFPGAFileName=cbFPGAFileName, cbGrTableIndex=cbGrTableIndex, cbClTableTotalPackets=cbClTableTotalPackets, cbGrTableMinRate=cbGrTableMinRate, cbMulticastMinRate=cbMulticastMinRate, cbMaxAllowableDelay=cbMaxAllowableDelay, cbGrTable=cbGrTable, cbStatClTable=cbStatClTable, cbSWSourceFileName=cbSWSourceFileName, cbDiagTestTxActive=cbDiagTestTxActive)
127.983373
4,910
0.765892
6,783
53,881
6.083886
0.097007
0.073279
0.128238
0.014927
0.559164
0.409819
0.362348
0.310539
0.295466
0.262122
0
0.064364
0.09688
53,881
420
4,911
128.288095
0.783687
0.005828
0
0
0
0.079903
0.26209
0.009989
0
0
0
0
0
1
0
false
0
0.014528
0
0.014528
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
603057ad2039ff4930eee3907257562856f71ff3
115
py
Python
custom_components/ziggonext/const.py
BioGeekJoey/Home-Assistant-config
1ccafc77cf8059924b272fc81b511a3418785f64
[ "MIT" ]
1
2019-07-01T21:57:06.000Z
2019-07-01T21:57:06.000Z
custom_components/ziggonext/const.py
BioGeekJoey/Home-Assistant-config
1ccafc77cf8059924b272fc81b511a3418785f64
[ "MIT" ]
3
2019-10-21T02:21:37.000Z
2019-10-21T02:31:43.000Z
custom_components/ziggonext/const.py
BioGeekJoey/hassio-config
1ccafc77cf8059924b272fc81b511a3418785f64
[ "MIT" ]
null
null
null
"""Constants for the Ziggo Mediabox Next integration.""" ZIGGO_API = "ziggo_api" CONF_COUNTRY_CODE = "country_code"
38.333333
56
0.782609
16
115
5.3125
0.6875
0.188235
0
0
0
0
0
0
0
0
0
0
0.104348
115
3
57
38.333333
0.825243
0.434783
0
0
0
0
0.35
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
603db25de26b4acf2345e32fb7dda4e46c9d48a8
9,620
py
Python
configs/rl.py
jozhang97/Side-tuning
dea345691fb7ee0230150fe56ddd644efdffa6ac
[ "MIT" ]
56
2020-01-12T05:45:59.000Z
2022-03-17T15:04:15.000Z
configs/rl.py
jozhang97/Side-tuning
dea345691fb7ee0230150fe56ddd644efdffa6ac
[ "MIT" ]
7
2020-01-28T23:14:45.000Z
2022-02-10T01:56:48.000Z
configs/rl.py
jozhang97/Side-tuning
dea345691fb7ee0230150fe56ddd644efdffa6ac
[ "MIT" ]
2
2020-02-29T14:51:23.000Z
2020-03-07T03:23:27.000Z
# Habitat configs # This should be sourced by the training script, # which must save a sacred experiment in the variable "ex" # For descriptions of all fields, see configs/core.py #################################### # Standard methods #################################### @ex.named_config def taskonomy_features(): ''' Implements an agent with some mid-level feature. From the paper: From Learning to Navigate Using Mid-Level Visual Priors (Sax et al. '19) Taskonomy: Disentangling Task Transfer Learning Amir R. Zamir, Alexander Sax*, William B. Shen*, Leonidas Guibas, Jitendra Malik, Silvio Savarese. 2018 Viable feature options are: [] ''' uuid = 'habitat_taskonomy_feature' cfg = {} cfg['learner'] = { 'perception_network': 'TaskonomyFeaturesOnlyNet', 'perception_network_kwargs': { 'extra_kwargs': { 'main_perception_network': 'TaskonomyFeaturesOnlyNet', # for sidetune } } } cfg['env'] = { 'env_specific_kwargs': { 'target_dim': 16, # Taskonomy reps: 16, scratch: 9, map_only: 1 }, 'transform_fn_pre_aggregation_fn': 'TransformFactory.independent', 'transform_fn_pre_aggregation_kwargs': { 'names_to_transforms': { 'taskonomy':'rescale_centercrop_resize((3,256,256))', }, }, 'transform_fn_post_aggregation_fn': 'TransformFactory.independent', 'transform_fn_post_aggregation_kwargs': { 'names_to_transforms': { 'taskonomy':"taskonomy_features_transform('/mnt/models/curvature_encoder.dat')", }, 'keep_unnamed': True, } } @ex.named_config def blind(): ''' Implements a blinded agent. This has no visual input, but is still able to reason about its movement via path integration. 
''' uuid = 'blind' cfg = {} cfg['learner'] = { 'perception_network': 'TaskonomyFeaturesOnlyNet', } cfg['env'] = { 'env_specific_kwargs': { 'target_dim': 16, # Taskonomy reps: 16, scratch: 9, map_only: 1 }, 'transform_fn_pre_aggregation_fn': 'TransformFactory.independent', 'transform_fn_pre_aggregation_kwargs': { 'names_to_transforms': { 'taskonomy': 'blind((8,16,16))', # 'rgb_filled': 'rescale_centercrop_resize((3,84,84))', }, }, } @ex.named_config def midtune(): # Specific type of finetune where we train the policy then open the representation to be learned. # Specifically, we take trained midlevel agents and finetune all the weights. uuid = 'habitat_midtune' cfg = {} cfg['learner'] = { 'perception_network_reinit': True, # reinitialize the perception_module, used when checkpoint is used 'rollout_value_batch_multiplier': 1, 'perception_network': 'RLSidetuneWrapper', 'perception_network_kwargs': { 'extra_kwargs': { 'main_perception_network': 'TaskonomyFeaturesOnlyNet', # for sidetune 'sidetune_kwargs': { 'n_channels_in': 3, 'n_channels_out': 8, 'normalize_pre_transfer': False, 'base_class': 'FCN5', 'base_kwargs': {'normalize_outputs': False}, 'base_weights_path': None, # user needs to specify 'side_class': 'FCN5', 'side_kwargs': {'normalize_outputs': False}, 'side_weights_path': None, # user needs to specify } } }, } cfg['saving'] = { 'checkpoint': None, } cfg['env'] = { 'env_specific_kwargs': { 'target_dim': 16, # Taskonomy reps: 16, scratch: 9, map_only: 1 }, 'transform_fn_pre_aggregation_fn': 'TransformFactory.independent', 'transform_fn_pre_aggregation_kwargs': { 'names_to_transforms': { 'rgb_filled': 'rescale_centercrop_resize((3,256,256))', }, }, } @ex.named_config def finetune(): uuid = 'habitat_finetune' cfg = {} cfg['learner'] = { 'perception_network': 'RLSidetuneWrapper', 'perception_network_kwargs': { 'extra_kwargs': { 'main_perception_network': 'TaskonomyFeaturesOnlyNet', # for sidetune 'sidetune_kwargs': { 'n_channels_in': 3, 'n_channels_out': 8, 
'normalize_pre_transfer': False, 'side_class': 'FCN5', 'side_kwargs': {'normalize_outputs': False}, 'side_weights_path': None, # user needs to specify } } }, 'rollout_value_batch_multiplier': 1, } cfg['env'] = { 'env_specific_kwargs': { 'target_dim': 16, # Taskonomy reps: 16, scratch: 9, map_only: 1 }, 'transform_fn_pre_aggregation_fn': 'TransformFactory.independent', 'transform_fn_pre_aggregation_kwargs': { 'names_to_transforms': { 'rgb_filled': 'rescale_centercrop_resize((3,256,256))', }, }, } @ex.named_config def sidetune(): uuid = 'habitat_sidetune' cfg = {} cfg['learner'] = { 'perception_network': 'RLSidetuneWrapper', 'perception_network_kwargs': { 'extra_kwargs': { 'sidetune_kwargs': { 'n_channels_in': 3, 'n_channels_out': 8, 'normalize_pre_transfer': False, 'base_class': 'TaskonomyEncoder', 'base_weights_path': None, 'base_kwargs': {'eval_only': True, 'normalize_outputs': False}, 'side_class': 'FCN5', 'side_kwargs': {'normalize_outputs': False}, 'side_weights_path': None, 'alpha_blend': True, }, 'attrs_to_remember': ['base_encoding', 'side_output', 'merged_encoding'], # things to remember for supp. 
losses / visualization } }, 'rollout_value_batch_multiplier': 1, } cfg['env'] = { 'transform_fn_pre_aggregation_fn': 'TransformFactory.independent', 'transform_fn_pre_aggregation_kwargs': { 'names_to_transforms': { 'rgb_filled': 'rescale_centercrop_resize((3,256,256))', }, }, } #################################### # Base Network #################################### @ex.named_config def rlgsn_base_resnet50(): # base is frozen by default cfg = {} cfg['learner'] = { 'perception_network': 'RLSidetuneWrapper', 'perception_network_kwargs': { 'extra_kwargs': { 'sidetune_kwargs': { 'base_class': 'TaskonomyEncoder', 'base_weights_path': None, # user needs to input 'base_kwargs': {'eval_only': True, 'normalize_outputs': False}, } } }, } @ex.named_config def rlgsn_base_fcn5s(): # base is frozen by default cfg = {} cfg['learner'] = { 'perception_network': 'RLSidetuneWrapper', 'perception_network_kwargs': { 'extra_kwargs': { 'sidetune_kwargs': { 'base_class': 'FCN5', 'base_weights_path': None, # user needs to input 'base_kwargs': {'eval_only': True, 'normalize_outputs': False}, } } }, } @ex.named_config def rlgsn_base_learned(): cfg = {} cfg['learner'] = { 'perception_network': 'RLSidetuneWrapper', 'perception_network_kwargs': { 'extra_kwargs': { 'sidetune_kwargs': { 'base_kwargs': {'eval_only': False}, } } }, } #################################### # Side Network #################################### @ex.named_config def rlgsn_side_resnet50(): # side is learned by default cfg = {} cfg['learner'] = { 'perception_network': 'RLSidetuneWrapper', 'perception_network_kwargs': { 'extra_kwargs': { 'sidetune_kwargs': { 'side_class': 'TaskonomyEncoder', 'side_weights_path': None, # user needs to input 'side_kwargs': {'eval_only': False, 'normalize_outputs': False}, } } }, } @ex.named_config def rlgsn_side_fcn5s(): # side is learned by default cfg = {} cfg['learner'] = { 'perception_network': 'RLSidetuneWrapper', 'perception_network_kwargs': { 'extra_kwargs': { 'sidetune_kwargs': { 
'side_class': 'FCN5', 'side_weights_path': None, # user needs to input 'side_kwargs': {'eval_only': False, 'normalize_outputs': False}, } } }, } @ex.named_config def rlgsn_side_frozen(): cfg = {} cfg['learner'] = { 'perception_network': 'RLSidetuneWrapper', 'perception_network_kwargs': { 'extra_kwargs': { 'sidetune_kwargs': { 'side_kwargs': {'eval_only': True}, } } }, }
33.402778
145
0.523805
846
9,620
5.638298
0.235225
0.089099
0.029979
0.036897
0.733333
0.721174
0.652201
0.620755
0.612788
0.612788
0
0.014036
0.340852
9,620
287
146
33.519164
0.738212
0.156029
0
0.609244
0
0
0.425048
0.18343
0
0
0
0
0
1
0.046218
false
0
0
0
0.046218
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
603de51b39d0171cbf957c04d7c5752dd23eec22
7,104
py
Python
src/platform_vision/scripts/platform_vision/featureMatching.py
ahmohamed1/activeStereoVisionPlatform
6c928ca242e4de68c7b15a8748bff1d9f7fa1382
[ "MIT" ]
null
null
null
src/platform_vision/scripts/platform_vision/featureMatching.py
ahmohamed1/activeStereoVisionPlatform
6c928ca242e4de68c7b15a8748bff1d9f7fa1382
[ "MIT" ]
null
null
null
src/platform_vision/scripts/platform_vision/featureMatching.py
ahmohamed1/activeStereoVisionPlatform
6c928ca242e4de68c7b15a8748bff1d9f7fa1382
[ "MIT" ]
null
null
null
#!/usr/bin/env python import numpy as np import cv2 def findCenterOfTarget(dst): return np.mean(dst, axis=0) # def kaze_match(im1_path, im2_path): def kaze_match(img1, img2): if img1.shape[2] == 1: gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) else: gray1 = img1 gray2 = img2 # initialize the AKAZE descriptor, then detect keypoints and extract # local invariant descriptors from the image detector = cv2.AKAZE_create() (kp1, descs1) = detector.detectAndCompute(gray1, None) (kp2, descs2) = detector.detectAndCompute(gray2, None) print("keypoints: {}, descriptors: {}".format(len(kp1), descs1.shape)) print("keypoints: {}, descriptors: {}".format(len(kp2), descs2.shape)) # Match the features bf = cv2.BFMatcher(cv2.NORM_HAMMING) matches = bf.knnMatch(descs1,descs2, k=2) # typo fixed # Apply ratio test good = [] for m,n in matches: if m.distance < 0.9*n.distance: good.append(m) MIN_MATCH_COUNT = 5 if len(good)>MIN_MATCH_COUNT: src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2) dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2) M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0) matchesMask = mask.ravel().tolist() h,w = gray1.shape pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) dst = cv2.perspectiveTransform(pts,M) # Find the center and draw the it center = findCenterOfTarget(dst) img2 = cv2.circle(img2,(center[0][0], center[0][1]),10, (255,0,0), -1) img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA) else: print ("Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT)) matchesMask = None draw_params = dict(matchColor = (0,255,0), # draw matches in green color singlePointColor = None, matchesMask = matchesMask, # draw only inliers flags = 2) # cv2.drawMatchesKnn expects list of lists as matches. 
img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None) cv2.imshow("AKAZE matching", img3) cv2.waitKey(10) return img3 def FLANNBasedMatcher(img1,img2): if img1.shape[2] == 1: gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) else: gray1 = img1 gray2 = img2 # Initiate SIFT detector sift = cv2.xfeatures2d.SIFT_create() # find the keypoints and descriptors with SIFT kp1, des1 = sift.detectAndCompute(img1,None) kp2, des2 = sift.detectAndCompute(img2,None) FLANN_INDEX_KDTREE = 0 index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) search_params = dict(checks = 50) flann = cv2.FlannBasedMatcher(index_params, search_params) matches = flann.knnMatch(des1,des2,k=2) # store all the good matches as per Lowe's ratio test. good = [] for m,n in matches: if m.distance < 0.7*n.distance: good.append(m) MIN_MATCH_COUNT = 5 if len(good)>MIN_MATCH_COUNT: src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2) dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2) M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0) matchesMask = mask.ravel().tolist() h,w = gray1.shape pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) dst = cv2.perspectiveTransform(pts,M) # Find the center and draw the it center = findCenterOfTarget(dst) img2 = cv2.circle(img2,(center[0][0], center[0][1]),10, (255,0,0), -1) img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA) else: print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT) matchesMask = None draw_params = dict(matchColor = (0,255,0), # draw matches in green color singlePointColor = None, matchesMask = matchesMask, # draw only inliers flags = 2) img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params) cv2.imshow("FLANNBasedMatcher", img3) cv2.waitKey(10) return img3 def BruteForceMatchingwithSIFTDescriptorsandRatioTest(img1,img2): center = None gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) gray2 
= cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) # Initiate SIFT detector sift = cv2.xfeatures2d.SIFT_create() # find the keypoints and descriptors with SIFT kp1, des1 = sift.detectAndCompute(gray1,None) kp2, des2 = sift.detectAndCompute(gray2,None) # BFMatcher with default params bf = cv2.BFMatcher() matches = bf.knnMatch(des1,des2, k=2) # Apply ratio test good = [] for m,n in matches: if m.distance < 0.75*n.distance: good.append(m) MIN_MATCH_COUNT = 10 if len(good)>MIN_MATCH_COUNT: src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2) dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2) # center = np.mean(dst_pts, axis=0) # print center[0] M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0) matchesMask = mask.ravel().tolist() h,w = gray1.shape pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) dst = cv2.perspectiveTransform(pts,M) center = findCenterOfTarget(dst) img2 = cv2.circle(img2,(center[0][0], center[0][1]),10, (255,0,0), -1) img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA) else: print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT) matchesMask = None draw_params = dict(matchColor = (0,255,0), # draw matches in green color singlePointColor = None, matchesMask = matchesMask, # draw only inliers flags = 2) # cv2.drawMatchesKnn expects list of lists as matches. 
# img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,None) img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params) cv2.imshow("Brute Force Matching", img3) cv2.waitKey(3) return img3, center cap = cv2.VideoCapture(0) state = False template = None while(True): ret, frame = cap.read() if template is None and state: r = cv2.selectROI('Frame', frame) template = frame[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])] state = False if template is not None: # frame = kaze_match(template, frame) # frame = FLANNBasedMatcher(template, frame) frame = BruteForceMatchingwithSIFTDescriptorsandRatioTest(template, frame) cv2.imshow('Frame', frame) ikey = cv2.waitKey(10) if ikey == ord('q'): break elif ikey == ord('n'): template = None state = True
34.153846
84
0.615428
990
7,104
4.353535
0.179798
0.005568
0.027146
0.020882
0.697448
0.658933
0.653828
0.640371
0.632715
0.625058
0
0.066254
0.25
7,104
207
85
34.318841
0.74268
0.132461
0
0.638298
0
0
0.037659
0
0
0
0
0
0
0
null
null
0
0.014184
null
null
0.035461
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
2
604ed9aaf3bf9d0149bbba59bfa4c33c93c49c2e
1,419
py
Python
mango/oraclefactory.py
mschneider/mango-explorer
ed50880ef80b31b679c9c89fa9bf0579391d71c9
[ "MIT" ]
null
null
null
mango/oraclefactory.py
mschneider/mango-explorer
ed50880ef80b31b679c9c89fa9bf0579391d71c9
[ "MIT" ]
null
null
null
mango/oraclefactory.py
mschneider/mango-explorer
ed50880ef80b31b679c9c89fa9bf0579391d71c9
[ "MIT" ]
1
2021-09-02T17:06:09.000Z
2021-09-02T17:06:09.000Z
# # ⚠ Warning # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT # LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN # NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # [🥭 Mango Markets](https://mango.markets/) support is available at: # [Docs](https://docs.mango.markets/) # [Discord](https://discord.gg/67jySBhxrg) # [Twitter](https://twitter.com/mangomarkets) # [Github](https://github.com/blockworks-foundation) # [Email](mailto:hello@blockworks.foundation) from .oracle import OracleProvider from .oracles.ftx import ftx from .oracles.pythnetwork import pythnetwork from .oracles.serum import serum # # 🥭 Oracle Factory # # This file allows you to create a concreate OracleProvider for a specified provider name. # def create_oracle_provider(provider_name: str) -> OracleProvider: if provider_name == "serum": return serum.SerumOracleProvider() elif provider_name == "ftx": return ftx.FtxOracleProvider() elif provider_name == "pyth": return pythnetwork.PythOracleProvider() raise Exception(f"Unknown oracle provider '{provider_name}'.")
40.542857
104
0.744186
187
1,419
5.625668
0.561497
0.068441
0.041825
0.04943
0
0
0
0
0
0
0
0.001675
0.158562
1,419
34
105
41.735294
0.876884
0.613108
0
0
0
0
0.102467
0
0
0
0
0
0
1
0.083333
false
0
0.333333
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
604f3cb983198b95e07de3e871d91347e79aa951
4,234
py
Python
experiments/tuning/50units_1.py
samuilstoychev/research_project
897bde82471ef92ded396aa31d91ec19826d4ce2
[ "MIT" ]
null
null
null
experiments/tuning/50units_1.py
samuilstoychev/research_project
897bde82471ef92ded396aa31d91ec19826d4ce2
[ "MIT" ]
null
null
null
experiments/tuning/50units_1.py
samuilstoychev/research_project
897bde82471ef92ded396aa31d91ec19826d4ce2
[ "MIT" ]
null
null
null
RAM AT BEGINNING: 0.22328948974609375 Latent replay turned on CUDA is used RAM BEFORE LOADING DATA: 0.2279205322265625 Preparing the data... SPLIT RATIO: [50000, 10000] --> mnist: 'train'-dataset consisting of 60000 samples --> mnist: 'test'-dataset consisting of 10000 samples RAM AFTER LOADING DATA: 0.2888832092285156 RAM BEFORE CLASSIFER: 2.2372283935546875 RAM AFTER CLASSIFER: 2.238208770751953 RAM BEFORE PRE-TRAINING 2.238208770751953 RAM AFTER PRE-TRAINING 2.2537994384765625 RAM BEFORE GENERATOR: 2.2537994384765625 RAM AFTER DECLARING GENERATOR: 2.2537994384765625 MACs of root classifier 412000 MACs of top classifier: 7680 RAM BEFORE REPORTING: 2.2537994384765625 Parameter-stamp... --> task: splitMNIST5-task --> model: CNN_CLASSIFIER_c10 --> hyper-params: i500-lr0.001-b128-adam --> replay: generative-VAE(MLP([50, 50, 50])--z100-c10) splitMNIST5-task--CNN_CLASSIFIER_c10--i500-lr0.001-b128-adam--generative-VAE(MLP([50, 50, 50])--z100-c10)-s13544 ----------------------------------------TOP---------------------------------------- CNNTopClassifier( (dropout2): Dropout(p=0.5, inplace=False) (fc1): Linear(in_features=50, out_features=128, bias=True) (fc2): Linear(in_features=128, out_features=10, bias=True) ) ------------------------------------------------------------------------------------------ --> this network has 7818 parameters (~0.0 million) of which: - learnable: 7818 (~0.0 million) - fixed: 0 (~0.0 million) ------------------------------------------------------------------------------------------ ----------------------------------------ROOT---------------------------------------- CNNRootClassifier( (conv1): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1)) (conv2): Conv2d(10, 10, kernel_size=(5, 5), stride=(1, 1)) (dropout1): Dropout(p=0.25, inplace=False) (fc0): Linear(in_features=1440, out_features=50, bias=True) ) ------------------------------------------------------------------------------------------ --> this network has 74820 parameters (~0.1 million) 
of which: - learnable: 74820 (~0.1 million) - fixed: 0 (~0.0 million) ------------------------------------------------------------------------------------------ ----------------------------------------GENERATOR---------------------------------------- AutoEncoderLatent( (fcE): MLP( (fcLayer1): fc_layer( (linear): LinearExcitability(in_features=50, out_features=50) (nl): ReLU() ) (fcLayer2): fc_layer( (linear): LinearExcitability(in_features=50, out_features=50) (nl): ReLU() ) ) (toZ): fc_layer_split( (mean): fc_layer( (linear): LinearExcitability(in_features=50, out_features=100) ) (logvar): fc_layer( (linear): LinearExcitability(in_features=50, out_features=100) ) ) (classifier): fc_layer( (linear): LinearExcitability(in_features=50, out_features=10) ) (fromZ): fc_layer( (linear): LinearExcitability(in_features=100, out_features=50) (nl): ReLU() ) (fcD): MLP( (fcLayer1): fc_layer( (linear): LinearExcitability(in_features=50, out_features=50) (nl): ReLU() ) (fcLayer2): fc_layer( (linear): LinearExcitability(in_features=50, out_features=50) (nl): Sigmoid() ) ) ) ------------------------------------------------------------------------------------------ --> this network has 25860 parameters (~0.0 million) of which: - learnable: 25860 (~0.0 million) - fixed: 0 (~0.0 million) ------------------------------------------------------------------------------------------ RAM BEFORE TRAINING: 2.2537994384765625 CPU BEFORE TRAINING: (27.66, 2.64) INITIALISING GPU TRACKER Training... PEAK TRAINING RAM: 2.257415771484375 Peak mem and init mem: 965 953 GPU BEFORE EVALUATION: (10.857142857142858, 12) RAM BEFORE EVALUATION: 2.257415771484375 CPU BEFORE EVALUATION: (116.92, 4.63) EVALUATION RESULTS: Precision on test-set: - Task 1: 0.9954 - Task 2: 0.9986 - Task 3: 0.9936 - Task 4: 0.9919 - Task 5: 0.9830 => Average precision over all 5 tasks: 0.9925 => Total training time = 60.9 seconds RAM AT THE END: 2.257476806640625 CPU AT THE END: (118.72, 4.65)
34.704918
112
0.559518
475
4,234
4.907368
0.355789
0.06006
0.041184
0.05148
0.366795
0.314457
0.28743
0.2574
0.19305
0.169884
0
0.149001
0.148795
4,234
121
113
34.991736
0.49778
0
0
0.209091
0
0
0.002126
0
0
0
0
0
0
0
null
null
0
0
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
2
6057e75355f41f7bc701d058fd81e53c05c436f2
561
py
Python
tests/test_run/example_conf.py
nizaevka/pycnfg
f3bf307982ba830c8b35393614be153bbfdc7da1
[ "Apache-2.0" ]
null
null
null
tests/test_run/example_conf.py
nizaevka/pycnfg
f3bf307982ba830c8b35393614be153bbfdc7da1
[ "Apache-2.0" ]
null
null
null
tests/test_run/example_conf.py
nizaevka/pycnfg
f3bf307982ba830c8b35393614be153bbfdc7da1
[ "Apache-2.0" ]
null
null
null
"""Conf as separate file.""" import logging import pycnfg CNFG = { 'path': { 'default': { 'init': pycnfg.utils.find_path, 'producer': pycnfg.Producer, 'global': {}, 'patch': {}, 'priority': 1, 'steps': [], }, }, 'logger': { 'default': { 'init': logging.getLogger('default'), 'producer': pycnfg.Producer, 'global': {}, 'patch': {}, 'priority': 1, 'steps': [], }, }, }
20.035714
49
0.385027
38
561
5.657895
0.552632
0.102326
0.204651
0.260465
0.437209
0.437209
0.437209
0.437209
0
0
0
0.00627
0.431373
561
27
50
20.777778
0.667712
0.039216
0
0.5
0
0
0.193246
0
0
0
0
0
0
1
0
false
0
0.083333
0
0.083333
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
6058cf2da87af8cee9cbfd02c17903e120351b07
2,758
py
Python
weightnet_pytorch.py
khy0809/WeightNet
cd5ea53b42c6169ffd5a0d7d883788fdc871cd1e
[ "MIT" ]
null
null
null
weightnet_pytorch.py
khy0809/WeightNet
cd5ea53b42c6169ffd5a0d7d883788fdc871cd1e
[ "MIT" ]
null
null
null
weightnet_pytorch.py
khy0809/WeightNet
cd5ea53b42c6169ffd5a0d7d883788fdc871cd1e
[ "MIT" ]
null
null
null
import torch.nn.functional as F import torch.nn as nn class WeightNet(nn.Module): r"""Applies WeightNet to a standard convolution. The grouped fc layer directly generates the convolutional kernel, this layer has M*inp inputs, G*oup groups and oup*inp*ksize*ksize outputs. M/G control the amount of parameters. """ def __init__(self, inp, oup, ksize, stride): super().__init__() self.M = 2 self.G = 2 self.pad = ksize // 2 inp_gap = max(16, inp//16) self.inp = inp self.oup = oup self.ksize = ksize self.stride = stride self.wn_fc1 = nn.Conv2d(inp_gap, self.M*oup, 1, 1, 0, groups=1, bias=True) self.sigmoid = nn.Sigmoid() self.wn_fc2 = nn.Conv2d(self.M*oup, oup*inp*ksize*ksize, 1, 1, 0, groups=self.G*oup, bias=False) def forward(self, x, x_gap): x_w = self.wn_fc1(x_gap) x_w = self.sigmoid(x_w) x_w = self.wn_fc2(x_w) # if x.shape[0] == 1: # case of batch size = 1 # x_w = x_w.reshape(self.oup, self.inp, self.ksize, self.ksize) # x = F.conv2d(x, weight=x_w, stride=self.stride, padding=self.pad) # return x x = x.reshape(1, -1, x.shape[2], x.shape[3]) x_w = x_w.reshape(-1, self.oup, self.inp, self.ksize, self.ksize) x = F.conv2d(x, weight=x_w, stride=self.stride, padding=self.pad, groups=x_w.shape[0]) x = x.reshape(-1, self.oup, x.shape[2], x.shape[3]) return x class WeightNet_DW(nn.Module): r""" Here we show a grouping manner when we apply WeightNet to a depthwise convolution. The grouped fc layer directly generates the convolutional kernel, has fewer parameters while achieving comparable results. This layer has M/G*inp inputs, inp groups and inp*ksize*ksize outputs. 
""" def __init__(self, inp, ksize, stride): super().__init__() self.M = 2 self.G = 2 self.pad = ksize // 2 inp_gap = max(16, inp//16) self.inp = inp self.ksize = ksize self.stride = stride self.wn_fc1 = nn.Conv2d(inp_gap, self.M//self.G*inp, 1, 1, 0, groups=1, bias=True) self.sigmoid = nn.Sigmoid() self.wn_fc2 = nn.Conv2d(self.M//self.G*inp, inp*ksize*ksize, 1, 1, 0, groups=inp, bias=False) def forward(self, x, x_gap): x_w = self.wn_fc1(x_gap) x_w = self.sigmoid(x_w) x_w = self.wn_fc2(x_w) x = x.reshape(1, -1, x.shape[2], x.shape[3]) x_w = x_w.reshape(-1, 1, 1, self.ksize, self.ksize) x = F.conv2d(x, weight=x_w, stride=self.stride, padding=self.pad, groups=x_w.shape[0]) x = x.reshape(-1, self.inp, x.shape[2], x.shape[3]) return x
34.049383
126
0.595722
460
2,758
3.454348
0.184783
0.026432
0.022656
0.012587
0.707363
0.690371
0.690371
0.662681
0.636249
0.636249
0
0.032689
0.267948
2,758
80
127
34.475
0.754334
0.252719
0
0.653061
1
0
0
0
0
0
0
0
0
1
0.081633
false
0
0.040816
0
0.204082
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
605e0a5917bf48fc636e03ec8e849fb23b2ee34d
580
py
Python
2020/Day6.py
vypxl/aoc
4187837ecd8bf7464efa4953588b8c53d5675cfb
[ "WTFPL" ]
1
2022-01-08T23:39:52.000Z
2022-01-08T23:39:52.000Z
2020/Day6.py
vypxl/aoc
4187837ecd8bf7464efa4953588b8c53d5675cfb
[ "WTFPL" ]
null
null
null
2020/Day6.py
vypxl/aoc
4187837ecd8bf7464efa4953588b8c53d5675cfb
[ "WTFPL" ]
2
2020-12-19T16:44:54.000Z
2020-12-19T19:00:55.000Z
#! /usr/bin/env python # pylint: disable=unused-wildcard-import from util import * def parse(inp): return compose(list, map(compose(list, map(set), str.splitlines)))(inp.split('\n\n')) def p1(inp): return compose(sum, map(compose(count, reduce(set.union))))(inp) def p2(inp): return compose(sum, map(compose(count, reduce(set.intersection))))(inp) def main(): inp = parse(data()) print(f"Solution for part 1:\n{p1(inp)}") print(f"Solution for part 2:\n{p2(inp)}") if __name__ == "__main__": main() # Solution part 1: 6551 # Solution part 2: 3358
24.166667
89
0.660345
90
580
4.166667
0.477778
0.072
0.128
0.101333
0.341333
0.229333
0.229333
0.229333
0.229333
0
0
0.03272
0.156897
580
23
90
25.217391
0.734151
0.17931
0
0
0
0
0.15678
0
0
0
0
0
0
1
0.307692
false
0
0.076923
0.230769
0.615385
0.153846
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
2
6063b0fdb80a92b41934535bd05f1e61acf89996
2,015
py
Python
SecurityController.py
juanmafn/control-horario-iti
8446eb52faaf977fbdad05558f70d9e02439aa43
[ "Apache-2.0" ]
null
null
null
SecurityController.py
juanmafn/control-horario-iti
8446eb52faaf977fbdad05558f70d9e02439aa43
[ "Apache-2.0" ]
null
null
null
SecurityController.py
juanmafn/control-horario-iti
8446eb52faaf977fbdad05558f70d9e02439aa43
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 # coding: utf8 __author__ = "Juan Manuel Fernández Nácher" from requests.auth import HTTPBasicAuth import requests class SecurityController: __instance = None @staticmethod def getInstance(): if SecurityController.__instance is None: SecurityController() return SecurityController.__instance def __init__(self): if SecurityController.__instance is None: SecurityController.__instance = self self.credentials = {} self.username = 'user' self.password = 'pass' self.urlBase = 'https://intranet.iti.upv.es' self.urlParcialInicial = '/controlhorario' def isLogged(self, chatId): return chatId in self.credentials and \ self.credentials[chatId][self.username] is not None and \ self.credentials[chatId][self.password] is not None def getHTTPBasicAuth(self, chatId): username = self.credentials[chatId][self.username] password = self.credentials[chatId][self.password] return HTTPBasicAuth(username, password) def login(self, chatId, username, password): self.credentials[chatId] = {self.username: username, self.password: password} def setUsername(self, chatId, username): if chatId not in self.credentials: self.credentials[chatId] = {} self.credentials[chatId][self.username] = username def setPassword(self, chatId, password): if chatId not in self.credentials: self.credentials[chatId] = {} self.credentials[chatId][self.password] = password def logout(self, chatId): if chatId in self.credentials: del self.credentials[chatId] def testCredentials(self, chatId): request = requests.get(self.urlBase + self.urlParcialInicial, auth=self.getHTTPBasicAuth(chatId)) if request.status_code == 200: return True else: self.logout(chatId) return False
33.032787
105
0.656576
206
2,015
6.330097
0.296117
0.172546
0.161043
0.172546
0.367331
0.280675
0.113497
0.113497
0.113497
0.113497
0
0.003313
0.251117
2,015
60
106
33.583333
0.860835
0.016873
0
0.130435
0
0
0.039414
0
0
0
0
0
0
1
0.195652
false
0.173913
0.043478
0.021739
0.391304
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
2
6068f8b9c76144cdba6cbb25cb059560f20c7950
66
py
Python
couchbase_readme/__init__.py
thejcfactor/couchbase-python-readme
7412b250cd62cbc1c7b74cd348df3259142511fd
[ "Apache-2.0" ]
null
null
null
couchbase_readme/__init__.py
thejcfactor/couchbase-python-readme
7412b250cd62cbc1c7b74cd348df3259142511fd
[ "Apache-2.0" ]
null
null
null
couchbase_readme/__init__.py
thejcfactor/couchbase-python-readme
7412b250cd62cbc1c7b74cd348df3259142511fd
[ "Apache-2.0" ]
null
null
null
# Version of couchbase-python-readme package __version__ = "0.1.1"
33
44
0.772727
10
66
4.7
0.8
0
0
0
0
0
0
0
0
0
0
0.050847
0.106061
66
2
45
33
0.745763
0.636364
0
0
0
0
0.217391
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
6069dbae583241be0be12516491bd2fb326af732
2,476
py
Python
tools/generate_praat_samples.py
MichaelGoodale/opensauce-python
cafad071fa1ed675b4e7177b37ed41af94b39c5f
[ "Apache-2.0" ]
38
2015-02-10T08:35:50.000Z
2022-03-15T10:56:40.000Z
tools/generate_praat_samples.py
MichaelGoodale/opensauce-python
cafad071fa1ed675b4e7177b37ed41af94b39c5f
[ "Apache-2.0" ]
37
2015-09-23T00:17:07.000Z
2022-02-24T17:52:56.000Z
tools/generate_praat_samples.py
CobiELF/opensauce-python
03c278ca92b150188821dadfc9702ff9f939aa4e
[ "Apache-2.0" ]
11
2018-08-28T06:41:41.000Z
2022-01-21T05:07:40.000Z
# Script to generate raw Praat samples from test wav files # The data is used for comparison in unit tests # Licensed under Apache v2 (see LICENSE) import sys import os import glob import numpy as np from opensauce.praat import praat_raw_pitch, praat_raw_formants def save_samples(data, fn, col_name, sample, out_dir): """Dump data in txt format using fn, col_name, and sample strings in file name """ fn = os.path.splitext(os.path.basename(fn))[0] fn = '-'.join(('sample', fn, col_name, sample)) fn = os.path.join(out_dir, fn) + '.txt' np.savetxt(fn, data) def main(wav_dir, out_dir): # Find all .wav files in test/data directory wav_files = glob.glob(os.path.join(wav_dir, '*.wav')) # Generate Praat data for each wav file and save it to text files praat_path = '/usr/bin/praat' for wav_file in wav_files: wav_basename = os.path.basename(wav_file) print('Processing wav file {}'.format(wav_file)) # Generate raw Praat pitch samples # Use VoiceSauce default parameter values t_raw, F0_raw = praat_raw_pitch(wav_file, praat_path, frame_shift=1, method='cc', min_pitch=40, max_pitch=500, silence_threshold=0.03, voice_threshold=0.45, octave_cost=0.01, octave_jumpcost=0.35, voiced_unvoiced_cost=0.14, kill_octave_jumps=0, interpolate=0 smooth=0, smooth_bandwidth=5) # Save raw Praat pitch samples # Save F0 data to text file save_samples(t_raw, wav_basename, 'ptF0', '1ms', out_dir) save_samples(F0_raw, wav_basename, 'pF0', '1ms', out_dir) # Generate raw Praat formant samples # Use VoiceSauce default parameter values estimates_raw = praat_raw_formants(wav_file, praat_path, frame_shift=1, window_size=25, num_formants=4, max_formant_freq=6000) # Save raw Praat formant samples to text files for n in estimates_raw: save_samples(estimates_raw[n], wav_basename, n, '1ms', out_dir) if __name__ == '__main__': main(sys.argv[1], sys.argv[2])
39.301587
79
0.571082
321
2,476
4.190031
0.367601
0.041636
0.035688
0.022305
0.102602
0.102602
0.040149
0
0
0
0
0.027812
0.346527
2,476
62
80
39.935484
0.803461
0.200323
0
0
1
0
0.041622
0
0
0
0
0
0
0
null
null
0
0.142857
null
null
0.028571
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
2
6074ff76122c2643f5b1555e4010168e6ecae260
694
py
Python
party/autocomplete_light_registry.py
opendream/asip
20583aca6393102d425401d55ea32ac6b78be048
[ "MIT" ]
null
null
null
party/autocomplete_light_registry.py
opendream/asip
20583aca6393102d425401d55ea32ac6b78be048
[ "MIT" ]
8
2020-03-24T17:11:49.000Z
2022-01-13T01:18:11.000Z
party/autocomplete_light_registry.py
opendream/asip
20583aca6393102d425401d55ea32ac6b78be048
[ "MIT" ]
null
null
null
import autocomplete_light
from party.functions import portfolio_render_reference
from party.models import Portfolio


class PortfolioAutocomplete(autocomplete_light.AutocompleteModelBase):
    """Autocomplete source for Portfolio objects, ordered by '-ordering'."""

    choices = Portfolio.objects.filter().order_by('-ordering')
    search_fields = ['title']
    display_edit_link = True
    field_name = 'portfolios'

    def choice_label(self, choice):
        """Render the human-readable label for one portfolio choice."""
        return portfolio_render_reference(
            choice, self.display_edit_link, self.field_name)

    def choice_html(self, choice):
        """Render the HTML fragment for one choice from value and label."""
        value = self.choice_value(choice)
        label = self.choice_label(choice)
        return self.choice_html_format % (value, label)


autocomplete_light.register(Portfolio, PortfolioAutocomplete)
26.692308
90
0.753602
77
694
6.519481
0.493506
0.099602
0.095618
0
0
0
0
0
0
0
0
0
0.167147
694
25
91
27.76
0.868512
0
0
0
0
0
0.034632
0
0
0
0
0
0
1
0.133333
false
0
0.2
0.133333
0.8
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
2
607af487208ebf2b12c8a0b606ed1c87d1bd52b2
735
py
Python
rocketgram/api/input_venue_message_content.py
rocketgram/rocketgram
b94d8f83e577c0618a650c113d688ef8689ac3f5
[ "MIT" ]
35
2019-09-19T22:56:22.000Z
2022-03-12T10:49:47.000Z
rocketgram/api/input_venue_message_content.py
rocketgram/rocketgram
b94d8f83e577c0618a650c113d688ef8689ac3f5
[ "MIT" ]
2
2020-10-20T05:24:25.000Z
2021-03-27T18:21:23.000Z
rocketgram/api/input_venue_message_content.py
rocketgram/rocketgram
b94d8f83e577c0618a650c113d688ef8689ac3f5
[ "MIT" ]
4
2020-06-26T01:12:30.000Z
2022-01-16T13:55:47.000Z
# Copyright (C) 2015-2021 by Vd. # This file is part of Rocketgram, the modern Telegram bot framework. # Rocketgram is released under the MIT License (see LICENSE). from dataclasses import dataclass from typing import Optional from .input_message_content import InputMessageContent @dataclass(frozen=True) class InputVenueMessageContent(InputMessageContent): """\ Represents InputVenueMessageContent object: https://core.telegram.org/bots/api#inputvenuemessagecontent """ latitude: float longitude: float title: str address: str foursquare_id: Optional[str] = None foursquare_type: Optional[str] = None google_place_id: Optional[str] = None google_place_type: Optional[str] = None
27.222222
69
0.75102
86
735
6.325581
0.639535
0.080882
0.110294
0.0625
0.095588
0
0
0
0
0
0
0.013158
0.172789
735
26
70
28.269231
0.881579
0.357823
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.230769
0
0.923077
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
1
0
0
2
607bbe26dd68fd9d15e8491b0b0ca819b9a877a8
3,688
py
Python
test/python/topology/test_types.py
markheger/streamsx.topology
8118513146399fa6a9490a1debd8037615d7acd1
[ "Apache-2.0" ]
31
2015-06-24T06:21:14.000Z
2020-08-28T21:45:50.000Z
test/python/topology/test_types.py
markheger/streamsx.topology
8118513146399fa6a9490a1debd8037615d7acd1
[ "Apache-2.0" ]
1,203
2015-06-15T02:11:49.000Z
2021-03-22T09:47:54.000Z
test/python/topology/test_types.py
markheger/streamsx.topology
8118513146399fa6a9490a1debd8037615d7acd1
[ "Apache-2.0" ]
53
2015-05-28T21:14:16.000Z
2021-12-23T12:58:59.000Z
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016
import unittest
import sys
import dill
import pickle
import datetime
import time
import random

from streamsx.spl.types import Timestamp
from streamsx.topology import schema


class TestTypes(unittest.TestCase):
    """Unit tests for streamsx.spl.types.Timestamp."""

    def test_Timestamp(self):
        """Construction, tuple behaviour and conversion to float time."""
        s = random.randint(0, 999999999999)
        ns = random.randint(0, 1000000000)
        mid = random.randint(0, 200000)
        ts = Timestamp(s, ns, mid)
        self.assertEqual(s, ts.seconds)
        self.assertEqual(ns, ts.nanoseconds)
        self.assertEqual(mid, ts.machine_id)

        # A Timestamp is a 3-tuple of (seconds, nanoseconds, machine_id)
        self.assertEqual(Timestamp, type(ts))
        self.assertTrue(isinstance(ts, tuple))
        self.assertEqual(3, len(ts))
        self.assertEqual(s, ts[0])
        self.assertEqual(ns, ts[1])
        self.assertEqual(mid, ts[2])

        ts2 = Timestamp(ts.seconds, ts.nanoseconds, ts.machine_id)
        self.assertEqual(ts, ts2)

        # Float seconds are truncated to int; machine_id defaults to 0
        now = time.time()
        ts2 = Timestamp(now, 0)
        self.assertEqual(int(now), ts2.seconds)
        self.assertEqual(0, ts2.nanoseconds)
        self.assertEqual(0, ts2.machine_id)

        s = random.randint(0, 999999999999)
        ns = random.randint(0, 1000000000)
        ts = Timestamp(s, ns)
        self.assertEqual(s, ts.seconds)
        self.assertEqual(ns, ts.nanoseconds)
        self.assertEqual(0, ts.machine_id)

        # time() returns seconds plus fractional nanoseconds as a float
        ft = ts.time()
        self.assertIsInstance(ft, float)
        eft = s + (ns / 1000.0 / 1000.0 / 1000.0)
        self.assertEqual(eft, ft)

        tsft = Timestamp.from_time(23423.02, 93)
        self.assertEqual(23423, tsft.seconds)
        self.assertEqual(20*1000.0*1000.0, float(tsft.nanoseconds))
        self.assertEqual(93, tsft.machine_id)

    def test_timestamp_pickle(self):
        """Timestamp round-trips through pickle."""
        ts = Timestamp(1,2,3)
        tsp = pickle.loads(pickle.dumps(ts))
        self.assertEqual(ts, tsp)

    def test_timestamp_dill(self):
        """Timestamp round-trips through dill."""
        ts = Timestamp(4,5,6)
        tsp = dill.loads(dill.dumps(ts))
        self.assertEqual(ts, tsp)

    def test_timestamp_now(self):
        """Timestamp.now() is not earlier than the current wall clock."""
        now = time.time()
        ts = Timestamp.now()
        self.assertTrue(ts.time() >= now)

    def test_timestamp_nanos(self):
        """nanoseconds must lie within [0, 999999999]."""
        Timestamp(1, 0)
        Timestamp(1, 999999999)
        self.assertRaises(ValueError, Timestamp, 1, -1)
        self.assertRaises(ValueError, Timestamp, 1, -2)
        self.assertRaises(ValueError, Timestamp, 1, 1000000000)
        self.assertRaises(ValueError, Timestamp, 1, 5000000000)

    def test_TimestampToDatetime(self):
        """datetime() produces a naive datetime with the expected fields."""
        # 2017-06-04 11:48:25.008880
        ts = Timestamp(1496576905, 888000000, 0)
        dt = ts.datetime()
        self.assertIsInstance(dt, datetime.datetime)
        self.assertIsNone(dt.tzinfo)
        self.assertEqual(2017, dt.year)
        self.assertEqual(6, dt.month)
        self.assertEqual(4, dt.day)
        self.assertEqual(11, dt.hour)
        self.assertEqual(48, dt.minute)
        self.assertEqual(25, dt.second)

    def test_DatetimeToTimestamp(self):
        """from_datetime() round-trips, and Timestamp duck-types as datetime."""
        dt = datetime.datetime.now()
        ts = Timestamp.from_datetime(dt)
        self.assertEqual(dt, ts.datetime())
        self.assertEqual(0, ts.machine_id)

        ts = Timestamp.from_datetime(dt, 892)
        self.assertEqual(dt, ts.datetime())
        self.assertEqual(892, ts.machine_id)

        dt = ts.datetime()
        # repeated calls must return the identical object (asserted identity)
        self.assertIs(dt, ts.datetime())

        # Check Timestamp is duck-typed as a datetime
        self.assertEqual(dt.year, ts.year)
        self.assertEqual(dt.month, ts.month)
        self.assertEqual(dt.day, ts.day)
        self.assertEqual(dt.hour, ts.hour)
        self.assertEqual(dt.minute, ts.minute)
        self.assertEqual(dt.second, ts.second)
        self.assertEqual(dt.microsecond, ts.microsecond)
        self.assertEqual(dt.tzinfo, ts.tzinfo)
        self.assertEqual(dt.ctime(), ts.ctime())
30.479339
65
0.663232
483
3,688
5.020704
0.217391
0.247423
0.077113
0.057732
0.282887
0.181443
0.171546
0.136907
0.136907
0.101443
0
0.075335
0.211768
3,688
120
66
30.733333
0.758858
0.035792
0
0.191489
0
0
0
0
0
0
0
0
0.531915
1
0.074468
false
0
0.095745
0
0.180851
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
2
607dcd9a637122c323880d5537bfbc376d5285a2
1,568
py
Python
antipetros_discordbot/utility/enums.py
Preen1/Antipetros_Discord_Bot
25143f09faf7abede35dee5672a7402595680e9b
[ "MIT" ]
null
null
null
antipetros_discordbot/utility/enums.py
Preen1/Antipetros_Discord_Bot
25143f09faf7abede35dee5672a7402595680e9b
[ "MIT" ]
null
null
null
antipetros_discordbot/utility/enums.py
Preen1/Antipetros_Discord_Bot
25143f09faf7abede35dee5672a7402595680e9b
[ "MIT" ]
null
null
null
# region [Imports]

# * Standard Library Imports -->
from enum import Enum, Flag, auto

# endregion[Imports]


class RequestStatus(Enum):
    """HTTP status codes distinguished by the bot."""

    Ok = 200
    NotFound = 404
    NotAuthorized = 401


class WatermarkPosition(Flag):
    """Composable flags for where a watermark may be placed."""

    Top = auto()
    Bottom = auto()
    Left = auto()
    Right = auto()
    Center = auto()


# The flag pairs considered valid watermark placements.
WATERMARK_COMBINATIONS = {
    WatermarkPosition.Left | WatermarkPosition.Top,
    WatermarkPosition.Left | WatermarkPosition.Bottom,
    WatermarkPosition.Right | WatermarkPosition.Top,
    WatermarkPosition.Right | WatermarkPosition.Bottom,
    WatermarkPosition.Center | WatermarkPosition.Top,
    WatermarkPosition.Center | WatermarkPosition.Bottom,
    WatermarkPosition.Center | WatermarkPosition.Left,
    WatermarkPosition.Center | WatermarkPosition.Right,
    WatermarkPosition.Center | WatermarkPosition.Center,
}


class DataSize(Enum):
    """Binary size units; each member's value is its size in bytes."""

    Bytes = 1024 ** 0
    KiloBytes = 1024 ** 1
    MegaBytes = 1024 ** 2
    GigaBytes = 1024 ** 3
    TerraBytes = 1024 ** 4

    @property
    def short_name(self):
        """Lower-case abbreviation such as 'b', 'kb', 'mb'."""
        if self.name == "Bytes":
            return 'b'
        return self.name[0].lower() + 'b'

    def convert(self, in_bytes: int, round_digits=3, annotate=False):
        """Convert a byte count into this unit, optionally suffixed with short_name."""
        amount = round(in_bytes / self.value, ndigits=round_digits)
        if annotate is True:
            return f"{amount} {self.short_name}"
        return amount
30.153846
78
0.607781
143
1,568
6.594406
0.41958
0.146341
0.212089
0.097561
0.133616
0
0
0
0
0
0
0.032997
0.304209
1,568
51
79
30.745098
0.831347
0.042092
0
0
0
0
0.00534
0
0
0
0
0
0
1
0.055556
false
0
0.027778
0
0.638889
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
2
607ed6b49fd5faa8b880535cd9e2296fedcaefcb
3,564
py
Python
scripts/models.py
SkewedAspect/precursors-server
0a15c5d14faa59a57082cdcd5639429cc56c082c
[ "MIT" ]
5
2015-04-17T07:43:23.000Z
2020-09-24T21:44:47.000Z
scripts/models.py
SkewedAspect/precursors-server
0a15c5d14faa59a57082cdcd5639429cc56c082c
[ "MIT" ]
null
null
null
scripts/models.py
SkewedAspect/precursors-server
0a15c5d14faa59a57082cdcd5639429cc56c082c
[ "MIT" ]
null
null
null
import rkit
from rkit import models

#TODO: Move out into some other file. Settings?
#rkit.connect("localhost", 8081)


class Game(models.Model):
    # A game title; everything else links back to one of these.
    name = models.StringField(required=True)


class Entity(models.Model):
    # An in-world object belonging to a game (e.g. a ship).
    type = models.StringField(choices=["ship", "other"], default="other")
    name = models.StringField(required=True)
    game = models.LinkField(related_model=Game, related_name="entities", required=True)


class ItemDefinition(models.Model):
    # Template for items; stored in its own bucket.
    class Meta:
        bucket_name = "item_def"

    name = models.StringField(required=True)
    game = models.LinkField(related_model=Game, related_name="item_defs", required=True)


class Item(models.Model):
    # A concrete instance of an ItemDefinition.
    definition = models.LinkField(related_model=ItemDefinition, related_name="instances", required=True)


class Class(models.Model):
    # A character class with one primary attribute.
    name = models.StringField(required=True, index=True)
    primary_stat = models.StringField(choices=["strength", "dexterity", "constitution", "intelligence", "charisma", "willpower"], required=True)
    game = models.LinkField(related_model=Game, related_name="classes", required=True)


class Power(models.Model):
    # An ability, optionally tied to a class. The stored field name is
    # "class" ('class_' avoids the Python keyword).
    name = models.StringField(required=True)
    class_ = models.LinkField(name="class", related_model=Class, related_name="powers")
    game = models.LinkField(related_model=Game, related_name="powers", required=True)


class Account(models.Model):
    # A player account keyed by email.
    email = models.StringField(required=True, primary=True)
    real_name = models.StringField(index=True)
    nickname = models.StringField(index=True)


class Credential(models.Model):
    # Login credentials for an account (local password hashes only).
    type = models.StringField(choices=["local"], default="local", required=True)
    prf = models.StringField(required=True)
    hash = models.StringField()
    salt = models.StringField()
    iterations = models.NumberField()
    account = models.LinkField(related_model=Account, related_name="credentials")


class Character(models.Model):
    # A playable character owned by an account within one game.
    account = models.LinkField(related_model=Account, related_name="characters", required=True)
    game = models.LinkField(related_model=Game, related_name="characters", required=True)
    first_name = models.StringField(required=True)
    middle_name = models.StringField()
    last_name = models.StringField(required=True)
    nickname = models.StringField()

    # Character stuff
    strength = models.NumberField(required=True, default=1.0)
    dexterity = models.NumberField(required=True, default=1.0)
    constitution = models.NumberField(required=True, default=1.0)
    intelligence = models.NumberField(required=True, default=1.0)
    charisma = models.NumberField(required=True, default=1.0)
    willpower = models.NumberField(required=True, default=1.0)

    level = models.NumberField(required=True, default=1)
    experience = models.NumberField(required=True, default=0)

    class_ = models.LinkField(name="class", related_model=Class, related_name="characters", required=True)

    # Ships, cars, etc.
    possession = models.LinkCollection(related_model=Entity)

    # Guns, armor, etc.
    inventory = models.LinkCollection(related_model=Item)

    def full_name(self):
        """Return 'first middle last'.

        NOTE(review): middle_name is not required; if it is unset this may
        render a placeholder (e.g. 'None') in the middle — confirm the
        StringField default before relying on this output.
        """
        return "{} {} {}".format(self.first_name, self.middle_name, self.last_name)


class Subscription(models.Model):
    # Billing arrangement tying an account to a game.
    type = models.StringField(choices=["unbilled", "timespan", "hours"], default="unbilled", required=True)
    expires = models.StringField(null=True)
    game = models.LinkField(related_model=Game, related_name="subscriptions", required=True)
    account = models.LinkField(related_model=Account, related_name="subscriptions", required=True)
37.914894
144
0.739899
418
3,564
6.212919
0.217703
0.138621
0.084713
0.103966
0.525606
0.446284
0.386985
0.282249
0.168656
0.149403
0
0.005825
0.132997
3,564
93
145
38.322581
0.834628
0.036195
0
0.067797
0
0
0.072595
0
0
0
0
0.010753
0
1
0.016949
false
0
0.033898
0.016949
0.983051
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
1
0
0
2
6088160b88a564267f30076f46a3a0bfc40e702b
4,687
py
Python
ldap_peoples/hash_functions.py
fx74/django-ldap-academia-ou-manager
c5bffa963e389f970e1a8e257fe107ebbc201b54
[ "BSD-2-Clause" ]
16
2019-01-13T10:37:20.000Z
2021-11-25T09:51:19.000Z
ldap_peoples/hash_functions.py
fx74/django-ldap-academia-ou-manager
c5bffa963e389f970e1a8e257fe107ebbc201b54
[ "BSD-2-Clause" ]
1
2019-04-02T14:26:35.000Z
2019-04-02T14:26:35.000Z
ldap_peoples/hash_functions.py
fx74/django-ldap-academia-ou-manager
c5bffa963e389f970e1a8e257fe107ebbc201b54
[ "BSD-2-Clause" ]
4
2019-01-17T14:50:33.000Z
2020-12-03T11:47:05.000Z
import crypt  # NOTE: stdlib crypt is deprecated and removed in Python 3.13
# BUGFIX: base64.encodestring was removed in Python 3.9; encodebytes is the
# long-standing identical replacement.
from base64 import encodebytes

try:
    from django.conf import settings
    _CHARSET = settings.DEFAULT_CHARSET
    _LDAP_SALT_LENGHT = settings.LDAP_PASSWORD_SALT_SIZE
except Exception:
    # Django not configured (or settings missing): fall back to defaults.
    _CHARSET = 'utf-8'
    _LDAP_SALT_LENGHT = 8

from hashlib import (sha1, sha256, sha384, sha512)
from passlib.hash import (ldap_plaintext, lmhash, nthash,
                          ldap_md5, ldap_md5_crypt,
                          ldap_salted_md5,
                          ldap_sha1, ldap_salted_sha1,
                          atlassian_pbkdf2_sha1,
                          ldap_sha256_crypt,
                          ldap_sha512_crypt)
from os import urandom

# how many bytes the salt is long


def _plain_digest(hash_func, new_value, enc):
    """Return '{enc}<base64(digest)>' for an unsalted hashlib constructor."""
    digest = hash_func(new_value.encode(_CHARSET)).digest()
    return '{%s}%s' % (enc, encodebytes(digest).decode(_CHARSET)[:-1])


def _salted_digest(hash_func, new_value, enc, salt_length):
    """Return '{enc}<base64(digest+salt)>' using a random salt of salt_length bytes."""
    salt = urandom(salt_length)
    h = hash_func(new_value.encode(_CHARSET))
    h.update(salt)
    hash_encoded = encodebytes(h.digest() + salt)
    return '{%s}%s' % (enc, hash_encoded.decode(_CHARSET)[:-1])


def encode_secret(enc, new_value=None):
    """Encode new_value as an LDAP-style password hash of scheme `enc`.

    https://docs.python.org/3.5/library/hashlib.html
    http://passlib.readthedocs.io/en/stable/lib/passlib.hash.ldap_std.html

    Returns None for unknown schemes.
    """
    password_renewed = None
    if enc == 'Plaintext':
        password_renewed = ldap_plaintext.hash(new_value)
    elif enc == 'NT':
        password_renewed = nthash.hash(new_value)
    elif enc == 'LM':
        password_renewed = lmhash.hash(new_value)
    elif enc == 'MD5':
        password_renewed = ldap_md5.hash(new_value.encode(_CHARSET))
    elif enc == 'SMD5':
        password_renewed = ldap_salted_md5.hash(new_value.encode(_CHARSET))
    elif enc == 'SHA':
        password_renewed = ldap_sha1.hash(new_value.encode(_CHARSET))
    elif enc == 'SSHA':
        # historical behaviour: SSHA always uses an 8-byte salt
        password_renewed = _salted_digest(sha1, new_value, enc, 8)
    elif enc == 'SHA256':
        password_renewed = _plain_digest(sha256, new_value, enc)
    elif enc == 'SSHA256':
        password_renewed = _salted_digest(sha256, new_value, enc, _LDAP_SALT_LENGHT)
    elif enc == 'SHA384':
        password_renewed = _plain_digest(sha384, new_value, enc)
    elif enc == 'SSHA384':
        password_renewed = _salted_digest(sha384, new_value, enc, _LDAP_SALT_LENGHT)
    elif enc == 'SHA512':
        password_renewed = _plain_digest(sha512, new_value, enc)
    elif enc == 'SSHA512':
        password_renewed = _salted_digest(sha512, new_value, enc, _LDAP_SALT_LENGHT)
    elif enc == 'PKCS5S2':
        return atlassian_pbkdf2_sha1.encrypt(new_value)
    elif enc == 'CRYPT':
        password_renewed = crypt.crypt(new_value, crypt.mksalt(crypt.METHOD_CRYPT))
        password_renewed = '{%s}%s' % (enc, password_renewed)
    elif enc == 'CRYPT-MD5':
        # this worked too
        # return ldap_md5_crypt.encrypt(new_value)
        password_renewed = crypt.crypt(new_value, crypt.mksalt(crypt.METHOD_MD5))
        password_renewed = '{CRYPT}%s' % (password_renewed)
    elif enc == 'CRYPT-SHA-256':
        password_renewed = crypt.crypt(new_value, crypt.mksalt(crypt.METHOD_SHA256))
        password_renewed = '{CRYPT}%s' % (password_renewed)
    elif enc == 'CRYPT-SHA-512':
        password_renewed = crypt.crypt(new_value, crypt.mksalt(crypt.METHOD_SHA512))
        password_renewed = '{CRYPT}%s' % (password_renewed)
    return password_renewed


def test_encoding_secrets():
    """Print an example hash for every configured scheme (manual smoke test)."""
    for i in settings.SECRET_PASSWD_TYPE:
        p = encode_secret(i, 'zio')
        print(i, ':', p)
    # additionals
    for i in ['NT', 'LM']:
        p = encode_secret(i, 'zio')
        print(i, ':', p)


if __name__ == '__main__':
    test_encoding_secrets()
40.756522
97
0.614892
539
4,687
5.059369
0.19295
0.225523
0.051338
0.077008
0.599927
0.56399
0.518885
0.507151
0.46388
0.448478
0
0.028746
0.265202
4,687
114
98
41.114035
0.763066
0.047152
0
0.316832
0
0
0.047941
0
0
0
0
0
0
1
0.019802
false
0.326733
0.059406
0
0.09901
0.019802
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
2
608ac2dc8c32bbc12b8f0330342bb38d8fdc4366
849
py
Python
tests/fixtures/defxmlschema/chapter13/example13101.py
nimish/xsdata
7afe2781b66982428cc1731f53c065086acd35c1
[ "MIT" ]
null
null
null
tests/fixtures/defxmlschema/chapter13/example13101.py
nimish/xsdata
7afe2781b66982428cc1731f53c065086acd35c1
[ "MIT" ]
null
null
null
tests/fixtures/defxmlschema/chapter13/example13101.py
nimish/xsdata
7afe2781b66982428cc1731f53c065086acd35c1
[ "MIT" ]
null
null
null
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class ItemType:
    """
    :ivar id:
    :ivar lang:
    """
    id: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    lang: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "namespace": "http://www.w3.org/XML/1998/namespace",
        }
    )


@dataclass
class ProductType(ItemType):
    """
    :ivar eff_date:
    :ivar lang:
    """
    eff_date: Optional[str] = field(
        default=None,
        metadata={
            "name": "effDate",
            "type": "Attribute",
        }
    )
    lang: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
        }
    )
18.456522
60
0.51119
78
849
5.538462
0.423077
0.101852
0.148148
0.212963
0.469907
0.469907
0.469907
0.37963
0.37963
0.259259
0
0.009346
0.369847
849
45
61
18.866667
0.798131
0.057715
0
0.484848
0
0
0.103811
0
0
0
0
0
0
1
0
true
0
0.060606
0
0.242424
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
2
608d5db7720f1303afc78cb89898768d67a2329d
398
py
Python
colour_segmentation/base/exceptions/FuzzyPaletteInvalidRepresentation.py
mmunar97/colour-segmentation
3a7f19e043bd3a8aebb67ae3a181d19011678986
[ "MIT" ]
null
null
null
colour_segmentation/base/exceptions/FuzzyPaletteInvalidRepresentation.py
mmunar97/colour-segmentation
3a7f19e043bd3a8aebb67ae3a181d19011678986
[ "MIT" ]
null
null
null
colour_segmentation/base/exceptions/FuzzyPaletteInvalidRepresentation.py
mmunar97/colour-segmentation
3a7f19e043bd3a8aebb67ae3a181d19011678986
[ "MIT" ]
null
null
null
class FuzzyPaletteInvalidRepresentation(Exception):
    """
    Raised when fewer classes than required were supplied to represent
    the fuzzy sets of the selected method.
    """

    def __init__(self, provided_labels: int, needed_labels: int):
        message = (
            f"{needed_labels} labels are needed to represent the selected method. "
            f"Only {provided_labels} are provided."
        )
        super().__init__(message)
44.222222
99
0.69598
46
398
5.76087
0.630435
0.083019
0.10566
0
0
0
0
0
0
0
0
0
0.223618
398
8
100
49.75
0.857605
0.238693
0
0
0
0
0.362369
0
0
0
0
0
0
1
0.25
false
0
0
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
2
60945bf5058fc7295019e4204cd54021208f43f2
217
py
Python
telegram_rss/feed/__init__.py
pentatester/telegram-rss
de96efde83fa62abb112e9a37945f6e065e541d3
[ "MIT" ]
8
2021-02-01T15:19:31.000Z
2021-05-30T17:11:14.000Z
telegram_rss/feed/__init__.py
pentatester/telegram-rss
de96efde83fa62abb112e9a37945f6e065e541d3
[ "MIT" ]
5
2021-02-01T09:28:33.000Z
2022-03-07T23:24:03.000Z
telegram_rss/feed/__init__.py
pentatester/telegram-rss
de96efde83fa62abb112e9a37945f6e065e541d3
[ "MIT" ]
3
2021-02-10T17:45:39.000Z
2021-04-18T14:22:31.000Z
"""Public exports of the telegram_rss.feed package."""

from .img import Img
from .entry import Entry
from .channel import Channel
from .feed import Feed
from .updater import FeedUpdater

# Names re-exported as this package's public API
__all__ = [
    "Img",
    "Entry",
    "Channel",
    "Feed",
    "FeedUpdater",
]
14.466667
32
0.658986
26
217
5.346154
0.346154
0
0
0
0
0
0
0
0
0
0
0
0.235023
217
14
33
15.5
0.837349
0
0
0
0
0
0.138249
0
0
0
0
0
0
1
0
false
0
0.416667
0
0.416667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
60b71cbb9f7d313edc1ee22cd6095a825651a318
1,484
py
Python
claripy/claripy/frontend_mixins/constraint_deduplicator_mixin.py
Ruide/angr-dev
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
[ "BSD-2-Clause" ]
null
null
null
claripy/claripy/frontend_mixins/constraint_deduplicator_mixin.py
Ruide/angr-dev
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
[ "BSD-2-Clause" ]
null
null
null
claripy/claripy/frontend_mixins/constraint_deduplicator_mixin.py
Ruide/angr-dev
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
[ "BSD-2-Clause" ]
null
null
null
class ConstraintDeduplicatorMixin(object):
    """Frontend mixin that drops constraints it has already seen.

    Tracks hash() of every constraint added through this frontend so the
    same constraint is never forwarded to the underlying frontend twice.
    Uses explicit super(Class, self) calls (Python 2 compatible style),
    and relies on the cooperative MRO of the frontend mixin stack.
    """

    def __init__(self, *args, **kwargs):
        super(ConstraintDeduplicatorMixin, self).__init__(*args, **kwargs)
        # hashes of all constraints accepted so far
        self._constraint_hashes = set()

    def _blank_copy(self, c):
        # a blank copy starts with no remembered constraints
        super(ConstraintDeduplicatorMixin, self)._blank_copy(c)
        c._constraint_hashes = set()

    def _copy(self, c):
        # a full copy carries over the remembered hashes
        super(ConstraintDeduplicatorMixin, self)._copy(c)
        c._constraint_hashes = set(self._constraint_hashes)

    #
    # Serialization
    #

    def _ana_getstate(self):
        # state = (our hash set, base class state)
        return self._constraint_hashes, super(ConstraintDeduplicatorMixin, self)._ana_getstate()

    def _ana_setstate(self, s):
        self._constraint_hashes, base_state = s
        super(ConstraintDeduplicatorMixin, self)._ana_setstate(base_state)

    def simplify(self, **kwargs):
        added = super(ConstraintDeduplicatorMixin, self).simplify(**kwargs)
        # we only add to the constraint hashes because we want to
        # prevent previous (now simplified) constraints from
        # being re-added
        self._constraint_hashes.update(map(hash, added))
        return added

    def add(self, constraints, **kwargs):
        # keep only constraints whose hash has not been recorded yet
        filtered = tuple(c for c in constraints if hash(c) not in self._constraint_hashes)
        if len(filtered) == 0:
            return filtered

        added = super(ConstraintDeduplicatorMixin, self).add(filtered, **kwargs)
        self._constraint_hashes.update(map(hash, added))
        return added
36.195122
96
0.679919
162
1,484
5.969136
0.314815
0.16546
0.2606
0.053775
0.246122
0.246122
0.101344
0.101344
0.101344
0
0
0.000867
0.223046
1,484
40
97
37.1
0.837814
0.09097
0
0.153846
0
0
0
0
0
0
0
0
0
1
0.269231
false
0
0
0.038462
0.461538
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
2
60b851b9b6fa0eb880f99cdeb1425c8de32d8d3f
4,756
py
Python
a10_neutron_lbaas/neutron_ext/services/a10_certificate/plugin.py
hthompson6/a10-neutron-lbaas
f1639758cd3abcc6c86c8e6b64dcb0397c359621
[ "Apache-2.0" ]
10
2015-09-15T05:16:15.000Z
2020-03-18T02:34:39.000Z
a10_neutron_lbaas/neutron_ext/services/a10_certificate/plugin.py
hthompson6/a10-neutron-lbaas
f1639758cd3abcc6c86c8e6b64dcb0397c359621
[ "Apache-2.0" ]
334
2015-02-11T23:45:00.000Z
2020-02-28T08:58:51.000Z
a10_neutron_lbaas/neutron_ext/services/a10_certificate/plugin.py
hthompson6/a10-neutron-lbaas
f1639758cd3abcc6c86c8e6b64dcb0397c359621
[ "Apache-2.0" ]
24
2015-01-13T21:14:45.000Z
2021-06-02T17:22:14.000Z
# Copyright (C) 2016, A10 Networks Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log.helpers import logging as logging
# NOTE(review): the import below rebinds `logging` to the stdlib module,
# shadowing the oslo_log import above — confirm which logger is intended.
import logging

from neutron_lbaas.services.loadbalancer import plugin

from a10_neutron_lbaas import constants as certificate_constants
from a10_neutron_lbaas.neutron_ext.common import constants
from a10_neutron_lbaas.neutron_ext.db import certificate_db as certificate_db

LOG = logging.getLogger(__name__)


class A10CertificatePlugin(certificate_db.A10CertificateDbMixin):
    """Implementation of the Neutron SSL Certificate Plugin."""

    supported_extension_aliases = [constants.A10_CERTIFICATE_EXT]

    def __init__(self):
        super(A10CertificatePlugin, self).__init__()
        # LBaaS v2 plugin, used to poke listeners when bindings change
        self.lbplugin = plugin.LoadBalancerPluginv2()

    # The certificate CRUD methods below are plain pass-throughs to the
    # DB mixin; they exist so the plugin presents the full API surface.

    def get_a10_certificates(self, context, filters=None, fields=None):
        return super(A10CertificatePlugin, self).get_a10_certificates(context, filters, fields)

    def create_a10_certificate(self, context, a10_certificate):
        return super(A10CertificatePlugin, self).create_a10_certificate(context, a10_certificate)

    def get_a10_certificate(self, context, id, fields=None):
        return super(A10CertificatePlugin, self).get_a10_certificate(context, id, fields)

    def update_a10_certificate(self, context, id, a10_certificate):
        return super(A10CertificatePlugin, self).update_a10_certificate(context, id, a10_certificate)

    def delete_a10_certificate(self, context, id):
        return super(A10CertificatePlugin, self).delete_a10_certificate(context, id)

    def get_a10_certificate_bindings(self, context, filters=None, fields=None):
        return super(A10CertificatePlugin, self).get_a10_certificate_bindings(context, filters, fields)

    def _set_a10_certificate_binding_status(self, context, id, status):
        """Persist a new status value on an existing binding record."""
        update_binding = {
            "id": id,
            "status": status
        }
        update_a10_certificate_binding = {constants.A10_CERTIFICATE_BINDING: update_binding}
        result = super(A10CertificatePlugin, self).update_a10_certificate_binding(context, update_a10_certificate_binding)
        return result

    def _update_listener(self, context, listener_id):
        """Trigger the LBaaS listener update path with a no-op payload."""
        # Create an empty listener structure - we just want to trigger the update logic
        fake_listener = {"listener": {}}
        # Below will raise exception if listener doesn't exist
        self.lbplugin.update_listener(context, listener_id, fake_listener)

    def create_a10_certificate_binding(self, context, a10_certificate_binding):
        """Create a binding, kick the listener, then mark the binding CREATED."""
        binding = a10_certificate_binding[constants.A10_CERTIFICATE_BINDING]
        created_binding = super(A10CertificatePlugin, self).create_a10_certificate_binding(context, a10_certificate_binding)
        # All of the real work happens in the listener handler.
        self._update_listener(context, binding["listener_id"])
        result = self._set_a10_certificate_binding_status(context, created_binding["id"], certificate_constants.STATUS_CREATED)
        return result

    def get_a10_certificate_binding(self, context, id, fields=None):
        return super(A10CertificatePlugin, self).get_a10_certificate_binding(context, id, fields)

    def delete_a10_certificate_binding(self, context, id):
        """Mark the binding DELETING, best-effort update the listener, delete."""
        binding = self._set_a10_certificate_binding_status(context, id, certificate_constants.STATUS_DELETING)
        # All of the real work happens in the listener handler.
        # Try to update the listener - it could be gone by now.
        try:
            self._update_listener(context, binding["listener_id"])
        except Exception as ex:
            # deliberately best-effort: log and continue with the delete
            LOG.exception(ex)
            pass
        return super(A10CertificatePlugin, self).delete_a10_certificate_binding(context, id)
46.627451
97
0.679563
528
4,756
5.869318
0.278409
0.144563
0.115198
0.090352
0.41949
0.367215
0.273314
0.126815
0.126815
0.123266
0
0.031347
0.255467
4,756
101
98
47.089109
0.843829
0.210892
0
0.068966
0
0
0.010724
0
0
0
0
0
0
1
0.206897
false
0.017241
0.086207
0.12069
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
2
60bf09100b125ccbd42a5784558b7bae64d7eb73
208
py
Python
daily/5.py
tuket/challenges
456979020c78dfcae2f8681245000bb64a6aaf38
[ "Unlicense" ]
null
null
null
daily/5.py
tuket/challenges
456979020c78dfcae2f8681245000bb64a6aaf38
[ "Unlicense" ]
null
null
null
daily/5.py
tuket/challenges
456979020c78dfcae2f8681245000bb64a6aaf38
[ "Unlicense" ]
null
null
null
def cons(a, b):
    """Church-style pair constructor: returns a closure holding (a, b)."""
    def pair(f):
        return f(a, b)
    return pair


def car(p):
    """Return the first element of a cons pair."""
    return p(lambda x, y: x)


def cdr(p):
    """Return the second element of a cons pair."""
    return p(lambda x, y: y)


# BUGFIX: the original used Python-2-only `print expr` statements; the
# parenthesized call form works on both Python 2 and Python 3.
print(car(cons("a", "b")))
print(cdr(cons("a", "b")))
16
28
0.528846
41
208
2.682927
0.341463
0.072727
0.163636
0.254545
0.290909
0.290909
0
0
0
0
0
0
0.269231
208
13
29
16
0.723684
0
0
0
0
0
0.019139
0
0
0
0
0
0
0
null
null
0
0
null
null
0.2
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
2
60ddc2f5f174092b010d05830fdc98fca8db5fec
1,081
py
Python
termtris/interface.py
BobWhitelock/termtris
a9cb5c9c34a8280c68a0ebbf3f273ad83ee7345d
[ "MIT" ]
null
null
null
termtris/interface.py
BobWhitelock/termtris
a9cb5c9c34a8280c68a0ebbf3f273ad83ee7345d
[ "MIT" ]
null
null
null
termtris/interface.py
BobWhitelock/termtris
a9cb5c9c34a8280c68a0ebbf3f273ad83ee7345d
[ "MIT" ]
null
null
null
import curses

import config
from debug import debug


class CursesGraphics:
    """Renders the game through a curses screen and reads non-blocking input."""

    def __init__(self, stdscr):
        curses.curs_set(0)  # make cursor invisible
        stdscr.nodelay(1)  # make reading input with getch() non-blocking
        self.stdscr = stdscr

    def set_point(self, x, y, symbol):
        # curses addresses cells as (row, column), hence (y, x)
        self.stdscr.addstr(y, x, symbol)

    def refresh(self):
        self.stdscr.refresh()

    def read_input(self):
        """Return the first pending key code, draining any queued extras."""
        # read first char and skip any others
        key = self.stdscr.getch()
        while self.stdscr.getch() != -1:
            pass
        return key


class DebugGraphics:
    """Drop-in replacement for CursesGraphics that logs via debug() instead."""

    def set_point(self, x, y, symbol):
        pass
        # if symbol != config.EMPTY:
        #     debug("({0}, {1}) = '{2}'".format(x, y, symbol))

    def refresh(self):
        debug("REFRESH")

    def read_input(self):
        """Prompt on stdin and return ord() of the first character typed.

        NOTE(review): returns an int when input is given but '' when it is
        empty, while CursesGraphics.read_input always returns an int —
        confirm callers handle both.
        """
        user_input = input("Input: ")
        if len(user_input) > 0:
            key = user_input[0]
            debug("Input read: '{0}'; key returned: '{1}'".format(user_input, key))
            return ord(key)
        else:
            return ''
25.139535
83
0.563367
136
1,081
4.382353
0.382353
0.100671
0.040268
0.050336
0.154362
0.077181
0.077181
0
0
0
0
0.013441
0.311748
1,081
43
84
25.139535
0.787634
0.164662
0
0.266667
0
0
0.057906
0
0
0
0
0
0
1
0.233333
false
0.066667
0.1
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
2
60eb6500f0b7e9a2d1164714dcb28cd25ee65da9
196
py
Python
const.py
bluevariant/alphazero-caro
7694d316a2f8b3878633662c1da98942b2d1dea0
[ "MIT" ]
null
null
null
const.py
bluevariant/alphazero-caro
7694d316a2f8b3878633662c1da98942b2d1dea0
[ "MIT" ]
null
null
null
const.py
bluevariant/alphazero-caro
7694d316a2f8b3878633662c1da98942b2d1dea0
[ "MIT" ]
null
null
null
class Const:
    """Configuration constants for the AlphaZero Caro game and training loop."""

    # board is 19x19 cells
    board_width = 19
    board_height = 19
    n_in_row = 5  # n to win!

    # backend used for training
    train_core = "keras"

    check_freq = 10  # auto save current model
    check_freq_best = 500  # auto save best model
24.5
47
0.683673
33
196
3.818182
0.727273
0.142857
0
0
0
0
0
0
0
0
0
0.068027
0.25
196
7
48
28
0.789116
0.27551
0
0
0
0
0.036232
0
0
0
0
0
0
1
0
false
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
2
60f21c14e00020e97015428650c5a214569c2622
3,333
py
Python
cloudmesh/management/test_user.py
JulienPalard/cloudmesh
1759b88daef3a13917492d028fdabe08f03ca996
[ "Apache-2.0" ]
null
null
null
cloudmesh/management/test_user.py
JulienPalard/cloudmesh
1759b88daef3a13917492d028fdabe08f03ca996
[ "Apache-2.0" ]
4
2021-06-08T20:20:08.000Z
2022-03-11T23:30:22.000Z
cloudmesh/management/test_user.py
JulienPalard/cloudmesh
1759b88daef3a13917492d028fdabe08f03ca996
[ "Apache-2.0" ]
null
null
null
from user import User, Users import mongoengine from cloudmeshobject import order, make_form_list def main(): # users = Users() # users.clear() gregor = User( title="", firstname="Hallo", lastname="von Laszewski", email="laszewski@gmail.com", username="gregvon", active=True, password="none", phone="6625768900", department="School of Informatics and Computing", institution="Indiana University", address="Bloomington", country="USA", citizenship="Germany", bio="I work at Indiana University Bloomington", ) from pprint import pprint import sys print 70 * "=" print 70 * "=" pprint(User.__dict__.keys()) print 70 * "=" pprint(User._db_field_map) print 70 * "=" pprint(User._fields_ordered) pprint(User.__dict__) print 70 * "=" pprint(User._fields) print 70 * "=" print type(User._fields["bio"]) print type(User._fields["bio"]) == mongoengine.fields.StringField print type(User._fields["bio"]) == mongoengine.fields.URLField print 70 * "x" print order(User) print order(User, include=['username']) print order(User, exclude=['id']) print order(User, include=['username', 'lastname'], exclude=['lastname']) print 70 * "o" print User._fields print 70 * "p" print order(User, kind="required") print order(User, kind="all") make_form_list( User, ['username', 'firstname'], format="table", capital=False) """ # print gregor.fields() # print gregor.fields("optinal") # print gregor.fields("required") print "\n".join(gregor._fields) print "ORDER", gregor.order print gregor.json() print gregor.yaml() print gregor.__dict__ d = { "title" : "", "firstname" : "Gregor", "lastname" : "von Laszewski", "email" : "laszewski@gmail.com", "username" : "gregvon", "active" : True, "password" : "none", "phone" : "6625768900", "department" : "School of Informatics and Computing", "institution" : "Indiana University", "address" : "Bloomington", "country" : "USA", "citizenship" : "Germany", "bio" : "I work at Indiana University Bloomington", } print d n = User() n.set_from_dict(d) print "NNNNN", n #n.save() 
sys.exit() users.add(gregor) print "Gregor username: ", gregor.username print gregor.date_created print gregor.date_deactivate sys.exit() print fugang = User( title = "", firstname = "Fungang", lastname = "Nelson", email = "nelsonfug@gmail.com", username = "fugang", active = True, password = "none", phone = "6627865400", department = "School of Informatics and Computing", institution = "Indiana University", address = "Bloomington", country = "USA", citizenship = "China", bio = "I work at Indiana University Bloomington" # add the other fields ) users.add(fugang) print print "Fugang username: "#, fugang.username print print users.find_user("gregvon12") #users.find() """ if __name__ == "__main__": main()
24.688889
77
0.580858
344
3,333
5.511628
0.299419
0.033228
0.044304
0.035865
0.459388
0.378692
0.378692
0.317511
0.317511
0.317511
0
0.020903
0.282328
3,333
134
78
24.873134
0.771739
0.008701
0
0.12
0
0
0.169437
0
0
0
0
0
0
0
null
null
0.02
0.1
null
null
0.5
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
2
60fe124e21c115050b0cc11cdd75370b7d21e73e
847
py
Python
lib/codegen/meta-python/build.py
froydnj/cretonne
3cfe087f6b31144471b6ee2bb2a287bc258cea5b
[ "Apache-2.0" ]
null
null
null
lib/codegen/meta-python/build.py
froydnj/cretonne
3cfe087f6b31144471b6ee2bb2a287bc258cea5b
[ "Apache-2.0" ]
null
null
null
lib/codegen/meta-python/build.py
froydnj/cretonne
3cfe087f6b31144471b6ee2bb2a287bc258cea5b
[ "Apache-2.0" ]
null
null
null
# Second-level build script. # # This script is run from lib/codegen/build.rs to generate Rust files. from __future__ import absolute_import import argparse import isa import gen_instr import gen_settings import gen_build_deps import gen_encoding import gen_legalizer import gen_binemit def main(): # type: () -> None parser = argparse.ArgumentParser( description='Generate sources for Cranelift.') parser.add_argument('--out-dir', help='set output directory') args = parser.parse_args() out_dir = args.out_dir isas = isa.all_isas() gen_instr.generate(isas, out_dir) gen_settings.generate(isas, out_dir) gen_encoding.generate(isas, out_dir) gen_legalizer.generate(isas, out_dir) gen_binemit.generate(isas, out_dir) gen_build_deps.generate() if __name__ == "__main__": main()
22.891892
70
0.729634
117
847
4.965812
0.435897
0.082616
0.129088
0.154905
0.180723
0
0
0
0
0
0
0
0.178276
847
36
71
23.527778
0.83477
0.132231
0
0
1
0
0.093151
0
0
0
0
0
0
1
0.041667
false
0
0.375
0
0.416667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
880bf5f0d30bd7ff61ee959d4a62c873b9f47190
3,685
py
Python
OracleSOASuite/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py
rishiagarwal-oracle/fmw-kubernetes
cf53d0aac782cacaa26cb1f8f1cdb7130f69d64f
[ "UPL-1.0", "MIT" ]
20
2020-09-18T08:28:06.000Z
2021-11-04T11:48:53.000Z
OracleSOASuite/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py
rishiagarwal-oracle/fmw-kubernetes
cf53d0aac782cacaa26cb1f8f1cdb7130f69d64f
[ "UPL-1.0", "MIT" ]
17
2020-10-29T03:52:52.000Z
2022-03-29T06:47:05.000Z
OracleSOASuite/kubernetes/monitoring-service/scripts/deploy-weblogic-monitoring-exporter.py
rishiagarwal-oracle/fmw-kubernetes
cf53d0aac782cacaa26cb1f8f1cdb7130f69d64f
[ "UPL-1.0", "MIT" ]
27
2020-04-30T09:06:37.000Z
2022-03-29T06:49:06.000Z
# Copyright (c) 2021, Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. # import sys #======================================================= # Function for fresh plain deployment #======================================================= def newDeploy(appName,target): try: print 'Deploying .........' deploy(appName,'/u01/oracle/wls-exporter-deploy/'+appName+'.war', target, upload="true",remote="true") startApplication(appName) except Exception, ex: print ex.toString() #======================================================== # Main program here... # Target you can change as per your need #======================================================== def usage(): argsList = ' -domainName <domainUID> -adminServerName <adminServerName> -adminURL <adminURL> -username <username> -password <password>' argsList=argsList + ' -soaClusterName <soaClusterName>' + ' -wlsMonitoringExporterTosoaCluster <wlsMonitoringExporterTosoaCluster>' argsList=argsList + ' -osbClusterName <osbClusterName>' + ' -wlsMonitoringExporterToosbCluster <wlsMonitoringExporterToosbCluster>' print sys.argv[0] + argsList sys.exit(0) if len(sys.argv) < 1: usage() # domainName will be passed by command line parameter -domainName domainName = "soainfra" # adminServerName will be passed by command line parameter -adminServerName adminServerName = "AdminServer" # adminURL will be passed by command line parameter -adminURL adminURL = "soainfra-adminserver:7001" # soaClusterName will be passed by command line parameter -soaClusterName soaClusterName = "soaClusterName" # wlsMonitoringExporterTosoaCluster will be passed by command line parameter -wlsMonitoringExporterTosoaCluster wlsMonitoringExporterTosoaCluster = "false" # osbClusterName will be passed by command line parameter -osbClusterName osbClusterName = "osbClusterName" # wlsMonitoringExporterToosbCluster will be passed by command line parameter 
-wlsMonitoringExporterToosbCluster wlsMonitoringExporterToosbCluster = "false" # username will be passed by command line parameter -username username = "weblogic" # password will be passed by command line parameter -password password = "Welcome1" i=1 while i < len(sys.argv): if sys.argv[i] == '-domainName': domainName = sys.argv[i+1] i += 2 elif sys.argv[i] == '-adminServerName': adminServerName = sys.argv[i+1] i += 2 elif sys.argv[i] == '-adminURL': adminURL = sys.argv[i+1] i += 2 elif sys.argv[i] == '-username': username = sys.argv[i+1] i += 2 elif sys.argv[i] == '-password': password = sys.argv[i+1] i += 2 elif sys.argv[i] == '-soaClusterName': soaClusterName = sys.argv[i+1] i += 2 elif sys.argv[i] == '-wlsMonitoringExporterTosoaCluster': wlsMonitoringExporterTosoaCluster = sys.argv[i+1] i += 2 elif sys.argv[i] == '-osbClusterName': osbClusterName = sys.argv[i+1] i += 2 elif sys.argv[i] == '-wlsMonitoringExporterToosbCluster': wlsMonitoringExporterToosbCluster = sys.argv[i+1] i += 2 else: print 'Unexpected argument switch at position ' + str(i) + ': ' + str(sys.argv[i]) usage() sys.exit(1) # Deployment connect(username, password, 't3://' + adminURL) cd('AppDeployments') newDeploy('wls-exporter-adminserver',adminServerName) if 'true' == wlsMonitoringExporterTosoaCluster: newDeploy('wls-exporter-soa',soaClusterName) if 'true' == wlsMonitoringExporterToosbCluster: newDeploy('wls-exporter-osb',osbClusterName) disconnect() exit()
35.095238
139
0.662144
376
3,685
6.489362
0.284574
0.063115
0.062295
0.051639
0.205328
0.205328
0.20082
0.07541
0.07541
0.07541
0
0.012068
0.167978
3,685
104
140
35.432692
0.783757
0.318046
0
0.164179
0
0.014925
0.307785
0.115169
0
0
0
0
0
0
null
null
0.074627
0.014925
null
null
0.059701
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
2
881115cf6f920728f37f04947fb9ce5c2895fb31
321
py
Python
startup/20-diagon.py
mrakitin/profile_collection-six
20e41632b9898ac83a8e60fcca9b8aeaaa91f0ad
[ "BSD-3-Clause" ]
null
null
null
startup/20-diagon.py
mrakitin/profile_collection-six
20e41632b9898ac83a8e60fcca9b8aeaaa91f0ad
[ "BSD-3-Clause" ]
30
2017-05-18T19:11:24.000Z
2021-06-23T16:59:26.000Z
startup/20-diagon.py
mrakitin/profile_collection-six
20e41632b9898ac83a8e60fcca9b8aeaaa91f0ad
[ "BSD-3-Clause" ]
3
2018-01-10T17:16:47.000Z
2020-03-12T14:51:36.000Z
from ophyd import Device, EpicsMotor from ophyd import Component as Cpt class DIAGON(Device): hml = Cpt(EpicsMotor, '_HLPM}Mtr') hyag = Cpt(EpicsMotor, '_HLPF}Mtr') vml = Cpt(EpicsMotor, '_VLPM}Mtr') vyag = Cpt(EpicsMotor, '_VLPF}Mtr') diagon = DIAGON('XF:02IDA-OP{Diag:1-Ax:3', name='diagon')
26.75
57
0.666667
45
321
4.666667
0.6
0.247619
0.142857
0
0
0
0
0
0
0
0
0.015326
0.186916
321
11
58
29.181818
0.789272
0
0
0
0
0
0.202492
0.071651
0
0
0
0
0
1
0
false
0
0.25
0
0.875
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
2
714a038d78b5811f19d121e3701fb114835756fb
4,663
py
Python
yokome/language/_lang.py
julianbetz/Yokome
4e2f077cc6835a7719940e760cc351f47159bc36
[ "Apache-2.0" ]
1
2020-08-07T03:32:15.000Z
2020-08-07T03:32:15.000Z
yokome/language/_lang.py
julianbetz/Yokome
4e2f077cc6835a7719940e760cc351f47159bc36
[ "Apache-2.0" ]
11
2020-01-28T22:15:01.000Z
2022-02-10T00:29:58.000Z
yokome/language/_lang.py
julianbetz/Yokome
4e2f077cc6835a7719940e760cc351f47159bc36
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright 2019 Julian Betz # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class Language: """Resources of a specific language. Stores information about the language and provides methods for text analysis that are tailored to that language. """ _LANGUAGES = dict() def __init__(self, code, name, *, loader, tokenizer, extractor, parallel_extractor): if code in Language._LANGUAGES: raise ValueError('Language code has to be unique') self._CODE = code self._NAME = name self._LOADER = loader self._TOKENIZER = tokenizer self._EXTRACTOR = extractor self._PARALLEL_EXTRACTOR = parallel_extractor Language._LANGUAGES[code] = self @staticmethod def by_code(code): """Look up a language by its unique identifier.""" return Language._LANGUAGES[code] @property def code(self): """The unique identifier of this language. This is usually the ISO 639-3 language code of this language. """ return self._CODE @property def load(self): """Function to load corpus sentences in this language. The order of sentences is randomized (independently of the number of samples requested and consistently in between calls requesting the same number of samples). Does not necessarily load the sentences themselves, but may provide IDs if :py:meth:`tokenize`, :py:meth:`extract` and :py:meth:`extract_parallel` can handle this format. :param int n_samples: The number of sample sentences to load. If ``None``, load all samples. :return: A tuple of sentences or sentence IDs. 
""" return self._LOADER @property def tokenize(self): """Function to tokenize a sentence in this language. :param sentence: A sentence or sentence ID. :return: A tuple of tuples of tokens. A token is represented as a dictionary of the following form: .. code-block:: python { 'surface_form': {'graphic': ..., 'phonetic': ...}, 'base_form': {'graphic': ..., 'phonetic': ...}, 'lemma': {'graphic': ..., 'phonetic': ...}, 'pos': <list of POS tags as strings>, 'inflection': <list of POS/inflection tags> } "Surface form" refers to the graphic variant used in an original document and its pronunciation. "Base form" refers to a lemmatized version of the surface form. "Lemma" a normalized version of the base form. (In Japanese, for example, there is a single lemma for multiple graphical variants of the base form which mean the same thing.) The POS and inflection lists are meant to be read by a :class:`..features.tree.TemplateTree`. """ return self._TOKENIZER @property def extract(self): """Function to turn an iterable of tokens into language model input. Differs from :meth:`extract_parallel` only for character-level extracts. :param tokens: An iterable of tokens (see :meth:`tokenize` for the token representation). :return: An iterable of token identifiers that is understood by the language model. """ return self._EXTRACTOR @property def extract_parallel(self): """Function to turn an iterable of tokens into language model input. Differs from :meth:`extract` only for character-level extracts. :param tokens: An iterable of tokens (see :meth:`tokenize` for the token representation). :return: An iterable of token identifiers that are understood by the language model. """ return self._PARALLEL_EXTRACTOR def __repr__(self): return '<%s %s>' % (type(self).__name__, self._CODE) def __str__(self): return self._NAME
31.295302
88
0.62063
563
4,663
5.062167
0.362345
0.021053
0.025263
0.025263
0.178246
0.178246
0.178246
0.151579
0.151579
0.151579
0
0.003991
0.301523
4,663
148
89
31.506757
0.871047
0.651297
0
0.147059
0
0
0.032314
0
0
0
0
0
0
1
0.264706
false
0
0
0.058824
0.558824
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
7163305b37436586d6c129690e6e4f65719501d3
283
py
Python
backend/app/plugin/__init__.py
MU-Software/dodoco
c81d3a31b8024a734097fd8ac395747d5a721bc8
[ "MIT" ]
null
null
null
backend/app/plugin/__init__.py
MU-Software/dodoco
c81d3a31b8024a734097fd8ac395747d5a721bc8
[ "MIT" ]
null
null
null
backend/app/plugin/__init__.py
MU-Software/dodoco
c81d3a31b8024a734097fd8ac395747d5a721bc8
[ "MIT" ]
null
null
null
# Add custom plugins here. # If you want to make git not to track this file anymore, # use `git update-index --skip-worktree app/plugin/__init__.py` import flask import app.plugin.ddc_docker as ddc_plugin_docker def init_app(app: flask.Flask): ddc_plugin_docker.init_app(app)
25.727273
63
0.770318
49
283
4.22449
0.612245
0.086957
0.144928
0
0
0
0
0
0
0
0
0
0.144876
283
10
64
28.3
0.855372
0.501767
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.5
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
0
0
0
2
7168374a85f91dff7306e54c08e6cfc79fd0daaa
3,403
py
Python
scripts/utils.py
wnstlr/influence-release
a305c4b9b63f641aeabb5a208667c01c15571e6b
[ "MIT" ]
63
2019-02-26T20:15:58.000Z
2022-03-24T15:59:02.000Z
scripts/utils.py
wnstlr/influence-release
a305c4b9b63f641aeabb5a208667c01c15571e6b
[ "MIT" ]
4
2019-04-25T18:30:58.000Z
2021-09-09T22:05:42.000Z
scripts/utils.py
wnstlr/influence-release
a305c4b9b63f641aeabb5a208667c01c15571e6b
[ "MIT" ]
17
2019-04-15T06:39:32.000Z
2021-05-20T03:25:30.000Z
import cv2 import numpy as np from keras.datasets import cifar10 from keras import backend as K from keras.utils import np_utils nb_train_samples = 50000 # 3000 training samples nb_valid_samples = 10000 # 100 validation samples num_classes = 10 def load_cifar10_data(img_rows, img_cols, start=None, end=None, what_data=None): # Load cifar10 training and validation sets (X_train, Y_train), (X_valid, Y_valid) = cifar10.load_data() #print(X_train.shape) #print(X_valid.shape) if start == None or end == None and what_data == None: # Resize trainging images (all of them) if K.image_dim_ordering() == 'th': X_train = np.array([cv2.resize(img.transpose(1,2,0), (img_rows,img_cols)).transpose(2,0,1) for img in X_train[:nb_train_samples,:,:,:]]) X_valid = np.array([cv2.resize(img.transpose(1,2,0), (img_rows,img_cols)).transpose(2,0,1) for img in X_valid[:nb_valid_samples,:,:,:]]) else: X_train = np.array([cv2.resize(img, (img_rows,img_cols)) for img in X_train[:nb_train_samples,:,:,:]]) X_valid = np.array([cv2.resize(img, (img_rows,img_cols)) for img in X_valid[:nb_valid_samples,:,:,:]]) # Transform targets to keras compatible format Y_train = np_utils.to_categorical(Y_train[:nb_train_samples], num_classes) Y_valid = np_utils.to_categorical(Y_valid[:nb_valid_samples], num_classes) else: # Resize and load part of them if K.image_dim_ordering() == 'th': if what_data == 'train': X_train = np.array([cv2.resize(img.transpose(1,2,0), (img_rows,img_cols)).transpose(2,0,1) for img in X_train[start:end,:,:,:]]) X_valid = np.array([cv2.resize(img.transpose(1,2,0), (img_rows,img_cols)).transpose(2,0,1) for img in X_valid[:nb_valid_samples,:,:,:]]) elif what_data == 'test': X_train = np.array([cv2.resize(img.transpose(1,2,0), (img_rows,img_cols)).transpose(2,0,1) for img in X_train[:nb_train_samples,:,:,:]]) X_valid = np.array([cv2.resize(img.transpose(1,2,0), (img_rows,img_cols)).transpose(2,0,1) for img in X_valid[start:end,:,:,:]]) else: if what_data == 'train': X_train = 
np.array([cv2.resize(img, (img_rows,img_cols)) for img in X_train[start:end,:,:,:]]) X_valid = np.array([cv2.resize(img, (img_rows,img_cols)) for img in X_valid[:nb_valid_samples,:,:,:]]) elif what_data == 'test': X_train = np.array([cv2.resize(img, (img_rows,img_cols)) for img in X_train[:nb_train_samples,:,:,:]]) X_valid = np.array([cv2.resize(img, (img_rows,img_cols)) for img in X_valid[start:end,:,:,:]]) # Transform targets to keras compatible format if what_data == 'train': Y_train = np_utils.to_categorical(Y_train[start:end], num_classes) Y_valid = np_utils.to_categorical(Y_valid[:nb_valid_samples], num_classes) elif what_data == 'test': Y_train = np_utils.to_categorical(Y_train[:nb_train_samples], num_classes) Y_valid = np_utils.to_categorical(Y_valid[start:end], num_classes) return X_train, Y_train, X_valid, Y_valid def reshape2original(img, img_rows, img_cols): return np.array([cv2.resize(img[i].transpose(1,2,0), (img_rows, img_cols)).transpose(2,0,1) for i in range(img.shape[0])])
53.171875
152
0.650015
543
3,403
3.81768
0.13628
0.050651
0.072359
0.101302
0.743367
0.726001
0.688374
0.680656
0.616015
0.616015
0
0.03125
0.200705
3,403
63
153
54.015873
0.730882
0.083162
0
0.534884
0
0
0.009977
0
0
0
0
0
0
1
0.046512
false
0
0.116279
0.023256
0.209302
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
71715df2a271b5e1a44230a34f1cd4606557381e
1,909
py
Python
recipes/Python/578637_Wigle_wifi/recipe-578637.py
tdiprima/code
61a74f5f93da087d27c70b2efe779ac6bd2a3b4f
[ "MIT" ]
2,023
2017-07-29T09:34:46.000Z
2022-03-24T08:00:45.000Z
recipes/Python/578637_Wigle_wifi/recipe-578637.py
unhacker/code
73b09edc1b9850c557a79296655f140ce5e853db
[ "MIT" ]
32
2017-09-02T17:20:08.000Z
2022-02-11T17:49:37.000Z
recipes/Python/578637_Wigle_wifi/recipe-578637.py
unhacker/code
73b09edc1b9850c557a79296655f140ce5e853db
[ "MIT" ]
780
2017-07-28T19:23:28.000Z
2022-03-25T20:39:41.000Z
from uuid import getnode import re import requests class WigleAgent(): def __init__(self, username, password): self.agent(username, password) self.mac_address() def get_lat_lng(self, mac_address=None): if mac_address == None: mac_address = self.mac_address if '-' in mac_address: mac_address = mac_address.replace('-', ':') try: self.query_response = self.send_query(mac_address) response = self.parse_response() except IndexError: response = 'MAC location not known' return response def agent(self, username, password): self.agent = requests.Session() self.agent.post('https://wigle.net/api/v1/jsonLogin', data={'credential_0': username, 'credential_1': password, 'destination': '/https://wigle.net/'}) def mac_address(self): mac = hex(getnode()) mac_bytes = [mac[x:x+2] for x in xrange(0, len(mac), 2)] self.mac_address = ':'.join(mac_bytes[1:6]) def send_query(self, mac_address): response = self.agent.post(url='https://wigle.net/api/v1/jsonLocation', data={'netid': mac_address, 'Query2': 'Query'}) return response.json() def parse_response(self): lat = self.get_lat() lng = self.get_lng() return lat, lng def get_lat(self): resp_lat = self.query_response['result'][0]['locationData'][0]['latitude'] return float(resp_lat) def get_lng(self): resp_lng = self.query_response['result'][0]['locationData'][0]['longitude'] return float(resp_lng) if __name__ == "__main__": wa = WigleAgent('your-username', 'your-key') print wa.get_lat_lng('00:1C:0E:42:79:43')
32.355932
83
0.565741
225
1,909
4.586667
0.351111
0.125969
0.067829
0.046512
0.162791
0.071705
0.071705
0
0
0
0
0.018045
0.3033
1,909
58
84
32.913793
0.757895
0
0
0
0
0
0.13934
0
0
0
0
0
0
0
null
null
0.086957
0.065217
null
null
0.021739
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
2
717353cd8baeeca7f83f887e8d10d4926736df0d
599
py
Python
ws/createWrktFromSheet.py
mbromberek/ProcessWorkout
76b33154d06ef4db04b38a2d3276de9f70954724
[ "BSD-3-Clause" ]
1
2019-12-03T11:41:02.000Z
2019-12-03T11:41:02.000Z
ws/createWrktFromSheet.py
mbromberek/ProcessWorkout
76b33154d06ef4db04b38a2d3276de9f70954724
[ "BSD-3-Clause" ]
5
2019-11-26T11:58:36.000Z
2021-08-19T12:24:56.000Z
ws/createWrktFromSheet.py
mbromberek/ProcessWorkout
76b33154d06ef4db04b38a2d3276de9f70954724
[ "BSD-3-Clause" ]
null
null
null
#! /Users/mikeyb/Applications/python3 # -*- coding: utf-8 -*- ''' BSD 3-Clause License Copyright (c) 2020, Mike Bromberek All rights reserved. ''' # First party classes import os, sys import logging import logging.config import requests import configparser # Custom classes from ExerciseInfo_Class import ExerciseInfo def create(exLst, wsConfig): server = wsConfig['server'] port = wsConfig['port'] wrkt = {'workouts':exLst} logger.debug(wrkt) # Call webservice r = requests.post(server + ':' + port + '/api/v1/wrkt_sheet', json=wrkt) logger.info(r) return r
18.151515
76
0.691152
75
599
5.493333
0.706667
0.063107
0
0
0
0
0
0
0
0
0
0.016393
0.185309
599
32
77
18.71875
0.827869
0.310518
0
0
0
0
0.092269
0
0
0
0
0
0
1
0.071429
false
0
0.428571
0
0.571429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
7186400dda9e358adbae443386aa0a831796176c
1,096
py
Python
users/migrations/0017_auto_20200712_1559.py
ujlbu4/vas3k.club
1ec907cf7e5ae3a74059cde8729ca0b3e2d55a3e
[ "MIT" ]
496
2020-04-24T04:20:32.000Z
2022-03-31T21:55:57.000Z
users/migrations/0017_auto_20200712_1559.py
ujlbu4/vas3k.club
1ec907cf7e5ae3a74059cde8729ca0b3e2d55a3e
[ "MIT" ]
642
2020-04-24T11:54:13.000Z
2022-03-26T15:41:06.000Z
users/migrations/0017_auto_20200712_1559.py
ujlbu4/vas3k.club
1ec907cf7e5ae3a74059cde8729ca0b3e2d55a3e
[ "MIT" ]
243
2020-04-24T11:49:11.000Z
2022-03-24T18:38:48.000Z
# Generated by Django 3.0.4 on 2020-07-12 15:59 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0016_auto_20200712_1557'), ] operations = [ migrations.AlterField( model_name='user', name='email', field=models.EmailField(max_length=254, unique=True), ), migrations.RunSQL(""" CREATE OR REPLACE FUNCTION generate_random_hash(int) RETURNS text AS $$ SELECT array_to_string( ARRAY ( SELECT substring( 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!#*+./:<=>?@[]()^_~' FROM (random() * 72)::int FOR 1) FROM generate_series(1, $1) ), '' ) $$ LANGUAGE sql; """), migrations.RunSQL(""" update users set secret_hash = generate_random_hash(16); """), migrations.RunSQL(""" drop function generate_random_hash(int); """), ]
29.621622
103
0.531022
97
1,096
5.835052
0.690722
0.084806
0.095406
0.091873
0.102474
0
0
0
0
0
0
0.071629
0.350365
1,096
36
104
30.444444
0.723315
0.041058
0
0.2
1
0
0.602479
0.173499
0
0
0
0
0
1
0
false
0
0.033333
0
0.133333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
718baa53359eacf1a188c7841ce8073230054316
159
py
Python
pyqmc/__init__.py
maximegodin/pyqmc
890faac8a8157fa568bbbdee76b2c856d8bd5b5f
[ "MIT" ]
null
null
null
pyqmc/__init__.py
maximegodin/pyqmc
890faac8a8157fa568bbbdee76b2c856d8bd5b5f
[ "MIT" ]
null
null
null
pyqmc/__init__.py
maximegodin/pyqmc
890faac8a8157fa568bbbdee76b2c856d8bd5b5f
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """Top-level package for pyqmc.""" __author__ = """Maxime Godin""" __email__ = 'maximegodin@polytechnique.org' __version__ = '0.1.0'
19.875
43
0.654088
19
159
4.842105
0.947368
0
0
0
0
0
0
0
0
0
0
0.028986
0.132075
159
7
44
22.714286
0.637681
0.320755
0
0
0
0
0.45098
0.284314
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
718f2c063abb5fa04909fcd13c4b852f585c6817
283
py
Python
computer_science/algorithms/recursion/fibonacci/fibonacci.py
LeandroTk/Algorithms
569ed68eba3eeff902f8078992099c28ce4d7cd6
[ "MIT" ]
205
2018-12-01T17:49:49.000Z
2021-12-22T07:02:27.000Z
computer_science/algorithms/recursion/fibonacci/fibonacci.py
LeandroTk/Algorithms
569ed68eba3eeff902f8078992099c28ce4d7cd6
[ "MIT" ]
2
2020-01-01T16:34:29.000Z
2020-04-26T19:11:13.000Z
computer_science/algorithms/recursion/fibonacci/fibonacci.py
LeandroTk/Algorithms
569ed68eba3eeff902f8078992099c28ce4d7cd6
[ "MIT" ]
50
2018-11-28T20:51:36.000Z
2021-11-29T04:08:25.000Z
# Fibonacci Sequence: 0 1 1 2 3 5 8 13 ... def fibonacci(num): if num == 1: return 0 if num == 2: return 1 return fibonacci(num-1) + fibonacci(num-2) print(fibonacci(1)) print(fibonacci(2)) print(fibonacci(3)) print(fibonacci(4)) print(fibonacci(5))
15.722222
46
0.614841
45
283
3.866667
0.333333
0.402299
0.172414
0
0
0
0
0
0
0
0
0.093023
0.240283
283
17
47
16.647059
0.716279
0.141343
0
0
0
0
0
0
0
0
0
0
0
1
0.090909
false
0
0
0
0.363636
0.454545
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
2
71a97c88305660ddf201d243233628affc769e98
6,633
py
Python
fastai/distributed.py
thepooons/fastai
a790b37d1695ca0b1b2e027ad839d9f53af07bb4
[ "Apache-2.0" ]
null
null
null
fastai/distributed.py
thepooons/fastai
a790b37d1695ca0b1b2e027ad839d9f53af07bb4
[ "Apache-2.0" ]
null
null
null
fastai/distributed.py
thepooons/fastai
a790b37d1695ca0b1b2e027ad839d9f53af07bb4
[ "Apache-2.0" ]
null
null
null
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/20a_distributed.ipynb (unless otherwise specified). __all__ = ['ParallelTrainer', 'setup_distrib', 'teardown_distrib', 'DistributedDL', 'DistributedTrainer', 'rank0_first'] # Cell from .basics import * from .callback.progress import ProgressCallback from torch.nn.parallel import DistributedDataParallel, DataParallel from torch.utils.data.distributed import DistributedSampler # Cell @patch def reset(self: DataParallel): if hasattr(self.module, 'reset'): self.module.reset() # Cell @log_args class ParallelTrainer(Callback): run_after,run_before = TrainEvalCallback,Recorder def __init__(self, device_ids): self.device_ids = device_ids def before_fit(self): self.learn.model = DataParallel(self.learn.model, device_ids=self.device_ids) def after_fit(self): self.learn.model = self.learn.model.module # Cell @patch def to_parallel(self: Learner, device_ids=None): self.add_cb(ParallelTrainer(device_ids)) return self # Cell @patch def detach_parallel(self: Learner): "Remove ParallelTrainer callback from Learner." self.remove_cb(ParallelTrainer) return self # Cell @patch @contextmanager def parallel_ctx(self: Learner, device_ids=None): "A context manager to adapt a learner to train in data parallel mode." 
try: self.to_parallel(device_ids) yield self finally: self.detach_parallel() # Cell @patch def reset(self: DistributedDataParallel): if hasattr(self.module, 'reset'): self.module.reset() # Cell def setup_distrib(gpu=None): if gpu is None: return gpu gpu = int(gpu) torch.cuda.set_device(int(gpu)) if num_distrib() > 1: torch.distributed.init_process_group(backend='nccl', init_method='env://') return gpu # Cell def teardown_distrib(): if torch.distributed.is_initialized(): torch.distributed.destroy_process_group() # Cell @log_args(but_as=TfmdDL.__init__) @delegates() class DistributedDL(TfmdDL): def __init__(self, dataset, rank, world_size, **kwargs): super().__init__(dataset, **kwargs) if self.n%world_size != 0: self.n += world_size-self.n%world_size self.total_n,self.n = self.n,self.n//world_size store_attr(self, 'rank,world_size') def get_idxs(self): idxs = Inf.count if self.indexed else Inf.nones return idxs if self.n is None else list(itertools.islice(idxs, self.total_n)) def shuffle_fn(self, idxs): "Deterministically shuffle on each training process based on epoch." 
g = torch.Generator() g.manual_seed(self.epoch) return L(idxs)[torch.randperm(self.total_n, generator=g)] def sample(self): idxs = self.get_idxs() if self.shuffle: idxs = self.shuffle_fn(idxs) # add extra samples to make it evenly divisible idxs += idxs[:(self.total_n - len(idxs))] # subsample idxs = idxs[self.rank:self.total_n:self.world_size] return (b for i,b in enumerate(idxs) if i//(self.bs or 1)%self.nw==self.offs) def create_item(self, s): if s is not None and s >= len(self.dataset): s = s%len(self.dataset) return s if hasattr(self.dataset, 'iloc') else super().create_item(s) def set_epoch(self, epoch): self.epoch = epoch @classmethod def from_dl(cls, dl, rank, world_size, **kwargs): cur_kwargs = dict(num_workers=dl.fake_l.num_workers, pin_memory=dl.pin_memory, timeout=dl.timeout, bs=dl.bs, shuffle=dl.shuffle, drop_last=dl.drop_last, indexed=dl.indexed, device=dl.device) cur_kwargs.update({n: getattr(dl, n) for n in cls._methods if n not in "get_idxs sample shuffle_fn create_item".split()}) return cls(dl.dataset, rank, world_size, **merge(cur_kwargs, kwargs)) # Cell @log_args class DistributedTrainer(Callback): run_after,run_before = TrainEvalCallback,Recorder fup = None # for `find_unused_parameters` in DistributedDataParallel() def __init__(self, cuda_id=0,sync_bn=True): store_attr(self,'cuda_id,sync_bn') def before_fit(self): opt_kwargs = { 'find_unused_parameters' : DistributedTrainer.fup } if DistributedTrainer.fup is not None else {} self.learn.model = DistributedDataParallel( nn.SyncBatchNorm.convert_sync_batchnorm(self.model) if self.sync_bn else self.model, device_ids=[self.cuda_id], output_device=self.cuda_id, **opt_kwargs) self.old_dls = list(self.dls) self.learn.dls.loaders = [self._wrap_dl(dl) for dl in self.dls] if rank_distrib() > 0: self.learn.logger=noop def _wrap_dl(self, dl): return dl if isinstance(dl, DistributedDL) else DistributedDL.from_dl(dl, rank_distrib(), num_distrib()) def before_epoch(self): for dl in self.dls: 
dl.set_epoch(self.epoch) def before_train(self): self.learn.dl = self._wrap_dl(self.learn.dl) def before_validate(self): self.learn.dl = self._wrap_dl(self.learn.dl) def after_fit(self): self.learn.model = self.learn.model.module self.learn.dls.loaders = self.old_dls # Cell @patch def to_distributed(self: Learner, cuda_id,sync_bn=True): self.add_cb(DistributedTrainer(cuda_id,sync_bn)) if rank_distrib() > 0: self.remove_cb(ProgressCallback) return self # Cell @patch def detach_distributed(self: Learner): if num_distrib() <=1: return self self.remove_cb(DistributedTrainer) if rank_distrib() > 0 and not hasattr(self, 'progress'): self.add_cb(ProgressCallback()) return self # Cell @patch @contextmanager def distrib_ctx(self: Learner, cuda_id=None,sync_bn=True): "A context manager to adapt a learner to train in distributed data parallel mode." # Figure out the GPU to use from rank. Create a dpg if none exists yet. if cuda_id is None: cuda_id = rank_distrib() if not torch.distributed.is_initialized(): setup_distrib(cuda_id) cleanup_dpg = torch.distributed.is_initialized() else: cleanup_dpg = False # Adapt self to DistributedDataParallel, yield, and cleanup afterwards. try: if num_distrib() > 1: self.to_distributed(cuda_id,sync_bn) yield self finally: self.detach_distributed() if cleanup_dpg: teardown_distrib() # Cell def rank0_first(func): "Execute `func` in the Rank-0 process first, then in other ranks in parallel." dummy_l = Learner(DataLoaders(device='cpu'), nn.Linear(1,1), loss_func=lambda: 0) with dummy_l.distrib_ctx(): if rank_distrib() == 0: res = func() distrib_barrier() if rank_distrib() != 0: res = func() return res
37.055866
129
0.700437
948
6,633
4.71097
0.226793
0.028213
0.021944
0.015674
0.225705
0.143977
0.09785
0.075459
0.075459
0.056202
0
0.003528
0.187999
6,633
179
130
37.055866
0.825659
0.115785
0
0.19697
1
0
0.088278
0.003557
0
0
0
0
0
1
0.212121
false
0
0.030303
0.007576
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
2
71adfc7c042e1fc62090f74221b948cc0b44e5dc
164
py
Python
week06/lecture/examples/12_agree.py
uldash/CS50x
c3ee0f42ad514b57a13c3ffbb96238b3ca3730e1
[ "MIT" ]
null
null
null
week06/lecture/examples/12_agree.py
uldash/CS50x
c3ee0f42ad514b57a13c3ffbb96238b3ca3730e1
[ "MIT" ]
null
null
null
week06/lecture/examples/12_agree.py
uldash/CS50x
c3ee0f42ad514b57a13c3ffbb96238b3ca3730e1
[ "MIT" ]
null
null
null
from cs50 import get_string s = get_string("Do you agree? ") if s.lower() in {"y", "yes"}: print("Agreed.") elif s == "N" or s == "n": print("Not agreed")
20.5
32
0.573171
28
164
3.285714
0.714286
0.195652
0
0
0
0
0
0
0
0
0
0.015504
0.213415
164
8
33
20.5
0.697674
0
0
0
0
0
0.224242
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
71b304d7ca0bd0f43b7da6d2eeea6319b38d50cc
229
py
Python
Back-End/Python/Basics/Part -1 - Functional/07 - Tuples as Data Records/07_NT_docstring.py
ASHISHKUMAR2411/Programming-CookBook
9c60655d64d21985ccb4196360858d98344701f9
[ "MIT" ]
25
2021-04-28T02:51:26.000Z
2022-03-24T13:58:04.000Z
Back-End/Python/Basics/Part -1 - Functional/07 - Tuples as Data Records/07_NT_docstring.py
ASHISHKUMAR2411/Programming-CookBook
9c60655d64d21985ccb4196360858d98344701f9
[ "MIT" ]
1
2022-03-03T23:33:41.000Z
2022-03-03T23:35:41.000Z
Back-End/Python/Basics/Part -1 - Functional/07 - Tuples as Data Records/07_NT_docstring.py
ASHISHKUMAR2411/Programming-CookBook
9c60655d64d21985ccb4196360858d98344701f9
[ "MIT" ]
15
2021-05-30T01:35:20.000Z
2022-03-25T12:38:25.000Z
from collections import namedtuple Point2D = namedtuple('Point2D', 'x y') Point2D.__doc__ = 'Represents a 2D Cartesian coordinate' Point2D.x.__doc__ = 'x-coordinate' Point2D.y.__doc__ = 'y-coordinate' print(help(help(Point2D)))
28.625
56
0.764192
30
229
5.433333
0.5
0.208589
0
0
0
0
0
0
0
0
0
0.034146
0.104803
229
8
57
28.625
0.760976
0
0
0
0
0
0.304348
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0.166667
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
71b4db37e0764e3779a29a918e6d996626c05199
1,611
py
Python
py_reportit/shared/model/crawl.py
fedus/py_reportit
46422cabb652571d8cce6c8e91a229009dcca141
[ "MIT" ]
1
2021-12-05T19:16:16.000Z
2021-12-05T19:16:16.000Z
py_reportit/shared/model/crawl.py
fedus/py_reportit
46422cabb652571d8cce6c8e91a229009dcca141
[ "MIT" ]
null
null
null
py_reportit/shared/model/crawl.py
fedus/py_reportit
46422cabb652571d8cce6c8e91a229009dcca141
[ "MIT" ]
null
null
null
from sqlalchemy.orm import relationship from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy import Column, Integer, Numeric, Unicode, select, not_, exists from py_reportit.shared.model.orm_base import Base from py_reportit.shared.model.crawl_item import CrawlItem, CrawlItemState from py_reportit.shared.util.localized_arrow import LocalizedArrow class Crawl(Base): __tablename__ = 'crawl' id = Column(Integer, primary_key=True) scheduled_at = Column(LocalizedArrow, nullable=False) items = relationship("CrawlItem", cascade="save-update, merge, delete, delete-orphan", uselist=True, backref="crawl") stop_at_lat = Column(Numeric(8,6), nullable=True) stop_at_lon = Column(Numeric(9,6), nullable=True) current_task_id = Column(Unicode(50), nullable=True) @hybrid_property def finished(self) -> bool: return not any(item.state == CrawlItemState.WAITING for item in self.items) @finished.expression def finished(cls): #return not_(exists(select(cls).where(cls.items.any(CrawlItem.state == CrawlItemState.WAITING)))) return not_(exists(select(CrawlItem).where(CrawlItem.crawl_id == cls.id, CrawlItem.state == CrawlItemState.WAITING))) @hybrid_property def waiting_items(self): return sorted(filter(lambda item: item.state == CrawlItemState.WAITING, self.items), key=lambda item: item.scheduled_for) @waiting_items.expression def waiting_items(cls): return select(CrawlItem).where(CrawlItem.crawl_id == cls.id, CrawlItem.state == CrawlItemState.WAITING).order_by(CrawlItem.scheduled_for.asc())
44.75
151
0.751086
207
1,611
5.690821
0.362319
0.080645
0.110357
0.050934
0.171477
0.129032
0.129032
0.129032
0.129032
0.129032
0
0.004326
0.139044
1,611
36
151
44.75
0.844989
0.05959
0
0.076923
0
0
0.039604
0
0
0
0
0
0
1
0.153846
false
0
0.230769
0.153846
0.846154
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
2
71bb470f1612faec7acff60fde8aa95ccec44422
307
py
Python
day2/fizzbuzz.py
3th3l/bootcamp-8-nbo
2cd167a40abb4bdac439fe215e1139ff7975ea62
[ "MIT" ]
null
null
null
day2/fizzbuzz.py
3th3l/bootcamp-8-nbo
2cd167a40abb4bdac439fe215e1139ff7975ea62
[ "MIT" ]
null
null
null
day2/fizzbuzz.py
3th3l/bootcamp-8-nbo
2cd167a40abb4bdac439fe215e1139ff7975ea62
[ "MIT" ]
null
null
null
def fizz_buzz(n): """ return fizz when divisible by 3 return buzz when n is divisible by 5 return fizzbuzz when n is divisible by both 3 and 5 """ if n % 15 == 0: return 'FizzBuzz' elif n % 3 == 0: return 'Fizz' elif n % 5 == 0: return 'Buzz' else: return n
19.1875
52
0.566775
50
307
3.46
0.38
0.190751
0.080925
0.184971
0.208092
0
0
0
0
0
0
0.054455
0.34202
307
15
53
20.466667
0.80198
0
0
0
0
0
0.090909
0
0
0
0
0
0
0
null
null
0
0
null
null
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
2
71c32c04a8def2d3db9f534eb035872e3fcea078
2,192
py
Python
PeopleApp/migrations/0018_auto_20170714_1526.py
kshitij1234/Chemisty-Department-Website
44848fe213aa47e8c02ca612f81c2b49a28b09d1
[ "MIT" ]
null
null
null
PeopleApp/migrations/0018_auto_20170714_1526.py
kshitij1234/Chemisty-Department-Website
44848fe213aa47e8c02ca612f81c2b49a28b09d1
[ "MIT" ]
null
null
null
PeopleApp/migrations/0018_auto_20170714_1526.py
kshitij1234/Chemisty-Department-Website
44848fe213aa47e8c02ca612f81c2b49a28b09d1
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Generated by Django 1.11.1 on 2017-07-14 15:26 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('PeopleApp', '0017_faculty_list_position'), ] operations = [ migrations.AddField( model_name='faculty', name='awards_honors', field=models.TextField(blank=True, null=True), ), migrations.AddField( model_name='faculty', name='conference_attended', field=models.TextField(blank=True, null=True), ), migrations.AddField( model_name='faculty', name='conference_presentations', field=models.TextField(blank=True, null=True), ), migrations.AddField( model_name='faculty', name='fax', field=models.CharField(blank=True, max_length=12, null=True), ), migrations.AddField( model_name='faculty', name='google_scholar', field=models.CharField(default='#', max_length=200), ), migrations.AddField( model_name='faculty', name='invited_talks', field=models.TextField(blank=True, null=True), ), migrations.AddField( model_name='faculty', name='phd', field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AddField( model_name='faculty', name='professional_experience', field=models.TextField(blank=True, null=True), ), migrations.AddField( model_name='faculty', name='publications', field=models.TextField(blank=True, null=True), ), migrations.AddField( model_name='faculty', name='sponsored_projects', field=models.TextField(blank=True, null=True), ), migrations.AlterField( model_name='faculty', name='research_areas', field=models.TextField(blank=True, null=True), ), ]
30.873239
74
0.559763
205
2,192
5.839024
0.317073
0.082707
0.147034
0.183793
0.666667
0.666667
0.603175
0.508772
0.392648
0.392648
0
0.019476
0.320712
2,192
70
75
31.314286
0.784419
0.031022
0
0.634921
1
0
0.126827
0.034418
0
0
0
0
0
1
0
false
0
0.031746
0
0.079365
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
71c5343c544a90313ad7d9c5999119a95a48096e
1,453
py
Python
critiquebrainz/frontend/forms/rate.py
code-master5/critiquebrainz
a231ef27923f54f8c3abb0c368e871215423546e
[ "Apache-2.0" ]
70
2015-03-10T00:08:21.000Z
2022-02-20T05:36:53.000Z
critiquebrainz/frontend/forms/rate.py
code-master5/critiquebrainz
a231ef27923f54f8c3abb0c368e871215423546e
[ "Apache-2.0" ]
279
2015-12-08T14:10:45.000Z
2022-03-29T13:54:23.000Z
critiquebrainz/frontend/forms/rate.py
code-master5/critiquebrainz
a231ef27923f54f8c3abb0c368e871215423546e
[ "Apache-2.0" ]
95
2015-03-12T21:39:42.000Z
2022-03-10T00:51:04.000Z
# critiquebrainz - Repository for Creative Commons licensed reviews # # Copyright (C) 2018 MetaBrainz Foundation Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. from flask_wtf import FlaskForm from flask_babel import lazy_gettext from wtforms import validators, IntegerField, StringField from wtforms.widgets import Input, HiddenInput class RatingEditForm(FlaskForm): rating = IntegerField(lazy_gettext("Rating"), widget=Input(input_type='number'), validators=[validators.Optional()]) entity_id = StringField(widget=HiddenInput()) entity_type = StringField(widget=HiddenInput()) def __init__(self, entity_id=None, entity_type=None, **kwargs): kwargs['entity_id'] = entity_id kwargs['entity_type'] = entity_type FlaskForm.__init__(self, **kwargs)
42.735294
120
0.764625
201
1,453
5.422886
0.562189
0.029358
0.03578
0.052294
0.075229
0.051376
0
0
0
0
0
0.013126
0.161046
1,453
33
121
44.030303
0.88105
0.543014
0
0
0
0
0.049536
0
0
0
0
0
0
1
0.083333
false
0
0.333333
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
71cd34e67952ead5e1d22a2354c37df720d8143f
511
py
Python
basic/demo_time.py
708yamaguchi/MaixPy_scripts
5f1774e739fb7eecab344d619c0cd63a71ff3d4f
[ "MIT" ]
485
2019-03-18T10:53:59.000Z
2022-03-27T09:02:08.000Z
basic/demo_time.py
708yamaguchi/MaixPy_scripts
5f1774e739fb7eecab344d619c0cd63a71ff3d4f
[ "MIT" ]
110
2019-04-04T09:07:39.000Z
2022-03-03T08:08:19.000Z
basic/demo_time.py
708yamaguchi/MaixPy_scripts
5f1774e739fb7eecab344d619c0cd63a71ff3d4f
[ "MIT" ]
379
2019-03-18T04:48:46.000Z
2022-03-30T00:29:29.000Z
import time import machine print(time.time()) t1 = time.localtime(546450051) print('t1', t1) t2 = time.mktime(t1) print('t2', t2) print(time.time()) time.set_time(t1) print(time.time()) time.sleep(1) print(time.localtime(time.time())) ''' raw REPL; CTRL-B to exit >OK 74 t1 (2017, 4, 25, 15, 40, 51, 1, 115) t2 546450051 546450065 546450051 (2017, 4, 25, 15, 40, 52, 1, 115) > MicroPython v0.5.1-136-g039f72b6c-dirty on 2020-11-18; Sipeed_M1 with kendryte-k210 Type "help()" for more information. >>> >>> '''
17.033333
83
0.682975
89
511
3.898876
0.550562
0.138329
0.112392
0.097983
0.063401
0
0
0
0
0
0
0.240991
0.131115
511
29
84
17.62069
0.540541
0
0
0.25
0
0
0.016807
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0.5
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
2
71d6d14b656730a8b5943eeafaf9a3bbc263bcaf
523
bzl
Python
java_test_repositories.bzl
simonhorlick/base
dffa9a4316fd80cc8e867704cca45269dd8a734a
[ "Apache-2.0" ]
null
null
null
java_test_repositories.bzl
simonhorlick/base
dffa9a4316fd80cc8e867704cca45269dd8a734a
[ "Apache-2.0" ]
null
null
null
java_test_repositories.bzl
simonhorlick/base
dffa9a4316fd80cc8e867704cca45269dd8a734a
[ "Apache-2.0" ]
null
null
null
# Commonly used java test dependencies def java_test_repositories(): native.maven_jar( name = "junit", artifact = "junit:junit:4.12", sha1 = "2973d150c0dc1fefe998f834810d68f278ea58ec", ) native.maven_jar( name = "hamcrest_core", artifact = "org.hamcrest:hamcrest-core:1.3", sha1 = "42a25dc3219429f0e5d060061f71acb49bf010a0", ) native.maven_jar( name = "org_mockito_mockito", artifact = "org.mockito:mockito-all:1.10.19", sha1 = "539df70269cc254a58cccc5d8e43286b4a73bf30", )
27.526316
54
0.707457
53
523
6.830189
0.528302
0.09116
0.116022
0.149171
0
0
0
0
0
0
0
0.207852
0.172084
523
19
55
27.526316
0.628176
0.068834
0
0.1875
0
0
0.481481
0.372428
0
0
0
0
0
1
0.0625
true
0
0
0
0.0625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
2
71d8019fb0e4f44131b16816f887c66ae197e46c
235
py
Python
Webshop_app/db.py
Immortalits/Szakdolgozat
b5d29b41c0c7f45de1386a8607bfc2efe8756b37
[ "MIT" ]
null
null
null
Webshop_app/db.py
Immortalits/Szakdolgozat
b5d29b41c0c7f45de1386a8607bfc2efe8756b37
[ "MIT" ]
null
null
null
Webshop_app/db.py
Immortalits/Szakdolgozat
b5d29b41c0c7f45de1386a8607bfc2efe8756b37
[ "MIT" ]
null
null
null
from flask_sqlalchemy import SQLAlchemy from typing import TYPE_CHECKING db = SQLAlchemy() if TYPE_CHECKING: from flask_sqlalchemy.model import Model BaseModel = db.make_declarative_base(Model) else: BaseModel = db.Model
21.363636
47
0.787234
31
235
5.774194
0.483871
0.100559
0.212291
0
0
0
0
0
0
0
0
0
0.161702
235
10
48
23.5
0.908629
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.375
0
0.375
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
e0819dacbb91ca08c775b55a7b379edfa26822a6
3,183
py
Python
src/sentry/projectoptions/manager.py
vaniot-s/sentry
5c1accadebfaf8baf6863251c05b38ea979ee1c7
[ "BSD-3-Clause" ]
null
null
null
src/sentry/projectoptions/manager.py
vaniot-s/sentry
5c1accadebfaf8baf6863251c05b38ea979ee1c7
[ "BSD-3-Clause" ]
null
null
null
src/sentry/projectoptions/manager.py
vaniot-s/sentry
5c1accadebfaf8baf6863251c05b38ea979ee1c7
[ "BSD-3-Clause" ]
null
null
null
from __future__ import absolute_import import six import uuid import bisect from datetime import datetime from pytz import utc class WellKnownProjectOption(object): def __init__(self, key, default=None, epoch_defaults=None): self.key = key self.default = default self.epoch_defaults = epoch_defaults self._epoch_default_list = sorted(epoch_defaults or ()) def get_default(self, project=None, epoch=None): if self.epoch_defaults: if epoch is None: if project is None: epoch = 1 else: epoch = project.get_option("sentry:option-epoch") or 1 idx = bisect.bisect(self._epoch_default_list, epoch) if idx > 0: return self.epoch_defaults[self._epoch_default_list[idx - 1]] return self.default class ProjectOptionsManager(object): """Project options used to be implemented in a relatively ad-hoc manner in the past. The project manager still uses the functionality of the project model and just dispatches to it. Options can be used without declaring defaults, but if defaults are declared they are returned without having to define a default at the time of the option lookup. """ def __init__(self): self.registry = {} def lookup_well_known_key(self, key): return self.registry.get(key) def freeze_option_epoch(self, project, force=False): # The options are frozen in a receiver hook for project saves. 
# See `sentry.receivers.core.freeze_option_epoch_for_project` if force or project.get_option("sentry:option-epoch") is None: from .defaults import LATEST_EPOCH project.update_option("sentry:option-epoch", LATEST_EPOCH) def set(self, project, key, value): from sentry.models import ProjectOption self.update_rev_for_option(project) return ProjectOption.objects.set_value(project, key, value) def isset(self, project, key): return project.get_option(project, key, Ellipsis) is not Ellipsis def get(self, project, key, default=None, validate=None): from sentry.models import ProjectOption return ProjectOption.objects.get_value(project, key, default, validate=validate) def delete(self, project, key): from sentry.models import ProjectOption self.update_rev_for_option(project) return ProjectOption.objects.unset_value(project, key) def update_rev_for_option(self, project): from sentry.models import ProjectOption ProjectOption.objects.set_value(project, "sentry:relay-rev", uuid.uuid4().hex) ProjectOption.objects.set_value( project, "sentry:relay-rev-lastchange", datetime.utcnow().replace(tzinfo=utc) ) def register(self, key, default=None, epoch_defaults=None): self.registry[key] = WellKnownProjectOption( key=key, default=default, epoch_defaults=epoch_defaults ) def all(self): """ Return an iterator for all keys in the registry. """ return six.itervalues(self.registry)
34.225806
89
0.675778
399
3,183
5.238095
0.283208
0.055981
0.026794
0.042105
0.274163
0.233493
0.170335
0.170335
0.086124
0.086124
0
0.002084
0.246309
3,183
92
90
34.597826
0.869112
0.161797
0
0.105263
0
0
0.03827
0.010333
0
0
0
0
0
1
0.210526
false
0
0.192982
0.035088
0.578947
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
e0891447ac069209bcdd7751ebfc4dbcb2993e75
1,740
py
Python
CoordenacaoFacil/models/Student.py
vieiraeduardos/easy-management
88d124da4fc20455da2ce28ffacc62b691959387
[ "MIT" ]
null
null
null
CoordenacaoFacil/models/Student.py
vieiraeduardos/easy-management
88d124da4fc20455da2ce28ffacc62b691959387
[ "MIT" ]
7
2018-11-23T23:12:51.000Z
2018-11-23T23:37:08.000Z
CoordenacaoFacil/models/Student.py
vieiraeduardos/coordenacao-facil
88d124da4fc20455da2ce28ffacc62b691959387
[ "MIT" ]
null
null
null
from werkzeug.security import generate_password_hash, check_password_hash from CoordenacaoFacil import db class Student(): def __init__(self, code="", name="", email="", password="", course=None, university=None, createdAt=""): self.code = code self.name = name self.email = email self.password = password self.university = university self.course = course self.createdAt = createdAt self.type = "student" def create(self, student=None): try: db.students.insert({ "code": student.code, "name": student.name, "email": student.email, "password": generate_password_hash(student.password), "university": student.university, "course": student.course, "createdAt": student.createdAt, "type": self.type }) return True except: print("Houve um problema ao cadastrar novo estudante.") return False def login(self, code="", password=""): try: student = db.students.find_one({ "code": code }) if student: if check_password_hash(student["password"], password): return True return False except: print("Houve um problema ao entrar na aplicação.") return False def getUserByCode(self, code=""): try: student = db.students.find_one({ "code": code }) return student except: print("Houve um problema ao obter estudante.") return False
29.491525
108
0.526437
162
1,740
5.567901
0.296296
0.053215
0.053215
0.059867
0.170732
0.170732
0.077605
0.077605
0
0
0
0
0.375862
1,740
58
109
30
0.830571
0
0
0.387755
1
0
0.113218
0
0
0
0
0
0
1
0.081633
false
0.122449
0.040816
0
0.285714
0.061224
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
2
e094a85d71e167583235630d1b52cda4841bb976
231
py
Python
recorrer.py
napodevesa/python_master
9e0253fb8cd29c8228d27e8a8f39db4fb93d3edc
[ "MIT" ]
null
null
null
recorrer.py
napodevesa/python_master
9e0253fb8cd29c8228d27e8a8f39db4fb93d3edc
[ "MIT" ]
null
null
null
recorrer.py
napodevesa/python_master
9e0253fb8cd29c8228d27e8a8f39db4fb93d3edc
[ "MIT" ]
null
null
null
def run(): # nombre = input('Escribe tu nombre: ') # for letra in nombre: # print(letra) frase = input('Escribe una frase: ') for c in frase: print(c.upper()) if __name__ == '__main__': run()
17.769231
43
0.545455
29
231
4.068966
0.586207
0.20339
0
0
0
0
0
0
0
0
0
0
0.307359
231
12
44
19.25
0.7375
0.324675
0
0
0
0
0.177632
0
0
0
0
0
0
1
0.166667
false
0
0
0
0.166667
0.166667
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
e0b125609aa1a6ddf57d765fa4dd78c2f8f5f41b
434
py
Python
python/ray/dataframe/__init__.py
suryaabhi/ray
112ef075632c0815beb9838b91a83331fe649f0b
[ "Apache-2.0" ]
1
2020-06-25T18:17:10.000Z
2020-06-25T18:17:10.000Z
python/ray/dataframe/__init__.py
rickyHong/Ray-Population-Based-Training-repl
195a42f2fa4ab39d1e2260e6860d88c529023655
[ "Apache-2.0" ]
null
null
null
python/ray/dataframe/__init__.py
rickyHong/Ray-Population-Based-Training-repl
195a42f2fa4ab39d1e2260e6860d88c529023655
[ "Apache-2.0" ]
1
2021-09-22T14:46:19.000Z
2021-09-22T14:46:19.000Z
from __future__ import absolute_import from __future__ import division from __future__ import print_function from .dataframe import DataFrame from .dataframe import from_pandas from .dataframe import to_pandas import ray import pandas as pd __all__ = ["DataFrame", "from_pandas", "to_pandas"] ray.register_custom_serializer(pd.DataFrame, use_pickle=True) ray.register_custom_serializer(pd.core.indexes.base.Index, use_pickle=True)
28.933333
75
0.836406
61
434
5.491803
0.393443
0.089552
0.143284
0.161194
0.173134
0
0
0
0
0
0
0
0.096774
434
14
76
31
0.854592
0
0
0
0
0
0.06682
0
0
0
0
0
0
1
0
false
0
0.727273
0
0.727273
0.090909
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
e0b34a961622cf5d2b408ce1e25a2f6a04ba68d7
5,100
py
Python
ProcessPlot/classes/pen.py
testtech-solutions/ProcessPlot
eea51a406b539c5a9b0510d4ff8a06e0be3e98fa
[ "MIT" ]
null
null
null
ProcessPlot/classes/pen.py
testtech-solutions/ProcessPlot
eea51a406b539c5a9b0510d4ff8a06e0be3e98fa
[ "MIT" ]
null
null
null
ProcessPlot/classes/pen.py
testtech-solutions/ProcessPlot
eea51a406b539c5a9b0510d4ff8a06e0be3e98fa
[ "MIT" ]
null
null
null
""" Copyright (c) 2021 Adam Solchenberger asolchenberger@gmail.com, Jason Engman engmanj@gmail.com Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import logging import numpy as np from gi.repository import Gdk, GdkPixbuf, GObject, Gtk from classes.database import PenSettings class Pen(object): __log = logging.getLogger("ProcessPlot.classes.Pen") orm_model = PenSettings @classmethod def get_params_from_orm(cls, result): """ pass in an orm result (database query result) and this will update the params dictionary with the table columns. 
the params object is used to pass into a widget's init """ params = { "id": result.id, "chart_id": result.chart_id, "tag_id": result.tag_id, "connection_id": result.connection_id, "visible": result.visible, "color": result.color, "weigth": result.weight, "scale_minimum": result.scale_minimum, "scale_maximum": result.scale_maximum, "scale_lock": result.scale_lock, "scale_auto": result.scale_auto, } return params @GObject.Property(type=int, flags=GObject.ParamFlags.READABLE) def id(self): return self._id @GObject.Property(type=int, flags=GObject.ParamFlags.READWRITE) def chart_id(self): return self._chart_id @chart_id.setter def chart_id(self, value): self._chart_id = value #self.move() @GObject.Property(type=int, flags=GObject.ParamFlags.READWRITE) def tag_id(self): return self._tag_id @tag_id.setter def tag_id(self, value): self._tag_id = value #self.move() @GObject.Property(type=int, flags=GObject.ParamFlags.READWRITE) def connection_id(self): return self._connection_id @connection_id.setter def connection_id(self, value): self._connection_id = value #self.resize() @GObject.Property(type=bool, default=False, flags=GObject.ParamFlags.READABLE) def visible(self): return self._visible @visible.setter def visible(self, value): self._visible = value #self.resize() @GObject.Property(type=str, flags=GObject.ParamFlags.READWRITE) def color(self): return self._color @color.setter def color(self, value): self._color = value #self.resize() @GObject.Property(type=int, flags=GObject.ParamFlags.READWRITE) def weight(self): return self._weight @weight.setter def weight(self, value): self._weight = value #self.resize() @GObject.Property(type=str, flags=GObject.ParamFlags.READWRITE) def scale_minimum(self): return self._scale_minimum @scale_minimum.setter def scale_minimum(self, value): self._scale_minimum = value #self.resize() @GObject.Property(type=str, flags=GObject.ParamFlags.READWRITE) def scale_maximum(self): return self._scale_maximum @scale_maximum.setter def 
scale_maximum(self, value): self._scale_maximum = value #self.resize() @GObject.Property(type=bool, default=False, flags=GObject.ParamFlags.READABLE) def scale_lock(self): return self._scale_lock @scale_lock.setter def scale_lock(self, value): self._scale_lock = value #self.resize() @GObject.Property(type=bool, default=False, flags=GObject.ParamFlags.READABLE) def scale_auto(self): return self._scale_auto @scale_auto.setter def scale_auto(self, value): self._scale_auto = value #self.resize() def __init__(self, chart, params) -> None: super().__init__() self.chart = chart self.app = chart.app self.buffer = np.ndarray(shape=(2,0xFFFFF), dtype=float) self.params = params self.initialize_params() def initialize_params(self, *args): #private settings try: self._chart_id = self.params.chart_id self._connection_id = self.params.connection_id self._tag_id = self.params.tag_id self._color = self.params.color self._visible = self.params.visible self._weight = self.params.weight self._scale_minimum = self.params.scale_minimum self._scale_maximum = self.params.scale_maximum self._scale_lock = self.params.scale_lock self._scale_auto = self.params.scale_auto except: pass
34.459459
94
0.730392
705
5,100
5.12766
0.262411
0.049793
0.057815
0.060028
0.22296
0.219917
0.215768
0.203596
0.203596
0.172614
0
0.001425
0.174314
5,100
148
95
34.459459
0.857041
0.280196
0
0.093458
0
0
0.031912
0.006327
0
0
0.001926
0
0
1
0.224299
false
0.009346
0.037383
0.102804
0.401869
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
2
e0bd0884a5858e7d43ce286a9a512dc6cfd6ad75
327
py
Python
monitor/urls.py
sweetchipsw/Sweetmon_legacy
27b0d9ab00d66b634852d7ad93e54b3a5cc457a4
[ "MIT" ]
2
2019-11-06T02:18:16.000Z
2020-04-26T04:13:23.000Z
monitor/urls.py
sweetchipsw/Sweetmon_legacy
27b0d9ab00d66b634852d7ad93e54b3a5cc457a4
[ "MIT" ]
2
2020-02-11T23:38:55.000Z
2020-06-05T17:36:42.000Z
monitor/urls.py
sweetchipsw/Sweetmon_legacy
27b0d9ab00d66b634852d7ad93e54b3a5cc457a4
[ "MIT" ]
null
null
null
from django.conf.urls import url from django.conf import settings from django.conf.urls.static import static from . import views urlpatterns = [ # views.py url(r'^$', views.fuzzer_list, name='fuzzers'), ] if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL, document_root=settings.STATIC_ROOT)
23.357143
82
0.721713
44
327
5.272727
0.477273
0.12931
0.181034
0.155172
0
0
0
0
0
0
0
0
0.171254
327
13
83
25.153846
0.856089
0.024465
0
0
0
0
0.029605
0
0
0
0
0
0
1
0
false
0
0.444444
0
0.444444
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
e0c47f2613fb5fe08e0117facd79234cc27fcb29
623
py
Python
archiv/migrations/0018_auto_20210520_1616.py
acdh-oeaw/mmp
7ef8f33eafd3a7985328d374130f1cbe31f77df0
[ "MIT" ]
2
2021-06-02T11:27:54.000Z
2021-08-25T10:29:04.000Z
archiv/migrations/0018_auto_20210520_1616.py
acdh-oeaw/mmp
7ef8f33eafd3a7985328d374130f1cbe31f77df0
[ "MIT" ]
86
2021-01-29T12:31:34.000Z
2022-03-28T11:41:04.000Z
archiv/migrations/0018_auto_20210520_1616.py
acdh-oeaw/mmp
7ef8f33eafd3a7985328d374130f1cbe31f77df0
[ "MIT" ]
null
null
null
# Generated by Django 3.2 on 2021-05-20 16:16 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('archiv', '0017_event'), ] operations = [ migrations.AlterField( model_name='event', name='end_date', field=models.IntegerField(blank=True, help_text='bis', null=True, verbose_name='bis'), ), migrations.AlterField( model_name='event', name='start_date', field=models.IntegerField(blank=True, help_text='von', null=True, verbose_name='von'), ), ]
25.958333
98
0.59069
68
623
5.279412
0.558824
0.111421
0.139276
0.16156
0.456825
0.456825
0.245125
0.245125
0
0
0
0.040089
0.279294
623
23
99
27.086957
0.759465
0.069021
0
0.352941
1
0
0.096886
0
0
0
0
0
0
1
0
false
0
0.058824
0
0.235294
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
e0c90ed189d8d30542644ea8f37d2564b4a63a76
231
py
Python
tests/strings_tests.py
bulljustin/magic_python
43281b4b3820f046692d983b394ef95b9ee7c810
[ "Apache-2.0" ]
null
null
null
tests/strings_tests.py
bulljustin/magic_python
43281b4b3820f046692d983b394ef95b9ee7c810
[ "Apache-2.0" ]
null
null
null
tests/strings_tests.py
bulljustin/magic_python
43281b4b3820f046692d983b394ef95b9ee7c810
[ "Apache-2.0" ]
null
null
null
from nose.tools import * from magic_python.strings import * def setup(): print("setup") def teardown(): print("teardown") def test_file(): assert(file.WRITE == 'w') assert(file.READ_ONLY == 'r') assert(file.APPEND == 'a')
15.4
34
0.670996
33
231
4.606061
0.636364
0.197368
0
0
0
0
0
0
0
0
0
0
0.147186
231
14
35
16.5
0.771574
0
0
0
0
0
0.069565
0
0
0
0
0
0.3
1
0.3
true
0
0.2
0
0.5
0.2
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
0
0
2
e0c9e4cea246c4df15da36cb76f5062e2c2cee5e
392
py
Python
dev_tools/urls.py
uktrade/contractor-approval
316ba7b2321f5aeea6dc83dcdaaadda887275f4d
[ "MIT" ]
null
null
null
dev_tools/urls.py
uktrade/contractor-approval
316ba7b2321f5aeea6dc83dcdaaadda887275f4d
[ "MIT" ]
1
2022-02-18T09:17:41.000Z
2022-02-18T09:17:41.000Z
dev_tools/urls.py
uktrade/resourcing-approval
316ba7b2321f5aeea6dc83dcdaaadda887275f4d
[ "MIT" ]
null
null
null
from django.urls import path from dev_tools.views import change_user, create_test_resourcing_request, index app_name = "dev_tools" urlpatterns = [ path("", index, name="index"), path("change-user", change_user, name="change-user"), path( "create-test-resourcing-request", create_test_resourcing_request, name="create-test-resourcing-request", ), ]
23.058824
78
0.691327
48
392
5.416667
0.375
0.153846
0.307692
0.415385
0
0
0
0
0
0
0
0
0.183673
392
16
79
24.5
0.8125
0
0
0
0
0
0.244898
0.153061
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
e0d16e9fd314898ed2b07a67f71f6c1c8e97da43
542
py
Python
cpu_ver/hypergrad/slurm_job_watcher.py
bigaidream-projects/drmad
a4bb6010595d956f29c5a42a095bab76a60b29eb
[ "MIT" ]
119
2016-02-24T17:20:50.000Z
2021-05-28T21:35:16.000Z
cpu_ver/hypergrad/slurm_job_watcher.py
LinZichuan/drmad
a4bb6010595d956f29c5a42a095bab76a60b29eb
[ "MIT" ]
8
2016-02-25T03:13:38.000Z
2017-09-15T00:54:52.000Z
cpu_ver/hypergrad/slurm_job_watcher.py
LinZichuan/drmad
a4bb6010595d956f29c5a42a095bab76a60b29eb
[ "MIT" ]
31
2016-03-10T04:57:11.000Z
2021-05-02T01:00:04.000Z
import time from glob import glob import subprocess import os from odyssey import run_signal_stem, slurm_fname, temp_dir, jobdir if __name__ == "__main__": print "Monitoring slurm jobs in {0}".format(os.getcwd()) while True: for fname in glob(run_signal_stem + "*"): jobname = fname[len(run_signal_stem):] print "Launching job {0}".format(jobname) with temp_dir(jobdir(jobname)): subprocess.call(["sbatch", slurm_fname]) os.remove(fname) time.sleep(2)
30.111111
66
0.638376
70
542
4.685714
0.542857
0.082317
0.118902
0
0
0
0
0
0
0
0
0.007426
0.254613
542
17
67
31.882353
0.804455
0
0
0
0
0
0.110701
0
0
0
0
0
0
0
null
null
0
0.333333
null
null
0.133333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
2
e0d92edd39effcb99f517c64c3798e2b82d6596b
485
py
Python
src/django_jsonfield_backport/apps.py
lociii/django-jsonfield-backport
ddd892f6954dab06ff5a3d08fcde759db86e576c
[ "BSD-3-Clause" ]
null
null
null
src/django_jsonfield_backport/apps.py
lociii/django-jsonfield-backport
ddd892f6954dab06ff5a3d08fcde759db86e576c
[ "BSD-3-Clause" ]
null
null
null
src/django_jsonfield_backport/apps.py
lociii/django-jsonfield-backport
ddd892f6954dab06ff5a3d08fcde759db86e576c
[ "BSD-3-Clause" ]
null
null
null
import django from django.apps import AppConfig from django.utils.translation import ugettext_lazy as _ from django_jsonfield_backport import features, forms, models class JSONFieldConfig(AppConfig): name = "django_jsonfield_backport" verbose_name = _("JSONField backport from Django 3.1") def ready(self): if django.VERSION >= (3, 1): return features.connect_signal_receivers() forms.patch_admin() models.register_lookups()
26.944444
61
0.717526
57
485
5.894737
0.596491
0.119048
0.136905
0
0
0
0
0
0
0
0
0.010417
0.208247
485
17
62
28.529412
0.864583
0
0
0
0
0
0.121649
0.051546
0
0
0
0
0
1
0.076923
false
0
0.307692
0
0.692308
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
e0dfc90ae8c4a5fcd43cbf3b01dea17271f04864
518
py
Python
models/notifications/update_favorites.py
Ewpratten/the-blue-alliance
702932612ab9e2839108107779e2323675b227a3
[ "MIT" ]
null
null
null
models/notifications/update_favorites.py
Ewpratten/the-blue-alliance
702932612ab9e2839108107779e2323675b227a3
[ "MIT" ]
null
null
null
models/notifications/update_favorites.py
Ewpratten/the-blue-alliance
702932612ab9e2839108107779e2323675b227a3
[ "MIT" ]
null
null
null
from models.notifications.notification import Notification class UpdateFavoritesNotification(Notification): def __init__(self, user_id): self.user_id = user_id @classmethod def _type(cls): from consts.notification_type import NotificationType return NotificationType.UPDATE_FAVORITES @property def platform_config(self): from models.fcm.platform_config import PlatformConfig return PlatformConfig(collapse_key='{}_favorite_update'.format(self.user_id))
28.777778
85
0.750965
55
518
6.781818
0.527273
0.064343
0.080429
0
0
0
0
0
0
0
0
0
0.183398
518
17
86
30.470588
0.881797
0
0
0
0
0
0.034749
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
e0e45392eff3b05e5126d719e4073dd4c304ceda
150
py
Python
src/home/urls.py
Vitaldocz/blog
91997b542def86eee6ad58e25c4dab1ad6e68e99
[ "MIT" ]
null
null
null
src/home/urls.py
Vitaldocz/blog
91997b542def86eee6ad58e25c4dab1ad6e68e99
[ "MIT" ]
null
null
null
src/home/urls.py
Vitaldocz/blog
91997b542def86eee6ad58e25c4dab1ad6e68e99
[ "MIT" ]
null
null
null
from django.urls import path from .views import IndexView app_name = 'home' urlpatterns = [ path('', IndexView.as_view(), name='indexView') ]
13.636364
51
0.693333
19
150
5.368421
0.684211
0
0
0
0
0
0
0
0
0
0
0
0.173333
150
10
52
15
0.822581
0
0
0
0
0
0.087248
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
e0f46bfc7a20783d0f7dda9d9c3184ebb52a8080
6,752
py
Python
pyecobee/objects/security_settings.py
gleblanc1783/Pyecobee
c8d1aa016f2e5f8e0c59163d34d8ca57844ae713
[ "MIT" ]
29
2017-07-05T20:32:27.000Z
2022-03-16T02:33:52.000Z
pyecobee/objects/security_settings.py
gleblanc1783/Pyecobee
c8d1aa016f2e5f8e0c59163d34d8ca57844ae713
[ "MIT" ]
24
2018-03-02T19:26:49.000Z
2022-02-16T18:43:31.000Z
pyecobee/objects/security_settings.py
gleblanc1783/Pyecobee
c8d1aa016f2e5f8e0c59163d34d8ca57844ae713
[ "MIT" ]
17
2017-05-22T18:20:32.000Z
2022-01-13T18:14:22.000Z
""" This module is home to the SecuritySettings class """ from pyecobee.ecobee_object import EcobeeObject class SecuritySettings(EcobeeObject): """ This class has been auto generated by scraping https://www.ecobee.com/home/developer/api/documentation/v1/objects/SecuritySettings.shtml Attribute names have been generated by converting ecobee property names from camelCase to snake_case. A getter property has been generated for each attribute. A setter property has been generated for each attribute whose value of READONLY is "no". An __init__ argument without a default value has been generated if the value of REQUIRED is "yes". An __init__ argument with a default value of None has been generated if the value of REQUIRED is "no". """ __slots__ = [ '_user_access_code', '_all_user_access', '_program_access', '_details_access', '_quick_save_access', '_vacation_access', ] attribute_name_map = { 'user_access_code': 'userAccessCode', 'userAccessCode': 'user_access_code', 'all_user_access': 'allUserAccess', 'allUserAccess': 'all_user_access', 'program_access': 'programAccess', 'programAccess': 'program_access', 'details_access': 'detailsAccess', 'detailsAccess': 'details_access', 'quick_save_access': 'quickSaveAccess', 'quickSaveAccess': 'quick_save_access', 'vacation_access': 'vacationAccess', 'vacationAccess': 'vacation_access', } attribute_type_map = { 'user_access_code': 'six.text_type', 'all_user_access': 'bool', 'program_access': 'bool', 'details_access': 'bool', 'quick_save_access': 'bool', 'vacation_access': 'bool', } def __init__( self, user_access_code=None, all_user_access=None, program_access=None, details_access=None, quick_save_access=None, vacation_access=None, ): """ Construct a SecuritySettings instance """ self._user_access_code = user_access_code self._all_user_access = all_user_access self._program_access = program_access self._details_access = details_access self._quick_save_access = quick_save_access self._vacation_access = vacation_access @property def 
user_access_code(self): """ Gets the user_access_code attribute of this SecuritySettings instance. :return: The value of the user_access_code attribute of this SecuritySettings instance. :rtype: six.text_type """ return self._user_access_code @user_access_code.setter def user_access_code(self, user_access_code): """ Sets the user_access_code attribute of this SecuritySettings instance. :param user_access_code: The user_access_code value to set for the user_access_code attribute of this SecuritySettings instance. :type: six.text_type """ self._user_access_code = user_access_code @property def all_user_access(self): """ Gets the all_user_access attribute of this SecuritySettings instance. :return: The value of the all_user_access attribute of this SecuritySettings instance. :rtype: bool """ return self._all_user_access @all_user_access.setter def all_user_access(self, all_user_access): """ Sets the all_user_access attribute of this SecuritySettings instance. :param all_user_access: The all_user_access value to set for the all_user_access attribute of this SecuritySettings instance. :type: bool """ self._all_user_access = all_user_access @property def program_access(self): """ Gets the program_access attribute of this SecuritySettings instance. :return: The value of the program_access attribute of this SecuritySettings instance. :rtype: bool """ return self._program_access @program_access.setter def program_access(self, program_access): """ Sets the program_access attribute of this SecuritySettings instance. :param program_access: The program_access value to set for the program_access attribute of this SecuritySettings instance. :type: bool """ self._program_access = program_access @property def details_access(self): """ Gets the details_access attribute of this SecuritySettings instance. :return: The value of the details_access attribute of this SecuritySettings instance. 
:rtype: bool """ return self._details_access @details_access.setter def details_access(self, details_access): """ Sets the details_access attribute of this SecuritySettings instance. :param details_access: The details_access value to set for the details_access attribute of this SecuritySettings instance. :type: bool """ self._details_access = details_access @property def quick_save_access(self): """ Gets the quick_save_access attribute of this SecuritySettings instance. :return: The value of the quick_save_access attribute of this SecuritySettings instance. :rtype: bool """ return self._quick_save_access @quick_save_access.setter def quick_save_access(self, quick_save_access): """ Sets the quick_save_access attribute of this SecuritySettings instance. :param quick_save_access: The quick_save_access value to set for the quick_save_access attribute of this SecuritySettings instance. :type: bool """ self._quick_save_access = quick_save_access @property def vacation_access(self): """ Gets the vacation_access attribute of this SecuritySettings instance. :return: The value of the vacation_access attribute of this SecuritySettings instance. :rtype: bool """ return self._vacation_access @vacation_access.setter def vacation_access(self, vacation_access): """ Sets the vacation_access attribute of this SecuritySettings instance. :param vacation_access: The vacation_access value to set for the vacation_access attribute of this SecuritySettings instance. :type: bool """ self._vacation_access = vacation_access
28.854701
93
0.655806
756
6,752
5.554233
0.117725
0.095261
0.085735
0.177185
0.654203
0.524649
0.48583
0.396047
0.336509
0.16623
0
0.000206
0.279917
6,752
233
94
28.978541
0.863431
0.441499
0
0.216867
1
0
0.188435
0
0
0
0
0
0
1
0.156627
false
0
0.012048
0
0.289157
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
e0f5e64f08d01f8156b7cf7869463654a3743b45
857
py
Python
test_op_detect.py
nogumbi/LISP-Interpreter
ac4489456e91e1328fbde05d106df4caa8f5af08
[ "MIT" ]
null
null
null
test_op_detect.py
nogumbi/LISP-Interpreter
ac4489456e91e1328fbde05d106df4caa8f5af08
[ "MIT" ]
null
null
null
test_op_detect.py
nogumbi/LISP-Interpreter
ac4489456e91e1328fbde05d106df4caa8f5af08
[ "MIT" ]
null
null
null
import unittest import operation_detection class TestOpDetect(unittest.TestCase): def testIsListTrue(self): test_string = "(a b)" ret_val = operation_detection.isList(test_string) self.assertTrue(ret_val) def testIsListFalse(self): test_string = "(+ a b)" ret_val = operation_detection.isList(test_string) self.assertFalse(ret_val) def testIsListEmpty(self): test_string = "()" ret_val = operation_detection.isList(test_string) self.assertTrue(ret_val) def testIsMathTrue(self): test_string = "(+ a b)" ret_val = operation_detection.isMath(test_string) self.assertTrue(ret_val) def testIsMathFalse(self): test_string = "(cons a b)" ret_val = operation_detection.isMath(test_string) self.assertFalse(ret_val)
29.551724
57
0.663944
98
857
5.540816
0.255102
0.184162
0.128913
0.220994
0.664825
0.664825
0.60221
0.567219
0.567219
0.541436
0
0
0.24154
857
29
58
29.551724
0.835385
0
0
0.521739
0
0
0.036131
0
0
0
0
0
0.217391
1
0.217391
false
0
0.086957
0
0.347826
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
2
e0f8d8f758c7040fd8fea3be28df1a7422352bb3
2,247
py
Python
ExamPrep/Shit Comp/Python Code/RootsOther/WS6ROOTFINDING.py
FHomewood/ScientificComputing
bc3477b4607b25a700f2d89ca4f01cb3ea0998c4
[ "IJG" ]
null
null
null
ExamPrep/Shit Comp/Python Code/RootsOther/WS6ROOTFINDING.py
FHomewood/ScientificComputing
bc3477b4607b25a700f2d89ca4f01cb3ea0998c4
[ "IJG" ]
null
null
null
ExamPrep/Shit Comp/Python Code/RootsOther/WS6ROOTFINDING.py
FHomewood/ScientificComputing
bc3477b4607b25a700f2d89ca4f01cb3ea0998c4
[ "IJG" ]
null
null
null
# module imports from math import sqrt # *****IMPORTANT***** #The function must equal 0. #IMPORTANT #These methods (excluding in-build fsolve) require a bracket in which each root is found. These brackets can be found by looking at a plot. # For example, on a plot of (y = x^2 - 1) there are two roots. An estimate for the upper and lower brackets could be -1.5 and -0.5 and 0.5 for the first root 0.5 and and 1.5 for the second root. (The actual roots are x=+1 and x=-1). ########################################################################### # defining function f on range x x = Np.linspace(xmin,xmax,numberofxvalues) def f(x): return x**2 ########################################################################### # Ridder's Method (do this for each root) import ridder # root = ridder(function, lower bracket, upper bracket, tolerance) root = ridder.ridder(f,a,b,tol=1.0e-9) # Each root is returned as one number. ########################################################################### # Newton-Raphson Method (do this for each root) import newtonRaphson #defining derivative of g (differentiate by hand) def df(x): return x**2 # root = newtonRaphson(function, dervivative of function, lower bracket, upper bracket, tolerance) root = newtonRaphson.newtonRaphson(f,df,e,k,tol=1.0e-9) # The root is returned as one number. ########################################################################### # In-build fsolve Method (returns both roots in an array and so the upper and lower brackets are of the entire interval in which all roots lie). from scipy.optimize import fsolve # root = fsolve(function, [lower limit of INTERVAL, upper limit of INTERVAL]) root = fsolve(f,[intlow,intupp]) # The roots are returned in array and are seperated out and defined individually below. 
1stroot = root[0] 2ndroot = root[1] ########################################################################### # Bisection Method (do this for each root) import bisection # root = bisection.bisection(function, lower bracket, upper bracket, switch=1, tolerence) root = bisection.bisection(f,x1,x2,switch=1,tol=1.0e-9) # The root is returned as one number. ###########################################################################
33.537313
232
0.58834
305
2,247
4.334426
0.377049
0.030257
0.027231
0.034039
0.266263
0.205749
0.186838
0.05295
0.05295
0.05295
0
0.017526
0.136627
2,247
66
233
34.045455
0.663918
0.582109
0
0.125
0
0
0
0
0
0
0
0
0
0
null
null
0
0.3125
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
2
e0fa260c5b84533dd5d730db1100068f59e1e9bb
338
py
Python
test.py
lytex/Orgnode
e8aab2ea93261937d668f2068eba661568a85214
[ "MIT" ]
2
2019-03-31T20:45:05.000Z
2021-05-10T19:10:32.000Z
test.py
lytex/Orgnode
e8aab2ea93261937d668f2068eba661568a85214
[ "MIT" ]
null
null
null
test.py
lytex/Orgnode
e8aab2ea93261937d668f2068eba661568a85214
[ "MIT" ]
1
2020-08-06T21:09:51.000Z
2020-08-06T21:09:51.000Z
from Orgnode import myorgnode import json filename = "1.org" nodetree = myorgnode.maketree(filename) nodelist = myorgnode.makelist(filename) json_data_list = myorgnode.toJSON(nodelist) print "list\n" print json_data_list print "=======================" json_data_tree = myorgnode.toJSON(nodetree) print "tree\n" print json_data_tree
18.777778
43
0.742604
44
338
5.522727
0.409091
0.131687
0.160494
0.115226
0
0
0
0
0
0
0
0.003311
0.106509
338
17
44
19.882353
0.801325
0
0
0
0
0
0.119048
0.068452
0
0
0
0
0
0
null
null
0
0.166667
null
null
0.416667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
2
e0fb180b64dd3c4d4c5bf223a7b55e55b702aa98
8,558
py
Python
setup.py
peekxc/tallem
949af20c1f50f9b6784ee32463e59123cd64294b
[ "Apache-2.0" ]
null
null
null
setup.py
peekxc/tallem
949af20c1f50f9b6784ee32463e59123cd64294b
[ "Apache-2.0" ]
null
null
null
setup.py
peekxc/tallem
949af20c1f50f9b6784ee32463e59123cd64294b
[ "Apache-2.0" ]
1
2021-07-25T04:58:58.000Z
2021-07-25T04:58:58.000Z
# -*- coding: utf-8 -*- import os import sys import pathlib import importlib import glob import shutil from setuptools import setup, find_packages, Extension from setuptools.command.build_ext import build_ext from pathlib import Path import mesonbuild import platform suffix = importlib.machinery.EXTENSION_SUFFIXES[0] package_dir = \ {'': 'src'} packages = \ ['tallem', 'tallem.extensions'] package_data = \ {'': ['*'], 'tallem.extensions': ['*.so', '*.pyd', 'extensions/*.so', 'extensions/*.pyd'] } install_requires = \ ['numpy>=1.21.3,<2.0.0', 'scipy>=1.6'] extras_require = \ {'autograd': ['autograd'], 'pymanopt': ['pymanopt>=0.2.5'], 'scikit-learn': ['scikit-learn>=1.0']} # From: https://stackoverflow.com/questions/51108256/how-to-take-a-pathname-string-with-wildcards-and-resolve-the-glob-with-pathlib def expandpath(path_pattern): p = Path(path_pattern).expanduser() parts = p.parts[p.is_absolute():] return Path(p.root).glob(str(Path(*parts))) def build_extensions(setup_kwargs): print(f"Building extensions with suffix: {suffix}") home_dir = os.getcwd() existing_modules = list(expandpath(f"{home_dir}/src/tallem/extensions/*{suffix}")) if len(existing_modules) > 0: print("Removing existing modules for a clean build") ## Remove existing extension modules for m in existing_modules: os.remove(m) import numpy as np print("\n==== NUMPY INCLUDES ====\n") print(f"{np.get_include()}") ## Recompile print("\n==== Printing compiler version ====\n") os.system("c++ --version") ## Check if build exists, and if it does remove it if os.path.isdir(f"{home_dir}/build"): print(f"\n==== Removing directory {home_dir}/build ====\n") shutil.rmtree(f"{home_dir}/build") print("\n==== Starting meson build ====\n") os.system("python3 -m mesonbuild.mesonmain build") os.system("python3 -m mesonbuild.mesonmain compile -vC build") ## Linux CI servers raise tty exception on meson install, so do manual copy instead os.system("python3 -m mesonbuild.mesonmain install -C build") target_path = 
next(expandpath(f"{home_dir}/src/tallem/extensions/")).resolve() print(f"\n==== Extension module install path: {target_path} ====\n") for file in glob.glob(f"build/*{suffix}"): print(f"Installing {file} to: {target_path} \n") shutil.copy(file, target_path) print("\n==== Finished meson build ====\n") ## Check if they now exist num_so = len([p.name for p in expandpath(f"{home_dir}/src/tallem/extensions/*{suffix}")]) if num_so > 0: return(0) else: print("ERROR: Did not detect native python extensions; Exiting build") sys.exit(-1) # Boilerplate from https://stackoverflow.com/questions/63350376/place-pre-compiled-extensions-in-root-folder-of-non-pure-python-wheel-package # because setuptools/distutils are archaic tools # class CustomDistribution(Distribution): # def iter_distribution_names(self): # for pkg in self.packages or (): # yield pkg # for module in self.py_modules or (): # yield module class CustomExtension(Extension): def __init__(self, path): self.path = path super().__init__(pathlib.PurePath(path).name, []) class build_CustomExtensions(build_ext): def run(self): for ext in (x for x in self.extensions if isinstance(x, CustomExtension)): source = f"{ext.path}{suffix}" build_dir = pathlib.PurePath(self.get_ext_fullpath(ext.name)).parent os.makedirs(f"{build_dir}/{pathlib.PurePath(ext.path).parent}", exist_ok = True) shutil.copy(f"{source}", f"{build_dir}/{source}") def find_extensions(directory): extensions = [] for path, _, filenames in os.walk(directory): for filename in filenames: filename = pathlib.PurePath(filename) if pathlib.PurePath(filename).suffix == suffix: extensions.append(CustomExtension(os.path.join(path, filename.stem))) return extensions setup_kwargs = { 'name': 'tallem', 'version': '0.2.2', 'description': 'Topological Assembly of Locally Euclidean Models', 'long_description': '# Topological Assembly of Local Euclidean Models \n\nThis repository implements TALLEM - a topologically inspired non-linear dimensionality reduction method.\n\nGiven 
some data set *X* and a map <img class=\'latex-inline math\' style="background: white; vertical-align:-0.105206pt;" src="https://render.githubusercontent.com/render/math?math=\\large f%20%3A%20X%20%5Cto%20B&mode=inline"> onto some topological space _B_ which captures the topology/nonlinearity of _X_, TALLEM constructs a map <img style="background: white; vertical-align:-0.105206pt" class=\'latex-inline math\' src="https://render.githubusercontent.com/render/math?math=\\large F%20%3A%20X%20%5Cto%20%5Cmathbb%7BR%7D%5ED%20&mode=inline"> mapping _X_ to a _D_-dimensional space. \n\nTODO: describe TALLEM more\n\n## Installing + Dependencies \n\n`tallem`\'s run-time dependencies are fairly minimal. They include: \n\n1. _Python >= 3.8.0_ \n2. *NumPy (>= 1.20)* and *SciPy* *(>=1.6)*\n\nThe details of the rest of package requirements are listed in [pyproject.toml](https://github.com/peekxc/tallem/blob/main/pyproject.toml). These are automatically downloaded and installed via `pip`: \n\n\n\nSome functions which extend TALLEM\'s core functionality require additional dependencies to be called---they include *autograd*, *pymanopt*, *scikit-learn*, and *bokeh*. These packages are completely optional, i.e. they are not needed to get the resulting embedding. Nonetheless, if you would like these package as well, use: \n\n\n\n\n\n###Installing from cibuildwheels\n\nTODO\n\n### Installing from source\n\nTo install `tallem` from source, clone the repository and install the package via: \n\n```bash\npython -m pip install .\n```\n\n`tallem` relies on a few package dependencies in order to compile correctly when building from source. 
These libraries include: \n\n* [Armadillo](http://arma.sourceforge.net/) >= 10.5.2 ([see here for installation options](http://arma.sourceforge.net/download.html))\n* [Poetry](https://python-poetry.org/) (for building the [source](https://packaging.python.org/glossary/#term-Source-Distribution-or-sdist) and [binary](https://packaging.python.org/glossary/#term-Wheel) distributions)\n* [Meson](https://mesonbuild.com/) and [Ninja](https://ninja-build.org/) (for building the [extension modules](https://docs.python.org/3/glossary.html#term-extension-module))\n\nAn install attempt of these external dependencies is made if they are not available prior to call to `pip`, however these may require manual installation. Additionally, the current source files are written in [C++17](https://en.wikipedia.org/wiki/C%2B%2B17), so a [C++17 compliant compiler](https://en.cppreference.com/w/cpp/compiler_support/17) will be needed. If you have an installation problems or questions, feel free to [make a new issue](https://github.com/peekxc/tallem/issues).\n\n## Usage \n\nBelow is some example code showcasing TALLEMs ability to handle topological obstructions to dimensionality reduction like non-orientability. 
\n\n```python\nfrom tallem import TALLEM\nfrom tallem.cover import IntervalCover\nfrom tallem.datasets import mobius_band\n\n## Get mobius band data + its parameter space\nX, B = mobius_band()\nB_polar = B[:,[1]]\n\n## Construct a cover over the polar coordinate\nm_dist = lambda x,y: np.sum(np.minimum(abs(x - y), (2*np.pi) - abs(x - y)))\ncover = IntervalCover(B_polar, n_sets = 10, overlap = 0.30, metric = m_dist)\n\n## Parameterize TALLEM + transform the data to the obtain the coordinization\nemb = TALLEM(cover=cover, local_map="cmds2", n_components=3).fit_transform(X, B_polar)\n\n## Draw the coordinates via 3D projection, colored by the polar coordinate\nimport matplotlib.pyplot as plt\nfig = plt.figure()\nax = fig.add_subplot(projection=\'3d\')\nax.scatter(*emb.T, marker=\'o\', c=B_polar)\n```\n\n![mobius band](https://github.com/peekxc/tallem/blob/main/resources/tallem_polar.png?raw=true)\n\n', 'author': 'Matt Piekenbrock', 'author_email': 'matt.piekenbrock@gmail.com', 'maintainer': None, 'maintainer_email': None, 'url': 'https://github.com/peekxc/tallem', 'package_dir': package_dir, 'packages': packages, 'package_data': package_data, 'install_requires': install_requires, 'extras_require': extras_require, 'python_requires': '>=3.8,<3.10', 'ext_modules': find_extensions("src/tallem"), 'cmdclass': {'build_ext': build_CustomExtensions}, # 'distclass': CustomDistribution } # Build first, then invoke setup build_extensions(setup_kwargs) setup(**setup_kwargs)
61.128571
3,951
0.728441
1,255
8,558
4.890837
0.356972
0.007494
0.003421
0.013034
0.12219
0.095797
0.067286
0.036494
0.036494
0.021831
0
0.016339
0.120355
8,558
139
3,952
61.568345
0.799017
0.092194
0
0
0
0.050505
0.629003
0.076188
0
0
0
0
0
1
0.050505
false
0
0.141414
0
0.232323
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
e0fc4019d5d5033be976e395e6d4f473188dca4b
579
py
Python
mayan/apps/permissions/permissions.py
CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons
0e4e919fd2e1ded6711354a0330135283e87f8c7
[ "Apache-2.0" ]
2
2021-09-12T19:41:19.000Z
2021-09-12T19:41:20.000Z
mayan/apps/permissions/permissions.py
CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons
0e4e919fd2e1ded6711354a0330135283e87f8c7
[ "Apache-2.0" ]
37
2021-09-13T01:00:12.000Z
2021-10-02T03:54:30.000Z
mayan/apps/permissions/permissions.py
CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons
0e4e919fd2e1ded6711354a0330135283e87f8c7
[ "Apache-2.0" ]
1
2021-09-22T13:17:30.000Z
2021-09-22T13:17:30.000Z
from django.utils.translation import ugettext_lazy as _ from . import PermissionNamespace namespace = PermissionNamespace(label=_('Permissions'), name='permissions') permission_role_create = namespace.add_permission( label=_('Create roles'), name='role_create' ) permission_role_delete = namespace.add_permission( label=_('Delete roles'), name='role_delete' ) permission_role_edit = namespace.add_permission( label=_('Edit roles'), name='role_edit' ) permission_role_view = namespace.add_permission( label=_('View roles'), name='role_view' )
30.473684
76
0.747841
65
579
6.307692
0.323077
0.136585
0.214634
0.263415
0
0
0
0
0
0
0
0
0.136442
579
18
77
32.166667
0.82
0
0
0
0
0
0.188948
0
0
0
0
0
0
1
0
false
0
0.133333
0
0.133333
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
e0fff4047565a604105958950e9f90c2b8888b6f
909
py
Python
reviews/feeds.py
ftrain/django-ftrain
af535fda8e113e9dcdac31216852e35a01d3b950
[ "BSD-3-Clause" ]
1
2019-11-01T00:37:36.000Z
2019-11-01T00:37:36.000Z
reviews/feeds.py
ftrain/django-ftrain
af535fda8e113e9dcdac31216852e35a01d3b950
[ "BSD-3-Clause" ]
null
null
null
reviews/feeds.py
ftrain/django-ftrain
af535fda8e113e9dcdac31216852e35a01d3b950
[ "BSD-3-Clause" ]
null
null
null
from django.contrib.syndication.feeds import Feed from django.utils import feedgenerator from models import Event class LilliputEventsFeed(Feed): title = "The Lilliput Review, by Paul Ford" link = "http://www.lilliputreview.com" subtitle = "Big fella, little reviews." author_name = "Paul Ford" item_author_name = "Paul Ford" item_author_email = "ford@ftrain.com" item_author_link = "http://www.ftrain.com" item_copyright = 'Copyright (c) Paul Ford' feed_guid = 'http://www.ftrain.com/ftrain/feeds/the-lilliput-review/' feed_type = feedgenerator.Atom1Feed def items(self): return Event.objects.order_by('-time')[:25].select_related() def item_link(self, obj): return 'http://www.lilliputreview.com/lilliput/' + str(obj.time.strftime("%Y/%m/%d")) + '#' + obj.time.strftime("%H:%M") def item_pubdate(self, item): return item.time
34.961538
128
0.687569
122
909
5.008197
0.47541
0.052373
0.055646
0.07856
0.081833
0.081833
0
0
0
0
0
0.003995
0.173817
909
25
129
36.36
0.809587
0
0
0
0
0
0.305831
0
0
0
0
0
0
1
0.15
false
0
0.15
0.15
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
2
460b53491e154df1abb3a089e77996ef0878d637
224
py
Python
portals/wwits/groups/__init__.py
jalanb/portals
7a5360b48547719d3fbe50790f08eaf5571148dd
[ "ADSL" ]
null
null
null
portals/wwits/groups/__init__.py
jalanb/portals
7a5360b48547719d3fbe50790f08eaf5571148dd
[ "ADSL" ]
null
null
null
portals/wwits/groups/__init__.py
jalanb/portals
7a5360b48547719d3fbe50790f08eaf5571148dd
[ "ADSL" ]
null
null
null
"""This module consist of API Groups. All the REST APIs are divided into several groups 1) Access 2) General 3) Organization 4) Service 5) Service Action Each group has a set of APIs with their own models and schemas. """
18.666667
63
0.754464
39
224
4.333333
0.897436
0
0
0
0
0
0
0
0
0
0
0.027473
0.1875
224
11
64
20.363636
0.901099
0.964286
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
2
46141df3fc631ad4b880c855f157b57fbc92da7c
1,109
py
Python
laundry/models.py
proxim/laundry-app
f8fe5bc5e1229e5ca28a33387079d64cc97fa49b
[ "MIT" ]
null
null
null
laundry/models.py
proxim/laundry-app
f8fe5bc5e1229e5ca28a33387079d64cc97fa49b
[ "MIT" ]
null
null
null
laundry/models.py
proxim/laundry-app
f8fe5bc5e1229e5ca28a33387079d64cc97fa49b
[ "MIT" ]
null
null
null
from django.db import models


class User(models.Model):
    """A laundry customer, identified by name and contact phone number."""

    name = models.CharField(max_length=100)
    phone = models.CharField(max_length=20)

    def __str__(self):
        return f'{self.name} at {self.phone}'


class Load(models.Model):
    """One batch of laundry, optionally tied to the user who owns it."""

    # BUGFIX: on_delete=models.SET_NULL is only legal on a nullable column;
    # without null=True Django's system checks reject this field
    # ("Field specifies on_delete=SET_NULL, but cannot be null.").
    user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)


class Washer(models.Model):
    """A washing machine and the load (if any) currently inside it."""

    class Status(models.IntegerChoices):
        EMPTY = 0
        IN_PROGRESS = 1
        DONE = 2

    status = models.IntegerField(choices=Status.choices)
    # Nullable: an EMPTY machine holds no load.
    load = models.ForeignKey(Load, on_delete=models.SET_NULL, blank=True, null=True)


class Dryer(models.Model):
    """A dryer and the load (if any) currently inside it."""

    class Status(models.IntegerChoices):
        EMPTY = 0
        IN_PROGRESS = 1
        DONE = 2

    status = models.IntegerField(choices=Status.choices)
    # Nullable: an EMPTY machine holds no load.
    load = models.ForeignKey(Load, on_delete=models.SET_NULL, blank=True, null=True)


class Bin(models.Model):
    """A bin; FULL when it currently holds a load."""

    class Status(models.IntegerChoices):
        EMPTY = 0
        FULL = 1

    status = models.IntegerField(choices=Status.choices)
    # Nullable: an EMPTY bin holds no load.
    load = models.ForeignKey(Load, on_delete=models.SET_NULL, blank=True, null=True)
27.725
84
0.677187
143
1,109
5.13986
0.307692
0.097959
0.07619
0.092517
0.714286
0.685714
0.685714
0.685714
0.620408
0.620408
0
0.014925
0.214608
1,109
39
85
28.435897
0.828932
0
0
0.571429
0
0
0.024346
0
0
0
0
0
0
1
0.035714
false
0
0.035714
0.035714
0.714286
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
2
1c9d6eb0017622168c53fe04928985d82d4dbdd6
1,273
py
Python
functions.py
cristianosch/Python
b3ae5444aa009a3a6cbe6db61b8583f8be5c2fe8
[ "MIT" ]
null
null
null
functions.py
cristianosch/Python
b3ae5444aa009a3a6cbe6db61b8583f8be5c2fe8
[ "MIT" ]
null
null
null
functions.py
cristianosch/Python
b3ae5444aa009a3a6cbe6db61b8583f8be5c2fe8
[ "MIT" ]
null
null
null
# Functions (Funções) # DRY - Don't repeat yourself # Parametro --> Argumento # Default = Aquele que você define o valor no parametro # Non-Default = Aquele que você não define o valor do parametro ''' No exemplo abaixo NOME é o Non-Default, pq o valor dele ainda será atribuido posteriormente podendo ser trocado a atribuição. Já em QUANTIDADE ele está Default (definido) ou seja seu Valor sera sempre o mesmo até que voce troque. Deve-se respeitar as ordens, o NON-DEFAULT deve ser atribuido antes def boas_vindas( nome, quantidade = 6): print(f'Olá{nome}.') print(f'Temos {str(quantidade)} laptops em estoque') boas_vindas('Marcos')# Não sera necessario chamar a quantidade ''' # Realizam uma tarefa # Calcula e retorna o valor def cliente1(nome): print(f'Olá {nome}') def cliente2(nome): return f'Olá {nome}' # Em return ele armazena informação e sé escreve se for chamado x = cliente1 ('Maria') y = cliente2 ('José') print(x) print(y) # Fixando Exercicio def restaurante(nome): print(f'Bem vindos ao {nome} o melhor restaurante da cidade ') def espera(itens): return f'Neste momento temos {itens} clientes em espera' z = restaurante ('Bon Appetit') h = espera (6) print(z) print(h) # Criar uma função que soma vários números.
21.948276
67
0.723488
197
1,273
4.664975
0.568528
0.026115
0.026115
0.043526
0
0
0
0
0
0
0
0.005769
0.183032
1,273
57
68
22.333333
0.877885
0.663001
0
0
0
0
0.338235
0
0
0
0
0
0
1
0.25
false
0
0
0.125
0.375
0.375
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
2
1ca2008c84a24e475d44aec485a4698505fff8e1
5,316
py
Python
example/simple_functions/simple_function.py
HowardHu97/ZOOpt
01568e8e6b0e65ac310d362af2da5245ac375e53
[ "MIT" ]
1
2018-11-03T12:05:00.000Z
2018-11-03T12:05:00.000Z
example/simple_functions/simple_function.py
HowardHu97/ZOOpt
01568e8e6b0e65ac310d362af2da5245ac375e53
[ "MIT" ]
null
null
null
example/simple_functions/simple_function.py
HowardHu97/ZOOpt
01568e8e6b0e65ac310d362af2da5245ac375e53
[ "MIT" ]
null
null
null
"""
Objective functions can be implemented in this file.

Author:
    Yu-Ren Liu
"""

# NOTE: Random is no longer used below (an unused local was removed), but the
# import is kept so the module namespace stays backward compatible.
from random import Random
from zoopt.dimension import Dimension
import numpy as np


class SetCover:
    """
    Weighted set-cover problem for discrete optimization.

    The instance data (weights and candidate subsets) requires some extra
    initialization work, which is why the problem is wrapped in a class.
    """
    __weight = None
    __subset = None

    def __init__(self):
        # Per-subset weights: choosing subset i costs __weight[i].
        self.__weight = [0.8356, 0.5495, 0.4444, 0.7269, 0.9960, 0.6633, 0.5062, 0.8429, 0.1293, 0.7355,
                         0.7979, 0.2814, 0.7962, 0.1754, 0.0267, 0.9862, 0.1786, 0.5884, 0.6289, 0.3008]
        # __subset[i][j] == 1 means subset i covers universe element j (30 elements).
        self.__subset = []
        self.__subset.append([0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0])
        self.__subset.append([0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0])
        self.__subset.append([1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0])
        self.__subset.append([0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0])
        self.__subset.append([1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1])
        self.__subset.append([0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0])
        self.__subset.append([0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0])
        self.__subset.append([0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0])
        self.__subset.append([0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0])
        self.__subset.append([0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1])
        self.__subset.append([0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0])
        self.__subset.append([0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1])
        self.__subset.append([1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1])
        self.__subset.append([1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1])
        self.__subset.append([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])
        self.__subset.append([1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0])
        self.__subset.append([1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1])
        self.__subset.append([0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1])
        self.__subset.append([0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0])
        self.__subset.append([0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1])

    def fx(self, solution):
        """
        Objective function.

        :param solution: a Solution object whose x is a 0/1 vector choosing subsets
        :return: the total weight of the chosen subsets, plus the sum of all
            weights as a penalty when the choice does not cover every element
        """
        x = solution.get_x()
        # Total weight; also used as the penalty for incomplete covers.
        allweight = sum(self.__weight)
        countw = 0
        # covered[j] becomes True once some chosen subset covers element j.
        covered = [False] * len(self.__subset[0])
        for i in range(len(self.__subset)):
            if x[i] == 1:
                countw += self.__weight[i]
                for j in range(len(self.__subset[i])):
                    if self.__subset[i][j] == 1:
                        covered[j] = True
        # Penalize solutions that leave any element uncovered.
        if not all(covered):
            countw += allweight
        return countw

    @property
    def dim(self):
        """
        Dimension of the set cover problem.

        :return: Dimension instance describing 20 binary decision variables
        """
        dim_size = 20
        dim_regs = [[0, 1]] * dim_size
        dim_tys = [False] * dim_size
        return Dimension(dim_size, dim_regs, dim_tys)


def sphere(solution):
    """
    Sphere function for continuous optimization: sum of (x_i - 0.2)^2.
    """
    x = solution.get_x()
    value = sum([(i - 0.2) * (i - 0.2) for i in x])
    return value


def sphere_mixed(solution):
    """
    Sphere function for mixed optimization: sum of x_i^2.
    """
    x = solution.get_x()
    value = sum([i * i for i in x])
    return value


def sphere_discrete_order(solution):
    """
    Sphere function for integer continuous optimization: sum of (x_i - 2)^2.
    """
    # Cleanup: removed unused locals `a = 0` and `rd = Random()` from the
    # original implementation; they were never read.
    x = solution.get_x()
    value = sum([(i - 2) * (i - 2) for i in x])
    return value


def ackley(solution):
    """
    Ackley function for continuous optimization, shifted so the optimum is at 0.2.
    """
    x = solution.get_x()
    bias = 0.2
    ave_seq = sum([(i - bias) * (i - bias) for i in x]) / len(x)
    ave_cos = sum([np.cos(2.0 * np.pi * (i - bias)) for i in x]) / len(x)
    value = -20 * np.exp(-0.2 * np.sqrt(ave_seq)) - np.exp(ave_cos) + 20.0 + np.e
    return value


def ackley_noise_creator(mu, sigma):
    """
    Build a noisy Ackley objective: ackley(x) plus Gaussian noise N(mu, sigma).
    """
    return lambda solution: ackley(solution) + np.random.normal(mu, sigma, 1)
37.174825
120
0.48307
1,062
5,316
2.335217
0.113936
0.120161
0.097984
0.067742
0.518548
0.504839
0.497581
0.456452
0.324194
0.276613
0
0.200772
0.317908
5,316
142
121
37.43662
0.483177
0.105154
0
0.111111
0
0
0
0
0
0
0
0
0
1
0.098765
false
0
0.037037
0
0.259259
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
1cbe7fa082b50a738ff3911e60c8a30f595ce123
3,460
py
Python
google/cloud/secretmanager/__init__.py
LaudateCorpus1/python-secret-manager
4056e97028a638934de9deea68d29e523fa45a1f
[ "Apache-2.0" ]
57
2019-12-27T23:43:30.000Z
2022-03-22T21:22:36.000Z
google/cloud/secretmanager/__init__.py
LaudateCorpus1/python-secret-manager
4056e97028a638934de9deea68d29e523fa45a1f
[ "Apache-2.0" ]
114
2019-12-20T00:50:24.000Z
2022-03-31T22:55:16.000Z
google/cloud/secretmanager/__init__.py
LaudateCorpus1/python-secret-manager
4056e97028a638934de9deea68d29e523fa45a1f
[ "Apache-2.0" ]
20
2019-12-19T21:18:58.000Z
2022-01-29T08:13:25.000Z
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from google.cloud.secretmanager_v1.services.secret_manager_service.client import ( SecretManagerServiceClient, ) from google.cloud.secretmanager_v1.services.secret_manager_service.async_client import ( SecretManagerServiceAsyncClient, ) from google.cloud.secretmanager_v1.types.resources import CustomerManagedEncryption from google.cloud.secretmanager_v1.types.resources import ( CustomerManagedEncryptionStatus, ) from google.cloud.secretmanager_v1.types.resources import Replication from google.cloud.secretmanager_v1.types.resources import ReplicationStatus from google.cloud.secretmanager_v1.types.resources import Rotation from google.cloud.secretmanager_v1.types.resources import Secret from google.cloud.secretmanager_v1.types.resources import SecretPayload from google.cloud.secretmanager_v1.types.resources import SecretVersion from google.cloud.secretmanager_v1.types.resources import Topic from google.cloud.secretmanager_v1.types.service import AccessSecretVersionRequest from google.cloud.secretmanager_v1.types.service import AccessSecretVersionResponse from google.cloud.secretmanager_v1.types.service import AddSecretVersionRequest from google.cloud.secretmanager_v1.types.service import CreateSecretRequest from google.cloud.secretmanager_v1.types.service import DeleteSecretRequest from google.cloud.secretmanager_v1.types.service import DestroySecretVersionRequest from 
google.cloud.secretmanager_v1.types.service import DisableSecretVersionRequest from google.cloud.secretmanager_v1.types.service import EnableSecretVersionRequest from google.cloud.secretmanager_v1.types.service import GetSecretRequest from google.cloud.secretmanager_v1.types.service import GetSecretVersionRequest from google.cloud.secretmanager_v1.types.service import ListSecretsRequest from google.cloud.secretmanager_v1.types.service import ListSecretsResponse from google.cloud.secretmanager_v1.types.service import ListSecretVersionsRequest from google.cloud.secretmanager_v1.types.service import ListSecretVersionsResponse from google.cloud.secretmanager_v1.types.service import UpdateSecretRequest __all__ = ( "SecretManagerServiceClient", "SecretManagerServiceAsyncClient", "CustomerManagedEncryption", "CustomerManagedEncryptionStatus", "Replication", "ReplicationStatus", "Rotation", "Secret", "SecretPayload", "SecretVersion", "Topic", "AccessSecretVersionRequest", "AccessSecretVersionResponse", "AddSecretVersionRequest", "CreateSecretRequest", "DeleteSecretRequest", "DestroySecretVersionRequest", "DisableSecretVersionRequest", "EnableSecretVersionRequest", "GetSecretRequest", "GetSecretVersionRequest", "ListSecretsRequest", "ListSecretsResponse", "ListSecretVersionsRequest", "ListSecretVersionsResponse", "UpdateSecretRequest", )
43.797468
88
0.824566
361
3,460
7.806094
0.290859
0.092264
0.138396
0.258339
0.456352
0.456352
0.456352
0.456352
0.041164
0
0
0.011305
0.105202
3,460
78
89
44.358974
0.898902
0.164451
0
0
0
0
0.182957
0.119304
0
0
0
0
0
1
0
false
0
0.433333
0
0.433333
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
1cbecf5a55e84a58915ab4eab2d828a3b9809763
516
py
Python
find-array-intersection.py
caipre/epigrams
4de7f8cda4cad1361cf69421beeda27d5bf48fa6
[ "MIT" ]
null
null
null
find-array-intersection.py
caipre/epigrams
4de7f8cda4cad1361cf69421beeda27d5bf48fa6
[ "MIT" ]
null
null
null
find-array-intersection.py
caipre/epigrams
4de7f8cda4cad1361cf69421beeda27d5bf48fa6
[ "MIT" ]
null
null
null
#!/usr/bin/env python3


def find_array_intersection(A, B):
    """Return the sorted, de-duplicated intersection of two sorted lists."""
    out = []
    ai = bi = 0
    len_a, len_b = len(A), len(B)
    while ai < len_a and bi < len_b:
        a, b = A[ai], B[bi]
        if a < b:
            ai += 1
        elif b < a:
            bi += 1
        else:
            # Record the common value once, then skip its duplicates on both sides.
            out.append(a)
            while ai < len_a and A[ai] == a:
                ai += 1
            while bi < len_b and B[bi] == a:
                bi += 1
    return out


A = [1, 2, 3, 4, 4, 5, 7, 8, 10]
B = [3, 4, 8, 9, 9, 10, 13]
print(find_array_intersection(A, B))
22.434783
53
0.424419
91
516
2.362637
0.373626
0.069767
0.195349
0.204651
0.344186
0
0
0
0
0
0
0.083871
0.399225
516
22
54
23.454545
0.609677
0.040698
0
0
0
0
0
0
0
0
0
0
0
1
0.055556
false
0
0
0
0.111111
0.055556
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
1ccc655b28022f1d53e191a5b6c11afa7af8a242
3,367
py
Python
tests/test_execution.py
pyxiis/boxscript-py
bf7f055b02b20ff686c5c224199025aaa548c7d9
[ "MIT" ]
1
2021-07-18T03:45:13.000Z
2021-07-18T03:45:13.000Z
tests/test_execution.py
pyxiis/boxscript-py
bf7f055b02b20ff686c5c224199025aaa548c7d9
[ "MIT" ]
7
2021-07-17T23:40:06.000Z
2021-07-18T21:45:31.000Z
tests/test_execution.py
pyxiis/boxscript-py
bf7f055b02b20ff686c5c224199025aaa548c7d9
[ "MIT" ]
1
2021-07-17T17:24:09.000Z
2021-07-17T17:24:09.000Z
import io
import unittest
from contextlib import redirect_stdout
from textwrap import dedent

from boxscript.interpreter import Interpreter

# NOTE(review): the original line breaks inside the box-art string literals
# were lost in transit; the rectangular layout below is reconstructed from
# the box border widths -- verify against the upstream fixtures.


def run_code(code: str) -> str:
    """Test helper method to run provided boxscript.

    Captures and returns everything the interpreter writes to stdout.
    """
    stdout = io.StringIO()
    with redirect_stdout(stdout):
        Interpreter().run(code)
    return stdout.getvalue()


class TestExecution(unittest.TestCase):
    """Tests boxscript.ast for parsing numbers properly."""

    def test_48_is_zero(self) -> None:
        """48 is 0 in ascii"""
        s = """
        ╔═════════════════╗
        ║ 0               ║
        ╚═════════════════╝
        ┌───────────────┐
        │▭▀▀▀▄▄▄▄       │
        └───────────────┘
        """
        s = dedent(s).strip()
        self.assertEqual(run_code(s), "0\n")

    def test_01234567_bitwise(self) -> None:
        """Output: 01234567"""
        s = """
        ╔═══════════════════╗
        ║ output 0123456789 ║
        ╚═══════════════════╝
        ┏━━━━━━━━━━━━━━━━┓
        ┃◇▀▄▒▀▀▄▀▄       ┃
        ┡━━━━━━━━━━━━━━━━┩
        │▀▀◈◇▀▄▒▀▀▀▄▄▄▄  │
        │▀▀▄◈◇▀▄░▀▀▀▄▄▄▄ │
        │┏━━━━━━━━━━━━━┓ │
        │┃◇▀▀▄         ┃ │
        │┡━━━━━━━━━━━━━┩ │
        ││▀▀▀◈◇▀▀▄▚▀▀  │ │
        ││▀▀▄◈◇▀▀░◇▀▀▀ │ │
        ││▀▀◈◇▀▀▒◇▀▀▀  │ │
        │└─────────────┘ │
        │▭◇▀▀            │
        ├────────────────┤
        │▀▀◈◇▀▄░▀▀       │
        │▀▄◈◇▀▄▒▀▀       │
        │┏━━━━━━━━━━━━┓  │
        │┃◇▀▀         ┃  │
        │┡━━━━━━━━━━━━┩  │
        ││▀▀▄◈◇▀▀▚▀▀  │  │
        ││▀▀◈◇▀▄░◇▀▀▄ │  │
        ││▀▄◈◇▀▄▒◇▀▀▄ │  │
        │└────────────┘  │
        └────────────────┘
        """
        s = dedent(s).strip()
        self.assertEqual(run_code(s), "0123456789\n")

    def test_01234567(self) -> None:
        """Output: 0123456789"""
        s = """
        ┏━━━━━━━━━━━━┓
        ┃◇▀▄▨▀▀▄▀▄   ┃
        ┡━━━━━━━━━━━━┩
        │▭◇▀▄▐▀▀▀▄▄▄▄│
        ├────────────┤
        │▀▄◈◇▀▄▐▀▀   │
        └────────────┘
        """
        s = dedent(s)
        self.assertEqual(run_code(s), "0123456789\n")

    def test_invalid_code(self) -> None:
        """Provide invalid code"""
        s = """
        ╔═══════════════════════╗
        ║This code does nothing ║
        ╚═══════════════════════╝
        ┏━━━━━━━━━━━━━━━━┓
        ┃◇▀▄▒▀▀▄▀▄       ┃
        ┡━━━━━━━━━━━━━━━━┩
        │◇▀▀◈◇▀▄▒▀▀▀▄▄▄▄ │
        │◇▀▀▄◈◇▀▄░▀▀▀▄▄▄▄│
        │┏━━━━━━━━━━━━━┓ │
        │┃◇▀▀▄         ┃ │
        │┡━━━━━━━━━━━━━┩ │
        ││◇▀▀▀◈◇▀▀▄▚▀▀ │ │
        ││◇▀▀▄◈◇▀▀░◇▀▀▀│ │
        ││◇▀▀◈◇▀▀▒◇▀▀▀ │ │
        │└─────────────┘ │
        │╔═════════════╗ │
        │║Test [orange]║ │
        │╚═════════════╝ │
        │▭◇▀▀            │
        ├────────────────┤
        │◇▀▀◈◇▀▄░▀▀      │
        │◇▀▄◈◇▀▄▒▀▀      │
        │┏━━━━━━━━━━━━┓  │
        │┃◇▀▀         ┃  │
        │┡━━━━━━━━━━━━┩  │
        ││◇▀▀▄◈◇▀▀▚▀▀ │  │
        ││◇▀▀◈◇▀▄░◇▀▀▄│  │
        ││◇▀▄◈◇▀▄▒◇▀▀▄│  │
        │└────────────┘  │
        └────────────────┘
        """
        s = dedent(s).strip()
        # BUGFIX: assertRaises takes the callable and its arguments separately.
        # The original `self.assertRaises(Exception, run_code(s))` called
        # run_code eagerly, so assertRaises never exercised the call itself.
        self.assertRaises(Exception, run_code, s)
28.294118
59
0.217701
278
3,367
6.23741
0.406475
0.010381
0.018454
0.022491
0.2203
0.2203
0.2203
0.171857
0.125721
0
0
0.036979
0.429759
3,367
118
60
28.533898
0.336979
0.050193
0
0.405941
0
0
0.711399
0.02905
0
0
0
0
0.039604
1
0.049505
false
0
0.049505
0
0.118812
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
1cccdf00c028dbb0bcca672923690a45524d4c8d
798
py
Python
backend/api/migrations/0015_auto_20190821_0857.py
yamamz/BRMI_LOANAPP
e6f79789855a633ee78a168452bca508622bcca8
[ "MIT" ]
null
null
null
backend/api/migrations/0015_auto_20190821_0857.py
yamamz/BRMI_LOANAPP
e6f79789855a633ee78a168452bca508622bcca8
[ "MIT" ]
6
2020-06-05T22:43:22.000Z
2022-02-10T12:32:19.000Z
backend/api/migrations/0015_auto_20190821_0857.py
yamamz/BRMI_LOANAPP
e6f79789855a633ee78a168452bca508622bcca8
[ "MIT" ]
null
null
null
# Generated by Django 2.1.1 on 2019-08-21 00:57 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('api', '0014_auto_20190821_0852'), ] operations = [ migrations.AlterField( model_name='loan', name='cbu', field=models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True), ), migrations.AlterField( model_name='loan', name='interest', field=models.DecimalField(decimal_places=2, max_digits=40), ), migrations.AlterField( model_name='loan', name='processing_fee', field=models.DecimalField(blank=True, decimal_places=2, max_digits=40, null=True), ), ]
27.517241
94
0.591479
86
798
5.337209
0.511628
0.130719
0.163399
0.189542
0.540305
0.540305
0.239651
0.239651
0.239651
0.239651
0
0.070671
0.290727
798
28
95
28.5
0.740283
0.056391
0
0.409091
1
0
0.083888
0.030626
0
0
0
0
0
1
0
false
0
0.045455
0
0.181818
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
1cef36925814e72624cdc980e7e8e137d27f1303
5,081
py
Python
Preliminaries/Mathematics For ML - ICL/1. Linear Algebra/readonly/bearNecessities.py
MarcosSalib/Cocktail_MOOC
46279c2ec642554537c639702ed8e540ea49afdf
[ "MIT" ]
null
null
null
Preliminaries/Mathematics For ML - ICL/1. Linear Algebra/readonly/bearNecessities.py
MarcosSalib/Cocktail_MOOC
46279c2ec642554537c639702ed8e540ea49afdf
[ "MIT" ]
null
null
null
Preliminaries/Mathematics For ML - ICL/1. Linear Algebra/readonly/bearNecessities.py
MarcosSalib/Cocktail_MOOC
46279c2ec642554537c639702ed8e540ea49afdf
[ "MIT" ]
null
null
null
import matplotlib
matplotlib.use('Agg')  # non-interactive backend: render off-screen, no display needed
import matplotlib.pyplot as plt
import numpy as np
import numpy.linalg as la

# Colour palette (RGB triples in the 0-1 range) used by the drawing code.
bear_black = (0.141, 0.11, 0.11)
bear_white = (0.89, 0.856, 0.856)
magenta = (0xfc / 255, 0x75 / 255, 0xdb / 255)  # Brighter magenta
orange = (218 / 255, 171 / 255, 115 / 255)
green = (175 / 255, 219 / 255, 133 / 255)
white = (240 / 255, 245 / 255, 250 / 255)
blue1 = (70 / 255, 101 / 255, 137 / 255)
blue2 = (122 / 255, 174 / 255, 215 / 255)


def gsBasis(A):
    """Gram-Schmidt orthonormalisation of the first two columns of A.

    Column 0 is normalised; column 1 has its component along column 0
    removed and is then normalised, or zeroed when it is numerically
    parallel to column 0.
    """
    B = np.array(A, dtype=np.float_)
    B[:, 0] = B[:, 0] / la.norm(B[:, 0])
    B[:, 1] = B[:, 1] - B[:, 1] @ B[:, 0] * B[:, 0]
    # 1e-14 tolerance guards against near-parallel columns blowing up the division.
    if la.norm(B[:, 1]) > 1e-14:
        B[:, 1] = B[:, 1] / la.norm(B[:, 1])
    else:
        B[:, 1] = np.zeros_like(B[:, 1])
    return B


def draw_mirror(bearVectors):
    """Set up the 'mirror' scene for a 2x2 matrix of column vectors.

    Draws the line spanned by the first orthonormalised vector (green),
    fills a band offset along the second basis vector (blue2), and draws
    the original columns (orange) and the orthonormal basis (magenta)
    as arrows.  Returns the matplotlib axes.
    """
    fig, ax = plt.subplots(figsize=(12, 12), dpi=80)
    ax.set_xlim([-3.50, 3.50])
    ax.set_ylim([-3.50, 3.50])
    ax.set_aspect(1)
    # ax.set_axis_bgcolor(blue1)
    ax.set_facecolor(blue1)
    gs = gsBasis(bearVectors)
    # The mirror line: the span of the first basis vector, extended across the axes.
    ax.plot([gs[0, 0] * -5, gs[0, 0] * 5], [gs[1, 0] * -5, gs[1, 0] * 5], lw=2, color=green, zorder=4)
    # A quadrilateral offset along the second basis vector, shading one side of the line.
    ax.fill([
        -5 * gs[0, 0], -5 * gs[0, 0] - 5 * gs[0, 1], 5 * gs[0, 0] - 5 * gs[0, 1], 5 * gs[0, 0]
    ], [
        -5 * gs[1, 0], -5 * gs[1, 0] - 5 * gs[1, 1], 5 * gs[1, 0] - 5 * gs[1, 1], 5 * gs[1, 0]
    ], color=blue2, zorder=0)
    ax.arrow(0, 0, bearVectors[0, 0], bearVectors[1, 0], lw=3, color=orange, zorder=5, head_width=0.1)
    ax.arrow(0, 0, bearVectors[0, 1], bearVectors[1, 1], lw=3, color=orange, zorder=5, head_width=0.1)
    ax.arrow(0, 0, gs[0, 0], gs[1, 0], lw=3, color=magenta, zorder=6, head_width=0.1)
    ax.arrow(0, 0, gs[0, 1], gs[1, 1], lw=3, color=magenta, zorder=6, head_width=0.1)
    return ax


# Outline data for the bear drawing.  Each array has two rows — presumably
# row 0 = x coordinates and row 1 = y coordinates, with np.nan separating
# disjoint pen strokes (TODO confirm against the plotting caller).
bear_black_fur = np.array(
    [[2.0030351, 2.229253, 2.1639012, 2.0809546, 1.9728726, 1.8974666, 1.8924396, 2.0030351, np.nan,
      2.7017972, 2.8500957, 2.9707453, 3.0159889, 2.94561, 2.8299874, 2.7017972, np.nan,
      2.1639012, 2.2317666, 2.3147132, 2.299632, 2.2493613, 2.1890365, 2.1211711, 2.1337387, 2.1639012, np.nan,
      2.4982011, 2.5610936, 2.6213642, 2.633986, 2.5536071, 2.5057417, 2.4982011, np.nan,
      2.2468478, 2.3247673, 2.4429034, 2.4303357, 2.3448755, 2.2820372, 2.2468478, np.nan,
      2.1966706, 2.2722074, 2.4055076, 2.481933, 2.449941, 2.4001756, 2.3237501, 2.222442, 2.1984479, 2.1966706, np.nan,
      1.847196, 1.7818441, 1.7290599, 1.6310321, 1.4575984, 1.3369488, 1.2791375, 1.3671112, 1.8044659, 1.9577914,
      2.2367936, 2.5962289, 2.7520679, 2.9028799, 3.4005595, 3.3150993, 3.0511783, 2.9531506, 2.8676905, 2.7746897,
      2.4052003, 2.2795237, 2.1639012, 1.847196, np.nan,
      2.0491517, 2.5112591, 2.3175294, 2.1326865, 2.0491517],
     [-1.3186252, -1.0902537, -0.99238015, -0.96477475, -0.99488975, -1.1153494, -1.2408283, -1.3186252, np.nan,
      -1.1881273, -1.0852346, -1.1454645, -1.3286636, -1.4666904, -1.4641808, -1.1881273, np.nan,
      -1.5545256, -1.5219011, -1.4014413, -1.3512497, -1.3412115, -1.3989317, -1.4917862, -1.5419777, -1.5545256, np.nan,
      -1.4265371, -1.3964222, -1.4968054, -1.6097363, -1.64738, -1.5545256, -1.4265371, np.nan,
      -1.6423608, -1.6699662, -1.677495, -1.7176483, -1.7477632, -1.7176483, -1.6423608, np.nan,
      -1.7223509, -1.7622781, -1.7764744, -1.7613908, -1.8767359, -1.9805465, -1.9991791, -1.9672374, -1.913114, -1.7223509, np.nan,
      -1.5043341, -1.5444873, -1.486767, -1.1504836, -1.0626484, -1.11284, -1.2558858, -1.7452537, -2.3902152, -2.4378972,
      -2.3575907, -2.1467861, -2.2446597, -2.5527822, -2.5527822, -2.1919586, -1.7828973, -1.6850238, -1.677495, -1.8431272,
      -2.028836, -2.0363647, -1.9485295, -1.5043341, np.nan,
      -2.5527822, -2.5527822, -2.4570104, -2.4463632, -2.5527822]])
bear_white_fur = np.array(
    [[2.229253, 2.4680387, 2.7017972, 2.8299874, 2.8676905, 2.7746897, 2.4052003, 2.2795237, 2.1639012, 1.847196,
      2.0030351, 2.229253, np.nan,
      1.8044659, 1.8974666, 2.0491517, 2.1326865, 2.3175294, 2.5112591, 2.9028799, 2.7520679, 2.5962289, 2.2367936,
      1.9577914, 1.8044659],
     [-1.0902537, -1.0601388, -1.1881273, -1.4641809, -1.677495, -1.8431272, -2.028836, -2.0363647, -1.9485295, -1.5043341,
      -1.3186252, -1.0902537, np.nan,
      -2.3902152, -2.5527822, -2.5527822, -2.4463632, -2.4570104, -2.5527822, -2.5527822, -2.2446597, -2.1467861, -2.3575907,
      -2.4378972, -2.3902152]])
bear_face = np.array(
    [[2.2419927, 2.2526567, 2.3015334, 2.3477442, 2.441943, np.nan,
      2.5258499, 2.5113971, 2.5327621, 2.5632387, 2.5780058, 2.5726645, 2.5475292, 2.5258499, np.nan,
      2.2858075, 2.2704121, 2.2402497, 2.2283105, 2.2484187, 2.273554, 2.2858075],
     [-1.7605035, -1.9432811, -1.9707865, -1.9654629, -1.781798, np.nan,
      -1.4688862, -1.4942957, -1.5099806, -1.5112354, -1.4877081, -1.466063, -1.4588479, -1.4688862, np.nan,
      -1.4346933, -1.4506918, -1.4463002, -1.418381, -1.4055194, -1.4083427, -1.4346933]])
46.190909
102
0.592403
873
5,081
3.424971
0.302406
0.033445
0.013378
0.010033
0.189632
0.161873
0.143478
0.143144
0.143144
0.140468
0
0.530622
0.196615
5,081
109
103
46.614679
0.201862
0.008463
0
0
0
0
0.000596
0
0
0
0.002383
0
0
1
0.020619
false
0
0.041237
0
0.082474
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
1cf22fa983a2781779d558d59f790cb1dd8d6c81
19,081
py
Python
src/isanlp_rst/td_rst_parser/src/modules/treecrf.py
tchewik/isanlp_rst
459864b3daeeb702acf5e65543181068439ce12c
[ "MIT" ]
6
2020-05-09T01:13:10.000Z
2021-02-05T01:02:40.000Z
src/isanlp_rst/td_rst_parser/src/modules/treecrf.py
tchewik/isanlp_rst
459864b3daeeb702acf5e65543181068439ce12c
[ "MIT" ]
2
2019-09-26T11:32:46.000Z
2020-07-24T13:44:46.000Z
src/isanlp_rst/td_rst_parser/src/modules/treecrf.py
tchewik/isanlp_rst
459864b3daeeb702acf5e65543181068439ce12c
[ "MIT" ]
3
2019-09-26T13:39:26.000Z
2021-04-12T14:34:50.000Z
# -*- coding: utf-8 -*- import torch import torch.autograd as autograd import torch.nn as nn from src.utils.fn import stripe class MatrixTree(nn.Module): """ MatrixTree for calculating partition functions and marginals in O(N^3) for directed spanning trees (a.k.a. non-projective trees) by an adaptation of Kirchhoff's MatrixTree Theorem. This module differs from the original paper in that marginals are computed via back-propagation rather than matrix inversion. References: - Terry Koo, Amir Globerson, Xavier Carreras and Michael Collins. 2007. `Structured Prediction Models via the Matrix-Tree Theorem`_. .. _Structured Prediction Models via the Matrix-Tree Theorem: https://www.aclweb.org/anthology/D07-1015/ """ @torch.enable_grad() def forward(self, scores, mask, target=None, mbr=False): """ Args: scores (~torch.Tensor): ``[batch_size, seq_len, seq_len]``. The scores of all possible dependent-head pairs. mask (~torch.BoolTensor): ``[batch_size, seq_len]``. Mask to avoid aggregation on padding tokens. The first column serving as pseudo words for roots should be ``False``. target (~torch.LongTensor): ``[batch_size, seq_len]``. Tensor of gold-standard dependent-head pairs. Default: ``None``. mbr (bool): If ``True``, marginals will be returned to perform minimum Bayes-risk (mbr) decoding. Default: ``False``. Returns: loss (~torch.Tensor): scalar Loss averaged by number of tokens. This won't be returned if target is None. probs (~torch.Tensor): ``[batch_size, seq_len, ]``. Marginals if performing mbr decoding, original scores otherwise. 
""" training = scores.requires_grad # double precision to prevent overflows scores = scores.double() logZ = self.matrix_tree(scores.requires_grad_(), mask) probs = scores # calculate the marginals if mbr: probs, = autograd.grad(logZ, probs, retain_graph=training) probs = probs.float() if target is None: return probs score = scores.gather(-1, target.unsqueeze(-1)).squeeze(-1)[mask].sum() loss = (logZ - score).float() / mask.sum() return loss, probs def matrix_tree(self, scores, mask): lens = mask.sum(-1) batch_size, seq_len, _ = scores.shape mask = mask.index_fill(1, mask.new_tensor(0).long(), 1) scores = scores.masked_fill(~(mask.unsqueeze(-1) & mask.unsqueeze(-2)), float('-inf')) # the numerical stability trick is borrowed from timvieira (https://github.com/timvieira/spanning_tree) # log(det(exp(M))) = log(det(exp(M - m) * exp(m))) # = log(det(exp(M - m)) * exp(m)^n) # = log(det(exp(M - m))) + m*n m = scores.view(batch_size, -1).max(-1)[0] # clamp the lower bound to `torch.finfo().tiny` to prevent underflows A = torch.exp(scores - m.view(-1, 1, 1)).clamp(torch.finfo().tiny) # D is the weighted degree matrix # D(i, j) = sum_j(A(i, j)), if h == m # 0, otherwise D = torch.zeros_like(A) D.diagonal(0, 1, 2).copy_(A.sum(-1)) # Laplacian matrix L = nn.init.eye_(torch.empty_like(A[0])).repeat(batch_size, 1, 1) L = L.masked_scatter_(mask.unsqueeze(-1), (D - A)[mask]) # calculate the partition (a.k.a normalization) term # Z = L^(0, 0), which is the minor of L w.r.t row 0 and column 0 logZ = (L[:, 1:, 1:].slogdet()[1] + m*lens).sum() return logZ class CRFDependency(nn.Module): """ First-order TreeCRF for calculating partition functions and marginals in O(N^3) for projective dependency trees. For efficient calculation The module provides a bathcified implementation and relpace the outside pass with back-propagation totally. 
""" @torch.enable_grad() def forward(self, scores, mask, target=None, mbr=False, partial=False): """ Args: scores (~torch.Tensor): ``[batch_size, seq_len, seq_len]``. The scores of all possible dependent-head pairs. mask (~torch.BoolTensor): ``[batch_size, seq_len]``. Mask to avoid aggregation on padding tokens. The first column serving as pseudo words for roots should be ``False``. target (~torch.LongTensor): ``[batch_size, seq_len]``. Tensor of gold-standard dependent-head pairs. This should be provided for loss calculation. If partially annotated, the unannotated positions should be filled with -1. Default: ``None``. mbr (bool): If ``True``, marginals will be returned to perform minimum Bayes-risk (mbr) decoding. Default: ``False``. partial (bool): ``True`` indicates that the trees are partially annotated. Default: ``False``. Returns: loss (~torch.Tensor): scalar Loss averaged by number of tokens. This won't be returned if target is None. probs (~torch.Tensor): ``[batch_size, seq_len, seq_len]``. Marginals if performing mbr decoding, original scores otherwise. 
""" training = scores.requires_grad batch_size, seq_len, _ = scores.shape # always enable the gradient computation of scores in order for the computation of marginals logZ = self.inside(scores.requires_grad_(), mask) # marginals are used for decoding, and can be computed by combining the inside pass and autograd mechanism probs = scores if mbr: probs, = autograd.grad(logZ, scores, retain_graph=training) if target is None: return probs # the second inside process is needed if use partial annotation if partial: score = self.inside(scores, mask, target) else: score = scores.gather(-1, target.unsqueeze(-1)).squeeze(-1)[mask].sum() loss = (logZ - score) / mask.sum() return loss, probs def inside(self, scores, mask, cands=None): # the end position of each sentence in a batch lens = mask.sum(1) batch_size, seq_len, _ = scores.shape # [seq_len, seq_len, batch_size] scores = scores.permute(2, 1, 0) s_i = torch.full_like(scores, float('-inf')) s_c = torch.full_like(scores, float('-inf')) s_c.diagonal().fill_(0) # set the scores of arcs excluded by cands to -inf if cands is not None: mask = mask.index_fill(1, lens.new_tensor(0), 1) mask = (mask.unsqueeze(1) & mask.unsqueeze(-1)).permute(2, 1, 0) cands = cands.unsqueeze(-1).index_fill(1, lens.new_tensor(0), -1) cands = cands.eq(lens.new_tensor(range(seq_len))) | cands.lt(0) cands = cands.permute(2, 1, 0) & mask scores = scores.masked_fill(~cands, float('-inf')) for w in range(1, seq_len): # n denotes the number of spans to iterate, # from span (0, w) to span (n, n+w) given width w n = seq_len - w # ilr = C(i->r) + C(j->r+1) # [n, w, batch_size] ilr = stripe(s_c, n, w) + stripe(s_c, n, w, (w, 1)) if ilr.requires_grad: ilr.register_hook(lambda x: x.masked_fill_(torch.isnan(x), 0)) il = ir = ilr.permute(2, 0, 1).logsumexp(-1) # I(j->i) = logsumexp(C(i->r) + C(j->r+1)) + s(j->i), i <= r < j # fill the w-th diagonal of the lower triangular part of s_i # with I(j->i) of n spans s_i.diagonal(-w).copy_(il + scores.diagonal(-w)) # 
I(i->j) = logsumexp(C(i->r) + C(j->r+1)) + s(i->j), i <= r < j # fill the w-th diagonal of the upper triangular part of s_i # with I(i->j) of n spans s_i.diagonal(w).copy_(ir + scores.diagonal(w)) # C(j->i) = logsumexp(C(r->i) + I(j->r)), i <= r < j cl = stripe(s_c, n, w, (0, 0), 0) + stripe(s_i, n, w, (w, 0)) cl.register_hook(lambda x: x.masked_fill_(torch.isnan(x), 0)) s_c.diagonal(-w).copy_(cl.permute(2, 0, 1).logsumexp(-1)) # C(i->j) = logsumexp(I(i->r) + C(r->j)), i < r <= j cr = stripe(s_i, n, w, (0, 1)) + stripe(s_c, n, w, (1, w), 0) cr.register_hook(lambda x: x.masked_fill_(torch.isnan(x), 0)) s_c.diagonal(w).copy_(cr.permute(2, 0, 1).logsumexp(-1)) # disable multi words to modify the root s_c[0, w][lens.ne(w)] = float('-inf') return s_c[0].gather(0, lens.unsqueeze(0)).sum() class CRF2oDependency(nn.Module): """ Second-order TreeCRF for calculating partition functions and marginals in O(N^3) for projective dependency trees. For efficient calculation The module provides a bathcified implementation and relpace the outside pass with back-propagation totally. """ def __init__(self): super().__init__() @torch.enable_grad() def forward(self, scores, mask, target=None, mbr=True, partial=False): """ Args: scores (~torch.Tensor, ~torch.Tensor): tuple of two tensors s_arc and s_sib. s_arc ([batch_size, seq_len, seq_len]) holds The scores of all possible dependent-head pairs. s_sib ([batch_size, seq_len, seq_len, seq_len]) holds the scores of dependent-head-sibling triples. mask (~torch.BoolTensor): ``[batch_size, seq_len]``. Mask to avoid aggregation on padding tokens. The first column serving as pseudo words for roots should be ``False``. target (~torch.LongTensor): ``[batch_size, seq_len]``. Tensors of gold-standard dependent-head pairs and dependent-head-sibling triples. If partially annotated, the unannotated positions should be filled with -1. Default: ``None``. mbr (bool): If ``True``, marginals will be returned to perform minimum Bayes-risk (mbr) decoding. 
Default: ``False``. partial (bool): ``True`` indicates that the trees are partially annotated. Default: ``False``. Returns: loss (~torch.Tensor): scalar Loss averaged by number of tokens. This won't be returned if target is None. probs (~torch.Tensor): ``[batch_size, seq_len, seq_len]``. Marginals if performing mbr decoding, original scores otherwise. """ s_arc, s_sib = scores training = s_arc.requires_grad batch_size, seq_len, _ = s_arc.shape # always enable the gradient computation of scores in order for the computation of marginals logZ = self.inside((s.requires_grad_() for s in scores), mask) # marginals are used for decoding, and can be computed by combining the inside pass and autograd mechanism probs = s_arc if mbr: probs, = autograd.grad(logZ, s_arc, retain_graph=training) if target is None: return probs arcs, sibs = target # the second inside process is needed if use partial annotation if partial: score = self.inside(scores, mask, arcs) else: arc_seq, sib_seq = arcs[mask], sibs[mask] arc_mask, sib_mask = mask, sib_seq.gt(0) sib_seq = sib_seq[sib_mask] s_sib = s_sib[mask][torch.arange(len(arc_seq)), arc_seq] s_arc = s_arc[arc_mask].gather(-1, arc_seq.unsqueeze(-1)) s_sib = s_sib[sib_mask].gather(-1, sib_seq.unsqueeze(-1)) score = s_arc.sum() + s_sib.sum() loss = (logZ - score) / mask.sum() return loss, probs def inside(self, scores, mask, cands=None): # the end position of each sentence in a batch lens = mask.sum(1) s_arc, s_sib = scores batch_size, seq_len, _ = s_arc.shape # [seq_len, seq_len, batch_size] s_arc = s_arc.permute(2, 1, 0) # [seq_len, seq_len, seq_len, batch_size] s_sib = s_sib.permute(2, 1, 3, 0) s_i = torch.full_like(s_arc, float('-inf')) s_s = torch.full_like(s_arc, float('-inf')) s_c = torch.full_like(s_arc, float('-inf')) s_c.diagonal().fill_(0) # set the scores of arcs excluded by cands to -inf if cands is not None: mask = mask.index_fill(1, lens.new_tensor(0), 1) mask = (mask.unsqueeze(1) & mask.unsqueeze(-1)).permute(2, 1, 0) cands = 
cands.unsqueeze(-1).index_fill(1, lens.new_tensor(0), -1) cands = cands.eq(lens.new_tensor(range(seq_len))) | cands.lt(0) cands = cands.permute(2, 1, 0) & mask s_arc = s_arc.masked_fill(~cands, float('-inf')) for w in range(1, seq_len): # n denotes the number of spans to iterate, # from span (0, w) to span (n, n+w) given width w n = seq_len - w # I(j->i) = logsum(exp(I(j->r) + S(j->r, i)) +, i < r < j # exp(C(j->j) + C(i->j-1))) # + s(j->i) # [n, w, batch_size] il = stripe(s_i, n, w, (w, 1)) + stripe(s_s, n, w, (1, 0), 0) il += stripe(s_sib[range(w, n+w), range(n)], n, w, (0, 1)) # [n, 1, batch_size] il0 = stripe(s_c, n, 1, (w, w)) + stripe(s_c, n, 1, (0, w - 1)) # il0[0] are set to zeros since the scores of the complete spans starting from 0 are always -inf il[:, -1] = il0.index_fill_(0, lens.new_tensor(0), 0).squeeze(1) if il.requires_grad: il.register_hook(lambda x: x.masked_fill_(torch.isnan(x), 0)) il = il.permute(2, 0, 1).logsumexp(-1) s_i.diagonal(-w).copy_(il + s_arc.diagonal(-w)) # I(i->j) = logsum(exp(I(i->r) + S(i->r, j)) +, i < r < j # exp(C(i->i) + C(j->i+1))) # + s(i->j) # [n, w, batch_size] ir = stripe(s_i, n, w) + stripe(s_s, n, w, (0, w), 0) ir += stripe(s_sib[range(n), range(w, n+w)], n, w) ir[0] = float('-inf') # [n, 1, batch_size] ir0 = stripe(s_c, n, 1) + stripe(s_c, n, 1, (w, 1)) ir[:, 0] = ir0.squeeze(1) if ir.requires_grad: ir.register_hook(lambda x: x.masked_fill_(torch.isnan(x), 0)) ir = ir.permute(2, 0, 1).logsumexp(-1) s_i.diagonal(w).copy_(ir + s_arc.diagonal(w)) # [n, w, batch_size] slr = stripe(s_c, n, w) + stripe(s_c, n, w, (w, 1)) if slr.requires_grad: slr.register_hook(lambda x: x.masked_fill_(torch.isnan(x), 0)) slr = slr.permute(2, 0, 1).logsumexp(-1) # S(j, i) = logsumexp(C(i->r) + C(j->r+1)), i <= r < j s_s.diagonal(-w).copy_(slr) # S(i, j) = logsumexp(C(i->r) + C(j->r+1)), i <= r < j s_s.diagonal(w).copy_(slr) # C(j->i) = logsumexp(C(r->i) + I(j->r)), i <= r < j cl = stripe(s_c, n, w, (0, 0), 0) + stripe(s_i, n, w, (w, 0)) 
cl.register_hook(lambda x: x.masked_fill_(torch.isnan(x), 0)) s_c.diagonal(-w).copy_(cl.permute(2, 0, 1).logsumexp(-1)) # C(i->j) = logsumexp(I(i->r) + C(r->j)), i < r <= j cr = stripe(s_i, n, w, (0, 1)) + stripe(s_c, n, w, (1, w), 0) cr.register_hook(lambda x: x.masked_fill_(torch.isnan(x), 0)) s_c.diagonal(w).copy_(cr.permute(2, 0, 1).logsumexp(-1)) # disable multi words to modify the root s_c[0, w][lens.ne(w)] = float('-inf') return s_c[0].gather(0, lens.unsqueeze(0)).sum() class CRFConstituency(nn.Module): """ TreeCRF for calculating partition functions and marginals in O(N^3) for constituency trees. For efficient calculation The module provides a bathcified implementation and relpace the outside pass with back-propagation totally. """ @torch.enable_grad() def forward(self, scores, mask, target=None, mbr=False): """ Args: scores (~torch.Tensor): ``[batch_size, seq_len, seq_len]``. The scores of all possible constituents. mask (~torch.BoolTensor): ``[batch_size, seq_len, seq_len]``. Mask to avoid parsing over padding tokens. For each square matrix in a batch, the positions except upper triangular part should be masked out. target (~torch.BoolTensor): ``[batch_size, seq_len, seq_len]``. Tensor of gold-standard constituents. ``True`` if a constituent exists. Default: ``None``. mbr (bool): If ``True``, marginals will be returned to perform minimum Bayes-risk (mbr) decoding. Default: ``False``. Returns: loss (~torch.Tensor): scalar Loss averaged by number of tokens. This won't be returned if target is None. probs (~torch.Tensor): ``[batch_size, seq_len, seq_len]``. Marginals if performing mbr decoding, original scores otherwise. 
""" training = scores.requires_grad # always enable the gradient computation of scores in order for the computation of marginals logZ = self.inside(scores.requires_grad_(), mask) # marginals are used for decoding, and can be computed by combining the inside pass and autograd mechanism probs = scores if mbr: probs, = autograd.grad(logZ, scores, retain_graph=training) if target is None: return probs loss = (logZ - scores[mask & target].sum()) / mask[:, 0].sum() return loss, probs def inside(self, scores, mask): lens = mask[:, 0].sum(-1) batch_size, seq_len, _ = scores.shape # [seq_len, seq_len, batch_size] scores, mask = scores.permute(1, 2, 0), mask.permute(1, 2, 0) s = torch.full_like(scores, float('-inf')) for w in range(1, seq_len): # n denotes the number of spans to iterate, # from span (0, w) to span (n, n+w) given width w n = seq_len - w if w == 1: s.diagonal(w).copy_(scores.diagonal(w)) continue # [n, w, batch_size] s_s = stripe(s, n, w-1, (0, 1)) + stripe(s, n, w-1, (1, w), 0) # [batch_size, n, w] s_s = s_s.permute(2, 0, 1) if s_s.requires_grad: s_s.register_hook(lambda x: x.masked_fill_(torch.isnan(x), 0)) s_s = s_s.logsumexp(-1) s.diagonal(w).copy_(s_s + scores.diagonal(w)) return s[0].gather(0, lens.unsqueeze(0)).sum()
46.539024
121
0.566585
2,728
19,081
3.851906
0.116935
0.029121
0.026266
0.032832
0.751998
0.727351
0.694709
0.676627
0.64094
0.625238
0
0.017883
0.3025
19,081
409
122
46.652812
0.771658
0.446308
0
0.475676
0
0
0.004923
0
0
0
0
0
0
1
0.048649
false
0
0.021622
0
0.156757
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
1cf92d4fea209ec94d5d4c9e14b8d7703cdade59
2,157
py
Python
jim/registrations.py
markzz/jim
0944c9a35e1adfa51519cbe488ee1f8976d490ba
[ "0BSD" ]
1
2018-03-22T23:40:17.000Z
2018-03-22T23:40:17.000Z
jim/registrations.py
FUN-GINEERS/jim
0944c9a35e1adfa51519cbe488ee1f8976d490ba
[ "0BSD" ]
4
2018-03-20T15:55:34.000Z
2019-12-17T17:58:09.000Z
jim/registrations.py
markzz/jim
0944c9a35e1adfa51519cbe488ee1f8976d490ba
[ "0BSD" ]
2
2018-03-21T02:48:42.000Z
2019-10-22T14:16:53.000Z
from jim import cmd_funcs from jim.util.util import register_cmd, register_pattern, ADMINISTRATOR_PERM, MODERATOR_PERM def register_cmds(): register_cmd("8ball", "Ask the magic 8 ball.", None, 2, cmd_funcs.eight_ball, False) register_cmd("addcom", "Adds a custom command.", ADMINISTRATOR_PERM|MODERATOR_PERM, 3, cmd_funcs.addcom, False) register_cmd("addadmin", "Adds an admin or role to have admin permissions.", ADMINISTRATOR_PERM, 1, cmd_funcs.addadmin, False) register_cmd("addmod", "Adds a moderator or role to have moderator permissions.", ADMINISTRATOR_PERM, 1, cmd_funcs.addmod, False) register_cmd("about", "Gets general bot information.", None, 1, cmd_funcs.about, False) #register_cmd("archive", "Archives a channel.", ADMINISTRATOR_PERM, 2, cmd_funcs.archive, False) register_cmd("deladmin", "Deletes an admin or role from having admin permissions.", ADMINISTRATOR_PERM, 2, cmd_funcs.deladmin, False) register_cmd("delcom", "Deletes a custom command.", ADMINISTRATOR_PERM|MODERATOR_PERM, 2, cmd_funcs.delcom, False) register_cmd("delmod", "Deletes a moderator or role from having moderator permissions.", ADMINISTRATOR_PERM, 2, cmd_funcs.delmod, False) register_cmd("help", "Prints this help.", None, 1, cmd_funcs.help, True) register_cmd("mcinfo", "Get information on the Minecraft server", None, 1, cmd_funcs.mcinfo, False) register_cmd("murder", "Murders a person.", None, 2, cmd_funcs.murder, False) register_cmd("namechange", "Change my nickname on that server.", ADMINISTRATOR_PERM, 2, cmd_funcs.namechange, False) register_cmd("roll", "Roll a die.", None, 2, cmd_funcs.roll, False) register_cmd("ping", "Ping the bot.", None, 1, cmd_funcs.ping, False) register_cmd("prefix", "Change command prefix", ADMINISTRATOR_PERM, 1, cmd_funcs.prefix, False) register_cmd("wa", "Ask Wolfram Alpha a question.", None, 2, cmd_funcs.wolfram, False) def register_patterns(): # TODO: Write documentation on how to make patterns and what all can be done here. 
register_pattern(r'%%name%%.*\?', cmd_funcs.eight_ball) register_pattern(r'hi %%name%%!', cmd_funcs.hello_jim)
74.37931
140
0.743162
311
2,157
4.96463
0.302251
0.103627
0.15544
0.042098
0.203368
0.15285
0.056995
0
0
0
0
0.010193
0.135837
2,157
28
141
77.035714
0.818133
0.081595
0
0
0
0
0.310258
0
0
0
0
0.035714
0
1
0.090909
true
0
0.090909
0
0.181818
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
1
0
0
0
0
0
0
2
1cfe3cd1eeec50fb0008c13bccefe11726789a5b
160
py
Python
src/constants.py
chatforia/chatforia
39b471cf5118fffbd2a5f31e4628337d890526d0
[ "MIT" ]
null
null
null
src/constants.py
chatforia/chatforia
39b471cf5118fffbd2a5f31e4628337d890526d0
[ "MIT" ]
null
null
null
src/constants.py
chatforia/chatforia
39b471cf5118fffbd2a5f31e4628337d890526d0
[ "MIT" ]
null
null
null
import socket # import threading HOST = socket.gethostbyname(socket.gethostname()) PORT = 8000 ADDR = (HOST, PORT) FORMAT = 'utf-8' HEADER = 64 DISC = "!DISC"
16
49
0.70625
21
160
5.380952
0.714286
0
0
0
0
0
0
0
0
0
0
0.051852
0.15625
160
9
50
17.777778
0.785185
0.1
0
0
0
0
0.070423
0
0
0
0
0
0
1
0
false
0
0.142857
0
0.142857
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
e8001af1a78e40f5c371e55967d849152858188d
332
py
Python
data/migrations/0088_merge_20190404_1819.py
Duke-GCB/bespin-api
cea5c20fb2ff592adabe6ebb7ca934939aa11a34
[ "MIT" ]
null
null
null
data/migrations/0088_merge_20190404_1819.py
Duke-GCB/bespin-api
cea5c20fb2ff592adabe6ebb7ca934939aa11a34
[ "MIT" ]
137
2016-12-09T18:59:45.000Z
2021-06-10T18:55:47.000Z
data/migrations/0088_merge_20190404_1819.py
Duke-GCB/bespin-api
cea5c20fb2ff592adabe6ebb7ca934939aa11a34
[ "MIT" ]
3
2017-11-14T16:05:58.000Z
2018-12-28T18:07:43.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2019-04-04 18:19 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('data', '0087_auto_20190329_2001'), ('data', '0087_auto_20190402_1722'), ] operations = [ ]
19.529412
48
0.656627
41
332
5.04878
0.756098
0.077295
0.115942
0
0
0
0
0
0
0
0
0.188462
0.216867
332
16
49
20.75
0.607692
0.204819
0
0
1
0
0.206897
0.176245
0
0
0
0
0
1
0
false
0
0.222222
0
0.555556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
2
e80665cc4fb94898d7e91c94f14bb3bb1253f866
1,271
py
Python
lib/device.py
keke185321/webcam-pulse-detector
d6162901bc169de1266e9da00d73f7e943713a64
[ "Apache-2.0" ]
1,411
2015-01-03T00:06:06.000Z
2022-03-27T20:03:51.000Z
lib/device.py
keke185321/webcam-pulse-detector
d6162901bc169de1266e9da00d73f7e943713a64
[ "Apache-2.0" ]
36
2015-02-17T23:11:30.000Z
2021-05-31T18:31:47.000Z
lib/device.py
keke185321/webcam-pulse-detector
d6162901bc169de1266e9da00d73f7e943713a64
[ "Apache-2.0" ]
419
2015-01-01T17:03:17.000Z
2022-03-11T22:02:48.000Z
import cv2, time #TODO: fix ipcam #import urllib2, base64 import numpy as np class ipCamera(object): def __init__(self,url, user = None, password = None): self.url = url auth_encoded = base64.encodestring('%s:%s' % (user, password))[:-1] self.req = urllib2.Request(self.url) self.req.add_header('Authorization', 'Basic %s' % auth_encoded) def get_frame(self): response = urllib2.urlopen(self.req) img_array = np.asarray(bytearray(response.read()), dtype=np.uint8) frame = cv2.imdecode(img_array, 1) return frame class Camera(object): def __init__(self, camera = 0): self.cam = cv2.VideoCapture(camera) self.valid = False try: resp = self.cam.read() self.shape = resp[1].shape self.valid = True except: self.shape = None def get_frame(self): if self.valid: _,frame = self.cam.read() else: frame = np.ones((480,640,3), dtype=np.uint8) col = (0,256,256) cv2.putText(frame, "(Error: Camera not accessible)", (65,220), cv2.FONT_HERSHEY_PLAIN, 2, col) return frame def release(self): self.cam.release()
28.886364
75
0.571204
159
1,271
4.45283
0.471698
0.039548
0.036723
0.048023
0
0
0
0
0
0
0
0.042889
0.302911
1,271
44
76
28.886364
0.756208
0.029111
0
0.117647
0
0
0.045418
0
0
0
0
0.022727
0
1
0.147059
false
0.058824
0.058824
0
0.323529
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
1
0
0
0
0
0
2
e810763b045bb8c295c0e5fd74e31d96d380a402
738
py
Python
src/quo/__main__.py
chouette254/quo
8979afd118e77d3d0f93f9fbe8711efada7158c5
[ "MIT" ]
1
2021-02-15T03:56:00.000Z
2021-02-15T03:56:00.000Z
src/quo/__main__.py
chouette254/quo
8979afd118e77d3d0f93f9fbe8711efada7158c5
[ "MIT" ]
3
2021-02-22T11:49:23.000Z
2021-02-28T06:47:41.000Z
src/quo/__main__.py
viewerdiscretion/quo
fec78ae3b4a6d70501e2119868336c28c590fa50
[ "MIT" ]
null
null
null
from quo.i_o import echo from quo.color.rgb import * from quo.shortcuts import container from quo.widgets import Frame, TextArea container(Frame(TextArea(text=" FEATURES"), title="Quo")) echo(f"* ", fg="red", nl=False) echo(f"Support for ANSI and RGB color models") echo(f"* ", fg="blue", nl=False) echo(f"Support for tabular presentation of data") echo(f"* ", fg="green", nl=False) echo(f"Interactive progressbars") echo(f"* ", fg="magenta", nl=False) echo(f"Code completions") echo(f"* ", fg="yellow", nl=False) echo(f"Nesting of commands") echo(f"* ", fg=teal, nl=False) echo(f"Automatic help page generation") echo(f"* ", fg=aquamarine, nl=False) echo(f"Highlighting") echo(f"* ", fg=khaki, nl=False) echo(f"Lightweight")
28.384615
65
0.696477
119
738
4.310924
0.411765
0.155945
0.109162
0.187135
0.08577
0.08577
0
0
0
0
0
0
0.121951
738
25
66
29.52
0.791667
0
0
0
0
0
0.339213
0
0
0
0
0
0
1
0
true
0
0.190476
0
0.190476
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
2
e833d0845dd8386f55a129abd2f97e426b1f1af2
271
py
Python
Meeting Notes/2020/2020-10-03/cayley_2020_20.py
agryman/sean
11baf69c6eb9308266126bf9c8b1c67c6fd33afc
[ "MIT" ]
1
2020-03-28T18:17:52.000Z
2020-03-28T18:17:52.000Z
Meeting Notes/2020/2020-10-03/cayley_2020_20.py
agryman/sean
11baf69c6eb9308266126bf9c8b1c67c6fd33afc
[ "MIT" ]
1
2022-01-21T21:33:00.000Z
2022-01-21T21:33:00.000Z
Meeting Notes/2020/2020-10-03/cayley_2020_20.py
agryman/sean
11baf69c6eb9308266126bf9c8b1c67c6fd33afc
[ "MIT" ]
null
null
null
"""Cayley 2020, Problem 20""" def is_divisible(n, d): return (n % d) == 0 def solutions(): a = range(1, 101) b = range(101, 206) return [(m, n) for m in a for n in b if is_divisible(3**m + 7**n, 10)] s = solutions() a = len(s) print(f'Answer = {a}')
16.9375
74
0.549815
51
271
2.882353
0.588235
0.14966
0
0
0
0
0
0
0
0
0
0.10396
0.254613
271
16
75
16.9375
0.623762
0.084871
0
0
0
0
0.049383
0
0
0
0
0
0
1
0.222222
false
0
0
0.111111
0.444444
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
2
e8343e7613b5659d1d2eecdb69c73cc951be1b32
258
py
Python
src/7sem/intersect.py
freepvps/hsesamples
adbf35c1c94521d78fb75f72287512a37e49bdc8
[ "MIT" ]
2
2019-10-19T22:29:50.000Z
2019-10-19T22:29:52.000Z
src/7sem/intersect.py
freepvps/hsesamples
adbf35c1c94521d78fb75f72287512a37e49bdc8
[ "MIT" ]
null
null
null
src/7sem/intersect.py
freepvps/hsesamples
adbf35c1c94521d78fb75f72287512a37e49bdc8
[ "MIT" ]
null
null
null
a = [1, 4, 5, 7, 19, 24] b = [4, 6, 7, 18, 24, 134] i = 0 j = 0 ans = [] while i < len(a) and j < len(b): if a[i] == b[j]: ans.append(a[i]) i += 1 j += 1 elif a[i] < b[j]: i += 1 else: j += 1 print(*ans)
14.333333
32
0.348837
52
258
1.730769
0.442308
0.066667
0.066667
0.088889
0
0
0
0
0
0
0
0.162162
0.426357
258
17
33
15.176471
0.445946
0
0
0.266667
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.066667
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
08fa85535ca6db73c40ba084c39495bbf98698ab
271
py
Python
src/ball.py
IamFoka/py_pong
0942223f2e76de04a9f3c49316140a5f7be9a914
[ "MIT" ]
1
2019-08-13T22:41:31.000Z
2019-08-13T22:41:31.000Z
src/ball.py
IamFoka/py_pong
0942223f2e76de04a9f3c49316140a5f7be9a914
[ "MIT" ]
null
null
null
src/ball.py
IamFoka/py_pong
0942223f2e76de04a9f3c49316140a5f7be9a914
[ "MIT" ]
null
null
null
import pygame class Ball(pygame.Rect): def __init__(self, velocity, *args, **kwargs): self.velocity = velocity self.angle = 0 super().__init__(*args, **kwargs) def move(self): self.x += self.velocity self.y += self.angle
22.583333
50
0.586716
33
271
4.575758
0.515152
0.238411
0
0
0
0
0
0
0
0
0
0.005102
0.276753
271
11
51
24.636364
0.765306
0
0
0
0
0
0
0
0
0
0
0
0
1
0.222222
false
0
0.111111
0
0.444444
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
2
1c002d2805935f0f76727d9aeb477442c979fc89
597
py
Python
mlab_api/data/raw_data.py
m-lab/mlab-vis-api
2368d88223148bf73d83c62d285fb458558619e0
[ "MIT" ]
1
2017-09-05T14:52:11.000Z
2017-09-05T14:52:11.000Z
mlab_api/data/raw_data.py
m-lab/mlab-vis-api
2368d88223148bf73d83c62d285fb458558619e0
[ "MIT" ]
9
2017-09-14T15:30:02.000Z
2019-03-05T18:35:20.000Z
mlab_api/data/raw_data.py
m-lab/mlab-vis-api
2368d88223148bf73d83c62d285fb458558619e0
[ "MIT" ]
3
2017-06-01T16:01:37.000Z
2017-10-24T22:44:47.000Z
# -*- coding: utf-8 -*- ''' Data class for accessing data for raw data ''' from mlab_api.data.table_config import get_table_config from mlab_api.data.base_data import Data import mlab_api.data.bigtable_utils as bt class RawData(Data): ''' Pull out some raw data ''' def get_raw_test_results(self): ''' Extract sample raw data. ''' table_name = 'raw_sample' table_config = get_table_config(self.table_configs, None, table_name) results = bt.scan_table(table_config, self.get_pool(), limit=1000) return {"results": results}
25.956522
77
0.666667
84
597
4.488095
0.452381
0.145889
0.087533
0.079576
0
0
0
0
0
0
0
0.010823
0.226131
597
22
78
27.136364
0.805195
0.18928
0
0
0
0
0.038813
0
0
0
0
0
0
1
0.111111
false
0
0.333333
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
1c0ad9fa65e84e87558ca8cc77074cb1df1a954a
1,057
py
Python
solutions/blockedbillboard.py
24TangC/USACO-Bronze
80f0986cb04998b039ba23c7349d25431b4e876b
[ "MIT" ]
null
null
null
solutions/blockedbillboard.py
24TangC/USACO-Bronze
80f0986cb04998b039ba23c7349d25431b4e876b
[ "MIT" ]
null
null
null
solutions/blockedbillboard.py
24TangC/USACO-Bronze
80f0986cb04998b039ba23c7349d25431b4e876b
[ "MIT" ]
null
null
null
bill_board = list(map(int, input().split())) tarp = list(map(int, input().split())) xOverlap = max(min(bill_board[2], tarp[2]) - max(bill_board[0], tarp[0]), 0) yOverlap = max(min(bill_board[3], tarp[3]) - max(bill_board[1], tarp[1]), 0) if xOverlap == 0 or yOverlap == 0: print((bill_board[2] - bill_board[0]) * (bill_board[3] - bill_board[1])) exit() if xOverlap >= bill_board[2] - bill_board[0] and yOverlap >= bill_board[3] - bill_board[1]: print(0) exit() if xOverlap < bill_board[2] - bill_board[0] and yOverlap < bill_board[3] - bill_board[1]: print((bill_board[2] - bill_board[0])*(bill_board[3]-bill_board[1])) elif xOverlap >= bill_board[2] - bill_board[0] and yOverlap < bill_board[3] - bill_board[1]: print(xOverlap*(bill_board[3]-bill_board[1] - yOverlap)) elif yOverlap >= bill_board[3] - bill_board[1] and xOverlap < bill_board[2] - bill_board[0]: print(yOverlap*(bill_board[2] - bill_board[0] - xOverlap)) else: print((bill_board[2] - bill_board[0]) * (bill_board[3] - bill_board[1]))
48.045455
93
0.653737
175
1,057
3.737143
0.131429
0.509174
0.137615
0.171254
0.718654
0.657492
0.59633
0.510703
0.510703
0.510703
0
0.050448
0.156102
1,057
22
94
48.045455
0.682735
0
0
0.277778
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.333333
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
1c10a155f7a78b3cf10c543d702dda7f8847a03b
876
py
Python
src/testers/unittests/test_doc.py
werew/Triton
3f95e54f076308d6885071a21ae71eb2123771d2
[ "Apache-2.0" ]
15
2021-12-08T09:53:35.000Z
2022-03-07T10:13:37.000Z
src/testers/unittests/test_doc.py
igogo-x86/Triton
3225658a138f0beead593bf17103c0cf34500d68
[ "Apache-2.0" ]
null
null
null
src/testers/unittests/test_doc.py
igogo-x86/Triton
3225658a138f0beead593bf17103c0cf34500d68
[ "Apache-2.0" ]
3
2018-03-04T04:34:39.000Z
2019-08-27T16:10:15.000Z
#!/usr/bin/env python2 # coding: utf-8 """Tester for documentation.""" import unittest import doctest import os import glob SNIPPET_DIR = os.path.join(os.path.dirname(__file__), "..", "..", "libtriton", "bindings", "python", "objects") class TestDoc(unittest.TestCase): """Holder to run examples as tests.""" for i, example in enumerate(glob.iglob(os.path.join(SNIPPET_DIR, "*.cpp"))): def _test_snippet(self, example_name=example): """Run example and show stdout in case of fail.""" res = doctest.testfile(example_name, module_relative=False) self.assertEqual(res.failed, 0) # Define an arguments with a default value as default value is capture at # lambda creation so that example_name is not in the closure of the lambda # function. setattr(TestDoc, "test_" + str(i) + "_" + os.path.basename(example), _test_snippet)
32.444444
111
0.694064
122
876
4.852459
0.647541
0.040541
0.033784
0
0
0
0
0
0
0
0
0.004138
0.172374
876
26
112
33.692308
0.812414
0.335616
0
0
0
0
0.080071
0
0
0
0
0
0.090909
1
0.090909
false
0
0.363636
0
0.545455
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
1c10f71a6243b87586311b075f6a12f8f2c0e470
4,585
py
Python
LP2/Exercicio Banco/banco.py
luisxfelipe/Faculdade_Impacta_2semestre
ad6e0bcc22496bb96f56c5ca3d930554dd5302a4
[ "Apache-2.0" ]
null
null
null
LP2/Exercicio Banco/banco.py
luisxfelipe/Faculdade_Impacta_2semestre
ad6e0bcc22496bb96f56c5ca3d930554dd5302a4
[ "Apache-2.0" ]
6
2020-06-05T20:57:34.000Z
2022-03-11T23:47:43.000Z
LP2/Exercicio Banco/banco.py
luisxfelipe/Faculdade_Impacta_2semestre
ad6e0bcc22496bb96f56c5ca3d930554dd5302a4
[ "Apache-2.0" ]
null
null
null
from typing import List, Union Number = Union[int, float] """ 1) Termine os métodos calcula_juros() e saque(valor) da classe ContaPoupanca 2) Termine o método calcula_juros() da classe ContaCorrente 3) Adicione um atributo à classe Conta chamado _operacoes: self._operacoes = [] Ele servirá para guardar um extrato de todos as operações realizadas como saque, depósito, cobrança de juros, depósito de juros, etc. Você poderá registrar as operações assim: def saque(self, valor): self._saldo -= valor self._operacoes.append({'saque': valor}) """ class Cliente: ''' Classe Cliente do Módulo do Banco ''' def __init__(self, nome: str, telefone: int, email: str) -> None: self._nome = nome self._tel = telefone self._email = email def get_nome(self) -> str: ''' Acessor do atributo Nome ''' return self._nome def get_telefone(self) -> int: ''' Acessor do atributo Telefone ''' return self._tel def set_telefone(self, novo_telefone: int) -> None: ''' Mutador do atributo Telefone ''' if not type(novo_telefone) == int: raise TypeError else: self._tel = novo_telefone def get_email(self) -> str: ''' Acessor do atributo E-mail ''' return self._email def set_email(self, novo_email) -> None: ''' Mutador do atributo E-mail ''' if '@' not in novo_email: raise ValueError self._email = novo_email class Conta: ''' Conta básica ''' def __init__(self, clientes: List[Cliente], numero_conta: int, saldo_inicial: Number): self._clientes = clientes self._numero = numero_conta if saldo_inicial < 0: raise ValueError self._saldo = saldo_inicial def get_clientes(self) -> List[Cliente]: ''' Acessor Clientes ''' return self._clientes def get_numero_conta(self) -> int: ''' Acessor Número da Conta ''' return self._numero def get_saldo(self) -> Number: ''' Acessor Saldo ''' return self._saldo def set_saldo(self, novo_saldo: Number) -> None: self._saldo = novo_saldo def deposito(self, valor: Number) -> None: self._saldo += valor def saque(self, valor: Number) -> None: self._saldo -= valor class 
ContaPoupanca(Conta): ''' Conta Poupança ''' def __init__(self, clientes: List[Cliente], numero_conta: int, saldo_inicial: Number, taxa_juros: float): super().__init__(clientes, numero_conta, saldo_inicial) self._juros = taxa_juros def calcula_juros(self) -> None: # calcule os juros recebidos e atualize o saldo pass def saque(self, valor): # caso o saldo não seja suficiente, lance uma # exceção ValueError, senão chame o método saque # da classe pai if valor > saldo_inicial: raise ValueError else: super.saque(valor) class ContaCorrente(Conta): ''' classe conta corrente ''' def __init__(self, clientes, numero_conta, saldo_inicial, juros, limite): super().__init__(clientes, numero_conta, saldo_inicial) self._juros = juros self._limite = limite def calcula_juros(self): # caso minha conta esteja negativa, calcule os # juros devidos e atualize o saldo pass class Banco: def __init__(self, nome): self.nome = nome self._contas = [] def abre_cc(self, clientes, saldo_inicial): cc = ContaCorrente(clientes, len(self._contas) + 1, saldo_inicial, 0.1, 100) self._contas.append(cc) def abre_cp(self, clientes, saldo_inicial): cp = ContaPoupanca(clientes, len(self._contas) + 1, saldo_inicial, 0.01) self._contas.append(cp) def calcula_juros(self): for conta in self._contas: conta.calcula_juros() def mostra_saldos(self): for conta in self._contas: print(f'{conta.get_numero_conta()}: {conta.get_saldo()}') ''' fulano = Cliente('fulano', 9999999, 'fulano@gmail.com') bb = Banco('Meu Banco') bb.abre_cc([fulano], 100) bb.abre_cp([fulano], 300) bb.abre_cc([fulano], 0) bb._contas[2].saque(50) bb.mostra_saldos() bb.calcula_juros() bb.mostra_saldos() '''
24.918478
77
0.592366
539
4,585
4.823748
0.243043
0.055385
0.021154
0.019615
0.216538
0.156154
0.137692
0.112308
0.085385
0.047692
0
0.009755
0.30687
4,585
183
78
25.054645
0.80837
0.110142
0
0.164557
0
0
0.015748
0.008858
0
0
0
0.021858
0
1
0.291139
false
0.025316
0.012658
0
0.443038
0.012658
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
0
0
0
0
2
1c1895717aec592be120c250b74a97740aaf2d31
539
py
Python
tests/test_state.py
xiaoxianma/bitccl
26d59b8a369a5ef8786d56d1f1d06fddf7c91d64
[ "MIT" ]
1
2020-08-02T15:16:05.000Z
2020-08-02T15:16:05.000Z
tests/test_state.py
xiaoxianma/bitccl
26d59b8a369a5ef8786d56d1f1d06fddf7c91d64
[ "MIT" ]
2
2020-07-31T10:54:04.000Z
2020-08-14T11:44:44.000Z
tests/test_state.py
xiaoxianma/bitccl
26d59b8a369a5ef8786d56d1f1d06fddf7c91d64
[ "MIT" ]
1
2020-07-26T17:14:50.000Z
2020-07-26T17:14:50.000Z
from bitccl import run from bitccl.state import config, event_listeners def test_config_singleton(): assert config.get() == {} config.set({"test": 1}) assert config.get() == config.get() == {"test": 1} # consistent assert config.get().test == 1 config.set({}) assert config.get() == {} def test_empty_event_listeners(): event_listeners.clear() assert not event_listeners assert run("add_event_listener('test', lambda:None)") is None # no errors assert not event_listeners # cleanup after run
28.368421
78
0.675325
71
539
4.971831
0.394366
0.1983
0.169972
0.11898
0
0
0
0
0
0
0
0.006881
0.191095
539
18
79
29.944444
0.802752
0.070501
0
0.285714
0
0
0.094567
0.052314
0
0
0
0
0.5
1
0.142857
true
0
0.142857
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
1
0
0
0
0
0
0
2
1c1ec8fc4dece49d5d8a9ce1b4aa1e737787006c
351
py
Python
Cartwheel/lib/Python26/Lib/site-packages/OpenGL/GL/SGIX/async.py
MontyThibault/centre-of-mass-awareness
58778f148e65749e1dfc443043e9fc054ca3ff4d
[ "MIT" ]
null
null
null
Cartwheel/lib/Python26/Lib/site-packages/OpenGL/GL/SGIX/async.py
MontyThibault/centre-of-mass-awareness
58778f148e65749e1dfc443043e9fc054ca3ff4d
[ "MIT" ]
null
null
null
Cartwheel/lib/Python26/Lib/site-packages/OpenGL/GL/SGIX/async.py
MontyThibault/centre-of-mass-awareness
58778f148e65749e1dfc443043e9fc054ca3ff4d
[ "MIT" ]
null
null
null
'''OpenGL extension SGIX.async This module customises the behaviour of the OpenGL.raw.GL.SGIX.async to provide a more Python-friendly API ''' from OpenGL import platform, constants, constant, arrays from OpenGL import extensions, wrapper from OpenGL.GL import glget import ctypes from OpenGL.raw.GL.SGIX.async import * ### END AUTOGENERATED SECTION
29.25
56
0.797721
52
351
5.384615
0.615385
0.142857
0.078571
0.107143
0.142857
0
0
0
0
0
0
0
0.131054
351
12
57
29.25
0.918033
0.071225
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
1
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
2
1c230791a959c93702e5277226b537668e9d6a50
313
py
Python
project euler/q33.py
milkmeat/thomas
fbc72af34267488d931a4885d4e19fce22fea582
[ "MIT" ]
null
null
null
project euler/q33.py
milkmeat/thomas
fbc72af34267488d931a4885d4e19fce22fea582
[ "MIT" ]
null
null
null
project euler/q33.py
milkmeat/thomas
fbc72af34267488d931a4885d4e19fce22fea582
[ "MIT" ]
null
null
null
def c(m,z): z10=z/10 z1=z%10 m10=m/10 m1=m%10 if m1==0: return False if z1==m10: if float(z)/float(m)==float(z10)/float(m1): return True return False for x in range(10,100): for y in range(10,100): if c(x,y): print x,y
20.866667
52
0.463259
56
313
2.589286
0.392857
0.041379
0.124138
0.165517
0
0
0
0
0
0
0
0.169312
0.396166
313
15
53
20.866667
0.597884
0
0
0.133333
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0.066667
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
2
1c31346b9eb7cd50c1cd878990e61732e87c10f5
343
py
Python
wandbox/commands/__init__.py
v1nam/wandbox-cli
8ff88944ad3358dc99dd9bf4ac5c0cac2b98179b
[ "MIT" ]
7
2021-01-21T18:45:29.000Z
2021-01-27T06:54:17.000Z
wandbox/commands/__init__.py
v1nam/wandbox-cli
8ff88944ad3358dc99dd9bf4ac5c0cac2b98179b
[ "MIT" ]
null
null
null
wandbox/commands/__init__.py
v1nam/wandbox-cli
8ff88944ad3358dc99dd9bf4ac5c0cac2b98179b
[ "MIT" ]
null
null
null
from wandbox.commands.base import Base
from wandbox.commands.frombuffer import FromBuffer
from wandbox.commands.fromfile import FromFile
from wandbox.commands.frominput import FromInput

# Maps each sub-command name to the callable that implements it.
commands_dict = dict(
    fromfile=FromFile.runfile,
    frominput=FromInput.askinp,
    frombuffer=FromBuffer.create_buffer,
    base=Base.run,
)
26.384615
50
0.77551
39
343
6.769231
0.358974
0.166667
0.287879
0
0
0
0
0
0
0
0
0
0.134111
343
12
51
28.583333
0.888889
0
0
0
0
0
0.090379
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
1c33cd7567a86a3efce192828b9c73c1ad9e3605
1,008
py
Python
src/kid/core/kglobals.py
KidKaboom/Kid-Maya-2022
0daec301a63438d681cc4c3a5df6d4efdc70daef
[ "MIT" ]
null
null
null
src/kid/core/kglobals.py
KidKaboom/Kid-Maya-2022
0daec301a63438d681cc4c3a5df6d4efdc70daef
[ "MIT" ]
null
null
null
src/kid/core/kglobals.py
KidKaboom/Kid-Maya-2022
0daec301a63438d681cc4c3a5df6d4efdc70daef
[ "MIT" ]
null
null
null
# :coding: utf-8

# Python Modules
import os
import sys

# --- Platform identifiers -------------------------------------------------
PLATFORM = sys.platform
WINDOWS = "win32"
OSX = "darwin"
LINUX = "linux"

# --- Filesystem layout ----------------------------------------------------
# This file lives three directories below the scripts root, so walk up
# three times from its absolute path to find SCRIPTS_PATH.
GLOBALS_PATH = os.path.abspath(__file__)
SCRIPTS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(GLOBALS_PATH)))
PROJECT_PATH = os.path.dirname(SCRIPTS_PATH)
PLUGINS_PATH = os.path.join(PROJECT_PATH, "plug-ins")

# Per-platform shared-library locations.
LIB_PATH = os.path.join(PROJECT_PATH, "lib")
LIB_WINDOWS64_PATH = os.path.join(LIB_PATH, "win64")
LIB_OSX_PATH = os.path.join(LIB_PATH, "osx")
LIB_LINUX_PATH = os.path.join(LIB_PATH, "linux")

# Per-platform executable locations.
BIN_PATH = os.path.join(PROJECT_PATH, "bin")
BIN_WINDOWS64_PATH = os.path.join(BIN_PATH, "win64")
BIN_OSX_PATH = os.path.join(BIN_PATH, "osx")
BIN_LINUX_PATH = os.path.join(BIN_PATH, "linux")

DOCS_PATH = os.path.join(PROJECT_PATH, "docs")
USER_PATH = os.path.expanduser('~')
DATA_PATH = os.path.join(SCRIPTS_PATH, "data")

# User

# Maya
# Object name of Maya's top-level window.
MAYA_WINDOW_NAME = "MayaWindow"

if __name__ == "__main__":
    print(GLOBALS_PATH)
    print(DATA_PATH)
24.585366
78
0.738095
161
1,008
4.310559
0.26087
0.146974
0.216138
0.221902
0.430836
0.381844
0
0
0
0
0
0.012277
0.111111
1,008
40
79
25.2
0.762277
0.054563
0
0
0
0
0.087924
0
0
0
0
0
0
1
0
false
0
0.08
0
0.08
0.08
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
1c40b967e9798ce772656204f94ecfee89c38b0e
2,947
py
Python
djangosaml2/cache.py
chander/djangosaml2
edeef7e529769e5f7f99801a6a78c53ea7067198
[ "Apache-2.0" ]
5,079
2015-01-01T03:39:46.000Z
2022-03-31T07:38:22.000Z
djangosaml2/cache.py
chander/djangosaml2
edeef7e529769e5f7f99801a6a78c53ea7067198
[ "Apache-2.0" ]
1,623
2015-01-01T08:06:24.000Z
2022-03-30T19:48:52.000Z
djangosaml2/cache.py
chander/djangosaml2
edeef7e529769e5f7f99801a6a78c53ea7067198
[ "Apache-2.0" ]
2,033
2015-01-04T07:18:02.000Z
2022-03-28T19:55:47.000Z
# Copyright (C) 2011-2012 Yaco Sistemas (http://www.yaco.es)
# Copyright (C) 2010 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from saml2.cache import Cache


class DjangoSessionCacheAdapter(dict):
    """A dict-like cache of things that are stored in the Django Session."""

    # All adapter entries share this session-key prefix.
    key_prefix = '_saml2'

    def __init__(self, django_session, key_suffix):
        # django_session: the request's session object; key_suffix selects
        # the concrete entry (e.g. '_outstanding_queries') within it.
        self.session = django_session
        self.key = self.key_prefix + key_suffix
        # Seed the dict with whatever is already stored under self.key.
        super(DjangoSessionCacheAdapter, self).__init__(self._get_objects())

    def _get_objects(self):
        # Read the backing session entry; empty dict when absent.
        return self.session.get(self.key, {})

    def _set_objects(self, objects):
        # Overwrite the backing session entry.
        self.session[self.key] = objects

    def sync(self):
        # Changes in inner objects do not cause session invalidation
        # https://docs.djangoproject.com/en/1.9/topics/http/sessions/#when-sessions-are-saved
        # add objects to session
        self._set_objects(dict(self))
        # invalidate session so Django persists the change
        self.session.modified = True


class OutstandingQueriesCache(object):
    """Handles the queries that have been sent to the IdP and have not
    been replied yet.
    """

    def __init__(self, django_session):
        self._db = DjangoSessionCacheAdapter(django_session,
                                             '_outstanding_queries')

    def outstanding_queries(self):
        # Snapshot of session_id -> came_from pairs awaiting a response.
        return self._db._get_objects()

    def set(self, saml2_session_id, came_from):
        self._db[saml2_session_id] = came_from
        self._db.sync()

    def delete(self, saml2_session_id):
        # Sync only when an entry was actually removed.
        if saml2_session_id in self._db:
            del self._db[saml2_session_id]
            self._db.sync()


class IdentityCache(Cache):
    """Handles information about the users that have been successfully
    logged in.

    This information is useful because when a user logs out we must know
    where they came from in order to notify that IdP/AA.

    The current implementation stores this information in the Django session.
    """

    def __init__(self, django_session):
        self._db = DjangoSessionCacheAdapter(django_session, '_identities')
        self._sync = True


class StateCache(DjangoSessionCacheAdapter):
    """Store state information that is needed to associate a logout
    request with its response.
    """

    def __init__(self, django_session):
        super(StateCache, self).__init__(django_session, '_state')
32.744444
93
0.695283
387
2,947
5.105943
0.434109
0.065789
0.035425
0.034413
0.131579
0.097166
0.097166
0.068826
0.068826
0.068826
0
0.010908
0.22226
2,947
89
94
33.11236
0.851222
0.456057
0
0.147059
0
0
0.028197
0
0
0
0
0
0
1
0.294118
false
0
0.029412
0.058824
0.529412
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
1c5832d0289e7c0ad53dc88c255ac0823cc4a6c6
610
py
Python
ia_mri_tools/utils.py
snydek1/ia_mri_tools
525bdcc7f4c03e26d3114abf7da4932685b1e2e0
[ "BSD-3-Clause" ]
1
2019-02-15T16:03:08.000Z
2019-02-15T16:03:08.000Z
ia_mri_tools/utils.py
snydek1/ia_mri_tools
525bdcc7f4c03e26d3114abf7da4932685b1e2e0
[ "BSD-3-Clause" ]
11
2017-11-24T14:53:08.000Z
2018-12-18T16:25:03.000Z
ia_mri_tools/utils.py
snydek1/ia_mri_tools
525bdcc7f4c03e26d3114abf7da4932685b1e2e0
[ "BSD-3-Clause" ]
3
2017-11-24T14:53:47.000Z
2018-03-14T18:36:33.000Z
# Utility functions
import numpy as np


def select(data, mask=None):
    """Flatten image data to a 2D (voxels, channels) array, optionally
    keeping only the voxels selected by *mask*.

    A 3D array is treated as a single channel (column vector); any other
    shape keeps its last axis as the channel axis.  A list of arrays is
    processed recursively and the results stacked column-wise.
    # NOTE(review): mask is presumably a boolean volume matching data's
    # voxel grid — confirm against callers.
    """
    if isinstance(data, list):
        columns = [select(item, mask) for item in data]
        return np.hstack(columns)

    # Collapse the voxel axes into rows; channels (if any) become columns.
    if len(data.shape) == 3:
        flat = data.reshape(-1, 1)
    else:
        flat = data.reshape(-1, data.shape[-1])

    if mask is not None:
        flat = flat[mask.flatten(), :]
    return flat
27.727273
74
0.490164
76
610
3.934211
0.394737
0.120401
0.227425
0.240803
0.441472
0.441472
0.441472
0.441472
0.227425
0.227425
0
0.026247
0.37541
610
21
75
29.047619
0.75853
0.027869
0
0.333333
0
0
0
0
0
0
0
0
0
1
0.055556
false
0
0.055556
0
0.388889
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2