content stringlengths 7 1.05M | fixed_cases stringlengths 1 1.28M |
|---|---|
class Solution(object):
def letterCasePermutation(self, S):
"""
:type S: str
:rtype: List[str]
"""
res = []
self.dfs(S, res, "")
return res
def dfs(self, S, res, word):
if not S:
res.append(word)
return
if S[0].isalpha():
self.dfs(S[1:], res, word + S[0].upper())
self.dfs(S[1:], res, word + S[0].lower())
else:
self.dfs(S[1:], res, word + S[0])
S = "a1b2"
p = Solution()
print(p.letterCasePermutation(S)) | class Solution(object):
def letter_case_permutation(self, S):
"""
:type S: str
:rtype: List[str]
"""
res = []
self.dfs(S, res, '')
return res
def dfs(self, S, res, word):
if not S:
res.append(word)
return
if S[0].isalpha():
self.dfs(S[1:], res, word + S[0].upper())
self.dfs(S[1:], res, word + S[0].lower())
else:
self.dfs(S[1:], res, word + S[0])
s = 'a1b2'
p = solution()
print(p.letterCasePermutation(S)) |
# AND : both need to be TRUE
# has_high_income = True
# has_good_credit = False
# if has_high_income and has_good_credit:
# print('Eligible for loan')
# OR : at least one needs to be TRUE
has_high_income = False
has_good_credit = True
if has_high_income or has_good_credit:
print('Eligible for loan')
# NOT :
has_good_credit = True
has_criminal_record = False
if has_good_credit and not has_criminal_record:
print('Eligible for loan!')
# EXAMPLE
weight = input('What is your weight? ')
weight_type = input('(L)bs or (K)g ')
if weight_type.upper() == 'L':
converted_weight = int(weight) * .45
print(f'Your {weight}lbs equals {converted_weight} kilos.')
if weight_type.upper() == 'K':
converted_weight = int(weight) / .45
print(f'Your {weight}kg equals {converted_weight} pounds.') | has_high_income = False
has_good_credit = True
if has_high_income or has_good_credit:
print('Eligible for loan')
has_good_credit = True
has_criminal_record = False
if has_good_credit and (not has_criminal_record):
print('Eligible for loan!')
weight = input('What is your weight? ')
weight_type = input('(L)bs or (K)g ')
if weight_type.upper() == 'L':
converted_weight = int(weight) * 0.45
print(f'Your {weight}lbs equals {converted_weight} kilos.')
if weight_type.upper() == 'K':
converted_weight = int(weight) / 0.45
print(f'Your {weight}kg equals {converted_weight} pounds.') |
# from testfixtures import TempDirectory
# import pytest
"""
def test_reset_no_flags_should_delete_all_configs(tmpdir):
mock_dir = tmpdir.mkdir("temp")
pypcmgr_config = mock_dir.join(".pypcmgrconfig")
hook_config = mock_dir.join(".pre-commit-config.yaml")
assert len(mock_dir.listdir()) == ["temp"]
def test_create_file(tmpdir):
p = tmpdir.mkdir("sub").join("hello.txt")
p.write("content")
assert p.read() == "content"
assert len(tmpdir.listdir()) == 1
"""
| """
def test_reset_no_flags_should_delete_all_configs(tmpdir):
mock_dir = tmpdir.mkdir("temp")
pypcmgr_config = mock_dir.join(".pypcmgrconfig")
hook_config = mock_dir.join(".pre-commit-config.yaml")
assert len(mock_dir.listdir()) == ["temp"]
def test_create_file(tmpdir):
p = tmpdir.mkdir("sub").join("hello.txt")
p.write("content")
assert p.read() == "content"
assert len(tmpdir.listdir()) == 1
""" |
PGS_TOKEN = 'C888EE7F420841CF92D0B0063EDDFC7D'
PGS_EMAIL = 'pagseguro@panfleteria.com.br'
# from datetime import datetime
# from datetime import date
# from datetime import timedelta
# dates = [d0]
# dates_two = list()
# def date_paginator(x, y):
# print x, y
# if pages == 1 and pages_mods == 0:
# _date = d0 + timedelta(days=30)
# date_paginator(d0, _date)
# else:
# for i in range(pages):
# _date = d0 + timedelta(days=30 * (i + 1))
# dates.append(_date)
# if pages_mods > 0 and pages_mods < 30:
# new_date = dates[-1:][0] + timedelta(days=pages_mods)
# dates.append(new_date)
# if dates:
# for i in range(len(dates) - 1):
# date_paginator(dates[i], dates[i + 1])
# class DateRangePagination:
# """docstring for DateRangePagination"""
# def __init__(self, initial_date):
# self.initial_date = datetime.strptime(initial_date, "%Y-%m-%d").date()
# self.dates = [self.initial_date]
# self.date_limit = datetime.now().date()
# def get_ranges(self):
# print self.initial_date
# def set_ranges():
# d0 = date(2008, 8, 18)
# d1 = date(2008, 11, 18)
# delta = d1 - d0
# pages = delta.days / 30
# pages_mods = delta.days % 30
# pass
# def get_days(self,):
# pass | pgs_token = 'C888EE7F420841CF92D0B0063EDDFC7D'
pgs_email = 'pagseguro@panfleteria.com.br' |
class Presenter:
def __init__(self):
pass
@staticmethod
def date_slashes_as_dashes(date_str):
if date_str.find("/") == -1:
return date_str
else:
mdy = date_str.split("/")
return mdy[2] + "-" + mdy[0] + "-" + mdy[1]
@staticmethod
def value_without_symbols(value_str):
return value_str.replace("$", "").replace(",", "").replace(")", "").replace("(", "-")
@staticmethod
def decimal_as_percentage(percent_fraction):
return str(round(100 * percent_fraction, 1)) + "%"
| class Presenter:
def __init__(self):
pass
@staticmethod
def date_slashes_as_dashes(date_str):
if date_str.find('/') == -1:
return date_str
else:
mdy = date_str.split('/')
return mdy[2] + '-' + mdy[0] + '-' + mdy[1]
@staticmethod
def value_without_symbols(value_str):
return value_str.replace('$', '').replace(',', '').replace(')', '').replace('(', '-')
@staticmethod
def decimal_as_percentage(percent_fraction):
return str(round(100 * percent_fraction, 1)) + '%' |
# see: numpy.core.overrides
def set_module(module):
def decorator(func):
if module is not None:
func.__module__ = module
return func
return decorator
| def set_module(module):
def decorator(func):
if module is not None:
func.__module__ = module
return func
return decorator |
"""
Given a string s, return the longest palindromic substring in s.
Example 1:
Input: s = "babad"
Output: "bab"
Explanation: "aba" is also a valid answer.
Example 2:
Input: s = "cbbd"
Output: "bb"
Constraints:
1 <= s.length <= 1000
s consist of only digits and English letters.
"""
class Solution:
def longestPalindrome(self, s: str) -> str:
ans = [0, 0]
# odd
for i in range(len(s)):
left = i - 1
right = i + 1
while left >= 0 and right < len(s):
if s[left] == s[right]:
if right - left > ans[1] - ans[0]:
ans = [left, right]
left -= 1
right += 1
else:
break
# even
for i in range(len(s)):
left = i
right = i + 1
while left >= 0 and right < len(s):
if s[left] == s[right]:
if right - left > ans[1] - ans[0]:
ans = [left, right]
left -= 1
right += 1
else:
break
return s[ans[0]: ans[1] + 1]
| """
Given a string s, return the longest palindromic substring in s.
Example 1:
Input: s = "babad"
Output: "bab"
Explanation: "aba" is also a valid answer.
Example 2:
Input: s = "cbbd"
Output: "bb"
Constraints:
1 <= s.length <= 1000
s consist of only digits and English letters.
"""
class Solution:
def longest_palindrome(self, s: str) -> str:
ans = [0, 0]
for i in range(len(s)):
left = i - 1
right = i + 1
while left >= 0 and right < len(s):
if s[left] == s[right]:
if right - left > ans[1] - ans[0]:
ans = [left, right]
left -= 1
right += 1
else:
break
for i in range(len(s)):
left = i
right = i + 1
while left >= 0 and right < len(s):
if s[left] == s[right]:
if right - left > ans[1] - ans[0]:
ans = [left, right]
left -= 1
right += 1
else:
break
return s[ans[0]:ans[1] + 1] |
class Movie():
"""Class that represents a Movie"""
def __init__(self, movie_title, storyline, poster_image,
trailer_youtube, duration):
"""Inits all data of a movie
Args:
movie_title(str): Movie title
storyline(str): Movie storyline
poster_image(str): Movie poster image
trailer_youtube(str): Movie youtube url
duration(str): Duration of the movie
"""
self.title = movie_title
self.storyline = storyline
self.poster_image_url = poster_image
self.trailer_youtube_url = trailer_youtube
self.duration = duration
| class Movie:
"""Class that represents a Movie"""
def __init__(self, movie_title, storyline, poster_image, trailer_youtube, duration):
"""Inits all data of a movie
Args:
movie_title(str): Movie title
storyline(str): Movie storyline
poster_image(str): Movie poster image
trailer_youtube(str): Movie youtube url
duration(str): Duration of the movie
"""
self.title = movie_title
self.storyline = storyline
self.poster_image_url = poster_image
self.trailer_youtube_url = trailer_youtube
self.duration = duration |
#
# PySNMP MIB module Unisphere-Data-DS3-CONF (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Unisphere-Data-DS3-CONF
# Produced by pysmi-0.3.4 at Mon Apr 29 21:23:40 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint")
dsx3FarEndConfigEntry, dsx3TotalEntry, dsx3FarEndCurrentEntry, dsx3SendCode, dsx3IntervalEntry, dsx3CurrentEntry, dsx3ConfigEntry, dsx3FracEntry, dsx3FarEndTotalEntry, dsx3FarEndIntervalEntry = mibBuilder.importSymbols("RFC1407-MIB", "dsx3FarEndConfigEntry", "dsx3TotalEntry", "dsx3FarEndCurrentEntry", "dsx3SendCode", "dsx3IntervalEntry", "dsx3CurrentEntry", "dsx3ConfigEntry", "dsx3FracEntry", "dsx3FarEndTotalEntry", "dsx3FarEndIntervalEntry")
NotificationGroup, AgentCapabilities, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "AgentCapabilities", "ModuleCompliance")
iso, ObjectIdentity, Gauge32, Counter32, Unsigned32, IpAddress, Bits, Integer32, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, MibIdentifier, TimeTicks, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "ObjectIdentity", "Gauge32", "Counter32", "Unsigned32", "IpAddress", "Bits", "Integer32", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "MibIdentifier", "TimeTicks", "NotificationType")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
usDataAgents, = mibBuilder.importSymbols("Unisphere-Data-Agents", "usDataAgents")
usdDs3Group2, usdDs3Group, usdDs3Group4, usdDs3FarEndGroup, usdDs3Group5, usdDs3Group3 = mibBuilder.importSymbols("Unisphere-Data-DS3-MIB", "usdDs3Group2", "usdDs3Group", "usdDs3Group4", "usdDs3FarEndGroup", "usdDs3Group5", "usdDs3Group3")
usdDs3Agent = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 5, 2, 11))
usdDs3Agent.setRevisions(('2002-08-27 18:48', '2001-04-18 19:41',))
if mibBuilder.loadTexts: usdDs3Agent.setLastUpdated('200208271848Z')
if mibBuilder.loadTexts: usdDs3Agent.setOrganization('Unisphere Networks, Inc.')
usdDs3AgentV1 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 11, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdDs3AgentV1 = usdDs3AgentV1.setProductRelease('Version 1 of the DS3 component of the Unisphere Routing Switch SNMP\n agent. This version of the DS3 component was supported in the Unisphere\n RX 1.0 system release.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdDs3AgentV1 = usdDs3AgentV1.setStatus('obsolete')
usdDs3AgentV2 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 11, 2))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdDs3AgentV2 = usdDs3AgentV2.setProductRelease('Version 2 of the DS3 component of the Unisphere Routing Switch SNMP\n agent. This version of the DS3 component was supported in the Unisphere\n RX 1.1 thru RX 2.5 system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdDs3AgentV2 = usdDs3AgentV2.setStatus('obsolete')
usdDs3AgentV3 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 11, 3))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdDs3AgentV3 = usdDs3AgentV3.setProductRelease('Version 3 of the DS3 component of the Unisphere Routing Switch SNMP\n agent. This version of the DS3 component was supported in the Unisphere\n RX 2.6 thru RX 2.9 system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdDs3AgentV3 = usdDs3AgentV3.setStatus('obsolete')
usdDs3AgentV4 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 11, 4))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdDs3AgentV4 = usdDs3AgentV4.setProductRelease('Version 4 of the DS3 component of the Unisphere Routing Switch SNMP\n agent. This version of the DS3 component was supported in the Unisphere\n RX 3.x system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdDs3AgentV4 = usdDs3AgentV4.setStatus('obsolete')
usdDs3AgentV5 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 11, 5))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdDs3AgentV5 = usdDs3AgentV5.setProductRelease('Version 5 of the DS3 component of the Unisphere Routing Switch SNMP\n agent. This version of the DS3 component is supported in the Unisphere\n RX 4.0 and subsequent system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdDs3AgentV5 = usdDs3AgentV5.setStatus('current')
mibBuilder.exportSymbols("Unisphere-Data-DS3-CONF", PYSNMP_MODULE_ID=usdDs3Agent, usdDs3AgentV4=usdDs3AgentV4, usdDs3AgentV3=usdDs3AgentV3, usdDs3Agent=usdDs3Agent, usdDs3AgentV2=usdDs3AgentV2, usdDs3AgentV5=usdDs3AgentV5, usdDs3AgentV1=usdDs3AgentV1)
| (integer, object_identifier, octet_string) = mibBuilder.importSymbols('ASN1', 'Integer', 'ObjectIdentifier', 'OctetString')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(constraints_union, value_range_constraint, constraints_intersection, value_size_constraint, single_value_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ConstraintsUnion', 'ValueRangeConstraint', 'ConstraintsIntersection', 'ValueSizeConstraint', 'SingleValueConstraint')
(dsx3_far_end_config_entry, dsx3_total_entry, dsx3_far_end_current_entry, dsx3_send_code, dsx3_interval_entry, dsx3_current_entry, dsx3_config_entry, dsx3_frac_entry, dsx3_far_end_total_entry, dsx3_far_end_interval_entry) = mibBuilder.importSymbols('RFC1407-MIB', 'dsx3FarEndConfigEntry', 'dsx3TotalEntry', 'dsx3FarEndCurrentEntry', 'dsx3SendCode', 'dsx3IntervalEntry', 'dsx3CurrentEntry', 'dsx3ConfigEntry', 'dsx3FracEntry', 'dsx3FarEndTotalEntry', 'dsx3FarEndIntervalEntry')
(notification_group, agent_capabilities, module_compliance) = mibBuilder.importSymbols('SNMPv2-CONF', 'NotificationGroup', 'AgentCapabilities', 'ModuleCompliance')
(iso, object_identity, gauge32, counter32, unsigned32, ip_address, bits, integer32, module_identity, mib_scalar, mib_table, mib_table_row, mib_table_column, counter64, mib_identifier, time_ticks, notification_type) = mibBuilder.importSymbols('SNMPv2-SMI', 'iso', 'ObjectIdentity', 'Gauge32', 'Counter32', 'Unsigned32', 'IpAddress', 'Bits', 'Integer32', 'ModuleIdentity', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'Counter64', 'MibIdentifier', 'TimeTicks', 'NotificationType')
(textual_convention, display_string) = mibBuilder.importSymbols('SNMPv2-TC', 'TextualConvention', 'DisplayString')
(us_data_agents,) = mibBuilder.importSymbols('Unisphere-Data-Agents', 'usDataAgents')
(usd_ds3_group2, usd_ds3_group, usd_ds3_group4, usd_ds3_far_end_group, usd_ds3_group5, usd_ds3_group3) = mibBuilder.importSymbols('Unisphere-Data-DS3-MIB', 'usdDs3Group2', 'usdDs3Group', 'usdDs3Group4', 'usdDs3FarEndGroup', 'usdDs3Group5', 'usdDs3Group3')
usd_ds3_agent = module_identity((1, 3, 6, 1, 4, 1, 4874, 5, 2, 11))
usdDs3Agent.setRevisions(('2002-08-27 18:48', '2001-04-18 19:41'))
if mibBuilder.loadTexts:
usdDs3Agent.setLastUpdated('200208271848Z')
if mibBuilder.loadTexts:
usdDs3Agent.setOrganization('Unisphere Networks, Inc.')
usd_ds3_agent_v1 = agent_capabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 11, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usd_ds3_agent_v1 = usdDs3AgentV1.setProductRelease('Version 1 of the DS3 component of the Unisphere Routing Switch SNMP\n agent. This version of the DS3 component was supported in the Unisphere\n RX 1.0 system release.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usd_ds3_agent_v1 = usdDs3AgentV1.setStatus('obsolete')
usd_ds3_agent_v2 = agent_capabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 11, 2))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usd_ds3_agent_v2 = usdDs3AgentV2.setProductRelease('Version 2 of the DS3 component of the Unisphere Routing Switch SNMP\n agent. This version of the DS3 component was supported in the Unisphere\n RX 1.1 thru RX 2.5 system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usd_ds3_agent_v2 = usdDs3AgentV2.setStatus('obsolete')
usd_ds3_agent_v3 = agent_capabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 11, 3))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usd_ds3_agent_v3 = usdDs3AgentV3.setProductRelease('Version 3 of the DS3 component of the Unisphere Routing Switch SNMP\n agent. This version of the DS3 component was supported in the Unisphere\n RX 2.6 thru RX 2.9 system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usd_ds3_agent_v3 = usdDs3AgentV3.setStatus('obsolete')
usd_ds3_agent_v4 = agent_capabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 11, 4))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usd_ds3_agent_v4 = usdDs3AgentV4.setProductRelease('Version 4 of the DS3 component of the Unisphere Routing Switch SNMP\n agent. This version of the DS3 component was supported in the Unisphere\n RX 3.x system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usd_ds3_agent_v4 = usdDs3AgentV4.setStatus('obsolete')
usd_ds3_agent_v5 = agent_capabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 11, 5))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usd_ds3_agent_v5 = usdDs3AgentV5.setProductRelease('Version 5 of the DS3 component of the Unisphere Routing Switch SNMP\n agent. This version of the DS3 component is supported in the Unisphere\n RX 4.0 and subsequent system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usd_ds3_agent_v5 = usdDs3AgentV5.setStatus('current')
mibBuilder.exportSymbols('Unisphere-Data-DS3-CONF', PYSNMP_MODULE_ID=usdDs3Agent, usdDs3AgentV4=usdDs3AgentV4, usdDs3AgentV3=usdDs3AgentV3, usdDs3Agent=usdDs3Agent, usdDs3AgentV2=usdDs3AgentV2, usdDs3AgentV5=usdDs3AgentV5, usdDs3AgentV1=usdDs3AgentV1) |
while True:
try:
a = ord(input()[:1])
except EOFError:
break
b = ord(input()[:1])
print((b-a+26) % 26)
| while True:
try:
a = ord(input()[:1])
except EOFError:
break
b = ord(input()[:1])
print((b - a + 26) % 26) |
# program to count the numbers of occurrences of characters in the given string and store them in a dictionary
string = input("Enter a string: ")
d = {}
for i in string:
if i not in d.keys():
d[i] = 1
else:
d[i] += 1
print(d)
| string = input('Enter a string: ')
d = {}
for i in string:
if i not in d.keys():
d[i] = 1
else:
d[i] += 1
print(d) |
class Config(object):
'''
Each environment will be a class that inherits from the main class config
Configurations that will be the same across all environment will go into config,
while configuration that are specific to an environment will go into the relevant environment below
'''
SECRET_KEY = "My$uper$ecretKey4Now"
SERVER_TIME_ZONE = "Africa/Johannesburg"
DEFAULT_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
class Production(Config):
FLASK_ENV = 'production'
PRODUCTION = True
class Testing(Config):
FLASK_ENV = 'testing'
TESTING = True
class Development(Config):
FLASK_ENV = 'development'
DEBUG = True
| class Config(object):
"""
Each environment will be a class that inherits from the main class config
Configurations that will be the same across all environment will go into config,
while configuration that are specific to an environment will go into the relevant environment below
"""
secret_key = 'My$uper$ecretKey4Now'
server_time_zone = 'Africa/Johannesburg'
default_datetime_format = '%Y-%m-%d %H:%M:%S'
class Production(Config):
flask_env = 'production'
production = True
class Testing(Config):
flask_env = 'testing'
testing = True
class Development(Config):
flask_env = 'development'
debug = True |
collected_data = {
"form_id": "2",
"name": "Test",
"client_id": "11",
"form_element_templates": [
{"name": "Full name", "value": "john doe"},
{"name": "Gender", "value": "male"},
{"name": "Country", "value": "morocco"},
],
}
form_element_field = {
"id": "2",
"name": "test",
"form_element_fields": [
{
"id": "101",
"form_element_template_id": "7",
"form_id": "2",
"form_element_template": {
"id": "7",
"name": "Gender",
"form_element_type_id": "19",
"form_element_type": {
"id": "19",
"name": "checkbox",
},
"form_element_list_values": [
{
"id": "12",
"form_element_template_id": "7",
"filled_form_id": "null",
"value": "male",
},
{
"id": "13",
"form_element_template_id": "7",
"filled_form_id": "null",
"value": "female",
},
],
},
},
{
"id": "102",
"form_element_template_id": "8",
"form_id": "2",
"form_element_template": {
"id": "8",
"name": "Full name",
"form_element_type_id": "20",
"form_element_type": {
"id": "20",
"name": "text",
"form_element_template_id": "8",
},
"form_element_list_values": [],
},
},
],
}
filled_form_for_update = [
{
"id": "87",
"form_element_template_id": "7",
"client_id": "11",
"value": "null",
"selected_list_values": [
{
"id": "300",
"form_element_list_values_id": "12",
"filled_form_id": "87",
"form_element_list_value": {
"id": "12",
"form_element_template_id": "7",
"value": "male",
},
}
],
"form_element_template": {
"id": "7",
"name": "Gender",
"form_element_type_id": "19",
"form_element_type": {
"id": "19",
"name": "checkbox",
},
"form_element_list_values": [
{
"id": "12",
"form_element_template_id": "7",
"filled_form_id": "null",
"value": "male",
},
{
"id": "13",
"form_element_template_id": "7",
"filled_form_id": "null",
"value": "female",
},
],
},
},
{
"id": "82",
"form_element_template_id": "8",
"client_id": "11",
"value": "jone doe",
"selected_list_values": [],
"form_element_template": {
"id": "8",
"name": "Full name",
"form_element_type_id": "20",
"form_element_type": {
"id": "20",
"name": "text",
},
"form_element_list_values": [],
},
},
]
| collected_data = {'form_id': '2', 'name': 'Test', 'client_id': '11', 'form_element_templates': [{'name': 'Full name', 'value': 'john doe'}, {'name': 'Gender', 'value': 'male'}, {'name': 'Country', 'value': 'morocco'}]}
form_element_field = {'id': '2', 'name': 'test', 'form_element_fields': [{'id': '101', 'form_element_template_id': '7', 'form_id': '2', 'form_element_template': {'id': '7', 'name': 'Gender', 'form_element_type_id': '19', 'form_element_type': {'id': '19', 'name': 'checkbox'}, 'form_element_list_values': [{'id': '12', 'form_element_template_id': '7', 'filled_form_id': 'null', 'value': 'male'}, {'id': '13', 'form_element_template_id': '7', 'filled_form_id': 'null', 'value': 'female'}]}}, {'id': '102', 'form_element_template_id': '8', 'form_id': '2', 'form_element_template': {'id': '8', 'name': 'Full name', 'form_element_type_id': '20', 'form_element_type': {'id': '20', 'name': 'text', 'form_element_template_id': '8'}, 'form_element_list_values': []}}]}
filled_form_for_update = [{'id': '87', 'form_element_template_id': '7', 'client_id': '11', 'value': 'null', 'selected_list_values': [{'id': '300', 'form_element_list_values_id': '12', 'filled_form_id': '87', 'form_element_list_value': {'id': '12', 'form_element_template_id': '7', 'value': 'male'}}], 'form_element_template': {'id': '7', 'name': 'Gender', 'form_element_type_id': '19', 'form_element_type': {'id': '19', 'name': 'checkbox'}, 'form_element_list_values': [{'id': '12', 'form_element_template_id': '7', 'filled_form_id': 'null', 'value': 'male'}, {'id': '13', 'form_element_template_id': '7', 'filled_form_id': 'null', 'value': 'female'}]}}, {'id': '82', 'form_element_template_id': '8', 'client_id': '11', 'value': 'jone doe', 'selected_list_values': [], 'form_element_template': {'id': '8', 'name': 'Full name', 'form_element_type_id': '20', 'form_element_type': {'id': '20', 'name': 'text'}, 'form_element_list_values': []}}] |
"""
List of constants used in the project
"""
SERVER_PORT = 8082
MAX_KEY_LENGTH = 21
| """
List of constants used in the project
"""
server_port = 8082
max_key_length = 21 |
"""
It should receive a sequence of n numbers.
If no argument is provided, return sum of numbers 1..100.
Look closely to the type of the function's default argument ...
"""
def sum_numbers(numbers=None):
if numbers is None:
return sum(range(1, 101))
else:
return sum(numbers)
if __name__ == '__main__':
numbers = 1, 2, 3
x = sum_numbers(numbers)
print(x)
| """
It should receive a sequence of n numbers.
If no argument is provided, return sum of numbers 1..100.
Look closely to the type of the function's default argument ...
"""
def sum_numbers(numbers=None):
if numbers is None:
return sum(range(1, 101))
else:
return sum(numbers)
if __name__ == '__main__':
numbers = (1, 2, 3)
x = sum_numbers(numbers)
print(x) |
"""
15 / 15 test cases passed.
Runtime: 40 ms
Memory Usage: 15 MB
"""
class Solution:
def numberOfArithmeticSlices(self, nums: List[int]) -> int:
n = len(nums)
if n < 3:
return 0
dp = [0] * n
for i in range(1, n - 1):
if nums[i] - nums[i - 1] == nums[i + 1] - nums[i]:
dp[i + 1] = dp[i] + 1
return sum(dp)
| """
15 / 15 test cases passed.
Runtime: 40 ms
Memory Usage: 15 MB
"""
class Solution:
def number_of_arithmetic_slices(self, nums: List[int]) -> int:
n = len(nums)
if n < 3:
return 0
dp = [0] * n
for i in range(1, n - 1):
if nums[i] - nums[i - 1] == nums[i + 1] - nums[i]:
dp[i + 1] = dp[i] + 1
return sum(dp) |
#!/usr/bin pypy3
# ------------------------ Simplest form of learning ------------------------
weight = .5
input = .5
target = .8
step_amount = .001 # <= How much to move weights each iteration
for iteration in range(1101) : # <= repeat the learning many times so that
# error can keep getting smaller
prediction = input * weight
error = (prediction - target) ** 2
print(f"Error: {round(error, 3)} prediction: {round(prediction, 3)}")
# Try up
up_prediction = input * (weight + step_amount)
up_error = (target - up_prediction) ** 2
# Try down
down_prediction = input * (weight - step_amount)
down_error = (target - down_prediction) ** 2
if (down_error < up_error) :
weight = weight - step_amount
if (down_error > up_error) :
weight += step_amount
# $ pypy3 error-update-advance.py
# Error: 0.301 Prediction:0.25
# Error: 0.301 Prediction:0.250
# ...........................
# Error: 0.0 prediction: 0.798
# Error: 0.0 prediction: 0.799
# Error: 0.0 prediction: 0.799
# Error: 0.0 prediction: 0.8 | weight = 0.5
input = 0.5
target = 0.8
step_amount = 0.001
for iteration in range(1101):
prediction = input * weight
error = (prediction - target) ** 2
print(f'Error: {round(error, 3)} prediction: {round(prediction, 3)}')
up_prediction = input * (weight + step_amount)
up_error = (target - up_prediction) ** 2
down_prediction = input * (weight - step_amount)
down_error = (target - down_prediction) ** 2
if down_error < up_error:
weight = weight - step_amount
if down_error > up_error:
weight += step_amount |
# AWS GENERAL SETTINGS:
AWS_REGION = "us-east-1"
AWS_PROFILE = "default" # The same profile used by your AWS CLI installation
SSH_KEY_NAME = "key.pem" # Expected to be in ~/.ssh
AWS_BUCKET = "dummybucket"
# EC2 AND ECS INFORMATION:
ECS_CLUSTER = "default_cluster"
# SQS QUEUE INFORMATION:
SQS_DEAD_LETTER_QUEUE = "arn:aws:sqs:us-east-1:XXXXXXXXXXXX:DeadMessages"
# SNS INFORMATION:
MONITOR_SNS = "arn:aws:sns:us-east-1:XXXXXXXXXXXX:Monitor"
| aws_region = 'us-east-1'
aws_profile = 'default'
ssh_key_name = 'key.pem'
aws_bucket = 'dummybucket'
ecs_cluster = 'default_cluster'
sqs_dead_letter_queue = 'arn:aws:sqs:us-east-1:XXXXXXXXXXXX:DeadMessages'
monitor_sns = 'arn:aws:sns:us-east-1:XXXXXXXXXXXX:Monitor' |
inp = open('input_d18.txt').read()
xl = len(inp)-1
yl = 400000
tiles = [[False for _ in range(xl)] for _ in range(yl)] # True if trap
def get_tile(x,y):
if x < 0 or x >= xl or y < 0 or y >= yl:
return False
return tiles[y][x]
def is_trap(x,y):
left,center,right = get_tile(x-1,y-1),get_tile(x,y-1),get_tile(x+1,y-1)
if left and center and not right:
return True
if center and right and not left:
return True
if left and not right and not center:
return True
if right and not left and not center:
return True
return False
count = 0
for x in range(xl):
tiles[0][x] = (True if inp[x] == '^' else False)
if inp[x] == '.':
count += 1
for y in range(1,yl):
for x in range(xl):
tiles[y][x] = is_trap(x,y)
if not tiles[y][x]:
count += 1
# for y in range(yl):
# for x in range(xl):
# if not tiles[y][x]:
# count += 1
print(count)
| inp = open('input_d18.txt').read()
xl = len(inp) - 1
yl = 400000
tiles = [[False for _ in range(xl)] for _ in range(yl)]
def get_tile(x, y):
if x < 0 or x >= xl or y < 0 or (y >= yl):
return False
return tiles[y][x]
def is_trap(x, y):
(left, center, right) = (get_tile(x - 1, y - 1), get_tile(x, y - 1), get_tile(x + 1, y - 1))
if left and center and (not right):
return True
if center and right and (not left):
return True
if left and (not right) and (not center):
return True
if right and (not left) and (not center):
return True
return False
count = 0
for x in range(xl):
tiles[0][x] = True if inp[x] == '^' else False
if inp[x] == '.':
count += 1
for y in range(1, yl):
for x in range(xl):
tiles[y][x] = is_trap(x, y)
if not tiles[y][x]:
count += 1
print(count) |
#
# PySNMP MIB module MERU-CONFIG-STATICSTATION-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/MERU-CONFIG-STATICSTATION-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:01:15 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
Ipv6Address, = mibBuilder.importSymbols("IPV6-TC", "Ipv6Address")
mwConfiguration, = mibBuilder.importSymbols("MERU-SMI", "mwConfiguration")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibIdentifier, enterprises, TimeTicks, Integer32, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Counter64, Counter32, ObjectIdentity, iso, Bits, Gauge32, Unsigned32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "enterprises", "TimeTicks", "Integer32", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Counter64", "Counter32", "ObjectIdentity", "iso", "Bits", "Gauge32", "Unsigned32", "NotificationType")
TimeStamp, TextualConvention, TruthValue, DateAndTime, RowStatus, MacAddress, TimeInterval, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TimeStamp", "TextualConvention", "TruthValue", "DateAndTime", "RowStatus", "MacAddress", "TimeInterval", "DisplayString")
mwConfigStaticStation = ModuleIdentity((1, 3, 6, 1, 4, 1, 15983, 1, 1, 4, 16))
if mibBuilder.loadTexts: mwConfigStaticStation.setLastUpdated('200506050000Z')
if mibBuilder.loadTexts: mwConfigStaticStation.setOrganization('Meru Networks')
mwStaticStationTable = MibTable((1, 3, 6, 1, 4, 1, 15983, 1, 1, 4, 16, 1), )
if mibBuilder.loadTexts: mwStaticStationTable.setStatus('current')
mwStaticStationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 15983, 1, 1, 4, 16, 1, 1), ).setIndexNames((0, "MERU-CONFIG-STATICSTATION-MIB", "mwStaticStationTableIndex"))
if mibBuilder.loadTexts: mwStaticStationEntry.setStatus('current')
mwStaticStationTableIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 15983, 1, 1, 4, 16, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: mwStaticStationTableIndex.setStatus('current')
mwStaticStationIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 15983, 1, 1, 4, 16, 1, 1, 2), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mwStaticStationIpAddress.setStatus('current')
mwStaticStationMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 15983, 1, 1, 4, 16, 1, 1, 3), MacAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mwStaticStationMacAddress.setStatus('current')
mwStaticStationRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 15983, 1, 1, 4, 16, 1, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mwStaticStationRowStatus.setStatus('current')
mibBuilder.exportSymbols("MERU-CONFIG-STATICSTATION-MIB", mwStaticStationEntry=mwStaticStationEntry, mwStaticStationTableIndex=mwStaticStationTableIndex, mwStaticStationRowStatus=mwStaticStationRowStatus, mwConfigStaticStation=mwConfigStaticStation, mwStaticStationMacAddress=mwStaticStationMacAddress, PYSNMP_MODULE_ID=mwConfigStaticStation, mwStaticStationTable=mwStaticStationTable, mwStaticStationIpAddress=mwStaticStationIpAddress)
| (integer, object_identifier, octet_string) = mibBuilder.importSymbols('ASN1', 'Integer', 'ObjectIdentifier', 'OctetString')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(value_size_constraint, constraints_union, constraints_intersection, value_range_constraint, single_value_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ValueSizeConstraint', 'ConstraintsUnion', 'ConstraintsIntersection', 'ValueRangeConstraint', 'SingleValueConstraint')
(ipv6_address,) = mibBuilder.importSymbols('IPV6-TC', 'Ipv6Address')
(mw_configuration,) = mibBuilder.importSymbols('MERU-SMI', 'mwConfiguration')
(module_compliance, notification_group) = mibBuilder.importSymbols('SNMPv2-CONF', 'ModuleCompliance', 'NotificationGroup')
(mib_identifier, enterprises, time_ticks, integer32, module_identity, mib_scalar, mib_table, mib_table_row, mib_table_column, ip_address, counter64, counter32, object_identity, iso, bits, gauge32, unsigned32, notification_type) = mibBuilder.importSymbols('SNMPv2-SMI', 'MibIdentifier', 'enterprises', 'TimeTicks', 'Integer32', 'ModuleIdentity', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'IpAddress', 'Counter64', 'Counter32', 'ObjectIdentity', 'iso', 'Bits', 'Gauge32', 'Unsigned32', 'NotificationType')
(time_stamp, textual_convention, truth_value, date_and_time, row_status, mac_address, time_interval, display_string) = mibBuilder.importSymbols('SNMPv2-TC', 'TimeStamp', 'TextualConvention', 'TruthValue', 'DateAndTime', 'RowStatus', 'MacAddress', 'TimeInterval', 'DisplayString')
mw_config_static_station = module_identity((1, 3, 6, 1, 4, 1, 15983, 1, 1, 4, 16))
if mibBuilder.loadTexts:
mwConfigStaticStation.setLastUpdated('200506050000Z')
if mibBuilder.loadTexts:
mwConfigStaticStation.setOrganization('Meru Networks')
mw_static_station_table = mib_table((1, 3, 6, 1, 4, 1, 15983, 1, 1, 4, 16, 1))
if mibBuilder.loadTexts:
mwStaticStationTable.setStatus('current')
mw_static_station_entry = mib_table_row((1, 3, 6, 1, 4, 1, 15983, 1, 1, 4, 16, 1, 1)).setIndexNames((0, 'MERU-CONFIG-STATICSTATION-MIB', 'mwStaticStationTableIndex'))
if mibBuilder.loadTexts:
mwStaticStationEntry.setStatus('current')
mw_static_station_table_index = mib_table_column((1, 3, 6, 1, 4, 1, 15983, 1, 1, 4, 16, 1, 1, 1), integer32())
if mibBuilder.loadTexts:
mwStaticStationTableIndex.setStatus('current')
mw_static_station_ip_address = mib_table_column((1, 3, 6, 1, 4, 1, 15983, 1, 1, 4, 16, 1, 1, 2), ip_address()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
mwStaticStationIpAddress.setStatus('current')
mw_static_station_mac_address = mib_table_column((1, 3, 6, 1, 4, 1, 15983, 1, 1, 4, 16, 1, 1, 3), mac_address()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
mwStaticStationMacAddress.setStatus('current')
mw_static_station_row_status = mib_table_column((1, 3, 6, 1, 4, 1, 15983, 1, 1, 4, 16, 1, 1, 4), row_status()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
mwStaticStationRowStatus.setStatus('current')
mibBuilder.exportSymbols('MERU-CONFIG-STATICSTATION-MIB', mwStaticStationEntry=mwStaticStationEntry, mwStaticStationTableIndex=mwStaticStationTableIndex, mwStaticStationRowStatus=mwStaticStationRowStatus, mwConfigStaticStation=mwConfigStaticStation, mwStaticStationMacAddress=mwStaticStationMacAddress, PYSNMP_MODULE_ID=mwConfigStaticStation, mwStaticStationTable=mwStaticStationTable, mwStaticStationIpAddress=mwStaticStationIpAddress) |
"""Serializers"""
def serialize_greeting(greeting):
"""."""
return {
'id': None,
'type': 'greeting',
'attributes': {
'word': greeting.get('word', None),
'propertyTwo': greeting.get('propertyTwo', None),
'propertyThree': greeting.get('propertyThree', None),
}
}
| """Serializers"""
def serialize_greeting(greeting):
"""."""
return {'id': None, 'type': 'greeting', 'attributes': {'word': greeting.get('word', None), 'propertyTwo': greeting.get('propertyTwo', None), 'propertyThree': greeting.get('propertyThree', None)}} |
def f8(n):
m = n
sum = 0
while m!=0:
x = m%10
sum = sum + x**3
m = m//10
if sum == n:
print(n, " is an Armstrong number")
else:
print(n, " is not an Armstrong number")
f8(153)
f8(125) | def f8(n):
m = n
sum = 0
while m != 0:
x = m % 10
sum = sum + x ** 3
m = m // 10
if sum == n:
print(n, ' is an Armstrong number')
else:
print(n, ' is not an Armstrong number')
f8(153)
f8(125) |
# Define the number of memory threads
MEMORY_THREAD_NUM = 4
# Define the number of ebs threads
EBS_THREAD_NUM = 4
# Define the number of proxy worker threads
PROXY_THREAD_NUM = 4
# Number of tiers
MIN_TIER = 1
MAX_TIER = 2
# Define port offset
SERVER_PORT = 6560
NODE_JOIN_BASE_PORT = 6660
NODE_DEPART_BASE_PORT = 6760
SELF_DEPART_BASE_PORT = 6860
REPLICATION_FACTOR_BASE_PORT = 6960
REQUEST_PULLING_BASE_PORT = 6460
GOSSIP_BASE_PORT = 7060
REPLICATION_FACTOR_CHANGE_BASE_PORT = 7160
# used by proxies
SEED_BASE_PORT = 6560
NOTIFY_BASE_PORT = 6660
KEY_ADDRESS_BASE_PORT = 6760
# used by monitoring nodes
DEPART_DONE_BASE_PORT = 6760
LATENCY_REPORT_BASE_PORT = 6860
# used by benchmark threads
COMMAND_BASE_PORT = 6560
class Thread():
def __init__(self, ip, tid):
self.ip = ip
self.tid = tid
self._base = 'tcp://*:'
self._ip_base = 'tcp://' + self.ip + ':'
def get_ip(self):
return self.ip
def get_tid(self):
return self.tid
class UserThread(Thread):
def get_request_pull_connect_addr(self):
return self._ip_base + str(self.tid + REQUEST_PULLING_BASE_PORT)
def get_request_pull_bind_addr(self):
return self._base + str(self.tid + REQUEST_PULLING_BASE_PORT)
def get_key_address_connect_addr(self):
return self._ip_base + str(self.tid + KEY_ADDRESS_BASE_PORT)
def get_key_address_bind_addr(self):
return self._base + str(self.tid + KEY_ADDRESS_BASE_PORT)
class ProxyThread(Thread):
def get_key_address_connect_addr(self):
return self._ip_base + str(self.tid + KEY_ADDRESS_BASE_PORT)
def key_address_bind_addr(self):
return self._base + str(self.tid + KEY_ADDRESS_BASE_PORT)
| memory_thread_num = 4
ebs_thread_num = 4
proxy_thread_num = 4
min_tier = 1
max_tier = 2
server_port = 6560
node_join_base_port = 6660
node_depart_base_port = 6760
self_depart_base_port = 6860
replication_factor_base_port = 6960
request_pulling_base_port = 6460
gossip_base_port = 7060
replication_factor_change_base_port = 7160
seed_base_port = 6560
notify_base_port = 6660
key_address_base_port = 6760
depart_done_base_port = 6760
latency_report_base_port = 6860
command_base_port = 6560
class Thread:
def __init__(self, ip, tid):
self.ip = ip
self.tid = tid
self._base = 'tcp://*:'
self._ip_base = 'tcp://' + self.ip + ':'
def get_ip(self):
return self.ip
def get_tid(self):
return self.tid
class Userthread(Thread):
def get_request_pull_connect_addr(self):
return self._ip_base + str(self.tid + REQUEST_PULLING_BASE_PORT)
def get_request_pull_bind_addr(self):
return self._base + str(self.tid + REQUEST_PULLING_BASE_PORT)
def get_key_address_connect_addr(self):
return self._ip_base + str(self.tid + KEY_ADDRESS_BASE_PORT)
def get_key_address_bind_addr(self):
return self._base + str(self.tid + KEY_ADDRESS_BASE_PORT)
class Proxythread(Thread):
def get_key_address_connect_addr(self):
return self._ip_base + str(self.tid + KEY_ADDRESS_BASE_PORT)
def key_address_bind_addr(self):
return self._base + str(self.tid + KEY_ADDRESS_BASE_PORT) |
t=input().split()
hi=int(t[0])
hf=int(t[1])
if hi==hf:
ht=24
elif hi>hf:
ht=(24-hi)+hf
elif hi<hf:
ht=hf-hi
print("O JOGO DUROU %d HORA(S)" %ht)
| t = input().split()
hi = int(t[0])
hf = int(t[1])
if hi == hf:
ht = 24
elif hi > hf:
ht = 24 - hi + hf
elif hi < hf:
ht = hf - hi
print('O JOGO DUROU %d HORA(S)' % ht) |
# Atharv Kolhar
# Python Bytes
"""
Write a Program to convert Integers to Roman Symbols
Between number 1 to 1000
"""
def integer_to_roman(number):
n = [1, 4, 5, 9, 10, 40, 50, 90, 100, 400, 500, 900, 1000]
rn = ['I', 'IV', 'V', 'IX', 'X', 'XL', 'L', 'XC', 'C', 'CD', 'D', 'CM', 'M']
roman_num = ""
i = len(n) - 1
remainder = number
while remainder != 0:
quotient = remainder // n[i]
remainder = remainder % n[i]
roman_num += quotient * rn[i]
i -= 1 # i = i - 1
return roman_num
print(integer_to_roman(2022))
| """
Write a Program to convert Integers to Roman Symbols
Between number 1 to 1000
"""
def integer_to_roman(number):
n = [1, 4, 5, 9, 10, 40, 50, 90, 100, 400, 500, 900, 1000]
rn = ['I', 'IV', 'V', 'IX', 'X', 'XL', 'L', 'XC', 'C', 'CD', 'D', 'CM', 'M']
roman_num = ''
i = len(n) - 1
remainder = number
while remainder != 0:
quotient = remainder // n[i]
remainder = remainder % n[i]
roman_num += quotient * rn[i]
i -= 1
return roman_num
print(integer_to_roman(2022)) |
N = int(input())
ROCK, PAPER, SCISSORS = 'R', 'P', 'S'
beating_move_by_move = {
ROCK: PAPER,
PAPER: SCISSORS,
SCISSORS: ROCK,
}
MAX_CHARS = 500
IMPOSSIBLE = 'IMPOSSIBLE'
for case_id in range(1, N + 1):
nb_robots = int(input())
robot_programs = [input() for _ in range(nb_robots)]
best_combination = ''
can_win = True
for char_index in range(MAX_CHARS):
moves_to_beat = [*{robot_program[char_index % len(robot_program)] for robot_program in robot_programs}]
if len(moves_to_beat) == 1:
winning_move = beating_move_by_move[moves_to_beat[0]]
best_combination += winning_move
robot_programs = []
break
elif len(moves_to_beat) == 2:
best_move = '?'
if ROCK not in moves_to_beat:
best_move = SCISSORS
elif PAPER not in moves_to_beat:
best_move = ROCK
elif SCISSORS not in moves_to_beat:
best_move = PAPER
best_combination += best_move
robot_programs = [
robot_program for robot_program in robot_programs
if beating_move_by_move[robot_program[char_index % len(robot_program)]] != best_move
]
elif len(moves_to_beat) == 3:
break
print('Case #{}: {}'.format(case_id, best_combination if len(robot_programs) == 0 else IMPOSSIBLE))
| n = int(input())
(rock, paper, scissors) = ('R', 'P', 'S')
beating_move_by_move = {ROCK: PAPER, PAPER: SCISSORS, SCISSORS: ROCK}
max_chars = 500
impossible = 'IMPOSSIBLE'
for case_id in range(1, N + 1):
nb_robots = int(input())
robot_programs = [input() for _ in range(nb_robots)]
best_combination = ''
can_win = True
for char_index in range(MAX_CHARS):
moves_to_beat = [*{robot_program[char_index % len(robot_program)] for robot_program in robot_programs}]
if len(moves_to_beat) == 1:
winning_move = beating_move_by_move[moves_to_beat[0]]
best_combination += winning_move
robot_programs = []
break
elif len(moves_to_beat) == 2:
best_move = '?'
if ROCK not in moves_to_beat:
best_move = SCISSORS
elif PAPER not in moves_to_beat:
best_move = ROCK
elif SCISSORS not in moves_to_beat:
best_move = PAPER
best_combination += best_move
robot_programs = [robot_program for robot_program in robot_programs if beating_move_by_move[robot_program[char_index % len(robot_program)]] != best_move]
elif len(moves_to_beat) == 3:
break
print('Case #{}: {}'.format(case_id, best_combination if len(robot_programs) == 0 else IMPOSSIBLE)) |
class Pessoa:
ano_atual = 2019
def __init__(self, nome, idade):
self.nome = nome
self.idade = idade
def get_ano_nascimento(self):
print(self.ano_atual - self.idade)
@classmethod #Eu nao entendi a logica aq, sem nexo
def por_ano_nascimento(cls, nome, ano_nascimento):
idade = cls.ano_atual - ano_nascimento
return cls(nome, idade)
p1 = Pessoa('Pedro', 23)
print(p1)
print(p1.nome, p1.idade)
p1.get_ano_nascimento()
| class Pessoa:
ano_atual = 2019
def __init__(self, nome, idade):
self.nome = nome
self.idade = idade
def get_ano_nascimento(self):
print(self.ano_atual - self.idade)
@classmethod
def por_ano_nascimento(cls, nome, ano_nascimento):
idade = cls.ano_atual - ano_nascimento
return cls(nome, idade)
p1 = pessoa('Pedro', 23)
print(p1)
print(p1.nome, p1.idade)
p1.get_ano_nascimento() |
##Q3. Write a program to print table of 2
#############################################################################################################
##Program Objective: to print table of 2 ##
##Coded by: KNR ##
##Date: 17/09/2019 22:20 ##
##Lang: Python 3.7.4 ##
##Version: 1.0 ##
#############################################################################################################
# Method-1: Using for loop
##print("--------------------------------------------------")
##
##for i in range(1,11):
## print("{} x {} = {}".format(2,i,2*i))
##
##print("--------------------------------------------------")
# Method-2: Using while loop
print("--------------------------------------------------")
i = 1
while i<=10:
print("{} x {} = {}".format(2,i,2*i))
i+=1
print("--------------------------------------------------")
| print('--------------------------------------------------')
i = 1
while i <= 10:
print('{} x {} = {}'.format(2, i, 2 * i))
i += 1
print('--------------------------------------------------') |
def funcaoOrig(x):
return(x - 2)
def raizFuncao(a, b):
erro = 0.1
cont = 0
while((funcaoOrig(a)) * (funcaoOrig(b)) >= 0):
print(f'\nErro!\nCom os valores {a} e {b}, eh impossivel calcularmos as raizes da funcao original!')
a = float(input('\nDigite outro valor para a: '))
b = float(input('Digite outro valor para b: '))
while(abs(a - b) > erro):
x = (a + b) / 2
if((funcaoOrig(a)) * (funcaoOrig(x)) < 0):
b = x
else:
a = x
cont += 1
print(f'\nA raiz da funcao eh: {x} com precisao de {erro} em {cont} iteracoes')
| def funcao_orig(x):
return x - 2
def raiz_funcao(a, b):
erro = 0.1
cont = 0
while funcao_orig(a) * funcao_orig(b) >= 0:
print(f'\nErro!\nCom os valores {a} e {b}, eh impossivel calcularmos as raizes da funcao original!')
a = float(input('\nDigite outro valor para a: '))
b = float(input('Digite outro valor para b: '))
while abs(a - b) > erro:
x = (a + b) / 2
if funcao_orig(a) * funcao_orig(x) < 0:
b = x
else:
a = x
cont += 1
print(f'\nA raiz da funcao eh: {x} com precisao de {erro} em {cont} iteracoes') |
def make_default_resolver(field_attr_name):
def resolver(source, args, info):
property = getattr(source, field_attr_name, None)
if callable(property):
return property()
return property
resolver.__name__ = 'resolve_{}'.format(field_attr_name)
return resolver
| def make_default_resolver(field_attr_name):
def resolver(source, args, info):
property = getattr(source, field_attr_name, None)
if callable(property):
return property()
return property
resolver.__name__ = 'resolve_{}'.format(field_attr_name)
return resolver |
"""
Physical parameters of the laboratory Crazyflie quadrotors.
Additional sources:
https://bitcraze.io/2015/02/measuring-propeller-rpm-part-3
https://wiki.bitcraze.io/misc:investigations:thrust
https://commons.erau.edu/cgi/viewcontent.cgi?article=2057&context=publication
Notes:
k_thrust is inferred from 14.5 g thrust at 2500 rad/s
k_drag is mostly made up
"""
quad_params = {
'mass': 0.030, # kg
'Ixx': 1.43e-5, # kg*m^2
'Iyy': 1.43e-5, # kg*m^2
'Izz': 2.89e-5, # kg*m^2
'arm_length': 0.046, # meters
'rotor_speed_min': 0, # rad/s
'rotor_speed_max': 2500, # rad/s
'k_thrust': 2.3e-08, # N/(rad/s)**2
'k_drag': 7.8e-11, # Nm/(rad/s)**2
}
| """
Physical parameters of the laboratory Crazyflie quadrotors.
Additional sources:
https://bitcraze.io/2015/02/measuring-propeller-rpm-part-3
https://wiki.bitcraze.io/misc:investigations:thrust
https://commons.erau.edu/cgi/viewcontent.cgi?article=2057&context=publication
Notes:
k_thrust is inferred from 14.5 g thrust at 2500 rad/s
k_drag is mostly made up
"""
quad_params = {'mass': 0.03, 'Ixx': 1.43e-05, 'Iyy': 1.43e-05, 'Izz': 2.89e-05, 'arm_length': 0.046, 'rotor_speed_min': 0, 'rotor_speed_max': 2500, 'k_thrust': 2.3e-08, 'k_drag': 7.8e-11} |
# Time: O(n)
# Space: O(1)
class Solution(object):
def validMountainArray(self, A):
"""
:type A: List[int]
:rtype: bool
"""
i = 0
while i+1 < len(A) and A[i] < A[i+1]:
i += 1
j = len(A)-1
while j-1 >= 0 and A[j-1] > A[j]:
j -= 1
return 0 < i == j < len(A)-1
| class Solution(object):
def valid_mountain_array(self, A):
"""
:type A: List[int]
:rtype: bool
"""
i = 0
while i + 1 < len(A) and A[i] < A[i + 1]:
i += 1
j = len(A) - 1
while j - 1 >= 0 and A[j - 1] > A[j]:
j -= 1
return 0 < i == j < len(A) - 1 |
strs = ["flower","flow","flight"]
if len(strs) == 0:
print("")
exit()
prefix = strs[0]
for str in strs:
while(str.find(prefix) != 0):
prefix = prefix[0:len(prefix)-1]
if len(prefix) == 0:
print("")
exit()
print(prefix)
| strs = ['flower', 'flow', 'flight']
if len(strs) == 0:
print('')
exit()
prefix = strs[0]
for str in strs:
while str.find(prefix) != 0:
prefix = prefix[0:len(prefix) - 1]
if len(prefix) == 0:
print('')
exit()
print(prefix) |
d1 = {1:'Sugandh', 2:'Divya', 3:'Mintoo'}
print(d1) # D1
print("deleting an item from the dictionary...")
del d1[3]
print(d1) # D2
print("deleting an entire dictionary...")
del d1
print(d1) # D3
| d1 = {1: 'Sugandh', 2: 'Divya', 3: 'Mintoo'}
print(d1)
print('deleting an item from the dictionary...')
del d1[3]
print(d1)
print('deleting an entire dictionary...')
del d1
print(d1) |
'''
Template untuk solusi Lab 09 kelas A.
'''
class Bangunan:
'''
Sebuah bangunan.
'''
def __init__(self, nama_bangunan, lama_sewa, harga_sewa):
self.nama = nama_bangunan
self.lama_sewa = lama_sewa
self.harga_sewa = harga_sewa
def get_harga_sewa(self):
'''
Mengembalikan harga sewa.
'''
return self.harga_sewa
class Restoran(Bangunan):
'''
Sebuah restoran.
'''
def __init__(self, nama_restoran, lama_sewa=0):
Bangunan.__init__(self, nama_restoran, lama_sewa, 30000000)
# Silahkan ditambahkan class-class lainnya atau jika ingin memodifikasi
daftar_bangunan = None
while True:
masukan = input().split()
if masukan[0] == "BANGUN":
# dapatkan nilai ini dari masukan_split sesuai indexnya
nama = None
jenis_bangunan = None
# lakukan selection untuk menentukan tipe Pegawai
if jenis_bangunan == "HOTEL":
bangunan = None
elif jenis_bangunan == "RESTORAN":
bangunan = Restoran(nama)
elif jenis_bangunan == "RUMAHSAKIT":
bangunan = None
# masukan bangunan yang sudah dibuat ke dalam dictionary
# cetak pesan sesuai format
elif masukan[0] == "INFO":
pass
elif masukan[0] == "JUALMAKANAN":
pass
elif masukan[0] == "TERIMATAMU":
pass
elif masukan[0] == "OBATIPASIEN":
pass
elif masukan[0] == "HITUNGUANG":
pass
| """
Template untuk solusi Lab 09 kelas A.
"""
class Bangunan:
"""
Sebuah bangunan.
"""
def __init__(self, nama_bangunan, lama_sewa, harga_sewa):
self.nama = nama_bangunan
self.lama_sewa = lama_sewa
self.harga_sewa = harga_sewa
def get_harga_sewa(self):
"""
Mengembalikan harga sewa.
"""
return self.harga_sewa
class Restoran(Bangunan):
"""
Sebuah restoran.
"""
def __init__(self, nama_restoran, lama_sewa=0):
Bangunan.__init__(self, nama_restoran, lama_sewa, 30000000)
daftar_bangunan = None
while True:
masukan = input().split()
if masukan[0] == 'BANGUN':
nama = None
jenis_bangunan = None
if jenis_bangunan == 'HOTEL':
bangunan = None
elif jenis_bangunan == 'RESTORAN':
bangunan = restoran(nama)
elif jenis_bangunan == 'RUMAHSAKIT':
bangunan = None
elif masukan[0] == 'INFO':
pass
elif masukan[0] == 'JUALMAKANAN':
pass
elif masukan[0] == 'TERIMATAMU':
pass
elif masukan[0] == 'OBATIPASIEN':
pass
elif masukan[0] == 'HITUNGUANG':
pass |
def crime_list(z): #function definition
f=open(z,"r") #open csv file in read mode
d1=dict()
d2=dict()
list1=[]
list2=[]
for line in f:
line.strip()
for lines in line.split(','):
list1.append(lines[-1])
list2.append(lines[-2])
for y in list1:
if y not in d1:
d1[y]=1
else:
d1[y]=d[y]+1
for z in list2:
if z not in d2:
d2[z]=1
else:
d2[z]+=1
print("crime name",' '*15,"crimeid",' '*15,"crimecount") #printing Headers
for k1,v in d1.items():
for k2,v in d2.items():
print(k1,k2,v, "\n")
file="Crime.csv"
crime_list(file) #function call
| def crime_list(z):
f = open(z, 'r')
d1 = dict()
d2 = dict()
list1 = []
list2 = []
for line in f:
line.strip()
for lines in line.split(','):
list1.append(lines[-1])
list2.append(lines[-2])
for y in list1:
if y not in d1:
d1[y] = 1
else:
d1[y] = d[y] + 1
for z in list2:
if z not in d2:
d2[z] = 1
else:
d2[z] += 1
print('crime name', ' ' * 15, 'crimeid', ' ' * 15, 'crimecount')
for (k1, v) in d1.items():
for (k2, v) in d2.items():
print(k1, k2, v, '\n')
file = 'Crime.csv'
crime_list(file) |
def part1(lines):
total = 0
for box in lines:
dim = list(map(int, box.split("x")))
lw = dim[0] * dim[1]
wh = dim[1] * dim[2]
hl = dim[2] * dim[0]
total += 2*lw + 2*wh + 2*hl + min(lw,wh,hl)
return total
def part2(lines):
total = 0
for box in lines:
dim = sorted(list(map(int, box.split("x"))))
total += dim[0] + dim[0] + dim[1] + dim[1] + dim[0]*dim[1]*dim[2]
return total
if __name__ == "__main__":
with open("../input.txt") as file:
lines = file.read().splitlines()
print(part1(lines))
print(part2(lines)) | def part1(lines):
total = 0
for box in lines:
dim = list(map(int, box.split('x')))
lw = dim[0] * dim[1]
wh = dim[1] * dim[2]
hl = dim[2] * dim[0]
total += 2 * lw + 2 * wh + 2 * hl + min(lw, wh, hl)
return total
def part2(lines):
total = 0
for box in lines:
dim = sorted(list(map(int, box.split('x'))))
total += dim[0] + dim[0] + dim[1] + dim[1] + dim[0] * dim[1] * dim[2]
return total
if __name__ == '__main__':
with open('../input.txt') as file:
lines = file.read().splitlines()
print(part1(lines))
print(part2(lines)) |
def _findwinners(players):
best = -1
winners = []
for i in range(len(players)):
if players[i]:
if best == -1 or players[i] > best:
best = players[i]
winners = [i]
elif players[i] == best:
winners.append(i)
return winners
def _splitpot(winners, pot):
amount = pot // len(winners)
winnings = [amount] * len(winners)
winningsamount = amount * len(winners)
for i in range(len(winnings)):
if winningsamount >= amount:
break
winnings[i] += 1
winningsamount += 1
return winnings
def payout(players, pots):
payouts = [0] * len(players)
for pot in pots:
if pot['chips'] > 0:
winners = _findwinners(players)
if (len(winners) > 0):
winnings = _splitpot(winners, pot['chips'])
for i in range(len(winners)):
payouts[winners[i]] += winnings[i]
for allin in pot['players']:
players[allin] = False
return payouts
if __name__ == '__main__':
payouts = payout([False, [8, 13], [7, 14], False], [{ 'chips': 100, 'players': [] }])
if payouts != [0, 100, 0, 0]:
print('Test1 failed')
payouts = payout([False, [8, 13], [7, 14], False], [{ 'chips': 100, 'players': [1] }, { 'chips': 50, 'players': [] }])
if payouts != [0, 100, 50, 0]:
print('Test2 failed')
payouts = payout([False, [8, 13], [7, 14], [8, 13]], [{ 'chips': 100, 'players': [] }])
if payouts != [0, 50, 0, 50]:
print('Test3 failed')
| def _findwinners(players):
best = -1
winners = []
for i in range(len(players)):
if players[i]:
if best == -1 or players[i] > best:
best = players[i]
winners = [i]
elif players[i] == best:
winners.append(i)
return winners
def _splitpot(winners, pot):
amount = pot // len(winners)
winnings = [amount] * len(winners)
winningsamount = amount * len(winners)
for i in range(len(winnings)):
if winningsamount >= amount:
break
winnings[i] += 1
winningsamount += 1
return winnings
def payout(players, pots):
payouts = [0] * len(players)
for pot in pots:
if pot['chips'] > 0:
winners = _findwinners(players)
if len(winners) > 0:
winnings = _splitpot(winners, pot['chips'])
for i in range(len(winners)):
payouts[winners[i]] += winnings[i]
for allin in pot['players']:
players[allin] = False
return payouts
if __name__ == '__main__':
payouts = payout([False, [8, 13], [7, 14], False], [{'chips': 100, 'players': []}])
if payouts != [0, 100, 0, 0]:
print('Test1 failed')
payouts = payout([False, [8, 13], [7, 14], False], [{'chips': 100, 'players': [1]}, {'chips': 50, 'players': []}])
if payouts != [0, 100, 50, 0]:
print('Test2 failed')
payouts = payout([False, [8, 13], [7, 14], [8, 13]], [{'chips': 100, 'players': []}])
if payouts != [0, 50, 0, 50]:
print('Test3 failed') |
__add_idle_call = None
__remove_idle_call = None
__add_timeout_call = None
__remove_timeout_call = None
__setup_event_loop = None
__start_event_loop = None
__stop_event_loop = None
__idle_call_dict = {}
__timeout_call_dict = {}
def add_idle_call(func, *args, **kwargs):
global __add_idle_call
global __idle_call_dict
if __add_idle_call is not None:
__idle_call_dict[func] = __add_idle_call(func, *args, **kwargs)
else: # toolkit does not support this or is not loaded:
func(*args, **kwargs)
def remove_idle_call(func):
global __remove_idle_call
global __idle_call_dict
if __remove_idle_call is not None:
__remove_idle_call(__idle_call_dict[func])
def add_timeout_call(timeout, func, *args, **kwargs):
global __add_timeout_call
global __timeout_call_dict
if __add_timeout_call is not None:
__timeout_call_dict[func] = __add_timeout_call(timeout, func, *args, **kwargs)
else: # toolkit does not support this or is not loaded:
func(*args, **kwargs)
def remove_timeout_call(func):
global __remove_timeout_call
global __timeout_call_dict
if __remove_timeout_call is not None:
__remove_timeout_call(__timeout_call_dict[func])
def start_event_loop():
global __start_event_loop
if __start_event_loop is not None:
return __start_event_loop()
def stop_event_loop():
global __stop_event_loop
if __stop_event_loop is not None:
return __stop_event_loop()
def load_toolkit_functions(
add_idle_call,
remove_idle_call,
add_timeout_call,
remove_timeout_call,
start_event_loop,
stop_event_loop):
"""
'add_idle_call' should take a function as 1st argument, the return
value is passed back to 'remove_idle_call'. Internally a cache is maintained
in which keys are functions and values are return values.
'add_timeout_call' and 'remove_timeout_call' work analogously
start_event_loop and stop_event_loop don't take arguments and should be self-explanatory.
"""
global __add_idle_call
global __remove_idle_call
global __add_timeout_call
global __remove_timeout_call
global __start_event_loop
global __stop_event_loop
assert callable(add_idle_call)
assert callable(remove_idle_call)
assert callable(add_timeout_call)
assert callable(remove_timeout_call)
assert callable(start_event_loop)
assert callable(stop_event_loop)
__add_idle_call = add_idle_call
__remove_idle_call = remove_idle_call
__add_timeout_call = add_timeout_call
__remove_timeout_call = remove_timeout_call
__start_event_loop = start_event_loop
__stop_event_loop = stop_event_loop
"""
Decorators:
"""
def run_when_idle(func):
def callback(*args, **kwargs):
return add_idle_call(func, *args, **kwargs)
return callback
def run_every(timeout):
def wrapper(func):
def callback(*args, **kwargs):
return add_timeout_call(func, timeout, *args, **kwargs)
return callback
return wrapper
| __add_idle_call = None
__remove_idle_call = None
__add_timeout_call = None
__remove_timeout_call = None
__setup_event_loop = None
__start_event_loop = None
__stop_event_loop = None
__idle_call_dict = {}
__timeout_call_dict = {}
def add_idle_call(func, *args, **kwargs):
global __add_idle_call
global __idle_call_dict
if __add_idle_call is not None:
__idle_call_dict[func] = __add_idle_call(func, *args, **kwargs)
else:
func(*args, **kwargs)
def remove_idle_call(func):
global __remove_idle_call
global __idle_call_dict
if __remove_idle_call is not None:
__remove_idle_call(__idle_call_dict[func])
def add_timeout_call(timeout, func, *args, **kwargs):
global __add_timeout_call
global __timeout_call_dict
if __add_timeout_call is not None:
__timeout_call_dict[func] = __add_timeout_call(timeout, func, *args, **kwargs)
else:
func(*args, **kwargs)
def remove_timeout_call(func):
global __remove_timeout_call
global __timeout_call_dict
if __remove_timeout_call is not None:
__remove_timeout_call(__timeout_call_dict[func])
def start_event_loop():
global __start_event_loop
if __start_event_loop is not None:
return __start_event_loop()
def stop_event_loop():
global __stop_event_loop
if __stop_event_loop is not None:
return __stop_event_loop()
def load_toolkit_functions(add_idle_call, remove_idle_call, add_timeout_call, remove_timeout_call, start_event_loop, stop_event_loop):
"""
'add_idle_call' should take a function as 1st argument, the return
value is passed back to 'remove_idle_call'. Internally a cache is maintained
in which keys are functions and values are return values.
'add_timeout_call' and 'remove_timeout_call' work analogously
start_event_loop and stop_event_loop don't take arguments and should be self-explanatory.
"""
global __add_idle_call
global __remove_idle_call
global __add_timeout_call
global __remove_timeout_call
global __start_event_loop
global __stop_event_loop
assert callable(add_idle_call)
assert callable(remove_idle_call)
assert callable(add_timeout_call)
assert callable(remove_timeout_call)
assert callable(start_event_loop)
assert callable(stop_event_loop)
__add_idle_call = add_idle_call
__remove_idle_call = remove_idle_call
__add_timeout_call = add_timeout_call
__remove_timeout_call = remove_timeout_call
__start_event_loop = start_event_loop
__stop_event_loop = stop_event_loop
'\n Decorators:\n'
def run_when_idle(func):
def callback(*args, **kwargs):
return add_idle_call(func, *args, **kwargs)
return callback
def run_every(timeout):
def wrapper(func):
def callback(*args, **kwargs):
return add_timeout_call(func, timeout, *args, **kwargs)
return callback
return wrapper |
"""Maps between model names and more readable versions.
Model/code names (on the left) are used internally by Cavecalc. Readable names
are largely used in the GUI. Conversion between them is handled by the
setter.NameSwitcher class.
"""
database = "PHREEQC Database Filename"
phreeqc_log_file = "Log PHREEQC input"
phreeqc_log_file_name = "PHREEQC Log Filename"
out_dir = "Output Directory"
temperature = "Temperature (Degrees C)"
co2_decrement = "CO2(g) removal per step (fraction)"
calcite_sat_limit = "Calcite supersaturation limit (SI)"
bedrock = "Bedrock (moles)"
bedrock_mineral = "Bedrock Lithology"
bedrock_pyrite = "Bedrock Pyrite (moles)"
bedrock_d44Ca = "Bedrock d44Ca (per mil)"
bedrock_d13C = "Bedrock d13C (per mil)"
bedrock_d18O = "Bedrock d18O (per mil)"
bedrock_MgCa = "Bedrock Mg/Ca (mmol/mol)"
bedrock_SrCa = "Bedrock Sr/Ca (mmol/mol)"
bedrock_BaCa = "Bedrock Ba/Ca (mmol/mol)"
atmo_exchange = "Second Gas Fraction (0-1)"
gas_volume = "Gas Volume (L)"
atm_O2 = "Second Gas O2 (%)"
atm_pCO2 = "Second Gas pCO2 (ppmv)"
atm_d13C = "Second Gas d13C (per mil)"
atm_R14C = "Second Gas R14C (pmc)"
atm_d18O = "Rainfall d18O (per mil)"
init_O2 = "Initial O2 (%)"
init_pCO2 = "Initial pCO2 (ppmv)"
init_d13C = "Initial d13C (per mil)"
init_R14C = "Initial R14C (pmc)"
init_solution_d13c = "Initial Solution d13C (per mil) - OVERWRITE"
cave_O2 = "Cave Air O2 (%)"
cave_pCO2 = "Cave Air pCO2 (ppmv)"
cave_d13C = "Cave Air d13C (per mil)"
cave_R14C = "Cave Air R14C (pmc)"
cave_d18O = "Cave Air d18O (per mil)"
cave_air_volume = "Cave Air Volume (L)"
soil_pH = "Soil pH (approx.)"
soil_O2 = "Soil Gas O2 (%)"
soil_pCO2 = "Soil Gas pCO2 (ppmv)"
soil_Ca = "Soil Ca (mmol/kgw)"
soil_Mg = "Soil Mg (mmol/kgw)"
soil_Sr = "Soil Sr (mmol/kgw)"
soil_Ba = "Soil Ba (mmol/kgw)"
soil_d13C = "Soil Gas d13C (per mil)"
soil_R14C = "Soil Gas R14C (pmc)"
soil_d44Ca = "Soil d44Ca (per mil)"
kinetics_mode = "Degassing/Precipitation Mode"
reprecip = "Allow Calcite Reprecipitation"
totals = "Totals"
molalities = "Molalities"
isotopes = "Isotopes"
| """Maps between model names and more readable versions.
Model/code names (on the left) are used internally by Cavecalc. Readable names
are largely used in the GUI. Conversion between them is handled by the
setter.NameSwitcher class.
"""
database = 'PHREEQC Database Filename'
phreeqc_log_file = 'Log PHREEQC input'
phreeqc_log_file_name = 'PHREEQC Log Filename'
out_dir = 'Output Directory'
temperature = 'Temperature (Degrees C)'
co2_decrement = 'CO2(g) removal per step (fraction)'
calcite_sat_limit = 'Calcite supersaturation limit (SI)'
bedrock = 'Bedrock (moles)'
bedrock_mineral = 'Bedrock Lithology'
bedrock_pyrite = 'Bedrock Pyrite (moles)'
bedrock_d44_ca = 'Bedrock d44Ca (per mil)'
bedrock_d13_c = 'Bedrock d13C (per mil)'
bedrock_d18_o = 'Bedrock d18O (per mil)'
bedrock__mg_ca = 'Bedrock Mg/Ca (mmol/mol)'
bedrock__sr_ca = 'Bedrock Sr/Ca (mmol/mol)'
bedrock__ba_ca = 'Bedrock Ba/Ca (mmol/mol)'
atmo_exchange = 'Second Gas Fraction (0-1)'
gas_volume = 'Gas Volume (L)'
atm_o2 = 'Second Gas O2 (%)'
atm_p_co2 = 'Second Gas pCO2 (ppmv)'
atm_d13_c = 'Second Gas d13C (per mil)'
atm_r14_c = 'Second Gas R14C (pmc)'
atm_d18_o = 'Rainfall d18O (per mil)'
init_o2 = 'Initial O2 (%)'
init_p_co2 = 'Initial pCO2 (ppmv)'
init_d13_c = 'Initial d13C (per mil)'
init_r14_c = 'Initial R14C (pmc)'
init_solution_d13c = 'Initial Solution d13C (per mil) - OVERWRITE'
cave_o2 = 'Cave Air O2 (%)'
cave_p_co2 = 'Cave Air pCO2 (ppmv)'
cave_d13_c = 'Cave Air d13C (per mil)'
cave_r14_c = 'Cave Air R14C (pmc)'
cave_d18_o = 'Cave Air d18O (per mil)'
cave_air_volume = 'Cave Air Volume (L)'
soil_p_h = 'Soil pH (approx.)'
soil_o2 = 'Soil Gas O2 (%)'
soil_p_co2 = 'Soil Gas pCO2 (ppmv)'
soil__ca = 'Soil Ca (mmol/kgw)'
soil__mg = 'Soil Mg (mmol/kgw)'
soil__sr = 'Soil Sr (mmol/kgw)'
soil__ba = 'Soil Ba (mmol/kgw)'
soil_d13_c = 'Soil Gas d13C (per mil)'
soil_r14_c = 'Soil Gas R14C (pmc)'
soil_d44_ca = 'Soil d44Ca (per mil)'
kinetics_mode = 'Degassing/Precipitation Mode'
reprecip = 'Allow Calcite Reprecipitation'
totals = 'Totals'
molalities = 'Molalities'
isotopes = 'Isotopes' |
def avg(list):
    """Return the arithmetic mean of the values in *list* as a float."""
    total = float(sum(list))
    return total / len(list)
def apply(map, f_x, f_y):
    """Build a new dict: keys transformed by f_x, values by f_y."""
    transformed = {}
    for key, value in map.items():
        transformed[f_x(key)] = f_y(value)
    return transformed
def linear(a, b):
    """Return the affine function x -> a * x + b."""
    def line(x):
        return a * x + b
    return line
def read_csv(filename):
    """Read a two-column integer CSV with a single header line.

    Prints which quantity is derived from which (taken from the header)
    and returns a dict mapping the first column (int) to the second (int).
    """
    # 'with' guarantees the handle is closed even if a line fails to parse
    # (the original leaked the file on any exception before file.close())
    with open(filename, "r") as file:
        labels = file.readline()[:-1].split(',')
        print("We get {} from {}".format(labels[1], labels[0]))
        map = {}
        for line in file:
            x, y = line.split(',')
            map[int(x)] = int(y)
    return map
def distance(map):
    """Return a factory that turns a per-point cost into a mean cost over *map*.

    Usage: distance(data)(single_cost) gives a function of the model f
    returning the average of single_cost(f, x, y) over all (x, y) pairs.
    """
    def partial(single_cost):
        def total(f):
            costs = [single_cost(f, x, y) for (x, y) in map.items()]
            return sum(costs) / len(map)
        return total
    return partial
def train():
    """Fit y = theta1 * x + theta0 to data.csv via gradient descent.

    Returns the (intercept, slope) pair rescaled back to the original
    data units.  Assumes data.csv exists in the working directory --
    TODO confirm the caller guarantees this.
    """
    F = read_csv("data.csv")
    # normalize both axes to [0, 1] so a single learning rate suits both
    # parameters during descent
    mx, my = max(F.keys()), max(F.values())
    delta = distance(apply(F, lambda x: x/mx, lambda y: y/my))
    # partial derivatives w.r.t. intercept (dx) and slope (dy), plus the
    # squared-error cost used for the convergence test
    dx = delta(lambda f,x,y: y - f(x))
    dy = delta(lambda f,x,y: (y - f(x)) * x)
    cost = delta(lambda f,x,y: (y - f(x)) ** 2)
    theta = gradient_descent([dx, dy, cost])
    # undo the normalization: intercept scales by my, slope by my/mx
    return (theta[0] * my, theta[1] * my / mx)
def gradient_descent(distance_functions):
    """Run up to 1000 gradient-descent steps for a two-parameter linear fit.

    distance_functions is [d_intercept, d_slope, cost]; each takes the
    current model f and returns a mean over the data set.  Iteration
    stops early once the cost improvement drops below 1e-10.
    Returns [theta0, theta1] (intercept, slope) in normalized units.
    """
    count = 0
    learning_rate = 1.5
    theta = [0, 0]
    # seed prev/cur far apart so the first convergence test cannot fire
    prev_cost = 200
    cur_cost = 100
    # the original epsilon was written "10e-11", which is just 1e-10
    while (count < 1000 and abs(cur_cost - prev_cost) > 1e-10):
        f = linear(theta[1], theta[0])
        prev_cost = cur_cost
        cur_cost = distance_functions[2](f)
        for i in range(2):
            theta[i] += learning_rate * distance_functions[i](f)
        count += 1
    print("Performed {} iterations".format(count))
    return theta
| def avg(list):
return float(sum(list)) / len(list)
def apply(map, f_x, f_y):
return {f_x(x): f_y(y) for (x, y) in map.items()}
def linear(a, b):
return lambda x: a * x + b
def read_csv(filename):
file = open(filename, 'r')
labels = file.readline()[:-1].split(',')
print('We get {} from {}'.format(labels[1], labels[0]))
map = {}
for line in file:
(x, y) = line.split(',')
map[int(x)] = int(y)
file.close()
return map
def distance(map):
def partial(single_cost):
def total(f):
sum = 0
for (x, y) in map.items():
sum += single_cost(f, x, y)
return sum / len(map)
return total
return partial
def train():
    f = read_csv('data.csv')
    (mx, my) = (max(f.keys()), max(f.values()))
    delta = distance(apply(f, lambda x: x / mx, lambda y: y / my))
dx = delta(lambda f, x, y: y - f(x))
dy = delta(lambda f, x, y: (y - f(x)) * x)
cost = delta(lambda f, x, y: (y - f(x)) ** 2)
theta = gradient_descent([dx, dy, cost])
return (theta[0] * my, theta[1] * my / mx)
def gradient_descent(distance_functions):
count = 0
learning_rate = 1.5
theta = [0, 0]
prev_cost = 200
cur_cost = 100
while count < 1000 and abs(cur_cost - prev_cost) > 1e-10:
f = linear(theta[1], theta[0])
prev_cost = cur_cost
cur_cost = distance_functions[2](f)
for i in range(2):
theta[i] += learning_rate * distance_functions[i](f)
count += 1
print('Performed {} iterations'.format(count))
return theta |
class test:
    """Minimal demo class whose only behavior is printing a greeting."""

    def __init__(self):
        # no state to set up
        pass

    def printer(self):
        """Write the greeting to stdout."""
        print("hello world!")
# Run the demo only when executed as a script, not when imported.
if __name__=='__main__':
    foo = test()
    foo.printer()
| class Test:
def __init__(self):
pass
def printer(self):
print('hello world!')
if __name__ == '__main__':
    foo = Test()
foo.printer() |
# list overall array space
res = client.get_arrays_space()
print(res)
# isinstance is the idiomatic type check and also accepts subclasses,
# unlike the original `type(res) ==` comparison
if isinstance(res, pypureclient.responses.ValidResponse):
    print(list(res.items))
# list array space of file systems
res = client.get_arrays_space(type='file-system')
print(res)
if isinstance(res, pypureclient.responses.ValidResponse):
    print(list(res.items))
# list historical array space at a 30-second resolution
res = client.get_arrays_space(start_time=START_TIME,
                              end_time=END_TIME,
                              resolution=30000)
print(res)
if isinstance(res, pypureclient.responses.ValidResponse):
    print(list(res.items))
| res = client.get_arrays_space()
print(res)
if type(res) == pypureclient.responses.ValidResponse:
print(list(res.items))
res = client.get_arrays_space(type='file-system')
print(res)
if type(res) == pypureclient.responses.ValidResponse:
print(list(res.items))
res = client.get_arrays_space(start_time=START_TIME, end_time=END_TIME, resolution=30000)
print(res)
if type(res) == pypureclient.responses.ValidResponse:
print(list(res.items)) |
# Created by MechAviv
# Map ID :: 940001210
# Eastern Region of Pantheon : East Sanctum
# Cutscene: the player and companions spy on suspicious priests at the
# East Sanctum; one companion leaves for help, the priests reveal they
# are after the Relic, and the scene ends with a warp to the next map.
# --- lock the player into in-game direction (cutscene) mode ---
sm.curNodeEventEnd(True)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(True, True, False, False)
sm.forcedInput(0)
sm.forcedInput(2)
sm.sendDelay(30)
sm.forcedInput(0)
# --- spawn the cutscene NPCs and play their summon animation ---
OBJECT_1 = sm.sendNpcController(3000103, -300, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_1, "summon", 0)
OBJECT_2 = sm.sendNpcController(3000104, -450, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_2, "summon", 0)
OBJECT_3 = sm.sendNpcController(3000110, -120, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_3, "summon", 0)
OBJECT_4 = sm.sendNpcController(3000114, -100, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_4, "summon", 0)
OBJECT_5 = sm.sendNpcController(3000111, 130, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_5, "summon", 0)
OBJECT_6 = sm.sendNpcController(3000115, 250, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_6, "summon", 0)
# --- opening dialogue between the companions ---
sm.setSpeakerID(3000104)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendNext("Nothing here, big surprise...")
sm.showEffect("Effect/Direction9.img/effect/story/BalloonMsg1/3", 1200, 0, -120, 0, OBJECT_2, False, 0)
sm.showEffect("Effect/Direction9.img/effect/story/BalloonMsg1/3", 1200, 0, -120, -2, -2, False, 0)
sm.showEffect("Effect/Direction9.img/effect/story/BalloonMsg1/4", 1200, 0, -120, 0, OBJECT_1, False, 0)
sm.sendDelay(1200)
sm.setSpeakerID(3000103)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendNext("Well, those priests are keeping busy. It's funny, though...I don't recognize any of them.")
sm.setSpeakerID(3000103)
sm.removeEscapeButton()
sm.flipDialoguePlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendSay("Shhh! Something is not right. Velderoth!")
sm.setSpeakerID(3000104)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("You're right. They look suspicious. I'm going to run back to base and get help. You two stay here and keep an eye on them, okay? But no heroics. You get out of here if they spot you.")
sm.showEffect("Effect/Direction9.img/effect/story/BalloonMsg0/0", 1200, 0, -120, 0, OBJECT_1, False, 0)
sm.sendDelay(900)
sm.setSpeakerID(3000103)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendNext("What are you talking about?")
# remove OBJECT_2's controller -- the NPC who went for help leaves the scene
sm.sendNpcController(OBJECT_2, False)
sm.setSpeakerID(3000103)
sm.removeEscapeButton()
sm.flipDialoguePlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendSay("They attacked the East Sanctum? What are they trying to do with the Relic?)")
# --- the priests discuss taking the Relic ---
sm.setSpeakerID(3000110)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("The relic's disappearance should weaken the shields.")
# Unhandled Stat Changed [HP] Packet: 00 00 00 04 00 00 00 00 00 00 02 02 00 00 FF 00 00 00 00
sm.setSpeakerID(3000114)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("I thought the relic was cursed... should we really be touching it?")
sm.setSpeakerID(3000110)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("I did not realize they allowed superstitious nincompoops entry to our order! Will you balk at the call of destiny?")
sm.setSpeakerID(3000110)
sm.removeEscapeButton()
sm.flipDialoguePlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendSay("Are they trying to take the Relic?")
sm.setSpeakerID(3000103)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("We gotta stop them!")
# --- the companion rushes forward; balloon reactions from the priests ---
sm.moveNpcByObjectId(OBJECT_1, False, 300, 100)
sm.sendDelay(300)
sm.forcedInput(2)
sm.showEffect("Effect/Direction9.img/effect/story/BalloonMsg1/1", 1200, 0, -120, 0, OBJECT_3, False, 0)
sm.showEffect("Effect/Direction9.img/effect/story/BalloonMsg1/1", 1200, 0, -120, 0, OBJECT_4, False, 0)
sm.showEffect("Effect/Direction9.img/effect/story/BalloonMsg1/1", 1200, 0, -120, 0, OBJECT_5, False, 0)
sm.showEffect("Effect/Direction9.img/effect/story/BalloonMsg1/1", 1200, 0, -120, 0, OBJECT_6, False, 0)
sm.sendDelay(600)
sm.forcedInput(0)
sm.showEffect("Effect/Direction9.img/effect/story/BalloonMsg1/7", 900, 0, -120, 0, OBJECT_1, False, 0)
sm.sendDelay(900)
sm.showFieldEffect("kaiser/tear_rush", 0)
sm.sendDelay(3000)
# Unhandled Message [COLLECTION_RECORD_MESSAGE] Packet: 2A 01 00 00 00 2F 00 31 30 3A 31 3A 32 3A 31 31 3D 34 3B 31 30 3A 31 3A 32 3A 31 32 3D 35 3B 31 30 3A 31 3A 33 3A 31 35 3D 34 3B 31 30 3A 31 3A 33 3A 31 36 3D 35
# --- cleanup: despawn NPCs, restore player control, warp onward ---
sm.sendNpcController(OBJECT_1, False)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(False, True, False, False)
sm.sendNpcController(OBJECT_3, False)
sm.sendNpcController(OBJECT_4, False)
sm.sendNpcController(OBJECT_5, False)
sm.sendNpcController(OBJECT_6, False)
sm.warp(940001220, 0)
| sm.curNodeEventEnd(True)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(True, True, False, False)
sm.forcedInput(0)
sm.forcedInput(2)
sm.sendDelay(30)
sm.forcedInput(0)
OBJECT_1 = sm.sendNpcController(3000103, -300, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_1, 'summon', 0)
OBJECT_2 = sm.sendNpcController(3000104, -450, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_2, 'summon', 0)
OBJECT_3 = sm.sendNpcController(3000110, -120, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_3, 'summon', 0)
OBJECT_4 = sm.sendNpcController(3000114, -100, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_4, 'summon', 0)
OBJECT_5 = sm.sendNpcController(3000111, 130, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_5, 'summon', 0)
OBJECT_6 = sm.sendNpcController(3000115, 250, 220)
sm.showNpcSpecialActionByObjectId(OBJECT_6, 'summon', 0)
sm.setSpeakerID(3000104)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendNext('Nothing here, big surprise...')
sm.showEffect('Effect/Direction9.img/effect/story/BalloonMsg1/3', 1200, 0, -120, 0, OBJECT_2, False, 0)
sm.showEffect('Effect/Direction9.img/effect/story/BalloonMsg1/3', 1200, 0, -120, -2, -2, False, 0)
sm.showEffect('Effect/Direction9.img/effect/story/BalloonMsg1/4', 1200, 0, -120, 0, OBJECT_1, False, 0)
sm.sendDelay(1200)
sm.setSpeakerID(3000103)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendNext("Well, those priests are keeping busy. It's funny, though...I don't recognize any of them.")
sm.setSpeakerID(3000103)
sm.removeEscapeButton()
sm.flipDialoguePlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendSay('Shhh! Something is not right. Velderoth!')
sm.setSpeakerID(3000104)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("You're right. They look suspicious. I'm going to run back to base and get help. You two stay here and keep an eye on them, okay? But no heroics. You get out of here if they spot you.")
sm.showEffect('Effect/Direction9.img/effect/story/BalloonMsg0/0', 1200, 0, -120, 0, OBJECT_1, False, 0)
sm.sendDelay(900)
sm.setSpeakerID(3000103)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendNext('What are you talking about?')
sm.sendNpcController(OBJECT_2, False)
sm.setSpeakerID(3000103)
sm.removeEscapeButton()
sm.flipDialoguePlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendSay('They attacked the East Sanctum? What are they trying to do with the Relic?)')
sm.setSpeakerID(3000110)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay("The relic's disappearance should weaken the shields.")
sm.setSpeakerID(3000114)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay('I thought the relic was cursed... should we really be touching it?')
sm.setSpeakerID(3000110)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay('I did not realize they allowed superstitious nincompoops entry to our order! Will you balk at the call of destiny?')
sm.setSpeakerID(3000110)
sm.removeEscapeButton()
sm.flipDialoguePlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendSay('Are they trying to take the Relic?')
sm.setSpeakerID(3000103)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendSay('We gotta stop them!')
sm.moveNpcByObjectId(OBJECT_1, False, 300, 100)
sm.sendDelay(300)
sm.forcedInput(2)
sm.showEffect('Effect/Direction9.img/effect/story/BalloonMsg1/1', 1200, 0, -120, 0, OBJECT_3, False, 0)
sm.showEffect('Effect/Direction9.img/effect/story/BalloonMsg1/1', 1200, 0, -120, 0, OBJECT_4, False, 0)
sm.showEffect('Effect/Direction9.img/effect/story/BalloonMsg1/1', 1200, 0, -120, 0, OBJECT_5, False, 0)
sm.showEffect('Effect/Direction9.img/effect/story/BalloonMsg1/1', 1200, 0, -120, 0, OBJECT_6, False, 0)
sm.sendDelay(600)
sm.forcedInput(0)
sm.showEffect('Effect/Direction9.img/effect/story/BalloonMsg1/7', 900, 0, -120, 0, OBJECT_1, False, 0)
sm.sendDelay(900)
sm.showFieldEffect('kaiser/tear_rush', 0)
sm.sendDelay(3000)
sm.sendNpcController(OBJECT_1, False)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(False, True, False, False)
sm.sendNpcController(OBJECT_3, False)
sm.sendNpcController(OBJECT_4, False)
sm.sendNpcController(OBJECT_5, False)
sm.sendNpcController(OBJECT_6, False)
sm.warp(940001220, 0) |
# Base mapping from semantic pitch names to LilyPond pitch names; the
# key-signature and note-pitch tables below are derived from it so the
# three tables cannot drift out of sync.
_PITCH_TO_LP = {
    "C": "c", "C#": "cis", "Cb": "ces",
    "D": "d", "D#": "dis", "Db": "des",
    "E": "e", "E#": "eis", "Eb": "ees",
    "F": "f", "F#": "fis", "Fb": "fes",
    "G": "g", "G#": "gis", "Gb": "ges",
    "A": "a", "A#": "ais", "Ab": "aes",
    "B": "b", "B#": "bis", "Bb": "bes",
}
# dictionary of key signatures, e.g. "Cm" -> "\key c \minor "
keySigSemanticToLP = {}
for _pitch, _lp in _PITCH_TO_LP.items():
    keySigSemanticToLP[_pitch + "m"] = "\\key " + _lp + " \\minor "
    keySigSemanticToLP[_pitch + "M"] = "\\key " + _lp + " \\major "
# Base durations; plain, dotted and double-dotted entries are generated
# for every one of them.  This fixes two typos in the hand-written table
# ("quarter.." mapped to "4. " instead of "4.. ", and the key
# "sixty_fourth.," was misspelled) and adds the previously missing
# dotted variants of "hundred_twenty_eighth".
_LENGTH_TO_LP = {
    "quadruple_whole": "\\longa",
    "double_whole": "\\breve",
    "whole": "1",
    "half": "2",
    "quarter": "4",
    "eighth": "8",
    "sixteenth": "16",
    "thirty_second": "32",
    "sixty_fourth": "64",
    "hundred_twenty_eighth": "128",
}
# dictionary of note lengths
lengthToNum = {}
for _name, _num in _LENGTH_TO_LP.items():
    lengthToNum[_name] = _num + " "
    lengthToNum[_name + "."] = _num + ". "
    lengthToNum[_name + ".."] = _num + ".. "
# keep the original misspelled key for backward compatibility
lengthToNum["sixty_fourth.,"] = "64.. "
# dictionary of note pitches, e.g. "C#" -> "cis "
letterToNote = {_pitch: _lp + " " for _pitch, _lp in _PITCH_TO_LP.items()}
# dictionary of clefs (LilyPond clef names keyed by clef letter + line)
clefDict = {
    "C1": "soprano ",
    "C2": "mezzosoprano ",
    "C3": "alto ",
    "C4": "tenor ",
    "C5": "baritone ",
    "F3": "baritone ",
    "F4": "bass ",
    "F5": "subbass ",
    "G1": "french ",
    "G2": "treble ",
}
# beats per measure as a string; updated by parser() when a time
# signature command is seen
BEATS_PER_MEASURE = "1"
def _octave_marks(octave):
    """Return LilyPond octave marks relative to octave 3: "," down, "'" up."""
    if octave < 3:
        return (3 - octave) * ","
    return (octave - 3) * "'"
def _pitched_note(spec, allow_fermata):
    """Translate a "<pitch><octave>_<length>[_fermata]" spec into LilyPond.

    The fermata suffix is only honoured when allow_fermata is true (the
    gracenote branch historically never stripped it).
    """
    note_info = spec.split("_", 1)
    ending = ""
    if allow_fermata and note_info[1][-7:] == "fermata":
        ending = "\\fermata "
        note_info[1] = note_info[1][:-8]
    octave = int(note_info[0][-1])
    return (letterToNote[note_info[0][:-1]] + _octave_marks(octave) +
            lengthToNum[note_info[1]] + ending)
def parser(toparse):
    """Parse one semantic-model command, return the LilyPond equivalent.

    inputs:  toparse -- one command such as "clef-G2", "note-C4_quarter",
             "multirest-8" or "barline"
    outputs: the LilyPond snippet for that command; unknown commands come
             back as a LilyPond comment so they stay visible in the output
    """
    divided_string = toparse.split("-")
    global BEATS_PER_MEASURE
    if divided_string[0] == "clef":
        return "\\clef " + clefDict[divided_string[1]] + "\n"
    elif divided_string[0] == "keySignature":
        return keySigSemanticToLP[divided_string[1]] + "\n"
    elif divided_string[0] == "timeSignature":
        # remember the beat count so multirests can be sized in measures
        BEATS_PER_MEASURE = divided_string[1][0]
        if divided_string[1] == "C":
            return "\\time 4/4 \n"
        elif divided_string[1] == "C/":
            return "\\time 2/2 \n"
        else:
            return "\\time " + divided_string[1] + "\n"
    elif divided_string[0] == "multirest":
        # BEATS_PER_MEASURE is a *string*, so the original "!= 0" guard
        # was always true; compare the numeric value instead so the
        # fallback branch is actually reachable
        if int(BEATS_PER_MEASURE) != 0:
            return "\\compressFullBarRests \n \t R1*" + \
                str(int(divided_string[1]) // int(BEATS_PER_MEASURE)) + "\n "
        else:
            return "\\compressFullBarRests \n \t R1*" + \
                divided_string[1] + "\n "
    elif divided_string[0] == "barline":
        return " \\bar \"|\" "
    elif divided_string[0] == "rest":
        ending = ""
        if divided_string[1][-7:] == "fermata":
            ending = "\\fermata "
            divided_string[1] = divided_string[1][:-8]
        return "r" + lengthToNum[divided_string[1]] + ending + " "
    elif divided_string[0] == "note":
        return _pitched_note(divided_string[1], True)
    elif divided_string[0] == "gracenote":
        return "\\grace { " + _pitched_note(divided_string[1], False) + " }"
    elif divided_string[0] == "tie":
        return "~"
    else:
        return "% could not find a match for the musical element" + toparse
def generate_music(model_output, piece_title):
    """Translate the semantic model output into a LilyPond document string.

    inputs:  model_output -- newline-separated semantic commands
             piece_title  -- title placed in the LilyPond header
    outputs: the complete LilyPond source text (not written to disk here)
    """
    element_list = model_output.split("\n")
    lilyPond = "\\version \"2.20.0\" \n\header{\n title = \"" +\
        piece_title + "\"\n}\n\\score{ {\n"
    # the last split element is skipped -- presumably the empty string
    # left after a trailing newline; TODO confirm inputs always end in \n
    for element in element_list[:-1]:
        next_elem = parser(element)
        print(next_elem)
        lilyPond += next_elem
    lilyPond += "} \n \\midi{} \n \\layout{} }"
    return lilyPond
| key_sig_semantic_to_lp = {'Cm': '\\key c \\minor ', 'CM': '\\key c \\major ', 'C#m': '\\key cis \\minor ', 'C#M': '\\key cis \\major ', 'Cbm': '\\key ces \\minor ', 'CbM': '\\key ces \\major ', 'Dm': '\\key d \\minor ', 'DM': '\\key d \\major ', 'D#m': '\\key dis \\minor ', 'D#M': '\\key dis \\major ', 'Dbm': '\\key des \\minor ', 'DbM': '\\key des \\major ', 'Em': '\\key e \\minor ', 'EM': '\\key e \\major ', 'E#m': '\\key eis \\minor ', 'E#M': '\\key eis \\major ', 'Ebm': '\\key ees \\minor ', 'EbM': '\\key ees \\major ', 'Fm': '\\key f \\minor ', 'FM': '\\key f \\major ', 'F#m': '\\key fis \\minor ', 'F#M': '\\key fis \\major ', 'Fbm': '\\key fes \\minor ', 'FbM': '\\key fes \\major ', 'Gm': '\\key g \\minor ', 'GM': '\\key g \\major ', 'G#m': '\\key gis \\minor ', 'G#M': '\\key gis \\major ', 'Gbm': '\\key ges \\minor ', 'GbM': '\\key ges \\major ', 'Am': '\\key a \\minor ', 'AM': '\\key a \\major ', 'A#m': '\\key ais \\minor ', 'A#M': '\\key ais \\major ', 'Abm': '\\key aes \\minor ', 'AbM': '\\key aes \\major ', 'Bm': '\\key b \\minor ', 'BM': '\\key b \\major ', 'B#m': '\\key bis \\minor ', 'B#M': '\\key bis \\major ', 'Bbm': '\\key bes \\minor ', 'BbM': '\\key bes \\major '}
length_to_num = {'quadruple_whole': '\\longa ', 'quadruple_whole.': '\\longa. ', 'double_whole': '\\breve ', 'double_whole.': '\\breve. ', 'whole': '1 ', 'whole.': '1. ', 'whole..': '1.. ', 'half': '2 ', 'half.': '2. ', 'half..': '2.. ', 'quarter': '4 ', 'quarter.': '4. ', 'quarter..': '4. ', 'eighth': '8 ', 'eighth.': '8. ', 'eighth..': '8.. ', 'sixteenth': '16 ', 'sixteenth.': '16. ', 'sixteenth..': '16.. ', 'thirty_second': '32 ', 'thirty_second.': '32. ', 'thirty_second..': '32.. ', 'sixty_fourth': '64 ', 'sixty_fourth.': '64. ', 'sixty_fourth.,': '64.. ', 'hundred_twenty_eighth': '128 '}
letter_to_note = {'C': 'c ', 'C#': 'cis ', 'Cb': 'ces ', 'D': 'd ', 'D#': 'dis ', 'Db': 'des ', 'E': 'e ', 'E#': 'eis ', 'Eb': 'ees ', 'F': 'f ', 'F#': 'fis ', 'Fb': 'fes ', 'G': 'g ', 'G#': 'gis ', 'Gb': 'ges ', 'A': 'a ', 'A#': 'ais ', 'Ab': 'aes ', 'B': 'b ', 'B#': 'bis ', 'Bb': 'bes '}
clef_dict = {'C1': 'soprano ', 'C2': 'mezzosoprano ', 'C3': 'alto ', 'C4': 'tenor ', 'C5': 'baritone ', 'F3': 'baritone ', 'F4': 'bass ', 'F5': 'subbass ', 'G1': 'french ', 'G2': 'treble '}
beats_per_measure = '1'
def parser(toparse):
""" parses each command output of the semantic model,
returns the equivalent LilyPond command
inputs: toparse -- TODO
outputs: TODO
"""
divided_string = toparse.split('-')
global BEATS_PER_MEASURE
if divided_string[0] == 'clef':
return '\\clef ' + clefDict[divided_string[1]] + '\n'
elif divided_string[0] == 'keySignature':
return keySigSemanticToLP[divided_string[1]] + '\n'
elif divided_string[0] == 'timeSignature':
beats_per_measure = divided_string[1][0]
if divided_string[1] == 'C':
return '\\time 4/4 \n'
elif divided_string[1] == 'C/':
return '\\time 2/2 \n'
else:
return '\\time ' + divided_string[1] + '\n'
elif divided_string[0] == 'multirest':
if BEATS_PER_MEASURE != 0:
return '\\compressFullBarRests \n \t R1*' + str(int(divided_string[1]) // int(BEATS_PER_MEASURE)) + '\n '
else:
return '\\compressFullBarRests \n \t R1*' + divided_string[1] + '\n '
elif divided_string[0] == 'barline':
return ' \\bar "|" '
elif divided_string[0] == 'rest':
ending = ''
if divided_string[1][-7:] == 'fermata':
ending = '\\fermata '
divided_string[1] = divided_string[1][:-8]
return 'r' + lengthToNum[divided_string[1]] + ending + ' '
elif divided_string[0] == 'note':
note_info = divided_string[1].split('_', 1)
ending = ''
if note_info[1][-7:] == 'fermata':
ending = '\\fermata '
note_info[1] = note_info[1][:-8]
if int(note_info[0][-1]) == 3:
return letterToNote[note_info[0][:-1]] + lengthToNum[note_info[1]] + ending
elif int(note_info[0][-1]) < 3:
return letterToNote[note_info[0][:-1]] + (3 - int(note_info[0][-1])) * ',' + lengthToNum[note_info[1]] + ending
elif int(note_info[0][-1]) > 3:
return letterToNote[note_info[0][:-1]] + (int(note_info[0][-1]) - 3) * "'" + lengthToNum[note_info[1]] + ending
elif divided_string[0] == 'gracenote':
note_info = divided_string[1].split('_', 1)
if int(note_info[0][-1]) == 3:
notepart = letterToNote[note_info[0][:-1]] + lengthToNum[note_info[1]]
elif int(note_info[0][-1]) < 3:
notepart = letterToNote[note_info[0][:-1]] + (3 - int(note_info[0][-1])) * ',' + lengthToNum[note_info[1]]
elif int(note_info[0][-1]) > 3:
notepart = letterToNote[note_info[0][:-1]] + (int(note_info[0][-1]) - 3) * "'" + lengthToNum[note_info[1]]
return '\\grace { ' + notepart + ' }'
elif divided_string[0] == 'tie':
return '~'
else:
return '% could not find a match for the musical element' + toparse
def generate_music(model_output, piece_title):
""" calls the parser to parse the input,
sends it to a LilyPond file to generate a PDF
inputs: model_output -- string output produced by the semantic model
piece_title -- title of the piece, which becomes the filename
outputs: TODO
"""
element_list = model_output.split('\n')
lily_pond = '\\version "2.20.0" \n\\header{\n title = "' + piece_title + '"\n}\n\\score{ {\n'
for x in range(len(element_list) - 1):
next_elem = parser(element_list[x])
print(next_elem)
lily_pond += next_elem
lily_pond += '} \n \\midi{} \n \\layout{} }'
    return lily_pond
class PyDisFishError(Exception):
    """Base error class for the module."""


class FetchError(PyDisFishError):
    """Raised when _fetch_list() fails.

    This is probably something either on your end or discord's.
    """


class NotReady(PyDisFishError):
    """Raised when trying to check a URL before Phisherman is ready.

    You should use Phisherman.ready to tell when the domain list has
    been fetched.
    """
| class Pydisfisherror(Exception):
"""Base error class for the module"""
pass
class Fetcherror(Pydisfisherror):
"""Error that's raised when _fetch_list() fails
This is probably something either on your end or discord's
"""
pass
class Notready(Pydisfisherror):
"""Error that's raised when trying to check a URL before Phisherman is ready
you should use Phisherman.ready to tell when the domain list has been fetched
"""
pass |
# 2019-01-31
# number sorting and reading in list
score = [[80, 85, 90], [82, 87, 92], [75, 92, 84]]
# prepend each student's total so that sorting ranks by total first
# (sum() replaces the original manual accumulation loop)
for i, student in enumerate(score):
    score[i] = [sum(student)] + student
# highest total first
score.sort(reverse=True)
for s in score:
    print(s[0], s[1], s[2], s[3])
| score = [[80, 85, 90], [82, 87, 92], [75, 92, 84]]
for i in range(len(score)):
student = score[i]
score_sum = 0
for j in student:
score_sum += j
score[i] = [score_sum] + score[i]
score.sort(reverse=True)
for s in score:
print(s[0], s[1], s[2], s[3]) |
# Connection settings for the local datarepresentation MySQL database.
# SECURITY: the password is hard-coded in source control -- consider
# loading it from an environment variable or a secrets store instead.
mysql={
    'host':"localhost",
    'user':'root',
    'password':'Liscannor10',
    'database': 'datarepresentation'
}
# Map "leet" digits back to letters; any other character passes through.
d = {
    '0': 'O',
    '1': 'l',
    '3': 'E',
    '4': 'A',
    '5': 'S',
    '6': 'G',
    '8': 'B',
    '9': 'g',
}
s = input()
for c in s:
    print(d.get(c, c), end='')
print(d[c] if c in d else c, end='') | d = dict()
d['0'] = 'O'
d['1'] = 'l'
d['3'] = 'E'
d['4'] = 'A'
d['5'] = 'S'
d['6'] = 'G'
d['8'] = 'B'
d['9'] = 'g'
s = input()
for c in s:
print(d[c] if c in d else c, end='') |
# Square indices run 0..63 (an 8x8 board).
MIN_SQUARE = 0
MAX_SQUARE = 63
# largest value representable in 64 bits (all-ones bitboard mask)
MAX_INT = 2 ** 64 - 1
STARTING_FEN = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"
# DIRECTIONS
# square-index deltas for the eight compass directions (one rank = 8)
NORTH = 8
EAST = 1
SOUTH = -8
WEST = -1
NE = 9
SE = -7
SW = -9
NW = 7
# knight-move deltas, composed from the compass deltas above
NWW = NW + WEST
NNW = NORTH + NW
NNE = NORTH + NE
NEE = NE + EAST
SEE = SE + EAST
SSE = SOUTH + SE
SSW = SOUTH + SW
SWW = SW + WEST
INFINITY = float("inf")
# search depth limits, in plies
MAX_PLY = 31
QUIESCENCE_SEARCH_DEPTH_PLY = 5
| min_square = 0
max_square = 63
max_int = 2 ** 64 - 1
starting_fen = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1'
north = 8
east = 1
south = -8
west = -1
ne = 9
se = -7
sw = -9
nw = 7
nww = nw + west
nnw = north + nw
nne = north + ne
nee = ne + east
see = se + east
sse = south + se
ssw = south + sw
sww = sw + west
infinity = float('inf')
max_ply = 31
quiescence_search_depth_ply = 5 |
# Print the digits of the input least-significant first: 0 prints
# "ZERO"; any other digit d prints d copies of the character chr(d + 33).
num = input()[::-1]
for i in num:
    # convert once instead of three separate int(i) calls per digit
    digit = int(i)
    if digit == 0:
        print("ZERO")
    else:
        symbol = chr(digit + 33)
        print(symbol * digit)
| num = input()[::-1]
for i in num:
if int(i) == 0:
print('ZERO')
else:
symbol = chr(int(i) + 33)
print(symbol * int(i)) |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'libwebp_dec',
'type': 'static_library',
'dependencies' : [
'libwebp_dsp',
'libwebp_dsp_neon',
'libwebp_utils',
],
'include_dirs': ['.'],
'sources': [
'<(DEPTH)/third_party/libwebp/src/dec/alpha.c',
'<(DEPTH)/third_party/libwebp/src/dec/buffer.c',
'<(DEPTH)/third_party/libwebp/src/dec/frame.c',
'<(DEPTH)/third_party/libwebp/src/dec/idec.c',
'<(DEPTH)/third_party/libwebp/src/dec/io.c',
'<(DEPTH)/third_party/libwebp/src/dec/quant.c',
'<(DEPTH)/third_party/libwebp/src/dec/tree.c',
'<(DEPTH)/third_party/libwebp/src/dec/vp8.c',
'<(DEPTH)/third_party/libwebp/src/dec/vp8l.c',
'<(DEPTH)/third_party/libwebp/src/dec/webp.c',
],
},
{
'target_name': 'libwebp_demux',
'type': 'static_library',
'include_dirs': ['.'],
'sources': [
'demux/demux.c',
],
},
{
'target_name': 'libwebp_dsp',
'type': 'static_library',
'include_dirs': ['.'],
'sources': [
'<(DEPTH)/third_party/libwebp/src/dsp/alpha_processing.c',
'<(DEPTH)/third_party/libwebp/src/dsp/cpu.c',
'<(DEPTH)/third_party/libwebp/src/dsp/dec.c',
'<(DEPTH)/third_party/libwebp/src/dsp/dec_clip_tables.c',
'<(DEPTH)/third_party/libwebp/src/dsp/dec_mips32.c',
'<(DEPTH)/third_party/libwebp/src/dsp/dec_sse2.c',
'<(DEPTH)/third_party/libwebp/src/dsp/enc.c',
'<(DEPTH)/third_party/libwebp/src/dsp/enc_avx2.c',
'<(DEPTH)/third_party/libwebp/src/dsp/enc_mips32.c',
'<(DEPTH)/third_party/libwebp/src/dsp/enc_sse2.c',
'<(DEPTH)/third_party/libwebp/src/dsp/lossless.c',
'<(DEPTH)/third_party/libwebp/src/dsp/lossless_mips32.c',
'<(DEPTH)/third_party/libwebp/src/dsp/lossless_sse2.c',
'<(DEPTH)/third_party/libwebp/src/dsp/upsampling.c',
'<(DEPTH)/third_party/libwebp/src/dsp/upsampling_sse2.c',
'<(DEPTH)/third_party/libwebp/src/dsp/yuv.c',
'<(DEPTH)/third_party/libwebp/src/dsp/yuv_mips32.c',
'<(DEPTH)/third_party/libwebp/src/dsp/yuv_sse2.c',
],
# 'conditions': [
# ['OS == "android"', {
# 'includes': [ 'android/cpufeatures.gypi' ],
# }],
# ['order_profiling != 0', {
# 'target_conditions' : [
# ['_toolset=="target"', {
# 'cflags!': [ '-finstrument-functions' ],
# }],
# ],
# }],
# ],
},
{
'target_name': 'libwebp_dsp_neon',
'conditions': [
['target_arch == "arm" and arm_version >= 7 and (arm_neon == 1 or arm_neon_optional == 1)', {
'type': 'static_library',
'include_dirs': ['.'],
'sources': [
'<(DEPTH)/third_party/libwebp/src/dsp/dec_neon.c',
'<(DEPTH)/third_party/libwebp/src/dsp/enc_neon.c',
'<(DEPTH)/third_party/libwebp/src/dsp/lossless_neon.c',
'<(DEPTH)/third_party/libwebp/src/dsp/upsampling_neon.c',
],
# behavior similar to *.c.neon in an Android.mk
'cflags!': [ '-mfpu=vfpv3-d16' ],
'cflags': [ '-mfpu=neon' ],
},{ # "target_arch != "arm" or arm_version < 7"
'type': 'none',
}],
['order_profiling != 0', {
'target_conditions' : [
['_toolset=="target"', {
'cflags!': [ '-finstrument-functions' ],
}],
],
}],
],
},
{
'target_name': 'libwebp_enc',
'type': 'static_library',
'include_dirs': ['.'],
'sources': [
'<(DEPTH)/third_party/libwebp/src/enc/alpha.c',
'<(DEPTH)/third_party/libwebp/src/enc/analysis.c',
'<(DEPTH)/third_party/libwebp/src/enc/backward_references.c',
'<(DEPTH)/third_party/libwebp/src/enc/config.c',
'<(DEPTH)/third_party/libwebp/src/enc/cost.c',
'<(DEPTH)/third_party/libwebp/src/enc/filter.c',
'<(DEPTH)/third_party/libwebp/src/enc/frame.c',
'<(DEPTH)/third_party/libwebp/src/enc/histogram.c',
'<(DEPTH)/third_party/libwebp/src/enc/iterator.c',
'<(DEPTH)/third_party/libwebp/src/enc/picture.c',
'<(DEPTH)/third_party/libwebp/src/enc/picture_csp.c',
'<(DEPTH)/third_party/libwebp/src/enc/picture_psnr.c',
'<(DEPTH)/third_party/libwebp/src/enc/picture_rescale.c',
'<(DEPTH)/third_party/libwebp/src/enc/picture_tools.c',
'<(DEPTH)/third_party/libwebp/src/enc/quant.c',
'<(DEPTH)/third_party/libwebp/src/enc/syntax.c',
'<(DEPTH)/third_party/libwebp/src/enc/token.c',
'<(DEPTH)/third_party/libwebp/src/enc/tree.c',
'<(DEPTH)/third_party/libwebp/src/enc/vp8l.c',
'<(DEPTH)/third_party/libwebp/src/enc/webpenc.c',
],
},
{
'target_name': 'libwebp_utils',
'type': 'static_library',
'include_dirs': ['.'],
'sources': [
'<(DEPTH)/third_party/libwebp/src/utils/bit_reader.c',
'<(DEPTH)/third_party/libwebp/src/utils/bit_writer.c',
'<(DEPTH)/third_party/libwebp/src/utils/color_cache.c',
'<(DEPTH)/third_party/libwebp/src/utils/filters.c',
'<(DEPTH)/third_party/libwebp/src/utils/huffman.c',
'<(DEPTH)/third_party/libwebp/src/utils/huffman_encode.c',
'<(DEPTH)/third_party/libwebp/src/utils/quant_levels.c',
'<(DEPTH)/third_party/libwebp/src/utils/quant_levels_dec.c',
'<(DEPTH)/third_party/libwebp/src/utils/random.c',
'<(DEPTH)/third_party/libwebp/src/utils/rescaler.c',
'<(DEPTH)/third_party/libwebp/src/utils/thread.c',
'<(DEPTH)/third_party/libwebp/src/utils/utils.c',
],
},
{
'target_name': 'libwebp_mux',
'type': 'static_library',
'include_dirs': ['.'],
'sources': [
'<(DEPTH)/third_party/libwebp/src/mux/muxedit.c',
'<(DEPTH)/third_party/libwebp/src/mux/muxinternal.c',
'<(DEPTH)/third_party/libwebp/src/mux/muxread.c',
],
},
{
'target_name': 'libwebp_enc_mux',
'type': 'static_library',
'dependencies': [
'libwebp_mux',
],
'include_dirs': [
'<(DEPTH)/third_party/libwebp/src',
],
'sources': [
'<(DEPTH)/third_party/libwebp/examples/gif2webp_util.c',
],
},
{
'target_name': 'libwebp',
'type': 'none',
'dependencies' : [
'libwebp_dec',
'libwebp_demux',
'libwebp_dsp',
'libwebp_dsp_neon',
'libwebp_enc',
'libwebp_enc_mux',
'libwebp_utils',
],
'direct_dependent_settings': {
'include_dirs': ['.'],
},
'conditions': [
['OS!="win"', {'product_name': 'webp'}],
],
},
],
}
| {'targets': [{'target_name': 'libwebp_dec', 'type': 'static_library', 'dependencies': ['libwebp_dsp', 'libwebp_dsp_neon', 'libwebp_utils'], 'include_dirs': ['.'], 'sources': ['<(DEPTH)/third_party/libwebp/src/dec/alpha.c', '<(DEPTH)/third_party/libwebp/src/dec/buffer.c', '<(DEPTH)/third_party/libwebp/src/dec/frame.c', '<(DEPTH)/third_party/libwebp/src/dec/idec.c', '<(DEPTH)/third_party/libwebp/src/dec/io.c', '<(DEPTH)/third_party/libwebp/src/dec/quant.c', '<(DEPTH)/third_party/libwebp/src/dec/tree.c', '<(DEPTH)/third_party/libwebp/src/dec/vp8.c', '<(DEPTH)/third_party/libwebp/src/dec/vp8l.c', '<(DEPTH)/third_party/libwebp/src/dec/webp.c']}, {'target_name': 'libwebp_demux', 'type': 'static_library', 'include_dirs': ['.'], 'sources': ['demux/demux.c']}, {'target_name': 'libwebp_dsp', 'type': 'static_library', 'include_dirs': ['.'], 'sources': ['<(DEPTH)/third_party/libwebp/src/dsp/alpha_processing.c', '<(DEPTH)/third_party/libwebp/src/dsp/cpu.c', '<(DEPTH)/third_party/libwebp/src/dsp/dec.c', '<(DEPTH)/third_party/libwebp/src/dsp/dec_clip_tables.c', '<(DEPTH)/third_party/libwebp/src/dsp/dec_mips32.c', '<(DEPTH)/third_party/libwebp/src/dsp/dec_sse2.c', '<(DEPTH)/third_party/libwebp/src/dsp/enc.c', '<(DEPTH)/third_party/libwebp/src/dsp/enc_avx2.c', '<(DEPTH)/third_party/libwebp/src/dsp/enc_mips32.c', '<(DEPTH)/third_party/libwebp/src/dsp/enc_sse2.c', '<(DEPTH)/third_party/libwebp/src/dsp/lossless.c', '<(DEPTH)/third_party/libwebp/src/dsp/lossless_mips32.c', '<(DEPTH)/third_party/libwebp/src/dsp/lossless_sse2.c', '<(DEPTH)/third_party/libwebp/src/dsp/upsampling.c', '<(DEPTH)/third_party/libwebp/src/dsp/upsampling_sse2.c', '<(DEPTH)/third_party/libwebp/src/dsp/yuv.c', '<(DEPTH)/third_party/libwebp/src/dsp/yuv_mips32.c', '<(DEPTH)/third_party/libwebp/src/dsp/yuv_sse2.c']}, {'target_name': 'libwebp_dsp_neon', 'conditions': [['target_arch == "arm" and arm_version >= 7 and (arm_neon == 1 or arm_neon_optional == 1)', {'type': 'static_library', 'include_dirs': ['.'], 
'sources': ['<(DEPTH)/third_party/libwebp/src/dsp/dec_neon.c', '<(DEPTH)/third_party/libwebp/src/dsp/enc_neon.c', '<(DEPTH)/third_party/libwebp/src/dsp/lossless_neon.c', '<(DEPTH)/third_party/libwebp/src/dsp/upsampling_neon.c'], 'cflags!': ['-mfpu=vfpv3-d16'], 'cflags': ['-mfpu=neon']}, {'type': 'none'}], ['order_profiling != 0', {'target_conditions': [['_toolset=="target"', {'cflags!': ['-finstrument-functions']}]]}]]}, {'target_name': 'libwebp_enc', 'type': 'static_library', 'include_dirs': ['.'], 'sources': ['<(DEPTH)/third_party/libwebp/src/enc/alpha.c', '<(DEPTH)/third_party/libwebp/src/enc/analysis.c', '<(DEPTH)/third_party/libwebp/src/enc/backward_references.c', '<(DEPTH)/third_party/libwebp/src/enc/config.c', '<(DEPTH)/third_party/libwebp/src/enc/cost.c', '<(DEPTH)/third_party/libwebp/src/enc/filter.c', '<(DEPTH)/third_party/libwebp/src/enc/frame.c', '<(DEPTH)/third_party/libwebp/src/enc/histogram.c', '<(DEPTH)/third_party/libwebp/src/enc/iterator.c', '<(DEPTH)/third_party/libwebp/src/enc/picture.c', '<(DEPTH)/third_party/libwebp/src/enc/picture_csp.c', '<(DEPTH)/third_party/libwebp/src/enc/picture_psnr.c', '<(DEPTH)/third_party/libwebp/src/enc/picture_rescale.c', '<(DEPTH)/third_party/libwebp/src/enc/picture_tools.c', '<(DEPTH)/third_party/libwebp/src/enc/quant.c', '<(DEPTH)/third_party/libwebp/src/enc/syntax.c', '<(DEPTH)/third_party/libwebp/src/enc/token.c', '<(DEPTH)/third_party/libwebp/src/enc/tree.c', '<(DEPTH)/third_party/libwebp/src/enc/vp8l.c', '<(DEPTH)/third_party/libwebp/src/enc/webpenc.c']}, {'target_name': 'libwebp_utils', 'type': 'static_library', 'include_dirs': ['.'], 'sources': ['<(DEPTH)/third_party/libwebp/src/utils/bit_reader.c', '<(DEPTH)/third_party/libwebp/src/utils/bit_writer.c', '<(DEPTH)/third_party/libwebp/src/utils/color_cache.c', '<(DEPTH)/third_party/libwebp/src/utils/filters.c', '<(DEPTH)/third_party/libwebp/src/utils/huffman.c', '<(DEPTH)/third_party/libwebp/src/utils/huffman_encode.c', 
'<(DEPTH)/third_party/libwebp/src/utils/quant_levels.c', '<(DEPTH)/third_party/libwebp/src/utils/quant_levels_dec.c', '<(DEPTH)/third_party/libwebp/src/utils/random.c', '<(DEPTH)/third_party/libwebp/src/utils/rescaler.c', '<(DEPTH)/third_party/libwebp/src/utils/thread.c', '<(DEPTH)/third_party/libwebp/src/utils/utils.c']}, {'target_name': 'libwebp_mux', 'type': 'static_library', 'include_dirs': ['.'], 'sources': ['<(DEPTH)/third_party/libwebp/src/mux/muxedit.c', '<(DEPTH)/third_party/libwebp/src/mux/muxinternal.c', '<(DEPTH)/third_party/libwebp/src/mux/muxread.c']}, {'target_name': 'libwebp_enc_mux', 'type': 'static_library', 'dependencies': ['libwebp_mux'], 'include_dirs': ['<(DEPTH)/third_party/libwebp/src'], 'sources': ['<(DEPTH)/third_party/libwebp/examples/gif2webp_util.c']}, {'target_name': 'libwebp', 'type': 'none', 'dependencies': ['libwebp_dec', 'libwebp_demux', 'libwebp_dsp', 'libwebp_dsp_neon', 'libwebp_enc', 'libwebp_enc_mux', 'libwebp_utils'], 'direct_dependent_settings': {'include_dirs': ['.']}, 'conditions': [['OS!="win"', {'product_name': 'webp'}]]}]} |
class CronDayOfWeek(int):
"""
Job Manager cron scheduling day of week. Zero represents Sunday.
-1 represents all days of a week and only supported for cron
schedule create and modify.
Range : [-1..6].
"""
@staticmethod
def get_api_name():
return "cron-day-of-week"
| class Crondayofweek(int):
"""
Job Manager cron scheduling day of week. Zero represents Sunday.
-1 represents all days of a week and only supported for cron
schedule create and modify.
Range : [-1..6].
"""
@staticmethod
def get_api_name():
return 'cron-day-of-week' |
class Solution:
def isPowerOfFour1(self, num):
"""
:type num: int
:rtype: bool
"""
x = 1
while x != num:
if x < num:
x <<= 2
else:
return False
return True
def isPowerOfFour2(self, num):
"""
:type num: int
:rtype: bool
"""
return num > 0 and (num & (num - 1)) == 0 and (num & 0b01010101010101010101010101010101) == num
| class Solution:
def is_power_of_four1(self, num):
"""
:type num: int
:rtype: bool
"""
x = 1
while x != num:
if x < num:
x <<= 2
else:
return False
return True
def is_power_of_four2(self, num):
"""
:type num: int
:rtype: bool
"""
return num > 0 and num & num - 1 == 0 and (num & 1431655765 == num) |
def simple_moving_average(df, base, target, period):
"""
Function to compute Simple Moving Average (SMA)
This is a lagging indicator
df - the data frame
base - on which the indicator has to be calculated eg Close
target - column name to store output
period - period of the sma
"""
df[target] = df[base].rolling(window=period).mean().round(2)
return df
def exponential_moving_average(df, base, target, period):
"""
Function to compute Exponential Moving Average (EMA)
This is a lagging indicator
df - the data frame
base - on which the indicator has to be calculated eg Close
target - column name to store output
period - period of the ema
"""
df[target] = df[base].ewm(ignore_na=False, min_periods=period, com=period, adjust=True).mean()
return df
def moving_average_convergence_divergence(df, base, macd_target, macd_line_target, period_long=26, period_short=12,
period_signal=9):
"""
Function to compute MACD (Moving Average Convergence/Divergence)
This is a lagging indicator
df - the data frame
base - on which the indicator has to be calculated eg Close
macd_target - column name to store macd value
macd_line_target - column name to store macd line
period_long - period of the longer time frame
period_short - period of the shorter time frame
period_signal - period of the signal
"""
short_ema_target = 'ema_{}'.format(period_short)
long_ema_target = 'ema_{}'.format(period_long)
df = exponential_moving_average(df, base, long_ema_target, period_long)
df = exponential_moving_average(df, base, short_ema_target, period_short)
df[macd_target] = df[short_ema_target] - df[long_ema_target]
df[macd_line_target] = df[macd_target].ewm(ignore_na=False, min_periods=0, com=period_signal, adjust=True).mean()
df = df.drop([short_ema_target, long_ema_target], axis=1)
return df
| def simple_moving_average(df, base, target, period):
"""
Function to compute Simple Moving Average (SMA)
This is a lagging indicator
df - the data frame
base - on which the indicator has to be calculated eg Close
target - column name to store output
period - period of the sma
"""
df[target] = df[base].rolling(window=period).mean().round(2)
return df
def exponential_moving_average(df, base, target, period):
"""
Function to compute Exponential Moving Average (EMA)
This is a lagging indicator
df - the data frame
base - on which the indicator has to be calculated eg Close
target - column name to store output
period - period of the ema
"""
df[target] = df[base].ewm(ignore_na=False, min_periods=period, com=period, adjust=True).mean()
return df
def moving_average_convergence_divergence(df, base, macd_target, macd_line_target, period_long=26, period_short=12, period_signal=9):
"""
Function to compute MACD (Moving Average Convergence/Divergence)
This is a lagging indicator
df - the data frame
base - on which the indicator has to be calculated eg Close
macd_target - column name to store macd value
macd_line_target - column name to store macd line
period_long - period of the longer time frame
period_short - period of the shorter time frame
period_signal - period of the signal
"""
short_ema_target = 'ema_{}'.format(period_short)
long_ema_target = 'ema_{}'.format(period_long)
df = exponential_moving_average(df, base, long_ema_target, period_long)
df = exponential_moving_average(df, base, short_ema_target, period_short)
df[macd_target] = df[short_ema_target] - df[long_ema_target]
df[macd_line_target] = df[macd_target].ewm(ignore_na=False, min_periods=0, com=period_signal, adjust=True).mean()
df = df.drop([short_ema_target, long_ema_target], axis=1)
return df |
class Usuario:
def __init__(self, nome) -> None:
self.nome = nome
self.id = None
| class Usuario:
def __init__(self, nome) -> None:
self.nome = nome
self.id = None |
_all__ = [
'Auto'
]
class Auto(object):
pass
| _all__ = ['Auto']
class Auto(object):
pass |
def add(number_one, number_two):
return number_one + number_two
def multiply(number_one, number_two):
return number_one * number_two
if __name__ == "__main__":
print(add(3, 4))
print(multiply(5, 5))
| def add(number_one, number_two):
return number_one + number_two
def multiply(number_one, number_two):
return number_one * number_two
if __name__ == '__main__':
print(add(3, 4))
print(multiply(5, 5)) |
# ElasticQuery
# File: exception.py
# Desc: ES query builder exceptions
class QueryError(ValueError):
pass
class NoQueryError(QueryError):
pass
class NoAggregateError(QueryError):
pass
class NoSuggesterError(QueryError):
pass
class MissingArgError(ValueError):
pass
| class Queryerror(ValueError):
pass
class Noqueryerror(QueryError):
pass
class Noaggregateerror(QueryError):
pass
class Nosuggestererror(QueryError):
pass
class Missingargerror(ValueError):
pass |
#serie a,b,a,b,a,b....
limite = int(input("Digite las veces que se repetira: "))
interructor = True
controlador = 0
letra = ""
while controlador < limite:
controlador = controlador + 1
if interructor:
letra = "A"
interructor = False
else:
letra = "B"
interructor = True
print(letra, end=", ")
| limite = int(input('Digite las veces que se repetira: '))
interructor = True
controlador = 0
letra = ''
while controlador < limite:
controlador = controlador + 1
if interructor:
letra = 'A'
interructor = False
else:
letra = 'B'
interructor = True
print(letra, end=', ') |
# Longest Common Subsequence
def lcs(X, Y, m, n):
if m == 0 or n == 0:
return 0
elif X[m - 1] == Y[n - 1]:
return 1 + lcs(X, Y, m-1, n-1)
else:
return max(lcs(X, Y, m, n-1), lcs(X, Y, m-1, n))
X = 'AGGTAB'
Y = 'GXTXAYB'
print('Length of LCS is ', lcs(X, Y, len(X), len(Y)))
'''
Output:-
>>>
Length of LCS is 4
'''
| def lcs(X, Y, m, n):
if m == 0 or n == 0:
return 0
elif X[m - 1] == Y[n - 1]:
return 1 + lcs(X, Y, m - 1, n - 1)
else:
return max(lcs(X, Y, m, n - 1), lcs(X, Y, m - 1, n))
x = 'AGGTAB'
y = 'GXTXAYB'
print('Length of LCS is ', lcs(X, Y, len(X), len(Y)))
'\nOutput:-\n>>> \nLength of LCS is 4\n' |
#!env python
#
# ACSL Intermediate Division - Number Transformation - 2019-2020
# Solution by Arul John
# 2020-11-22
#
# Function to do the number transformations
def number_transformation(n, p):
str_n = n
n = int(n)
p = int(p)
if len(str_n) < p:
return
str_n_ans = '' # answer
# index of the Pth digit
i = len(str_n) - p
# Pth digit
pth_digit = int(str_n[i:i+1])
str_n_right = str_n[i+1:]
str_n_left = str_n[:i]
for c in str_n_left:
str_n_ans += str((int(c) + pth_digit) % 10)
str_n_ans += str(pth_digit)
for c in str_n_right:
str_n_ans += str(abs(int(c) - pth_digit))
return int(str_n_ans)
# Tests
def test_number_transformation():
test_data = [('296351 5', 193648),
('762184 3', 873173),
('45873216 7', 95322341),
('19750418 6', 86727361),
('386257914 5', 831752441)
]
for test_input, answer in test_data:
testlist = test_input.split(' ')
assert number_transformation(*testlist) == answer
# Main
if __name__ == "__main__":
test_number_transformation()
n, p = input().split()
print(number_transformation(n, p))
| def number_transformation(n, p):
str_n = n
n = int(n)
p = int(p)
if len(str_n) < p:
return
str_n_ans = ''
i = len(str_n) - p
pth_digit = int(str_n[i:i + 1])
str_n_right = str_n[i + 1:]
str_n_left = str_n[:i]
for c in str_n_left:
str_n_ans += str((int(c) + pth_digit) % 10)
str_n_ans += str(pth_digit)
for c in str_n_right:
str_n_ans += str(abs(int(c) - pth_digit))
return int(str_n_ans)
def test_number_transformation():
test_data = [('296351 5', 193648), ('762184 3', 873173), ('45873216 7', 95322341), ('19750418 6', 86727361), ('386257914 5', 831752441)]
for (test_input, answer) in test_data:
testlist = test_input.split(' ')
assert number_transformation(*testlist) == answer
if __name__ == '__main__':
test_number_transformation()
(n, p) = input().split()
print(number_transformation(n, p)) |
def fileNaming(names):
for i in range(1, len(names)):
temp = names[i]
counter = 1
while temp in names[0:i]:
temp = f"{names[i]}({counter})"
counter += 1
names[i] = temp
return names
print(fileNaming(["doc", "doc", "image", "doc(1)", "doc"]))
| def file_naming(names):
for i in range(1, len(names)):
temp = names[i]
counter = 1
while temp in names[0:i]:
temp = f'{names[i]}({counter})'
counter += 1
names[i] = temp
return names
print(file_naming(['doc', 'doc', 'image', 'doc(1)', 'doc'])) |
helpline_numbers = {
'source':'https://www.mohfw.gov.in/pdf/coronvavirushelplinenumber.pdf',
'helpline_number':'+91-11-23978046',
'toll_free':'1075',
'helpline_email':'ncov2019@gov.in',
'contact_details':[
{
'state_or_UT':'Andhra Pradesh',
'helpline_number':'0866-2410978'
},
{
'state_or_UT':'Arunachal Pradesh',
'helpline_number':'9436055743'
},
{
'state_or_UT':'Assam',
'helpline_number':'6913347770'
},
{
'state_or_UT':'Bihar',
'helpline_number':'104'
},
{
'state_or_UT':'Chhattisgarh',
'helpline_number':'104'
},
{
'state_or_UT':'Goa',
'helpline_number':'104'
},
{
'state_or_UT':'Gujarat',
'helpline_number':'104'
},
{
'state_or_UT':'Haryana',
'helpline_number':'8558893911'
},
{
'state_or_UT':'Himachal Pradesh',
'helpline_number':'104'
},
{
'state_or_UT':'Jharkhand',
'helpline_number':'104'
},
{
'state_or_UT':'Karnataka',
'helpline_number':'104'
},
{
'state_or_UT':'Kerala',
'helpline_number':'0471-2552056'
},
{
'state_or_UT':'Madhya Pradesh',
'helpline_number':'104'
},
{
'state_or_UT':'Maharashtra',
'helpline_number':'020-26127394'
},
{
'state_or_UT':'Manipur',
'helpline_number':'3852411668'
},
{
'state_or_UT':'Meghalaya',
'helpline_number':'108'
},
{
'state_or_UT':'Mizoram',
'helpline_number':'102'
},
{
'state_or_UT':'Nagaland',
'helpline_number':'7005539653'
},
{
'state_or_UT':'Odisha',
'helpline_number':'9439994859'
},
{
'state_or_UT':'Punjab',
'helpline_number':'104'
},
{
'state_or_UT':'Rajasthan',
'helpline_number':'0141-2225624'
},
{
'state_or_UT':'Sikkim',
'helpline_number':'104'
},
{
'state_or_UT':'Tamil Nadu',
'helpline_number':'044-29510500'
},
{
'state_or_UT':'Telangana',
'helpline_number':'104'
},
{
'state_or_UT':'Tripura',
'helpline_number':'0381-2315879'
},
{
'state_or_UT':'Uttarakhand',
'helpline_number':'104'
},
{
'state_or_UT':'Uttar Pradesh',
'helpline_number':'18001805145'
},
{
'state_or_UT':'West Bengal',
'helpline_number':'1800313444222 , 03323412600'
},
{
'state_or_UT':'Andaman and Nicobar Islands',
'helpline_number':'03192-232102'
},
{
'state_or_UT':'Chandigarh',
'helpline_number':'9779558282'
},
{
'state_or_UT':'Dadra and Nagar Haveli and Daman & Diu',
'helpline_number':'104'
},
{
'state_or_UT':'Delhi',
'helpline_number':'011-22307145'
},
{
'state_or_UT':'Jammu & Kashmir',
'helpline_number':'01912520982, 0194-2440283'
},
{
'state_or_UT':'Ladakh',
'helpline_number':'01982256462'
},
{
'state_or_UT':'Lakshadweep',
'helpline_number':'104'
},
{
'state_or_UT':'Puducherry',
'helpline_number':'104'
}
]
} | helpline_numbers = {'source': 'https://www.mohfw.gov.in/pdf/coronvavirushelplinenumber.pdf', 'helpline_number': '+91-11-23978046', 'toll_free': '1075', 'helpline_email': 'ncov2019@gov.in', 'contact_details': [{'state_or_UT': 'Andhra Pradesh', 'helpline_number': '0866-2410978'}, {'state_or_UT': 'Arunachal Pradesh', 'helpline_number': '9436055743'}, {'state_or_UT': 'Assam', 'helpline_number': '6913347770'}, {'state_or_UT': 'Bihar', 'helpline_number': '104'}, {'state_or_UT': 'Chhattisgarh', 'helpline_number': '104'}, {'state_or_UT': 'Goa', 'helpline_number': '104'}, {'state_or_UT': 'Gujarat', 'helpline_number': '104'}, {'state_or_UT': 'Haryana', 'helpline_number': '8558893911'}, {'state_or_UT': 'Himachal Pradesh', 'helpline_number': '104'}, {'state_or_UT': 'Jharkhand', 'helpline_number': '104'}, {'state_or_UT': 'Karnataka', 'helpline_number': '104'}, {'state_or_UT': 'Kerala', 'helpline_number': '0471-2552056'}, {'state_or_UT': 'Madhya Pradesh', 'helpline_number': '104'}, {'state_or_UT': 'Maharashtra', 'helpline_number': '020-26127394'}, {'state_or_UT': 'Manipur', 'helpline_number': '3852411668'}, {'state_or_UT': 'Meghalaya', 'helpline_number': '108'}, {'state_or_UT': 'Mizoram', 'helpline_number': '102'}, {'state_or_UT': 'Nagaland', 'helpline_number': '7005539653'}, {'state_or_UT': 'Odisha', 'helpline_number': '9439994859'}, {'state_or_UT': 'Punjab', 'helpline_number': '104'}, {'state_or_UT': 'Rajasthan', 'helpline_number': '0141-2225624'}, {'state_or_UT': 'Sikkim', 'helpline_number': '104'}, {'state_or_UT': 'Tamil Nadu', 'helpline_number': '044-29510500'}, {'state_or_UT': 'Telangana', 'helpline_number': '104'}, {'state_or_UT': 'Tripura', 'helpline_number': '0381-2315879'}, {'state_or_UT': 'Uttarakhand', 'helpline_number': '104'}, {'state_or_UT': 'Uttar Pradesh', 'helpline_number': '18001805145'}, {'state_or_UT': 'West Bengal', 'helpline_number': '1800313444222 , 03323412600'}, {'state_or_UT': 'Andaman and Nicobar Islands', 'helpline_number': '03192-232102'}, 
{'state_or_UT': 'Chandigarh', 'helpline_number': '9779558282'}, {'state_or_UT': 'Dadra and Nagar Haveli and Daman & Diu', 'helpline_number': '104'}, {'state_or_UT': 'Delhi', 'helpline_number': '011-22307145'}, {'state_or_UT': 'Jammu & Kashmir', 'helpline_number': '01912520982, 0194-2440283'}, {'state_or_UT': 'Ladakh', 'helpline_number': '01982256462'}, {'state_or_UT': 'Lakshadweep', 'helpline_number': '104'}, {'state_or_UT': 'Puducherry', 'helpline_number': '104'}]} |
"""Takes a number of miles and converts it to a feet value."""
def main() -> None:
"""Convert the user input miles value to feet and display the result."""
print('Hike Calculator')
miles: float = float(input('How many miles did you walk?: '))
print(f'You walked {to_feet(miles)} feet.')
def to_feet(miles: float) -> int:
"""Return the input miles value as feet."""
FEET_PER_MILE: int = 5280
return int(miles * FEET_PER_MILE)
if __name__ == '__main__':
main() | """Takes a number of miles and converts it to a feet value."""
def main() -> None:
"""Convert the user input miles value to feet and display the result."""
print('Hike Calculator')
miles: float = float(input('How many miles did you walk?: '))
print(f'You walked {to_feet(miles)} feet.')
def to_feet(miles: float) -> int:
"""Return the input miles value as feet."""
feet_per_mile: int = 5280
return int(miles * FEET_PER_MILE)
if __name__ == '__main__':
main() |
#-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
# Martin Paces <martin.paces@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
class AsyncBackendInterface(object):
""" Interface class for an asynchronous WPS back-end.
NOTE: Only one asynchronous back-end at time is allowed to be configured.
"""
@property
def supported_versions(self):
""" A list of versions of the WPS standard supported by the back-end.
"""
def execute(self, process, raw_inputs, resp_form, extra_parts=None,
job_id=None, version="1.0.0", **kwargs):
""" Execute process asynchronously.
The request is defined by the process's identifier ``process_id``,
``raw_inputs`` (before the decoding and resolution
of the references), and the ``resp_form`` (holding
the outputs' parameters). The ``version`` of the WPS standard
to be used. Optionally, the user defined ``job_id`` can be passed.
If the ``job_id`` cannot be used the execute shall fail.
The ``extra_parts`` should contain a dictionary of named request parts
should the request contain multi-part/related CID references.
On success, the method returns the ``job_id`` assigned to the
executed job.
"""
def get_response_url(self, job_id):
""" Get URL of the execute response for the given job id """
def get_status(self, job_id):
""" Get status of a job. Allowed responses and their meanings are:
ACCEPTED - job scheduled for execution
STARTED - job in progress
PAUSED - job is stopped and it can be resumed
CANCELLED - job was terminated by the user
FAILED - job ended with an error
SUCCEEDED - job ended successfully
"""
def purge(self, job_id, **kwargs):
""" Purge the job from the system by removing all the resources
occupied by the job.
"""
def cancel(self, job_id, **kwargs):
""" Cancel the job execution. """
def pause(self, job_id, **kwargs):
""" Pause the job execution. """
def resume(self, job_id, **kwargs):
""" Resume the job execution. """
class ProcessInterface(object):
""" Interface class for processes offered, described and executed by
the WPS.
"""
@property
def version(self):
""" The version of the process, if applicable. Optional.
When omitted it defaults to '1.0.0'.
"""
@property
def synchronous(self):
""" Optional boolean flag indicating whether the process can be executed
synchronously. If missing True is assumed.
"""
@property
def asynchronous(self):
""" Optional boolean flag indicating whether the process can be executed
asynchronously. If missing False is assumed.
"""
@property
def retention_period(self):
""" This optional property (`datetime.timedelta`) indicates the minimum
time the process results shall be retained after the completion.
If omitted the default server retention policy is applied.
"""
@property
def identifier(self):
""" An identifier (URI) of the process. Optional.
When omitted it defaults to the process' class-name.
"""
@property
def title(self):
""" A human-readable title of the process. Optional. When omitted it
defaults to the process identifier.
"""
@property
def description(self):
""" A human-readable detailed description of the process. Optional.
(Content of the the abstract in the WPS process description.)
"""
@property
def profiles(self):
""" A iterable of URNs of WPS application profiles this process
adheres to. Optional.
"""
@property
def metadata(self):
""" A dict of title/URL meta-data pairs associated with the process.
Optional.
"""
@property
def wsdl(self):
""" A URL of WSDL document describing this process. Optional.
"""
@property
def inputs(self):
""" A dict mapping the inputs' identifiers to their respective types.
The type can be either one of the supported native python types
(automatically converted to a ``LiteralData`` object) or an instance
of one of the data-specification classes (``LiteralData``,
``BoundingBoxData``, or ``ComplexData``). Mandatory.
"""
@property
def outputs(self):
""" A dict mapping the outputs' identifiers to their respective types.
The type can be either one of the supported native python types
(automatically converted to a ``LiteralData`` object) or an instance
of one of the data-specification classes (``LiteralData``,
``BoundingBoxData``, or ``ComplexData``). Mandatory.
"""
def execute(self, **kwargs):
""" The main execution function for the process. The ``kwargs`` are the
parsed input inputs (using the keys as defined by the ``inputs``)
and the Complex Data format requests (using the keys as defined by
the ``outputs``).
The method is expected to return a dictionary of the output values
(using the keys as defined by the ``outputs``). In case of only
one output item defined by the ``outputs``, one output value
is allowed to be returned directly.
"""
| class Asyncbackendinterface(object):
""" Interface class for an asynchronous WPS back-end.
NOTE: Only one asynchronous back-end at time is allowed to be configured.
"""
@property
def supported_versions(self):
""" A list of versions of the WPS standard supported by the back-end.
"""
def execute(self, process, raw_inputs, resp_form, extra_parts=None, job_id=None, version='1.0.0', **kwargs):
""" Execute process asynchronously.
The request is defined by the process's identifier ``process_id``,
``raw_inputs`` (before the decoding and resolution
of the references), and the ``resp_form`` (holding
the outputs' parameters). The ``version`` of the WPS standard
to be used. Optionally, the user defined ``job_id`` can be passed.
If the ``job_id`` cannot be used the execute shall fail.
The ``extra_parts`` should contain a dictionary of named request parts
should the request contain multi-part/related CID references.
On success, the method returns the ``job_id`` assigned to the
executed job.
"""
def get_response_url(self, job_id):
""" Get URL of the execute response for the given job id """
def get_status(self, job_id):
""" Get status of a job. Allowed responses and their meanings are:
ACCEPTED - job scheduled for execution
STARTED - job in progress
PAUSED - job is stopped and it can be resumed
CANCELLED - job was terminated by the user
FAILED - job ended with an error
SUCCEEDED - job ended successfully
"""
def purge(self, job_id, **kwargs):
""" Purge the job from the system by removing all the resources
occupied by the job.
"""
def cancel(self, job_id, **kwargs):
""" Cancel the job execution. """
def pause(self, job_id, **kwargs):
""" Pause the job execution. """
def resume(self, job_id, **kwargs):
""" Resume the job execution. """
class Processinterface(object):
""" Interface class for processes offered, described and executed by
the WPS.
"""
@property
def version(self):
""" The version of the process, if applicable. Optional.
When omitted it defaults to '1.0.0'.
"""
@property
def synchronous(self):
""" Optional boolean flag indicating whether the process can be executed
synchronously. If missing True is assumed.
"""
@property
def asynchronous(self):
""" Optional boolean flag indicating whether the process can be executed
asynchronously. If missing False is assumed.
"""
@property
def retention_period(self):
""" This optional property (`datetime.timedelta`) indicates the minimum
time the process results shall be retained after the completion.
If omitted the default server retention policy is applied.
"""
@property
def identifier(self):
""" An identifier (URI) of the process. Optional.
When omitted it defaults to the process' class-name.
"""
@property
def title(self):
""" A human-readable title of the process. Optional. When omitted it
defaults to the process identifier.
"""
@property
def description(self):
""" A human-readable detailed description of the process. Optional.
(Content of the the abstract in the WPS process description.)
"""
@property
def profiles(self):
""" A iterable of URNs of WPS application profiles this process
adheres to. Optional.
"""
@property
def metadata(self):
""" A dict of title/URL meta-data pairs associated with the process.
Optional.
"""
@property
def wsdl(self):
""" A URL of WSDL document describing this process. Optional.
"""
@property
def inputs(self):
""" A dict mapping the inputs' identifiers to their respective types.
The type can be either one of the supported native python types
(automatically converted to a ``LiteralData`` object) or an instance
of one of the data-specification classes (``LiteralData``,
``BoundingBoxData``, or ``ComplexData``). Mandatory.
"""
@property
def outputs(self):
""" A dict mapping the outputs' identifiers to their respective types.
The type can be either one of the supported native python types
(automatically converted to a ``LiteralData`` object) or an instance
of one of the data-specification classes (``LiteralData``,
``BoundingBoxData``, or ``ComplexData``). Mandatory.
"""
def execute(self, **kwargs):
""" The main execution function for the process. The ``kwargs`` are the
parsed input inputs (using the keys as defined by the ``inputs``)
and the Complex Data format requests (using the keys as defined by
the ``outputs``).
The method is expected to return a dictionary of the output values
(using the keys as defined by the ``outputs``). In case of only
one output item defined by the ``outputs``, one output value
is allowed to be returned directly.
""" |
# Utility macros for Twister2 core files
def twister2_core_files():
return twister2_core_conf_files() + twister2_core_lib_files()
def twister2_core_conf_files():
return [
"//twister2/config/src/yaml:config-system-yaml",
"//twister2/config/src/yaml:common-conf-yaml",
]
def twister2_core_lib_files():
return twister2_core_lib_resource_scheduler_files() + \
twister2_core_lib_task_scheduler_files() + \
twister2_core_lib_communication_files()
def twister2_core_lib_resource_scheduler_files():
return [
"//twister2/resource-scheduler/src/java:resource-scheduler-java",
]
def twister2_core_lib_task_scheduler_files():
return [
"//twister2/taskscheduler/src/java:taskscheduler-java",
]
def twister2_core_lib_communication_files():
return [
"//twister2/comms/src/java:comms-java",
"//twister2/proto:proto-jobmaster-java",
]
def twister2_core_lib_connector_files():
return [
"//twister2/connectors/src/java:connector-java",
"@org_xerial_snappy_snappy_java//jar",
"@org_lz4_lz4_java//jar",
"@org_slf4j_slf4j_api//jar",
"@org_apache_kafka_kafka_clients//jar",
]
def twister2_client_lib_master_files():
return [
"//twister2/connectors/src/java:master-java",
]
def twister2_core_lib_data_files():
return [
"//twister2/data/src/main/java:data-java",
"@org_apache_hadoop_hadoop_hdfs//jar",
"@org_apache_hadoop_hadoop_common//jar",
"@org_apache_hadoop_hadoop_annotations//jar",
"@org_apache_hadoop_hadoop_auth//jar",
"@org_apache_hadoop_hadoop_mapreduce_client_core//jar",
"@com_google_code_findbugs_jsr305//jar",
"@com_fasterxml_woodstox_woodstox_core//jar",
"@org_codehaus_woodstox_stax2_api//jar",
"@commons_io_commons_io//jar",
"@commons_collections_commons_collections//jar",
"@org_apache_commons_commons_lang3//jar",
"@commons_configuration_commons_configuration//jar",
"@log4j_log4j//jar",
"@org_apache_htrace_htrace_core4//jar",
"@org_apache_hadoop_hadoop_hdfs_client//jar",
]
def twister2_core_lib_executor_files():
return [
"//twister2/executor/src/java:executor-java",
]
def twister2_core_lib_data_lmdb_files():
return [
"//twister2/data/src/main/java:data-java",
"@org_lmdbjava_lmdbjava//jar",
"@org_lmdbjava_lmdbjava_native_linux_x86_64//jar",
"@org_lmdbjava_lmdbjava_native_windows_x86_64//jar",
"@org_lmdbjava_lmdbjava_native_osx_x86_64//jar",
"@com_github_jnr_jnr_ffi//jar",
"@com_github_jnr_jnr_constants//jar",
"@com_github_jnr_jffi//jar",
"//third_party:com_github_jnr_jffi_native",
]
def twister2_harp_integration_files():
return [
"//twister2/compatibility/harp:twister2-harp",
"//third_party:harp_collective",
"@it_unimi_dsi_fastutil//jar",
]
def twister2_dashboard_files():
return [
"//dashboard/server:twister2-dash-server",
]
def twister2_core_checkpointing_files():
return [
"//twister2/checkpointing/src/java:checkpointing-java",
]
def twister2_core_tset_files():
return [
"//twister2/tset/src/java:tset-java",
"@maven//:com_google_re2j_re2j"
]
def twister2_core_restarter_files():
return [
"//twister2/checkpointing/src/java/edu/iu/dsc/tws/restarter:restarter-java",
]
def twister2_storm_files():
return [
"//twister2/compatibility/storm:twister2-storm",
]
def twister2_beam_files():
return [
"//twister2/compatibility/beam:twister2-beam",
"@org_apache_beam_beam_runners_core_java//jar",
"@org_apache_beam_beam_sdks_java_core//jar",
"@org_apache_beam_beam_model_pipeline//jar",
"@org_apache_beam_beam_runners_java_fn_execution//jar",
"@com_fasterxml_jackson_core_jackson_annotations//jar",
"@joda_time_joda_time//jar",
"@org_apache_beam_beam_runners_core_construction_java//jar",
"@com_google_guava_guava//jar",
"//third_party:vendored_grpc_1_21_0",
"//third_party:vendored_guava_26_0_jre",
"@org_apache_beam_beam_vendor_guava_20_0//jar",
"@javax_xml_bind_jaxb_api//jar",
"@org_apache_beam_beam_vendor_sdks_java_extensions_protobuf//jar",
"@org_apache_beam_beam_vendor_grpc_1_13_1//jar",
]
def twister2_python_support_files():
return [
"//twister2/python-support:python-support",
"@net_sf_py4j_py4j//jar",
"@black_ninia_jep//jar",
]
| def twister2_core_files():
return twister2_core_conf_files() + twister2_core_lib_files()
def twister2_core_conf_files():
return ['//twister2/config/src/yaml:config-system-yaml', '//twister2/config/src/yaml:common-conf-yaml']
def twister2_core_lib_files():
return twister2_core_lib_resource_scheduler_files() + twister2_core_lib_task_scheduler_files() + twister2_core_lib_communication_files()
def twister2_core_lib_resource_scheduler_files():
return ['//twister2/resource-scheduler/src/java:resource-scheduler-java']
def twister2_core_lib_task_scheduler_files():
return ['//twister2/taskscheduler/src/java:taskscheduler-java']
def twister2_core_lib_communication_files():
return ['//twister2/comms/src/java:comms-java', '//twister2/proto:proto-jobmaster-java']
def twister2_core_lib_connector_files():
return ['//twister2/connectors/src/java:connector-java', '@org_xerial_snappy_snappy_java//jar', '@org_lz4_lz4_java//jar', '@org_slf4j_slf4j_api//jar', '@org_apache_kafka_kafka_clients//jar']
def twister2_client_lib_master_files():
return ['//twister2/connectors/src/java:master-java']
def twister2_core_lib_data_files():
return ['//twister2/data/src/main/java:data-java', '@org_apache_hadoop_hadoop_hdfs//jar', '@org_apache_hadoop_hadoop_common//jar', '@org_apache_hadoop_hadoop_annotations//jar', '@org_apache_hadoop_hadoop_auth//jar', '@org_apache_hadoop_hadoop_mapreduce_client_core//jar', '@com_google_code_findbugs_jsr305//jar', '@com_fasterxml_woodstox_woodstox_core//jar', '@org_codehaus_woodstox_stax2_api//jar', '@commons_io_commons_io//jar', '@commons_collections_commons_collections//jar', '@org_apache_commons_commons_lang3//jar', '@commons_configuration_commons_configuration//jar', '@log4j_log4j//jar', '@org_apache_htrace_htrace_core4//jar', '@org_apache_hadoop_hadoop_hdfs_client//jar']
def twister2_core_lib_executor_files():
return ['//twister2/executor/src/java:executor-java']
def twister2_core_lib_data_lmdb_files():
return ['//twister2/data/src/main/java:data-java', '@org_lmdbjava_lmdbjava//jar', '@org_lmdbjava_lmdbjava_native_linux_x86_64//jar', '@org_lmdbjava_lmdbjava_native_windows_x86_64//jar', '@org_lmdbjava_lmdbjava_native_osx_x86_64//jar', '@com_github_jnr_jnr_ffi//jar', '@com_github_jnr_jnr_constants//jar', '@com_github_jnr_jffi//jar', '//third_party:com_github_jnr_jffi_native']
def twister2_harp_integration_files():
return ['//twister2/compatibility/harp:twister2-harp', '//third_party:harp_collective', '@it_unimi_dsi_fastutil//jar']
def twister2_dashboard_files():
return ['//dashboard/server:twister2-dash-server']
def twister2_core_checkpointing_files():
return ['//twister2/checkpointing/src/java:checkpointing-java']
def twister2_core_tset_files():
return ['//twister2/tset/src/java:tset-java', '@maven//:com_google_re2j_re2j']
def twister2_core_restarter_files():
return ['//twister2/checkpointing/src/java/edu/iu/dsc/tws/restarter:restarter-java']
def twister2_storm_files():
return ['//twister2/compatibility/storm:twister2-storm']
def twister2_beam_files():
return ['//twister2/compatibility/beam:twister2-beam', '@org_apache_beam_beam_runners_core_java//jar', '@org_apache_beam_beam_sdks_java_core//jar', '@org_apache_beam_beam_model_pipeline//jar', '@org_apache_beam_beam_runners_java_fn_execution//jar', '@com_fasterxml_jackson_core_jackson_annotations//jar', '@joda_time_joda_time//jar', '@org_apache_beam_beam_runners_core_construction_java//jar', '@com_google_guava_guava//jar', '//third_party:vendored_grpc_1_21_0', '//third_party:vendored_guava_26_0_jre', '@org_apache_beam_beam_vendor_guava_20_0//jar', '@javax_xml_bind_jaxb_api//jar', '@org_apache_beam_beam_vendor_sdks_java_extensions_protobuf//jar', '@org_apache_beam_beam_vendor_grpc_1_13_1//jar']
def twister2_python_support_files():
return ['//twister2/python-support:python-support', '@net_sf_py4j_py4j//jar', '@black_ninia_jep//jar'] |
class Exception:
def __init__(self, message):
self.message = message
class TypeError(Exception):
pass
class AttributeError(Exception):
pass
class KeyError(Exception):
pass
class StopIteration(Exception):
pass
class NotImplementedError(Exception):
pass
class NotImplemented(Exception):
pass
class ValueError(Exception):
pass
| class Exception:
def __init__(self, message):
self.message = message
class Typeerror(Exception):
pass
class Attributeerror(Exception):
pass
class Keyerror(Exception):
pass
class Stopiteration(Exception):
pass
class Notimplementederror(Exception):
pass
class Notimplemented(Exception):
pass
class Valueerror(Exception):
pass |
__version__ = '0.15.0.dev0'
PROJECT_NAME = "pulsar"
PROJECT_OWNER = PROJECT_USERAME = "galaxyproject"
PROJECT_AUTHOR = 'Galaxy Project and Community'
PROJECT_EMAIL = 'jmchilton@gmail.com'
PROJECT_URL = "https://github.com/{}/{}".format(PROJECT_OWNER, PROJECT_NAME)
RAW_CONTENT_URL = "https://raw.github.com/{}/{}/master/".format(
PROJECT_USERAME, PROJECT_NAME
)
| __version__ = '0.15.0.dev0'
project_name = 'pulsar'
project_owner = project_userame = 'galaxyproject'
project_author = 'Galaxy Project and Community'
project_email = 'jmchilton@gmail.com'
project_url = 'https://github.com/{}/{}'.format(PROJECT_OWNER, PROJECT_NAME)
raw_content_url = 'https://raw.github.com/{}/{}/master/'.format(PROJECT_USERAME, PROJECT_NAME) |
test = {
'name': 'Problem 5',
'points': 2,
'suites': [
{
'cases': [
{
'code': r"""
>>> abs_diff = lambda w1, w2, limit: abs(len(w2) - len(w1))
>>> autocorrect("cul", ["culture", "cult", "cultivate"], abs_diff, 10)
'cult'
>>> autocorrect("cul", ["culture", "cult", "cultivate"], abs_diff, 0)
'cul'
>>> autocorrect("wor", ["worry", "car", "part"], abs_diff, 10)
'car'
>>> first_diff = lambda w1, w2, limit: 1 if w1[0] != w2[0] else 0
>>> autocorrect("wrod", ["word", "rod"], first_diff, 1)
'word'
>>> autocorrect("inside", ["idea", "inside"], first_diff, 0.5)
'inside'
>>> autocorrect("inside", ["idea", "insider"], first_diff, 0.5)
'idea'
>>> autocorrect("outside", ["idea", "insider"], first_diff, 0.5)
'outside'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> matching_diff = lambda w1, w2, limit: sum([w1[i] != w2[i] for i in range(min(len(w1), len(w2)))]) # Num matching chars
>>> autocorrect("tosting", ["testing", "asking", "fasting"], matching_diff, 10)
'testing'
>>> autocorrect("tsting", ["testing", "rowing"], matching_diff, 10)
'rowing'
>>> autocorrect("bwe", ["awe", "bye"], matching_diff, 10)
'awe'
>>> autocorrect("bwe", ["bye", "awe"], matching_diff, 10)
'bye'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> words_list = sorted(lines_from_file('data/words.txt')[:10000])
>>> autocorrect("testng", words_list, lambda w1, w2, limit: 1, 10)
'a'
>>> autocorrect("testing", words_list, lambda w1, w2, limit: 1, 10)
'testing'
>>> autocorrect("gesting", words_list, lambda w1, w2, limit: sum([w1[i] != w2[i] for i in range(min(len(w1), len(w2)))]) + abs(len(w1) - len(w2)), 10)
'getting'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('statutably', ['statutably'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)
'statutably'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('decephalization', ['tautit', 'misorder', 'uptill', 'urostealith', 'cowy', 'sinistrodextral'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)
'sinistrodextral'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('hypostasis', ['tinosa'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)
'hypostasis'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('indeprivable', ['echeneidid', 'iridiate', 'conjugality', 'convolute', 'momentariness', 'hotelless', 'archon', 'rheotome', 'transformistic'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 6)
'conjugality'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('helena', ['ailment', 'undeclared', 'staphyloplastic', 'ag', 'sulphurless', 'ungrappler', 'ascertainer', 'dormitory', 'zoarial'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)
'helena'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('lictorian', ['felineness', 'deontological', 'extraterrestrial', 'experimentalist', 'incomputable'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)
'lictorian'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('mousehound', ['unembowelled', 'indigene', 'kersmash', 'mousehound', 'matchmark', 'proportionably', 'persons', 'suprasternal', 'agomphious'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)
'mousehound'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('chamfer', ['pretyrannical'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 8)
'pretyrannical'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('locksman', ['coinheritance', 'fourscore', 'naggingly', 'scutelliplantar', 'shiftful', 'prolonger'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 6)
'shiftful'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('nonextensional', ['unlivably', 'error', 'emoloa'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)
'nonextensional'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('incavern', ['donnert', 'incavern'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 8)
'incavern'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('disable', ['semiductile', 'microcephalic', 'coauthor', 'whorishness', 'disable'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 8)
'disable'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('antigravitational', ['brad'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)
'antigravitational'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('trifurcation', ['trifurcation', 'formative'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)
'trifurcation'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('phlogistic', ['mangue', 'lawproof', 'paginary', 'eruption', 'ambrosin', 'tubularly', 'alienee'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 9)
'tubularly'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('psychotic', ['cylinderlike', 'filipendulous'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)
'psychotic'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('terpodion', ['terpodion', 'wintertide'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)
'terpodion'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('socky', ['childed', 'peoplehood', 'foxwood', 'brachistochronic', 'dentilation', 'luteous'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)
'socky'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('wedding', ['mordaciously', 'quinia', 'fixer', 'wedding', 'sendable', 'ainoi'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)
'wedding'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('unarrestable', ['unmarring', 'cationic', 'nunhood', 'martyrdom', 'perambulation', 'gaseous'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)
'perambulation'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('sprightliness', ['unlimb', 'octamerism', 'antipodist', 'caprinic', 'ringbark', 'suboptimal', 'kingfish', 'amomal'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)
'sprightliness'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('veteraness', ['wavement', 'paradoxidian', 'hypergeometrical', 'veteraness', 'purposeful', 'irrigative', 'ultramontanism', 'epephragmal'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 6)
'veteraness'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('hyperphysics', ['thiouracil', 'cibophobia', 'katamorphism', 'trimorphism', 'norie'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 4)
'katamorphism'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('evagation', ['sensationalize', 'stamphead', 'tankmaker', 'becut', 'oenochoe', 'digoneutic', 'refinement', 'tininess', 'benedictively', 'segment'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)
'stamphead'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('ashine', ['nonfrustration', 'perineostomy', 'nonupholstered', 'hypocoristically', 'plushlike', 'rancorously'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)
'ashine'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('unshameful', ['ger', 'ahoy', 'ventriloquial'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)
'unshameful'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('dramatist', ['tournament', 'acclinate', 'rasion'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 8)
'acclinate'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('beewort', ['terrestrious', 'sociometry'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)
'beewort'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('claylike', ['houndish', 'muirfowl', 'unexplorative'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)
'houndish'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('columbine', ['nonupholstered', 'columbine', 'entoptical', 'spondylolisthetic'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)
'columbine'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('owners', ['choledochostomy', 'superobstinate', 'pagoscope'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)
'owners'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('tampa', ['commonness', 'incentively', 'courtezanship', 'unapproachableness', 'readvertisement', 'strumiprivous'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)
'tampa'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('reaffirmance', ['reaffirmance', 'nursy'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 4)
'reaffirmance'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('photonasty', ['decisively', 'uninclosed', 'chlor'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 6)
'decisively'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('intercepter', ['empiecement'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)
'empiecement'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('semideity', ['roundseam', 'misrule', 'cardioblast', 'semideity', 'yaply', 'anthroponomy'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)
'semideity'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('served', ['cecomorphic', 'ademption', 'impassibility', 'introvert', 'reintrench', 'transmigratively', 'commerge', 'hematocryal'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)
'commerge'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('unliterally', ['vility', 'copellidine', 'creditor', 'parvenuism', 'hindbrain', 'autantitypy', 'sailing', 'dermatoskeleton'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)
'copellidine'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('growth', ['assassinator'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)
'growth'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('thereabouts', ['quantifiable', 'exterritorial', 'believe', 'untemporal', 'thereabouts'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)
'thereabouts'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('goblinism', ['bobby', 'thig', 'plasterwork', 'unhyphenated', 'subessential', 'softhead', 'metrocracy', 'understem'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)
'understem'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('resoutive', ['hydroseparation', 'descry', 'apodosis', 'atavist'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)
'apodosis'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('unwebbed', ['cramble', 'pseudopopular', 'unwebbed', 'minimize', 'ricinoleate', 'arthrogastran', 'testaceography'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)
'unwebbed'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('emphasize', ['putchen'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)
'emphasize'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('whapuka', ['seambiter', 'cogman', 'polymorphistic'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)
'cogman'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('cubatory', ['byssaceous', 'begins', 'cubatory', 'galvanothermometer', 'appearanced', 'proavian'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)
'cubatory'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('singler', ['mycetous', 'singler'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)
'singler'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('esquirearchy', ['souper', 'ark', 'niccolite', 'reagin', 'esquirearchy', 'effeminatize'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)
'esquirearchy'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('evulse', ['uniocular', 'caution', 'unhoofed', 'misinterpret', 'ooscope', 'physiophilosophy', 'potteringly', 'wartyback'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)
'evulse'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('underpopulation', ['adenocarcinomatous', 'soliloquy', 'antispace', 'slimeman', 'cardioncus', 'bin', 'undervalve', 'sundek'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)
'underpopulation'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('osteology', ['transphenomenal'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 4)
'osteology'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('solenaceous', ['padding', 'pixel', 'unalimentary', 'dyschroa', 'undefinedly', 'violational', 'bisulfid', 'pralltriller', 'demonstration'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)
'undefinedly'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('diabolicalness', ['cronstedtite', 'precipitate', 'undertook', 'unconspicuousness'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)
'cronstedtite'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('airworthiness', ['sep'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 9)
'airworthiness'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('pseudomorula', ['toprope', 'doltishly', 'radiotelegraphic'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)
'pseudomorula'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('hauld', ['pyrenodean', 'hauld'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)
'hauld'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('doll', ['stylolitic', 'altigraph', 'doll', 'avowably', 'manzana', 'galloon'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)
'doll'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('rachilla', ['tridentated', 'bridgework', 'coif', 'hitchhike', 'rachilla', 'uptaker', 'penalty', 'commitment', 'supervisor', 'unquartered'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 4)
'rachilla'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('ventriculogram', ['luggie', 'septectomy', 'unproctored', 'volition', 'straked', 'oliver', 'telescopic', 'scarabaeoid'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)
'ventriculogram'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('arsonvalization', ['nondisarmament', 'arsonvalization', 'ketyl', 'tussle', 'rhabdomysarcoma'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)
'arsonvalization'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('photospectroscopical', ['unenclosed', 'sagacious', 'saur', 'gloveress', 'limbless', 'daresay', 'mysticize'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 8)
'photospectroscopical'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('tupara', ['unkerchiefed', 'dormant', 'triplite', 'bimuscular', 'insider', 'coadjacency', 'unslighted', 'perichordal'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)
'dormant'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('odontoglossate', ['odontoglossate', 'conceivableness'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 4)
'odontoglossate'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('retro', ['grayback'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)
'retro'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('pung', ['campoo'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)
'pung'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('liberal', ['owse', 'ingenerable', 'patrol', 'kenosis', 'wetted'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 6)
'kenosis'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('disclaimer', ['psiloi', 'kusti', 'vallation', 'reprehensive', 'blameworthiness', 'proteiform', 'taintless', 'incruent', 'wednesday', 'codebtor'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 4)
'proteiform'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('orthological', ['consentaneous', 'orthological'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)
'orthological'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('amylemia', ['chirotonsory', 'loiter', 'ulnad', 'ticklebrain'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)
'amylemia'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('amendableness', ['baked', 'nonpriestly', 'unfavorably', 'amendableness', 'curatorship', 'intermediacy'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)
'amendableness'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('gammoning', ['weariedly', 'elongation', 'xanthous', 'squatty', 'dermad', 'iamatology', 'hexachloride', 'womanize', 'favorably'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 8)
'weariedly'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('siliciferous', ['siliciferous'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)
'siliciferous'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('patrization', ['deuteropathy', 'pregracile'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)
'deuteropathy'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('royetously', ['coster', 'microbiological', 'royetously'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 8)
'royetously'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('unbewritten', ['camphane', 'unbewritten', 'meditationist', 'hydriform'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 8)
'unbewritten'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('searing', ['ultrapapist', 'shriekingly', 'scratchiness', 'searing', 'pot', 'valanche', 'subterraqueous', 'helleboraceous'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)
'searing'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('arara', ['synergistically', 'prerecital', 'lozengeways', 'coessentially', 'cubicontravariant', 'snootiness', 'hetaerocracy', 'acaudate', 'simperer'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)
'acaudate'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('uptowner', ['hopyard'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)
'hopyard'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('presettlement', ['previsit'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)
'presettlement'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('soak', ['upside', 'demirevetment', 'undelineated', 'excusative', 'engagingness'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 6)
'upside'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('reduction', ['gym'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)
'reduction'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('doctorship', ['peccable', 'jussive', 'doctorship', 'overgladly', 'lurdanism', 'channel', 'malpublication', 'derringer', 'amental'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 9)
'doctorship'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('wintry', ['scyllitol', 'seringhi', 'ditchdown', 'procursive', 'unwholesome', 'unholiday', 'eureka', 'feathertop'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)
'eureka'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('transborder', ['conciliative', 'undercoachman', 'phasogeneous', 'philobrutish'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)
'conciliative'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('pericycloid', ['viertelein', 'felicitation', 'aortolith', 'nonpresbyter', 'germinable', 'illegibleness', 'undercondition', 'introverted', 'noselessly', 'tramming'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)
'introverted'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('indissolvability', ['hotbox', 'tokay', 'palaeofauna', 'indissolvability'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 9)
'indissolvability'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('soubrettish', ['unadvisably', 'unoften', 'unsecreted', 'precessional', 'erosional'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 8)
'unadvisably'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('semiopacity', ['animotheism', 'recoupment', 'juvenescence', 'scalable', 'thai', 'semiopacity'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)
'semiopacity'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('penwomanship', ['pyrocondensation', 'spooler', 'archorrhea', 'penwomanship', 'acheirus', 'lieutenant', 'plumless'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)
'penwomanship'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('subcordiform', ['subcordiform'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)
'subcordiform'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('quarrelsomely', ['concert', 'canonics', 'hip', 'uncrested', 'ectoethmoid', 'supertutelary', 'ignore'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 6)
'supertutelary'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('ditchdown', ['gonapophysis', 'permeability'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)
'ditchdown'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('suborbiculate', ['prisonable', 'rhapsodist', 'suborbiculate'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 9)
'suborbiculate'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('quiesce', ['synedria', 'apesthesia', 'squawbush', 'devourer', 'tetany', 'quiesce'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)
'quiesce'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('shiv', ['unconceivableness', 'inappetence'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 6)
'shiv'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('shuttleheaded', ['uterovaginal', 'extraparochially', 'serolipase'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)
'shuttleheaded'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('hilarity', ['valorously', 'hilarity', 'fungilliform', 'haven', 'torsk', 'thing', 'pickerel', 'refilm'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 4)
'hilarity'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('prefunctional', ['kep'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)
'prefunctional'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('oxypurine', ['overpensiveness', 'overindustrialize', 'lightweight', 'provinciality', 'telestereoscope', 'vastidity'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)
'vastidity'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> autocorrect('forewarningly', ['foxery'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 9)
'foxery'
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> from typing import autocorrect, lines_from_file
""",
'teardown': '',
'type': 'doctest'
}
]
}
| test = {'name': 'Problem 5', 'points': 2, 'suites': [{'cases': [{'code': '\n >>> abs_diff = lambda w1, w2, limit: abs(len(w2) - len(w1))\n >>> autocorrect("cul", ["culture", "cult", "cultivate"], abs_diff, 10)\n \'cult\'\n >>> autocorrect("cul", ["culture", "cult", "cultivate"], abs_diff, 0)\n \'cul\'\n >>> autocorrect("wor", ["worry", "car", "part"], abs_diff, 10)\n \'car\'\n >>> first_diff = lambda w1, w2, limit: 1 if w1[0] != w2[0] else 0\n >>> autocorrect("wrod", ["word", "rod"], first_diff, 1)\n \'word\'\n >>> autocorrect("inside", ["idea", "inside"], first_diff, 0.5)\n \'inside\'\n >>> autocorrect("inside", ["idea", "insider"], first_diff, 0.5)\n \'idea\'\n >>> autocorrect("outside", ["idea", "insider"], first_diff, 0.5)\n \'outside\'\n ', 'hidden': False, 'locked': False}, {'code': '\n >>> matching_diff = lambda w1, w2, limit: sum([w1[i] != w2[i] for i in range(min(len(w1), len(w2)))]) # Num matching chars\n >>> autocorrect("tosting", ["testing", "asking", "fasting"], matching_diff, 10)\n \'testing\'\n >>> autocorrect("tsting", ["testing", "rowing"], matching_diff, 10)\n \'rowing\'\n >>> autocorrect("bwe", ["awe", "bye"], matching_diff, 10)\n \'awe\'\n >>> autocorrect("bwe", ["bye", "awe"], matching_diff, 10)\n \'bye\'\n ', 'hidden': False, 'locked': False}, {'code': '\n >>> words_list = sorted(lines_from_file(\'data/words.txt\')[:10000])\n >>> autocorrect("testng", words_list, lambda w1, w2, limit: 1, 10)\n \'a\'\n >>> autocorrect("testing", words_list, lambda w1, w2, limit: 1, 10)\n \'testing\'\n >>> autocorrect("gesting", words_list, lambda w1, w2, limit: sum([w1[i] != w2[i] for i in range(min(len(w1), len(w2)))]) + abs(len(w1) - len(w2)), 10)\n \'getting\'\n ', 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('statutably', ['statutably'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)\n 'statutably'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('decephalization', ['tautit', 'misorder', 'uptill', 
'urostealith', 'cowy', 'sinistrodextral'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)\n 'sinistrodextral'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('hypostasis', ['tinosa'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)\n 'hypostasis'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('indeprivable', ['echeneidid', 'iridiate', 'conjugality', 'convolute', 'momentariness', 'hotelless', 'archon', 'rheotome', 'transformistic'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 6)\n 'conjugality'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('helena', ['ailment', 'undeclared', 'staphyloplastic', 'ag', 'sulphurless', 'ungrappler', 'ascertainer', 'dormitory', 'zoarial'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)\n 'helena'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('lictorian', ['felineness', 'deontological', 'extraterrestrial', 'experimentalist', 'incomputable'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)\n 'lictorian'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('mousehound', ['unembowelled', 'indigene', 'kersmash', 'mousehound', 'matchmark', 'proportionably', 'persons', 'suprasternal', 'agomphious'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)\n 'mousehound'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('chamfer', ['pretyrannical'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 8)\n 'pretyrannical'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('locksman', ['coinheritance', 'fourscore', 'naggingly', 'scutelliplantar', 'shiftful', 'prolonger'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 6)\n 'shiftful'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('nonextensional', ['unlivably', 'error', 'emoloa'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)\n 'nonextensional'\n ", 'hidden': False, 
'locked': False}, {'code': "\n >>> autocorrect('incavern', ['donnert', 'incavern'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 8)\n 'incavern'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('disable', ['semiductile', 'microcephalic', 'coauthor', 'whorishness', 'disable'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 8)\n 'disable'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('antigravitational', ['brad'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)\n 'antigravitational'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('trifurcation', ['trifurcation', 'formative'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)\n 'trifurcation'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('phlogistic', ['mangue', 'lawproof', 'paginary', 'eruption', 'ambrosin', 'tubularly', 'alienee'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 9)\n 'tubularly'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('psychotic', ['cylinderlike', 'filipendulous'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)\n 'psychotic'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('terpodion', ['terpodion', 'wintertide'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)\n 'terpodion'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('socky', ['childed', 'peoplehood', 'foxwood', 'brachistochronic', 'dentilation', 'luteous'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)\n 'socky'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('wedding', ['mordaciously', 'quinia', 'fixer', 'wedding', 'sendable', 'ainoi'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)\n 'wedding'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('unarrestable', ['unmarring', 'cationic', 'nunhood', 'martyrdom', 'perambulation', 'gaseous'], lambda x, y, lim: min(lim + 1, 
abs(len(x) - len(y))), 3)\n 'perambulation'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('sprightliness', ['unlimb', 'octamerism', 'antipodist', 'caprinic', 'ringbark', 'suboptimal', 'kingfish', 'amomal'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)\n 'sprightliness'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('veteraness', ['wavement', 'paradoxidian', 'hypergeometrical', 'veteraness', 'purposeful', 'irrigative', 'ultramontanism', 'epephragmal'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 6)\n 'veteraness'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('hyperphysics', ['thiouracil', 'cibophobia', 'katamorphism', 'trimorphism', 'norie'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 4)\n 'katamorphism'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('evagation', ['sensationalize', 'stamphead', 'tankmaker', 'becut', 'oenochoe', 'digoneutic', 'refinement', 'tininess', 'benedictively', 'segment'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)\n 'stamphead'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('ashine', ['nonfrustration', 'perineostomy', 'nonupholstered', 'hypocoristically', 'plushlike', 'rancorously'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)\n 'ashine'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('unshameful', ['ger', 'ahoy', 'ventriloquial'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)\n 'unshameful'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('dramatist', ['tournament', 'acclinate', 'rasion'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 8)\n 'acclinate'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('beewort', ['terrestrious', 'sociometry'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)\n 'beewort'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('claylike', 
['houndish', 'muirfowl', 'unexplorative'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)\n 'houndish'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('columbine', ['nonupholstered', 'columbine', 'entoptical', 'spondylolisthetic'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)\n 'columbine'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('owners', ['choledochostomy', 'superobstinate', 'pagoscope'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)\n 'owners'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('tampa', ['commonness', 'incentively', 'courtezanship', 'unapproachableness', 'readvertisement', 'strumiprivous'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)\n 'tampa'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('reaffirmance', ['reaffirmance', 'nursy'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 4)\n 'reaffirmance'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('photonasty', ['decisively', 'uninclosed', 'chlor'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 6)\n 'decisively'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('intercepter', ['empiecement'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)\n 'empiecement'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('semideity', ['roundseam', 'misrule', 'cardioblast', 'semideity', 'yaply', 'anthroponomy'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)\n 'semideity'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('served', ['cecomorphic', 'ademption', 'impassibility', 'introvert', 'reintrench', 'transmigratively', 'commerge', 'hematocryal'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)\n 'commerge'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('unliterally', ['vility', 'copellidine', 'creditor', 'parvenuism', 'hindbrain', 
'autantitypy', 'sailing', 'dermatoskeleton'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)\n 'copellidine'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('growth', ['assassinator'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)\n 'growth'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('thereabouts', ['quantifiable', 'exterritorial', 'believe', 'untemporal', 'thereabouts'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)\n 'thereabouts'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('goblinism', ['bobby', 'thig', 'plasterwork', 'unhyphenated', 'subessential', 'softhead', 'metrocracy', 'understem'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)\n 'understem'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('resoutive', ['hydroseparation', 'descry', 'apodosis', 'atavist'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)\n 'apodosis'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('unwebbed', ['cramble', 'pseudopopular', 'unwebbed', 'minimize', 'ricinoleate', 'arthrogastran', 'testaceography'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)\n 'unwebbed'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('emphasize', ['putchen'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)\n 'emphasize'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('whapuka', ['seambiter', 'cogman', 'polymorphistic'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)\n 'cogman'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('cubatory', ['byssaceous', 'begins', 'cubatory', 'galvanothermometer', 'appearanced', 'proavian'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)\n 'cubatory'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('singler', ['mycetous', 'singler'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)\n 
'singler'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('esquirearchy', ['souper', 'ark', 'niccolite', 'reagin', 'esquirearchy', 'effeminatize'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)\n 'esquirearchy'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('evulse', ['uniocular', 'caution', 'unhoofed', 'misinterpret', 'ooscope', 'physiophilosophy', 'potteringly', 'wartyback'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)\n 'evulse'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('underpopulation', ['adenocarcinomatous', 'soliloquy', 'antispace', 'slimeman', 'cardioncus', 'bin', 'undervalve', 'sundek'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)\n 'underpopulation'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('osteology', ['transphenomenal'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 4)\n 'osteology'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('solenaceous', ['padding', 'pixel', 'unalimentary', 'dyschroa', 'undefinedly', 'violational', 'bisulfid', 'pralltriller', 'demonstration'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)\n 'undefinedly'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('diabolicalness', ['cronstedtite', 'precipitate', 'undertook', 'unconspicuousness'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)\n 'cronstedtite'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('airworthiness', ['sep'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 9)\n 'airworthiness'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('pseudomorula', ['toprope', 'doltishly', 'radiotelegraphic'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)\n 'pseudomorula'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('hauld', ['pyrenodean', 'hauld'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)\n 
'hauld'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('doll', ['stylolitic', 'altigraph', 'doll', 'avowably', 'manzana', 'galloon'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)\n 'doll'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('rachilla', ['tridentated', 'bridgework', 'coif', 'hitchhike', 'rachilla', 'uptaker', 'penalty', 'commitment', 'supervisor', 'unquartered'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 4)\n 'rachilla'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('ventriculogram', ['luggie', 'septectomy', 'unproctored', 'volition', 'straked', 'oliver', 'telescopic', 'scarabaeoid'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)\n 'ventriculogram'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('arsonvalization', ['nondisarmament', 'arsonvalization', 'ketyl', 'tussle', 'rhabdomysarcoma'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)\n 'arsonvalization'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('photospectroscopical', ['unenclosed', 'sagacious', 'saur', 'gloveress', 'limbless', 'daresay', 'mysticize'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 8)\n 'photospectroscopical'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('tupara', ['unkerchiefed', 'dormant', 'triplite', 'bimuscular', 'insider', 'coadjacency', 'unslighted', 'perichordal'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)\n 'dormant'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('odontoglossate', ['odontoglossate', 'conceivableness'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 4)\n 'odontoglossate'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('retro', ['grayback'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)\n 'retro'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('pung', ['campoo'], lambda x, y, lim: 
min(lim + 1, abs(len(x) - len(y))), 1)\n 'pung'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('liberal', ['owse', 'ingenerable', 'patrol', 'kenosis', 'wetted'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 6)\n 'kenosis'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('disclaimer', ['psiloi', 'kusti', 'vallation', 'reprehensive', 'blameworthiness', 'proteiform', 'taintless', 'incruent', 'wednesday', 'codebtor'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 4)\n 'proteiform'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('orthological', ['consentaneous', 'orthological'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)\n 'orthological'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('amylemia', ['chirotonsory', 'loiter', 'ulnad', 'ticklebrain'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)\n 'amylemia'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('amendableness', ['baked', 'nonpriestly', 'unfavorably', 'amendableness', 'curatorship', 'intermediacy'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)\n 'amendableness'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('gammoning', ['weariedly', 'elongation', 'xanthous', 'squatty', 'dermad', 'iamatology', 'hexachloride', 'womanize', 'favorably'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 8)\n 'weariedly'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('siliciferous', ['siliciferous'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)\n 'siliciferous'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('patrization', ['deuteropathy', 'pregracile'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)\n 'deuteropathy'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('royetously', ['coster', 'microbiological', 'royetously'], lambda x, y, lim: min(lim + 1, abs(len(x) - 
len(y))), 8)\n 'royetously'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('unbewritten', ['camphane', 'unbewritten', 'meditationist', 'hydriform'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 8)\n 'unbewritten'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('searing', ['ultrapapist', 'shriekingly', 'scratchiness', 'searing', 'pot', 'valanche', 'subterraqueous', 'helleboraceous'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)\n 'searing'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('arara', ['synergistically', 'prerecital', 'lozengeways', 'coessentially', 'cubicontravariant', 'snootiness', 'hetaerocracy', 'acaudate', 'simperer'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 5)\n 'acaudate'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('uptowner', ['hopyard'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)\n 'hopyard'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('presettlement', ['previsit'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)\n 'presettlement'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('soak', ['upside', 'demirevetment', 'undelineated', 'excusative', 'engagingness'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 6)\n 'upside'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('reduction', ['gym'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)\n 'reduction'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('doctorship', ['peccable', 'jussive', 'doctorship', 'overgladly', 'lurdanism', 'channel', 'malpublication', 'derringer', 'amental'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 9)\n 'doctorship'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('wintry', ['scyllitol', 'seringhi', 'ditchdown', 'procursive', 'unwholesome', 'unholiday', 'eureka', 'feathertop'], lambda x, y, lim: 
min(lim + 1, abs(len(x) - len(y))), 7)\n 'eureka'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('transborder', ['conciliative', 'undercoachman', 'phasogeneous', 'philobrutish'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)\n 'conciliative'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('pericycloid', ['viertelein', 'felicitation', 'aortolith', 'nonpresbyter', 'germinable', 'illegibleness', 'undercondition', 'introverted', 'noselessly', 'tramming'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)\n 'introverted'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('indissolvability', ['hotbox', 'tokay', 'palaeofauna', 'indissolvability'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 9)\n 'indissolvability'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('soubrettish', ['unadvisably', 'unoften', 'unsecreted', 'precessional', 'erosional'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 8)\n 'unadvisably'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('semiopacity', ['animotheism', 'recoupment', 'juvenescence', 'scalable', 'thai', 'semiopacity'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)\n 'semiopacity'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('penwomanship', ['pyrocondensation', 'spooler', 'archorrhea', 'penwomanship', 'acheirus', 'lieutenant', 'plumless'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 3)\n 'penwomanship'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('subcordiform', ['subcordiform'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)\n 'subcordiform'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('quarrelsomely', ['concert', 'canonics', 'hip', 'uncrested', 'ectoethmoid', 'supertutelary', 'ignore'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 6)\n 'supertutelary'\n ", 'hidden': False, 'locked': False}, 
{'code': "\n >>> autocorrect('ditchdown', ['gonapophysis', 'permeability'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 1)\n 'ditchdown'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('suborbiculate', ['prisonable', 'rhapsodist', 'suborbiculate'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 9)\n 'suborbiculate'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('quiesce', ['synedria', 'apesthesia', 'squawbush', 'devourer', 'tetany', 'quiesce'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 7)\n 'quiesce'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('shiv', ['unconceivableness', 'inappetence'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 6)\n 'shiv'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('shuttleheaded', ['uterovaginal', 'extraparochially', 'serolipase'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)\n 'shuttleheaded'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('hilarity', ['valorously', 'hilarity', 'fungilliform', 'haven', 'torsk', 'thing', 'pickerel', 'refilm'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 4)\n 'hilarity'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('prefunctional', ['kep'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 2)\n 'prefunctional'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('oxypurine', ['overpensiveness', 'overindustrialize', 'lightweight', 'provinciality', 'telestereoscope', 'vastidity'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 0)\n 'vastidity'\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> autocorrect('forewarningly', ['foxery'], lambda x, y, lim: min(lim + 1, abs(len(x) - len(y))), 9)\n 'foxery'\n ", 'hidden': False, 'locked': False}], 'scored': True, 'setup': '\n >>> from typing import autocorrect, lines_from_file\n ', 'teardown': '', 'type': 'doctest'}]} |
# puzzle3a.py
def main():
calc_slope(1, 1)
calc_slope(3, 1)
calc_slope(5, 1)
calc_slope(7, 1)
calc_slope(1, 2)
def calc_slope(horiz_step = 3, vert_step = 1):
tree_char = "#"
horiz_pos = 0
trees = 0
input_file = open("input.txt", "r")
lines = input_file.readlines()
line_len = len(lines[0]) - 1 # Subtract 1 to account for '\n'
lines = lines[vert_step::vert_step]
for line in lines:
horiz_pos = (horiz_pos + horiz_step) % line_len
if line[horiz_pos] == tree_char:
trees += 1
print("Trees encountered: " + str(trees))
return trees
if __name__ == "__main__":
main() | def main():
calc_slope(1, 1)
calc_slope(3, 1)
calc_slope(5, 1)
calc_slope(7, 1)
calc_slope(1, 2)
def calc_slope(horiz_step=3, vert_step=1):
tree_char = '#'
horiz_pos = 0
trees = 0
input_file = open('input.txt', 'r')
lines = input_file.readlines()
line_len = len(lines[0]) - 1
lines = lines[vert_step::vert_step]
for line in lines:
horiz_pos = (horiz_pos + horiz_step) % line_len
if line[horiz_pos] == tree_char:
trees += 1
print('Trees encountered: ' + str(trees))
return trees
if __name__ == '__main__':
main() |
"""Exceptions that pertain to Flashcards-level functionality."""
class FlashCardsError(Exception):
"""Base class for other Flashcards-level exceptions."""
def __init__(self, message: str) -> None:
super().__init__(f"Flashcards Error: {message}")
class InvalidConfiguration(FlashCardsError):
"""Something was attempted that is not possible for the current configuration."""
def __init__(self, message: str) -> None:
super().__init__(f"Invalid configuration: {message}")
class MediaPlayerError(FlashCardsError):
"""Something bad happened in / with the media player"""
| """Exceptions that pertain to Flashcards-level functionality."""
class Flashcardserror(Exception):
    """Base class for other Flashcards-level exceptions."""

    def __init__(self, message: str) -> None:
        super().__init__(f'Flashcards Error: {message}')


class Invalidconfiguration(Flashcardserror):
    """Something was attempted that is not possible for the current configuration."""

    # Bug fix: the original inherited from the undefined name 'FlashCardsError',
    # which raised NameError at import time; the base here is 'Flashcardserror'.
    def __init__(self, message: str) -> None:
        super().__init__(f'Invalid configuration: {message}')


class Mediaplayererror(Flashcardserror):
    """Something bad happened in / with the media player"""
#
# PySNMP MIB module RADLAN-AGGREGATEVLAN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/RADLAN-AGGREGATEVLAN-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:45:06 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
rnd, = mibBuilder.importSymbols("RADLAN-MIB", "rnd")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, Unsigned32, Integer32, ModuleIdentity, NotificationType, Bits, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, IpAddress, iso, Counter64, TimeTicks, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Unsigned32", "Integer32", "ModuleIdentity", "NotificationType", "Bits", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "IpAddress", "iso", "Counter64", "TimeTicks", "ObjectIdentity")
RowStatus, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TextualConvention", "DisplayString")
rlAggregateVlan = ModuleIdentity((1, 3, 6, 1, 4, 1, 89, 73))
rlAggregateVlan.setRevisions(('2007-01-02 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: rlAggregateVlan.setRevisionsDescriptions(('Initial revision.',))
if mibBuilder.loadTexts: rlAggregateVlan.setLastUpdated('200701020000Z')
if mibBuilder.loadTexts: rlAggregateVlan.setOrganization('Radlan - a MARVELL company. Marvell Semiconductor, Inc.')
if mibBuilder.loadTexts: rlAggregateVlan.setContactInfo('www.marvell.com')
if mibBuilder.loadTexts: rlAggregateVlan.setDescription('This private MIB module defines Aggregate Vlan private MIBs.')
rlAggregateVlanMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 89, 73, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlAggregateVlanMibVersion.setStatus('current')
if mibBuilder.loadTexts: rlAggregateVlanMibVersion.setDescription("MIB's version, the current version is 1.")
rlAggregateVlanTable = MibTable((1, 3, 6, 1, 4, 1, 89, 73, 2), )
if mibBuilder.loadTexts: rlAggregateVlanTable.setStatus('current')
if mibBuilder.loadTexts: rlAggregateVlanTable.setDescription('The table creates an aggregateVlans, the IfIndex is from 10000')
rlAggregateVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 73, 2, 1), ).setIndexNames((0, "RADLAN-AGGREGATEVLAN-MIB", "rlAggregateVlanIndex"))
if mibBuilder.loadTexts: rlAggregateVlanEntry.setStatus('current')
if mibBuilder.loadTexts: rlAggregateVlanEntry.setDescription('The row definition for this table.')
rlAggregateVlanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 73, 2, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: rlAggregateVlanIndex.setStatus('current')
if mibBuilder.loadTexts: rlAggregateVlanIndex.setDescription('This index indicate the aggrigateVlan id, the aggregate vlan index is starting from 10000 ')
rlAggregateVlanName = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 73, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rlAggregateVlanName.setStatus('current')
if mibBuilder.loadTexts: rlAggregateVlanName.setDescription('The name of the aggregateVlan ')
rlAggregateVlanPhysAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 73, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("default", 1), ("reserve", 2))).clone('default')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rlAggregateVlanPhysAddressType.setStatus('current')
if mibBuilder.loadTexts: rlAggregateVlanPhysAddressType.setDescription(' This variable indicates whether the physical address assigned to this VLAN should be the default one or be chosen from the set of reserved physical addresses of the device.')
rlAggregateVlanStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 73, 2, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rlAggregateVlanStatus.setStatus('current')
if mibBuilder.loadTexts: rlAggregateVlanStatus.setDescription("The status of the aggregateVlan table entry. It's used to delete an entry")
rlAggregateSubVlanTable = MibTable((1, 3, 6, 1, 4, 1, 89, 73, 3), )
if mibBuilder.loadTexts: rlAggregateSubVlanTable.setStatus('current')
if mibBuilder.loadTexts: rlAggregateSubVlanTable.setDescription('The table indicates all the allocated sub-vlans to the aggregateVlans, an entry in the rlAggregateVlanTable must be exist before allocating the subVlans')
rlAggregateSubVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 73, 3, 1), ).setIndexNames((0, "RADLAN-AGGREGATEVLAN-MIB", "rlAggregateVlanIndex"), (0, "RADLAN-AGGREGATEVLAN-MIB", "rlAggregateSubVlanIfIndex"))
if mibBuilder.loadTexts: rlAggregateSubVlanEntry.setStatus('current')
if mibBuilder.loadTexts: rlAggregateSubVlanEntry.setDescription('The row definition for this table.')
rlAggregateSubVlanIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 73, 3, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlAggregateSubVlanIfIndex.setStatus('current')
if mibBuilder.loadTexts: rlAggregateSubVlanIfIndex.setDescription('Indicate the subVlan that allocated to the aggregate vlan')
rlAggregateSubVlanStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 73, 3, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rlAggregateSubVlanStatus.setStatus('current')
if mibBuilder.loadTexts: rlAggregateSubVlanStatus.setDescription("The status of the aggregateSubVlan table entry. It's used to delete an entry")
rlAggregateVlanArpProxy = MibScalar((1, 3, 6, 1, 4, 1, 89, 73, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlAggregateVlanArpProxy.setStatus('current')
if mibBuilder.loadTexts: rlAggregateVlanArpProxy.setDescription('When ARP Proxy is enabled, the router can respond to ARP requests for nodes located on different sub-vlans, which belong to the same Super VLAN.The router responds with its own MAC address. When ARP Proxy is disabled, the router responds only to ARP requests for its own IP addresses.')
mibBuilder.exportSymbols("RADLAN-AGGREGATEVLAN-MIB", rlAggregateVlanName=rlAggregateVlanName, rlAggregateVlan=rlAggregateVlan, rlAggregateSubVlanTable=rlAggregateSubVlanTable, PYSNMP_MODULE_ID=rlAggregateVlan, rlAggregateVlanIndex=rlAggregateVlanIndex, rlAggregateSubVlanEntry=rlAggregateSubVlanEntry, rlAggregateVlanEntry=rlAggregateVlanEntry, rlAggregateVlanArpProxy=rlAggregateVlanArpProxy, rlAggregateVlanTable=rlAggregateVlanTable, rlAggregateSubVlanStatus=rlAggregateSubVlanStatus, rlAggregateSubVlanIfIndex=rlAggregateSubVlanIfIndex, rlAggregateVlanPhysAddressType=rlAggregateVlanPhysAddressType, rlAggregateVlanStatus=rlAggregateVlanStatus, rlAggregateVlanMibVersion=rlAggregateVlanMibVersion)
| (integer, octet_string, object_identifier) = mibBuilder.importSymbols('ASN1', 'Integer', 'OctetString', 'ObjectIdentifier')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(value_range_constraint, constraints_union, single_value_constraint, value_size_constraint, constraints_intersection) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ValueRangeConstraint', 'ConstraintsUnion', 'SingleValueConstraint', 'ValueSizeConstraint', 'ConstraintsIntersection')
(interface_index,) = mibBuilder.importSymbols('IF-MIB', 'InterfaceIndex')
(rnd,) = mibBuilder.importSymbols('RADLAN-MIB', 'rnd')
(notification_group, module_compliance) = mibBuilder.importSymbols('SNMPv2-CONF', 'NotificationGroup', 'ModuleCompliance')
(mib_identifier, unsigned32, integer32, module_identity, notification_type, bits, counter32, mib_scalar, mib_table, mib_table_row, mib_table_column, gauge32, ip_address, iso, counter64, time_ticks, object_identity) = mibBuilder.importSymbols('SNMPv2-SMI', 'MibIdentifier', 'Unsigned32', 'Integer32', 'ModuleIdentity', 'NotificationType', 'Bits', 'Counter32', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'Gauge32', 'IpAddress', 'iso', 'Counter64', 'TimeTicks', 'ObjectIdentity')
(row_status, textual_convention, display_string) = mibBuilder.importSymbols('SNMPv2-TC', 'RowStatus', 'TextualConvention', 'DisplayString')
# Bug fix: this generated binding assigned snake_case names but kept calling
# the original camelCase identifiers (e.g. rlAggregateVlan.setRevisions),
# which raised NameError on first use.  All references now use the
# snake_case bindings consistently; the exported MIB symbol names (keyword
# names in exportSymbols) are unchanged.
rl_aggregate_vlan = module_identity((1, 3, 6, 1, 4, 1, 89, 73))
rl_aggregate_vlan.setRevisions(('2007-01-02 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts:
        rl_aggregate_vlan.setRevisionsDescriptions(('Initial revision.',))
if mibBuilder.loadTexts:
    rl_aggregate_vlan.setLastUpdated('200701020000Z')
if mibBuilder.loadTexts:
    rl_aggregate_vlan.setOrganization('Radlan - a MARVELL company. Marvell Semiconductor, Inc.')
if mibBuilder.loadTexts:
    rl_aggregate_vlan.setContactInfo('www.marvell.com')
if mibBuilder.loadTexts:
    rl_aggregate_vlan.setDescription('This private MIB module defines Aggregate Vlan private MIBs.')
rl_aggregate_vlan_mib_version = mib_scalar((1, 3, 6, 1, 4, 1, 89, 73, 1), integer32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
    rl_aggregate_vlan_mib_version.setStatus('current')
if mibBuilder.loadTexts:
    rl_aggregate_vlan_mib_version.setDescription("MIB's version, the current version is 1.")
rl_aggregate_vlan_table = mib_table((1, 3, 6, 1, 4, 1, 89, 73, 2))
if mibBuilder.loadTexts:
    rl_aggregate_vlan_table.setStatus('current')
if mibBuilder.loadTexts:
    rl_aggregate_vlan_table.setDescription('The table creates an aggregateVlans, the IfIndex is from 10000')
rl_aggregate_vlan_entry = mib_table_row((1, 3, 6, 1, 4, 1, 89, 73, 2, 1)).setIndexNames((0, 'RADLAN-AGGREGATEVLAN-MIB', 'rlAggregateVlanIndex'))
if mibBuilder.loadTexts:
    rl_aggregate_vlan_entry.setStatus('current')
if mibBuilder.loadTexts:
    rl_aggregate_vlan_entry.setDescription('The row definition for this table.')
rl_aggregate_vlan_index = mib_table_column((1, 3, 6, 1, 4, 1, 89, 73, 2, 1, 1), interface_index())
if mibBuilder.loadTexts:
    rl_aggregate_vlan_index.setStatus('current')
if mibBuilder.loadTexts:
    rl_aggregate_vlan_index.setDescription('This index indicate the aggrigateVlan id, the aggregate vlan index is starting from 10000 ')
rl_aggregate_vlan_name = mib_table_column((1, 3, 6, 1, 4, 1, 89, 73, 2, 1, 2), display_string().subtype(subtypeSpec=value_size_constraint(0, 32))).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
    rl_aggregate_vlan_name.setStatus('current')
if mibBuilder.loadTexts:
    rl_aggregate_vlan_name.setDescription('The name of the aggregateVlan ')
rl_aggregate_vlan_phys_address_type = mib_table_column((1, 3, 6, 1, 4, 1, 89, 73, 2, 1, 3), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2))).clone(namedValues=named_values(('default', 1), ('reserve', 2))).clone('default')).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
    rl_aggregate_vlan_phys_address_type.setStatus('current')
if mibBuilder.loadTexts:
    rl_aggregate_vlan_phys_address_type.setDescription(' This variable indicates whether the physical address assigned to this VLAN should be the default one or be chosen from the set of reserved physical addresses of the device.')
rl_aggregate_vlan_status = mib_table_column((1, 3, 6, 1, 4, 1, 89, 73, 2, 1, 4), row_status()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
    rl_aggregate_vlan_status.setStatus('current')
if mibBuilder.loadTexts:
    rl_aggregate_vlan_status.setDescription("The status of the aggregateVlan table entry. It's used to delete an entry")
rl_aggregate_sub_vlan_table = mib_table((1, 3, 6, 1, 4, 1, 89, 73, 3))
if mibBuilder.loadTexts:
    rl_aggregate_sub_vlan_table.setStatus('current')
if mibBuilder.loadTexts:
    rl_aggregate_sub_vlan_table.setDescription('The table indicates all the allocated sub-vlans to the aggregateVlans, an entry in the rlAggregateVlanTable must be exist before allocating the subVlans')
rl_aggregate_sub_vlan_entry = mib_table_row((1, 3, 6, 1, 4, 1, 89, 73, 3, 1)).setIndexNames((0, 'RADLAN-AGGREGATEVLAN-MIB', 'rlAggregateVlanIndex'), (0, 'RADLAN-AGGREGATEVLAN-MIB', 'rlAggregateSubVlanIfIndex'))
if mibBuilder.loadTexts:
    rl_aggregate_sub_vlan_entry.setStatus('current')
if mibBuilder.loadTexts:
    rl_aggregate_sub_vlan_entry.setDescription('The row definition for this table.')
rl_aggregate_sub_vlan_if_index = mib_table_column((1, 3, 6, 1, 4, 1, 89, 73, 3, 1, 1), interface_index()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
    rl_aggregate_sub_vlan_if_index.setStatus('current')
if mibBuilder.loadTexts:
    rl_aggregate_sub_vlan_if_index.setDescription('Indicate the subVlan that allocated to the aggregate vlan')
rl_aggregate_sub_vlan_status = mib_table_column((1, 3, 6, 1, 4, 1, 89, 73, 3, 1, 2), row_status()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
    rl_aggregate_sub_vlan_status.setStatus('current')
if mibBuilder.loadTexts:
    rl_aggregate_sub_vlan_status.setDescription("The status of the aggregateSubVlan table entry. It's used to delete an entry")
rl_aggregate_vlan_arp_proxy = mib_scalar((1, 3, 6, 1, 4, 1, 89, 73, 4), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2))).clone(namedValues=named_values(('enable', 1), ('disable', 2))).clone('disable')).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    rl_aggregate_vlan_arp_proxy.setStatus('current')
if mibBuilder.loadTexts:
    rl_aggregate_vlan_arp_proxy.setDescription('When ARP Proxy is enabled, the router can respond to ARP requests for nodes located on different sub-vlans, which belong to the same Super VLAN.The router responds with its own MAC address. When ARP Proxy is disabled, the router responds only to ARP requests for its own IP addresses.')
mibBuilder.exportSymbols('RADLAN-AGGREGATEVLAN-MIB', rlAggregateVlanName=rl_aggregate_vlan_name, rlAggregateVlan=rl_aggregate_vlan, rlAggregateSubVlanTable=rl_aggregate_sub_vlan_table, PYSNMP_MODULE_ID=rl_aggregate_vlan, rlAggregateVlanIndex=rl_aggregate_vlan_index, rlAggregateSubVlanEntry=rl_aggregate_sub_vlan_entry, rlAggregateVlanEntry=rl_aggregate_vlan_entry, rlAggregateVlanArpProxy=rl_aggregate_vlan_arp_proxy, rlAggregateVlanTable=rl_aggregate_vlan_table, rlAggregateSubVlanStatus=rl_aggregate_sub_vlan_status, rlAggregateSubVlanIfIndex=rl_aggregate_sub_vlan_if_index, rlAggregateVlanPhysAddressType=rl_aggregate_vlan_phys_address_type, rlAggregateVlanStatus=rl_aggregate_vlan_status, rlAggregateVlanMibVersion=rl_aggregate_vlan_mib_version)
# Title : Url shortner
# Author : Kiran Raj R.
# Date : 07:11:2020
class URL_short:
    """Minimal in-memory URL shortener mapping original URLs to hex ids."""

    def __init__(self):
        # Bug fix: the original used mutable *class* attributes, so every
        # instance shared one counter and one mapping; state is now
        # per-instance.
        self.url_id = 0
        self.url_dict = {}

    def url_shortner(self, orginal_url):
        """Return a short URL for orginal_url, reusing any existing mapping."""
        if orginal_url in self.url_dict:
            url = self.url_dict[orginal_url]
            return f"{orginal_url} already has a shorten url: my_url.com/{url}"
        url = str(hex(self.url_id))
        self.url_dict[orginal_url] = url
        self.url_id += 1
        return f"my_url.com/{url}"

    def return_site(self, short_url):
        """Return the original URL for short_url, or a not-found message."""
        token = short_url.split('/')[1]
        for key, value in self.url_dict.items():
            if value == token:
                return key
        return f"{short_url} not found!!!"
# Demo: shorten three distinct URLs (ids 0x0-0x2); the repeated
# 'facebook.com' reuses its existing id.
url_obj = URL_short()
site1 = url_obj.url_shortner('google.com')
site2 = url_obj.url_shortner('facebook.com')
site3 = url_obj.url_shortner('instagram.com')
site4 = url_obj.url_shortner('facebook.com')
# print(site1, site2, site3, site4)
# print(url_obj.url_dict)
# 0x1 resolves to facebook.com; 0x21 was never issued and reports not-found.
print(url_obj.return_site('my_url.com/0x1'))
print(url_obj.return_site('my_url.com/0x21'))
print(site4) | class Url_Short:
url_id = 0
url_dict = {}
    def url_shortner(self, orginal_url):
        """Return a short URL for orginal_url, reusing an existing mapping
        when one is present; ids are sequential ints rendered as hex."""
        if orginal_url in self.url_dict:
            url = self.url_dict[orginal_url]
            return f'{orginal_url} already has a shorten url: my_url.com/{url}'
        else:
            url = str(hex(self.url_id))
            self.url_dict[orginal_url] = url
            # NOTE(review): this creates an instance attribute shadowing the
            # shared class counter on first use.
            self.url_id += 1
            return f'my_url.com/{url}'
    def return_site(self, short_url):
        """Reverse lookup: return the original URL for short_url, or a
        not-found message when the token was never issued."""
        url = short_url.split('/')[1]
        site = [key for (key, value) in self.url_dict.items() if value == url]
        if site == []:
            return f'{short_url} not found!!!'
        else:
            return site[0]
# Bug fix: the original called url_short(), an undefined name — the class
# defined above is 'Url_Short'.
url_obj = Url_Short()
site1 = url_obj.url_shortner('google.com')
site2 = url_obj.url_shortner('facebook.com')
site3 = url_obj.url_shortner('instagram.com')
site4 = url_obj.url_shortner('facebook.com')
print(url_obj.return_site('my_url.com/0x1'))
print(url_obj.return_site('my_url.com/0x21'))
print(site4)
"""
Exceptions raised by cli-toolkit
"""
class ScriptError(Exception):
    """
    Errors raised during script processing.
    """
| """
Exceptions raised by cli-toolkit
"""
# NOTE(review): class name deviates from CapWords ('ScriptError'); renaming
# would change the public interface, so it is only flagged here.
class Scripterror(Exception):
    """
    Errors raised during script processing.
    """
def snake_to_camel(s):
    """Convert a snake_case identifier to camelCase.

    The first word keeps its original casing; each subsequent word is
    capitalized.  (The original was an unimplemented stub whose doctest
    failed.)

    >>> snake_to_camel('snake_to_camel')
    'snakeToCamel'
    """
    head, *tail = s.split('_')
    return head + ''.join(word.capitalize() for word in tail)
| def snake_to_camel(s):
"""
>>> snake_to_camel('snake_to_camel')
'snakeToCamel'
"""
pass |
# -*- coding: utf-8 -*-
__title__ = 'gym_breakout_pygame'
__description__ = 'Gym Breakout environment using Pygame'
__url__ = 'https://github.com/whitemech/gym-breakout-pygame.git'
__version__ = '0.1.1'
__author__ = 'Marco Favorito, Luca Iocchi'
__author_email__ = 'favorito@diag.uniroma1.it, iocchi@diag.uniroma1.it'
__license__ = 'Apache License 2.0'
__copyright__ = '2019 Marco Favorito, Luca Iocchi'
| __title__ = 'gym_breakout_pygame'
__description__ = 'Gym Breakout environment using Pygame'
__url__ = 'https://github.com/whitemech/gym-breakout-pygame.git'
__version__ = '0.1.1'
__author__ = 'Marco Favorito, Luca Iocchi'
__author_email__ = 'favorito@diag.uniroma1.it, iocchi@diag.uniroma1.it'
__license__ = 'Apache License 2.0'
__copyright__ = '2019 Marco Favorito, Luca Iocchi' |
def numUniqueEmails(self, emails):
    """
    Count distinct delivery addresses: dots in the local part are ignored
    and everything from the first '+' on is dropped; the domain is kept
    verbatim.  (Dead commented-out alternative implementation removed.)
    :type emails: List[str]
    :rtype: int
    """
    delivered = set()
    for address in emails:
        parts = address.split("@")
        # Normalize the local part: strip dots, then cut at the first '+'
        # (split with no '+' simply returns the whole string).
        local = parts[0].replace(".", "").split("+", 1)[0]
        delivered.add(local + "@" + parts[1])
    return len(delivered)
Input = ["test.email+alex@leetcode.com","test.e.mail+bob.cathy@leetcode.com","testemail+david@lee.tcode.com"]
# Output: 2
# Explanation: "testemail@leetcode.com" and "testemail@lee.tcode.com" actually receive mails | def num_unique_emails(self, emails):
"""
:type emails: List[str]
:rtype: int
"""
email_set = set()
for i in emails:
email = i.split('@')
local = email[0].replace('.', '')
locate_plus = local.find('+')
if locate_plus != -1:
email_set.add(local[:locate_plus] + '@' + email[1])
else:
email_set.add(local + '@' + email[1])
return len(email_set)
input = ['test.email+alex@leetcode.com', 'test.e.mail+bob.cathy@leetcode.com', 'testemail+david@lee.tcode.com'] |
class Solution(object):
    def missingNumber(self, nums):
        """Return the one value absent from nums, a permutation of 0..n
        with a single element missing.

        Uses the Gauss sum n*(n+1)//2 with *floor* division — the original
        used '/', which returns a float in Python 3 (e.g. 2.0 instead of 2)
        — and no longer shadows the builtin sum().

        :type nums: List[int]
        :rtype: int
        """
        n = len(nums)
        return n * (n + 1) // 2 - sum(nums)
| class Solution(object):
def missing_number(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
l = len(nums)
sum = (0 + l) * (l + 1) / 2
for n in nums:
sum -= n
return sum |
class ContactList(list):
    def search(self, name):
        """Return all contacts whose .name contains the search string."""
        # Comprehension replaces the original append loop (same order, same
        # substring test).
        return [contact for contact in self if name in contact.name]
| class Contactlist(list):
def search(self, name):
"""Return all contacts that contain the search value in their name"""
matching_contact = []
for contact in self:
if name in contact.name:
matching_contact.append(contact)
return matching_contact |
# MOOC-Python Task
# takuron@github
def lcm(n1, n2):
    """Return the least common multiple of two positive integers.

    Bug fix: the original scanned upward starting at max(n1, n2) + 1, so it
    could never return max(n1, n2) itself — e.g. it answered lcm(4, 2) == 8
    and lcm(3, 3) == 6 instead of 4 and 3.
    """
    from math import gcd  # local import keeps the snippet self-contained
    return n1 * n2 // gcd(n1, n2)
# Read the two integers from stdin (blank prompts); the LCM is printed below.
num1=int(input(""))
num2=int(input(""))
print(lcm(num1,num2)) | def lcm(n1, n2):
maxx = 0
if n1 >= n2:
maxx = n1
else:
maxx = n2
i = 0
while 1:
i = i + 1
if (maxx + i) % n1 == 0 and (maxx + i) % n2 == 0:
return maxx + i
# Read two integers from stdin (blank prompts) and print their LCM.
num1 = int(input(''))
num2 = int(input(''))
print(lcm(num1, num2))
# The marketing team is spending way too much time typing in hashtags.
# Let's help them with our own Hashtag Generator!
#
# Here's the deal:
#
# It must start with a hashtag (#).
# All words must have their first letter capitalized.
# If the final result is longer than 140 chars it must return false.
# If the input or the result is an empty string it must return false.
# Examples
# " Hello there thanks for trying my Kata" => "#HelloThereThanksForTryingMyKata"
# " Hello World " => "#HelloWorld"
# "" => false
def generate_hashtag(s):
    """Build a camel-cased hashtag from s.

    Each word gets its first letter capitalized (rest lowered, matching the
    original's behavior).  Returns False when the input contains no words —
    the original returned the bare '#' for whitespace-only input — or when
    the resulting tag (including '#') is longer than 140 characters.
    """
    words = s.split()
    if not words:
        return False
    tag = "#" + "".join(word.capitalize() for word in words)
    return tag if len(tag) <= 140 else False
| def generate_hashtag(s):
output_str = ''
first_letter = True
if not s:
return False
output_str = output_str + '#'
for x in s:
if first_letter and x != ' ':
output_str += x.upper()
first_letter = False
elif x != ' ':
output_str += x.lower()
else:
first_letter = True
if len(output_str) > 140:
return False
else:
return output_str |
class Solution:
    def isPowerOfThree(self, n: int) -> bool:
        """Return True iff n is a positive power of three (3**0, 3**1, ...).

        Repeatedly divides by 3 with *integer* division.  The original used
        '/=' (float division), which loses precision for very large n, and
        carried a redundant digit-sum pre-check.
        """
        if n < 1:
            return False
        while n % 3 == 0:
            n //= 3
        return n == 1
# Alternate Approach
# NOTE(review): this second definition rebinds the name `Solution`,
# shadowing the class defined just above in this snippet.
class Solution:
    def isPowerOfThree(self, n: int) -> bool:
        if n < 1:
            return False
        else:
            # 1162261467 == 3**19, the largest power of three in signed
            # 32-bit range; n divides it exactly iff n is a power of three.
            return 1162261467 % n == 0
| class Solution:
def is_power_of_three(self, n: int) -> bool:
if n < 1:
return False
if n == 1:
return True
if sum(list(map(int, str(n)))) % 3 != 0:
return False
else:
while n > 1:
if n % 3 == 0:
n /= 3
else:
return False
if n != 1:
return False
else:
return True
class Solution:

    def is_power_of_three(self, n: int) -> bool:
        """Return True iff n is a positive power of three.

        3**19 == 1162261467 is the largest power of three fitting a signed
        32-bit int, so n is a power of three exactly when it is positive
        and divides that constant evenly.
        """
        return n >= 1 and 1162261467 % n == 0
"""Static Token And Credential Scanner CI integrations.
SPDX-License-Identifier: BSD-3-Clause
"""
__title__ = "stacs-ci"
__summary__ = "Static Token And Credential Scanner CI Integrations."
__version__ = "0.1.6"
__author__ = "Peter Adkins"
__uri__ = "https://www.github.com/stacscan/stacs-integration/"
__license__ = "BSD-3-Clause"
| """Static Token And Credential Scanner CI integrations.
SPDX-License-Identifier: BSD-3-Clause
"""
__title__ = 'stacs-ci'
__summary__ = 'Static Token And Credential Scanner CI Integrations.'
__version__ = '0.1.6'
__author__ = 'Peter Adkins'
__uri__ = 'https://www.github.com/stacscan/stacs-integration/'
__license__ = 'BSD-3-Clause' |
def bubble_sort(items):
    """Sort *items* in place, ascending, using classic bubble sort."""
    # After each full pass the largest remaining element has bubbled to the
    # end, so the unsorted region shrinks by one from the right.
    unsorted_end = len(items) - 1
    while unsorted_end > 0:
        for j in range(unsorted_end):
            if items[j] > items[j + 1]:
                items[j], items[j + 1] = items[j + 1], items[j]
        unsorted_end -= 1
def f(n): #bubble sort worst case
bubble_sort(range(n,0,-1)) | def bubble_sort(items):
for i in range(len(items)):
for j in range(len(items) - 1 - i):
if items[j] > items[j + 1]:
(items[j], items[j + 1]) = (items[j + 1], items[j])
def f(n):
    """Drive bubble_sort on its worst case: a descending run of length n."""
    # Bug fix: range objects are immutable and bubble_sort assigns by index,
    # so the original raised TypeError for any n >= 2; materialize a list.
    bubble_sort(list(range(n, 0, -1)))
"""
Global information to be used by the module.
"""
class Question:
    """A single multiple-choice question: pool/question ids, prompt text,
    the correct answer key, and the four choice texts (A-D)."""
    # Class-level *annotations* only; instances receive values in __init__.
    q_p_id: int
    q_id: str
    q_text: str
    q_answer: str
    q_a: str
    q_b: str
    q_c: str
    q_d: str
    def __init__(
        self,
        q_p_id,
        q_id,
        q_text,
        q_answer,
        q_a,
        q_b,
        q_c,
        q_d,
    ):
        self.q_p_id = q_p_id
        self.q_id = q_id
        self.q_text = q_text
        self.q_answer = q_answer
        self.q_a = q_a
        self.q_b = q_b
        self.q_c = q_c
        self.q_d = q_d
    def __str__(self):
        # Render one labeled field per line by successively appending.
        temp = f" Pool ID: {self.q_p_id}\n"
        temp = f"{temp}Question ID: {self.q_id}\n"
        temp = f"{temp} Text: {self.q_text}\n"
        temp = f"{temp}Answer: {self.q_answer}\n"
        temp = f"{temp} A: {self.q_a}\n"
        temp = f"{temp} B: {self.q_b}\n"
        temp = f"{temp} C: {self.q_c}\n"
        temp = f"{temp} D: {self.q_d}"
        return temp
| """
Global information to be used by the module.
"""
class Question:
    """A single multiple-choice question: pool/question ids, prompt text,
    the correct answer key, and the four choice texts (A-D)."""
    # Class-level *annotations* only; instances receive values in __init__.
    q_p_id: int
    q_id: str
    q_text: str
    q_answer: str
    q_a: str
    q_b: str
    q_c: str
    q_d: str
    def __init__(self, q_p_id, q_id, q_text, q_answer, q_a, q_b, q_c, q_d):
        self.q_p_id = q_p_id
        self.q_id = q_id
        self.q_text = q_text
        self.q_answer = q_answer
        self.q_a = q_a
        self.q_b = q_b
        self.q_c = q_c
        self.q_d = q_d
    def __str__(self):
        # Render one labeled field per line by successively appending.
        temp = f' Pool ID: {self.q_p_id}\n'
        temp = f'{temp}Question ID: {self.q_id}\n'
        temp = f'{temp} Text: {self.q_text}\n'
        temp = f'{temp}Answer: {self.q_answer}\n'
        temp = f'{temp} A: {self.q_a}\n'
        temp = f'{temp} B: {self.q_b}\n'
        temp = f'{temp} C: {self.q_c}\n'
        temp = f'{temp} D: {self.q_d}'
        return temp
# Rename chess-piece tile images so filenames sort by total pixel
# brightness (np.sum of the grayscale image); requires os, cv2, numpy.
a = 'C:\\Users\\Jason\\Documents\\GitHub\\chesspdftofen\\data\\out\\yasser\\WhiteQueen'
l = []
for e in sorted(os.listdir(a)):
    # flag 0 -> load as single-channel grayscale
    i = cv2.imread(os.path.join(a, e), 0)
    h,w = i.shape
    # l.append([np.sum(i[h//4:h//4*3, w//4:w//4*3]), e])
    l.append([np.sum(i), e])
l.sort()
print(l)
# NOTE(review): loop variable i shadows the image variable above — harmless
# here, but confusing.
for i, k in enumerate(l):
    # aa = k[1].find('_')
    # bb = k[1][aa:]
    # os.rename(os.path.join(a, k[1]), os.path.join(a, str(i) + bb))
    os.rename(os.path.join(a, k[1]), os.path.join(a, ('%06d_' % (i,)) + '_' + k[1]))
os.rename(os.path.join(a, k[1]), os.path.join(a, ('%06d_' % (i,)) + '_' + k[1])) | a = 'C:\\Users\\Jason\\Documents\\GitHub\\chesspdftofen\\data\\out\\yasser\\WhiteQueen'
l = []
for e in sorted(os.listdir(a)):
i = cv2.imread(os.path.join(a, e), 0)
(h, w) = i.shape
l.append([np.sum(i), e])
l.sort()
print(l)
for (i, k) in enumerate(l):
os.rename(os.path.join(a, k[1]), os.path.join(a, '%06d_' % (i,) + '_' + k[1])) |
'''
Created on Sep 3, 2010
@author: ilg
'''
class WriterBase(object):
    """Base class for photo-collection writers: stores output configuration
    via setters and exposes no-op write hooks for subclasses to override."""

    def __init__(self, sName):
        """Remember the writer's name; verbosity off, no local URL yet."""
        self.bVerbose = False
        self.sName = sName
        self.sLocalUrl = None

    def setVerbose(self, bVerbose):
        self.bVerbose = bVerbose

    def setUserUrl(self, sUrl):
        self.sUserUrl = sUrl

    def setLocalUrl(self, sUrl):
        self.sLocalUrl = sUrl

    def setOutputStream(self, oOutStream):
        self.oOutStream = oOutStream

    def setHierarchicalDepth(self, s):
        self.sHierarchicalDepth = s

    def setCollection(self, sID, sTitle, sDescription, sSmallIcon, sLargeIcon):
        """Record the current collection's metadata."""
        self.sCollectionID = sID
        self.sCollectionTitle = sTitle
        self.sCollectionDescription = sDescription
        self.sCollectionSmallIcon = sSmallIcon
        self.sCollectionLargeIcon = sLargeIcon

    def setStatistics(self, nCollections, nSets, nPhotos):
        """Record totals for collections, sets and photos."""
        self.nCollections = nCollections
        self.nSets = nSets
        self.nPhotos = nPhotos

    def setPhotoset(self, sID, sTitle, sDescription, nPhotos, sIcon):
        """Record the current photoset's metadata."""
        self.sPhotosetID = sID
        self.sPhotosetTitle = sTitle
        self.sPhotosetDescription = sDescription
        self.nPhotosetPhotos = nPhotos
        self.sPhotosetIcon = sIcon

    def setDepth(self, nDepth):
        """Set nesting depth and derive the tab indent (depth - 1 tabs)."""
        self.nDepth = nDepth
        self.sIndent = '\t' * max(nDepth - 1, 0)

    def incDepth(self):
        self.setDepth(self.nDepth + 1)

    def decDepth(self):
        self.setDepth(self.nDepth - 1)

    # --- no-op hooks; concrete writers override the ones they need ---

    def writeBegin(self):
        return

    def writeEnd(self):
        return

    def writeHeaderBegin(self):
        return

    def writeHeaderEnd(self):
        return

    def writeCollectionBegin(self):
        return

    def writeEmbeddedBegin(self):
        return

    def writeEmbeddedEnd(self):
        return

    def writeCollectionEnd(self):
        return

    def writePhotosetBegin(self):
        return

    def writePhotosetEnd(self):
        return
| """
Created on Sep 3, 2010
@author: ilg
"""
class Writerbase(object):
    """Base class for photo-collection writers: stores output configuration
    via setters and exposes no-op write hooks for subclasses to override."""

    def __init__(self, sName):
        """Remember the writer's name; verbosity off, no local URL yet."""
        self.bVerbose = False
        self.sName = sName
        self.sLocalUrl = None

    def set_verbose(self, bVerbose):
        self.bVerbose = bVerbose

    def set_user_url(self, sUrl):
        self.sUserUrl = sUrl

    def set_local_url(self, sUrl):
        self.sLocalUrl = sUrl

    def set_output_stream(self, oOutStream):
        self.oOutStream = oOutStream

    def set_hierarchical_depth(self, s):
        self.sHierarchicalDepth = s

    def set_collection(self, sID, sTitle, sDescription, sSmallIcon, sLargeIcon):
        """Record the current collection's metadata."""
        self.sCollectionID = sID
        self.sCollectionTitle = sTitle
        self.sCollectionDescription = sDescription
        self.sCollectionSmallIcon = sSmallIcon
        self.sCollectionLargeIcon = sLargeIcon

    def set_statistics(self, nCollections, nSets, nPhotos):
        """Record totals for collections, sets and photos."""
        self.nCollections = nCollections
        self.nSets = nSets
        self.nPhotos = nPhotos

    def set_photoset(self, sID, sTitle, sDescription, nPhotos, sIcon):
        """Record the current photoset's metadata."""
        self.sPhotosetID = sID
        self.sPhotosetTitle = sTitle
        self.sPhotosetDescription = sDescription
        self.nPhotosetPhotos = nPhotos
        self.sPhotosetIcon = sIcon

    def set_depth(self, nDepth):
        """Set nesting depth and derive the tab indent (depth - 1 tabs)."""
        self.nDepth = nDepth
        self.sIndent = '\t' * max(nDepth - 1, 0)

    def inc_depth(self):
        # Bug fix: the original called self.setDepth, which no longer exists
        # after the rename to set_depth, raising AttributeError.
        self.set_depth(self.nDepth + 1)

    def dec_depth(self):
        # Bug fix: same AttributeError as inc_depth in the original.
        self.set_depth(self.nDepth - 1)

    # --- no-op hooks; concrete writers override the ones they need ---

    def write_begin(self):
        return

    def write_end(self):
        return

    def write_header_begin(self):
        return

    def write_header_end(self):
        return

    def write_collection_begin(self):
        return

    def write_embedded_begin(self):
        return

    def write_embedded_end(self):
        return

    def write_collection_end(self):
        return

    def write_photoset_begin(self):
        return

    def write_photoset_end(self):
        return
def process_old_format(self, cg_resources_requirements):
    """
    Parse the legacy requirement string, e.g.
    'njs,request_cpu=1,request_memory=1,request_disk=1,request_color=blue'.
    Regex matching of the client group is assumed to be true.
    :param cg_resources_requirements: comma-separated spec, client group first
    :return: dict with the client group, its regex expression, the joined
             requirements statement, and the (possibly overridden) resources
    """
    cg_res_req_split = cg_resources_requirements.split(",")
    # First element is the client group; the rest are key=value pairs.
    client_group = cg_res_req_split.pop(0)
    requirements = {}
    for item in cg_res_req_split:
        req, value = item.split("=")
        requirements[req] = value
    # Start from the catalog defaults for this client group.
    resources = self.get_default_resources(client_group)
    # Bug fix: the original compared with 'is None or is ""' — identity
    # comparison against a string literal is unreliable; use truthiness.
    if not client_group:
        client_group = resources[self.CG]
    requirements_statement = []
    for key, value in requirements.items():
        if key in resources:
            # Overwrite the default resources with catalog entries.
            resources[key] = value
        else:
            # Anything unknown goes into the requirements statement.
            requirements_statement.append(f"{key}={value}")
    # Drop the special resource keys; pop() with a default avoids the
    # original's KeyError when a key was not supplied in the request.
    # (Debug prints of resources/requirements removed.)
    requirements.pop(self.REQUEST_MEMORY, None)
    requirements.pop(self.REQUEST_CPUS, None)
    requirements.pop(self.REQUEST_DISK, None)
    # Client-group matching is regex-based by default.
    cge = f'regexp("{client_group}",CLIENTGROUP)'
    requirements_statement.append(cge)
    rv = dict()
    rv[self.CG] = client_group
    rv["client_group_expression"] = cge
    # NOTE(review): joined without a separator, as in the original — multiple
    # custom requirements concatenate back-to-back; confirm this is intended.
    rv["requirements"] = "".join(requirements_statement)
    rv["requirements_statement"] = cge
    for key, value in resources.items():
        rv[key] = value
    return rv
def process_new_format(self, client_group_and_requirements):
    """
    Parse the new JSON requirement format, e.g.
    {'client_group' : 'njs', 'request_cpu' : 1, 'request_disk' : ...}.
    :param client_group_and_requirements: JSON-encoded requirements string
    :return: the decoded requirements dict (the original parsed the JSON
             but dropped the result, implicitly returning None)
    """
    return json.loads(client_group_and_requirements)
def generate_requirements(self, cg_resources_requirements):
    """
    Build a resource_requirements object from either the old comma-separated
    format or the new JSON format (detected by a '{' in the input).
    :param cg_resources_requirements: requirement spec in either format
    :return: the result of self.resource_requirements(...)
    """
    # (Debug print of the raw spec removed.)
    if "{" in cg_resources_requirements:
        reqs = self.process_new_format(cg_resources_requirements)
    else:
        reqs = self.process_old_format(cg_resources_requirements)
    self.check_for_missing_requirements(reqs)
    # Bug fix: the original had an unreachable 'return r' (undefined name)
    # after this return statement; removed.
    return self.resource_requirements(
        request_cpus=reqs["request_cpus"],
        request_disk=reqs["request_disk"],
        request_memory=reqs["request_memory"],
        requirements_statement=reqs["requirements"],
    )
@staticmethod
def check_for_missing_requirements(requirements):
for item in (
"client_group_expression",
"request_cpus",
"request_disk",
"request_memory",
):
if item not in requirements:
raise MissingCondorRequirementsException(
f"{item} not found in requirements"
)
    def _process_requirements_new_format(self, requirements):
        # NOTE(review): this method looks unfinished/dead code.  The
        # `requirements` parameter is immediately shadowed by an empty dict,
        # so `cg` is always "" and nothing here can vary with real input.
        requirements = dict()
        cg = requirements.get("client_group", "")
        # NOTE(review): `is ""` is an identity comparison against a literal;
        # this should be `== ""` (CPython emits a SyntaxWarning for this).
        if cg is "":
            # requirements[
            if bool(requirements.get("regex", False)) is True:
                # NOTE(review): `cg` is a str here, so item assignment would
                # raise TypeError; presumably a result dict was intended.
                cg["client_group_requirement"] = f'regexp("{cg}",CLIENTGROUP)'
            else:
                # NOTE(review): `client_group` is undefined in this scope
                # (NameError if ever reached) -- confirm the intended variable.
                cg["client_group_requirement"] = f"+CLIENTGROUP == {client_group} "
| def process_old_format(self, cg_resources_requirements):
"""
Old format is njs,request_cpu=1,request_memory=1,request_disk=1,request_color=blue
Regex is assumed to be true
:param cg_resources_requirements:
:return:
"""
cg_res_req_split = cg_resources_requirements.split(',')
client_group = cg_res_req_split.pop(0)
requirements = dict()
for item in cg_res_req_split:
(req, value) = item.split('=')
requirements[req] = value
resources = self.get_default_resources(client_group)
if client_group is None or client_group is '':
client_group = resources[self.CG]
requirements_statement = []
for (key, value) in requirements.items():
if key in resources:
resources[key] = value
else:
requirements_statement.append(f'{key}={value}')
print(resources)
print(requirements)
del requirements[self.REQUEST_MEMORY]
del requirements[self.REQUEST_CPUS]
del requirements[self.REQUEST_DISK]
cge = f'regexp("{client_group}",CLIENTGROUP)'
requirements_statement.append(cge)
rv = dict()
rv[self.CG] = client_group
rv['client_group_expression'] = cge
rv['requirements'] = ''.join(requirements_statement)
rv['requirements_statement'] = cge
for (key, value) in resources.items():
rv[key] = value
return rv
def process_new_format(self, client_group_and_requirements):
"""
New format is {'client_group' : 'njs', 'request_cpu' : 1, 'request_disk' :
:param client_group_and_requirements:
:return:
"""
reqs = json.loads(client_group_and_requirements)
def generate_requirements(self, cg_resources_requirements):
print(cg_resources_requirements)
if '{' in cg_resources_requirements:
reqs = self.process_new_format(cg_resources_requirements)
else:
reqs = self.process_old_format(cg_resources_requirements)
self.check_for_missing_requirements(reqs)
return self.resource_requirements(request_cpus=reqs['request_cpus'], request_disk=reqs['request_disk'], request_memory=reqs['request_memory'], requirements_statement=reqs['requirements'])
return r
@staticmethod
def check_for_missing_requirements(requirements):
for item in ('client_group_expression', 'request_cpus', 'request_disk', 'request_memory'):
if item not in requirements:
raise missing_condor_requirements_exception(f'{item} not found in requirements')
def _process_requirements_new_format(self, requirements):
requirements = dict()
cg = requirements.get('client_group', '')
if cg is '':
if bool(requirements.get('regex', False)) is True:
cg['client_group_requirement'] = f'regexp("{cg}",CLIENTGROUP)'
else:
cg['client_group_requirement'] = f'+CLIENTGROUP == {client_group} ' |
"""
Given a list accounts, each element accounts[i] is a list of strings, where the first element accounts[i][0] is a name, and the rest of the elements are emails representing emails of the account.
Now, we would like to merge these accounts. Two accounts definitely belong to the same person if there is some email that is common to both accounts. Note that even if two accounts have the same name, they may belong to different people as people could have the same name. A person can have any number of accounts initially, but all of their accounts definitely have the same name.
After merging the accounts, return the accounts in the following format: the first element of each account is the name, and the rest of the elements are emails in sorted order. The accounts themselves can be returned in any order.
Example 1:
Input:
accounts = [["John", "johnsmith@mail.com", "john00@mail.com"], ["John", "johnnybravo@mail.com"], ["John", "johnsmith@mail.com", "john_newyork@mail.com"], ["Mary", "mary@mail.com"]]
Output: [["John", 'john00@mail.com', 'john_newyork@mail.com', 'johnsmith@mail.com'], ["John", "johnnybravo@mail.com"], ["Mary", "mary@mail.com"]]
Explanation:
The first and third John's are the same person as they have the common email "johnsmith@mail.com".
The second John and Mary are different people as none of their email addresses are used by other accounts.
We could return these lists in any order, for example the answer [['Mary', 'mary@mail.com'], ['John', 'johnnybravo@mail.com'],
['John', 'john00@mail.com', 'john_newyork@mail.com', 'johnsmith@mail.com']] would still be accepted.
Note:
The length of accounts will be in the range [1, 1000].
The length of accounts[i] will be in the range [1, 10].
The length of accounts[i][j] will be in the range [1, 30].
"""
class Solution:
def accountsMerge(self, accounts: List[List[str]]) -> List[List[str]]:
def get_connections(accounts):
track = {}
emails = {}
for i, acc in enumerate(accounts):
if i not in track:
track[i] = []
for j, email in enumerate(acc):
if j == 0:
continue
if email not in emails:
emails[email] = []
for k in emails[email]:
if k not in track:
track[k] = []
track[k].append(i)
track[i].append(k)
emails[email].append(i)
return track
track = get_connections(accounts)
visited = set()
parts = []
for i, acc in enumerate(accounts):
if i in visited:
continue
part = []
stack = [i]
while stack:
curr = stack.pop()
if curr in visited:
continue
visited.add(curr)
part.append(curr)
for ne in track.get(curr, []):
if ne in visited:
continue
stack.append(ne)
parts.append(part)
ret = []
for part in parts:
name = accounts[part[0]][0]
acc = set()
for pp in part:
acc = acc.union(set(accounts[pp][1:]))
ret.append([name] + sorted(acc))
return ret | """
Given a list accounts, each element accounts[i] is a list of strings, where the first element accounts[i][0] is a name, and the rest of the elements are emails representing emails of the account.
Now, we would like to merge these accounts. Two accounts definitely belong to the same person if there is some email that is common to both accounts. Note that even if two accounts have the same name, they may belong to different people as people could have the same name. A person can have any number of accounts initially, but all of their accounts definitely have the same name.
After merging the accounts, return the accounts in the following format: the first element of each account is the name, and the rest of the elements are emails in sorted order. The accounts themselves can be returned in any order.
Example 1:
Input:
accounts = [["John", "johnsmith@mail.com", "john00@mail.com"], ["John", "johnnybravo@mail.com"], ["John", "johnsmith@mail.com", "john_newyork@mail.com"], ["Mary", "mary@mail.com"]]
Output: [["John", 'john00@mail.com', 'john_newyork@mail.com', 'johnsmith@mail.com'], ["John", "johnnybravo@mail.com"], ["Mary", "mary@mail.com"]]
Explanation:
The first and third John's are the same person as they have the common email "johnsmith@mail.com".
The second John and Mary are different people as none of their email addresses are used by other accounts.
We could return these lists in any order, for example the answer [['Mary', 'mary@mail.com'], ['John', 'johnnybravo@mail.com'],
['John', 'john00@mail.com', 'john_newyork@mail.com', 'johnsmith@mail.com']] would still be accepted.
Note:
The length of accounts will be in the range [1, 1000].
The length of accounts[i] will be in the range [1, 10].
The length of accounts[i][j] will be in the range [1, 30].
"""
class Solution:
def accounts_merge(self, accounts: List[List[str]]) -> List[List[str]]:
def get_connections(accounts):
track = {}
emails = {}
for (i, acc) in enumerate(accounts):
if i not in track:
track[i] = []
for (j, email) in enumerate(acc):
if j == 0:
continue
if email not in emails:
emails[email] = []
for k in emails[email]:
if k not in track:
track[k] = []
track[k].append(i)
track[i].append(k)
emails[email].append(i)
return track
track = get_connections(accounts)
visited = set()
parts = []
for (i, acc) in enumerate(accounts):
if i in visited:
continue
part = []
stack = [i]
while stack:
curr = stack.pop()
if curr in visited:
continue
visited.add(curr)
part.append(curr)
for ne in track.get(curr, []):
if ne in visited:
continue
stack.append(ne)
parts.append(part)
ret = []
for part in parts:
name = accounts[part[0]][0]
acc = set()
for pp in part:
acc = acc.union(set(accounts[pp][1:]))
ret.append([name] + sorted(acc))
return ret |
class Solution(object):
    def maxProfit(self, prices):
        """Best profit from one buy followed by one sell.

        :type prices: List[int]
        :rtype: int
        """
        # Single pass: remember the cheapest price seen so far and the best
        # profit achievable by selling at the current price.
        if not prices or len(prices) == 1:
            return 0
        cheapest = prices[0]
        best = 0
        for price in prices:
            cheapest = min(cheapest, price)
            best = max(best, price - cheapest)
        return best
# Quick manual check of Solution.maxProfit; the result (5) is discarded
# rather than printed.
a = Solution()
a.maxProfit([7, 1, 5, 3, 6, 4])
| class Solution(object):
def max_profit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if not prices:
return 0
n = len(prices)
if n == 1:
return 0
dp = [0 for i in range(n + 1)]
tmpmin = prices[0]
for i in range(1, n + 1):
tmpmin = min(tmpmin, prices[i - 1])
tmpres = prices[i - 1] - tmpmin
dp[i] = max(tmpres, dp[i - 1])
return dp[-1]
# BUG FIX: Python is case-sensitive; the class defined above is `Solution`,
# so `solution()` raised NameError.
a = Solution()
a.maxProfit([7, 1, 5, 3, 6, 4]) |
class Solution:
    def calculateTime(self, keyboard: str, word: str) -> int:
        """Total finger travel needed to type ``word`` on a one-row keyboard.

        The finger starts at index 0; moving between indices i and j costs
        ``abs(i - j)``.

        :param keyboard: string giving the key layout (each key once)
        :param word: word to type; every character must appear in ``keyboard``
        :return: sum of absolute index distances travelled
        """
        # Robustness fix: an empty word needs no movement (previously this
        # raised IndexError on word[0]).
        if not word:
            return 0
        positions = {key: i for i, key in enumerate(keyboard)}
        # Cost from the starting position (index 0) to the first key, then
        # between each consecutive pair of keys.
        total = positions[word[0]]
        for prev, curr in zip(word, word[1:]):
            total += abs(positions[curr] - positions[prev])
        return total
| class Solution:
def calculate_time(self, keyboard: str, word: str) -> int:
positions = {key: i for (i, key) in enumerate(keyboard)}
n = len(word)
ans = 0
for i in range(1, n):
ans += abs(positions[word[i]] - positions[word[i - 1]])
return ans + positions[word[0]] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utility functions.
"""
def _check_columns(df_to_check, cols) -> None:
"""Check that a list of required column names is in a data frame
Args:
df_to_check: A DataFrame to check columns on.
cols (Iterable[str]): Required columns.
Returns:
None
Raises:
ValueError: if required cols are not a subset of column names in
``df_to_check``.
Examples:
>> df = pd.DataFrame({'col_a': [1,2], 'col_b': [2,4]})
>> check_columns(df, ['col_c'])
ValueError: Missing columns: `{col_c}`
"""
if isinstance(cols, str):
cols = [cols]
if not set(cols).issubset(df_to_check.columns):
missing_cols = set(cols).difference(df_to_check.columns)
raise ValueError(f"Missing columns: `{missing_cols}`.")
return None
| """
Utility functions.
"""
def _check_columns(df_to_check, cols) -> None:
"""Check that a list of required column names is in a data frame
Args:
df_to_check: A DataFrame to check columns on.
cols (Iterable[str]): Required columns.
Returns:
None
Raises:
ValueError: if required cols are not a subset of column names in
``df_to_check``.
Examples:
>> df = pd.DataFrame({'col_a': [1,2], 'col_b': [2,4]})
>> check_columns(df, ['col_c'])
ValueError: Missing columns: `{col_c}`
"""
if isinstance(cols, str):
cols = [cols]
if not set(cols).issubset(df_to_check.columns):
missing_cols = set(cols).difference(df_to_check.columns)
raise value_error(f'Missing columns: `{missing_cols}`.')
return None |
# -*- coding: utf-8 -*-
"""
neutrino_api
This file was automatically generated for NeutrinoAPI by APIMATIC v2.0 ( https://apimatic.io ).
"""
class BrowserBotResponse(object):
    """Implementation of the 'Browser Bot Response' model.

    Holds everything Browser Bot captured for one page load: the URL and
    the decoded page content (usually HTML, JSON or XML), MIME type and
    title, HTTP status details and redirect information, the server IP,
    page load time, response headers, TLS/SSL details, any elements
    matched by the supplied selector, and the results of any executed
    JavaScript.  Failures are reported via ``is_error`` / ``is_timeout``
    together with ``error_message``.
    """

    # Mapping from Model property names to API (wire) property names.
    _names = {
        "url": "url",
        "content": "content",
        "mime_type": "mimeType",
        "title": "title",
        "is_error": "isError",
        "is_timeout": "isTimeout",
        "error_message": "errorMessage",
        "http_status_code": "httpStatusCode",
        "http_status_message": "httpStatusMessage",
        "is_http_ok": "isHttpOk",
        "is_http_redirect": "isHttpRedirect",
        "http_redirect_url": "httpRedirectUrl",
        "server_ip": "serverIp",
        "load_time": "loadTime",
        "response_headers": "responseHeaders",
        "is_secure": "isSecure",
        "security_details": "securityDetails",
        "elements": "elements",
        "exec_results": "execResults",
    }

    def __init__(self, url=None, content=None, mime_type=None, title=None,
                 is_error=None, is_timeout=None, error_message=None,
                 http_status_code=None, http_status_message=None,
                 is_http_ok=None, is_http_redirect=None,
                 http_redirect_url=None, server_ip=None, load_time=None,
                 response_headers=None, is_secure=None,
                 security_details=None, elements=None, exec_results=None):
        """Constructor for the BrowserBotResponse class."""
        # Every constructor argument maps 1:1 onto an attribute of the same
        # name, so assign them in bulk instead of one line per field.
        supplied = locals()
        for attr in self._names:
            setattr(self, attr, supplied[attr])

    @classmethod
    def from_dictionary(cls, dictionary):
        """Create an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description; missing keys become None.

        Returns:
            object: An instance of this structure class, or None when
            ``dictionary`` is None.
        """
        if dictionary is None:
            return None
        return cls(**{attr: dictionary.get(api_key)
                      for attr, api_key in cls._names.items()})
| """
neutrino_api
This file was automatically generated for NeutrinoAPI by APIMATIC v2.0 ( https://apimatic.io ).
"""
class Browserbotresponse(object):
"""Implementation of the 'Browser Bot Response' model.
TODO: type model description here.
Attributes:
url (string): The page URL
content (string): The complete raw, decompressed and decoded page
content. Usually will be either HTML, JSON or XML
mime_type (string): The document MIME type
title (string): The document title
is_error (bool): True if an error has occurred loading the page. Check
the 'error-message' field for details
is_timeout (bool): True if a timeout occurred while loading the page.
You can set the timeout with the request parameter 'timeout'
error_message (string): Contains the error message if an error has
occurred ('is-error' will be true)
http_status_code (int): The HTTP status code the URL returned
http_status_message (string): The HTTP status message the URL
returned
is_http_ok (bool): True if the HTTP status is OK (200)
is_http_redirect (bool): True if the URL responded with an HTTP
redirect
http_redirect_url (string): The redirected URL if the URL responded
with an HTTP redirect
server_ip (string): The HTTP servers IP address
load_time (int): The number of seconds taken to load the page (from
initial request until DOM ready)
response_headers (dict<object, string>): Map containing all the HTTP
response headers the URL responded with
is_secure (bool): True if the page is secured using TLS/SSL
security_details (dict<object, string>): Map containing details of the
TLS/SSL setup
elements (list of string): Array containing all the elements matching
the supplied selector. Each element object will contain the text
content, HTML content and all current element attributes
exec_results (list of string): If you executed any JavaScript this
array holds the results as objects
"""
_names = {'url': 'url', 'content': 'content', 'mime_type': 'mimeType', 'title': 'title', 'is_error': 'isError', 'is_timeout': 'isTimeout', 'error_message': 'errorMessage', 'http_status_code': 'httpStatusCode', 'http_status_message': 'httpStatusMessage', 'is_http_ok': 'isHttpOk', 'is_http_redirect': 'isHttpRedirect', 'http_redirect_url': 'httpRedirectUrl', 'server_ip': 'serverIp', 'load_time': 'loadTime', 'response_headers': 'responseHeaders', 'is_secure': 'isSecure', 'security_details': 'securityDetails', 'elements': 'elements', 'exec_results': 'execResults'}
def __init__(self, url=None, content=None, mime_type=None, title=None, is_error=None, is_timeout=None, error_message=None, http_status_code=None, http_status_message=None, is_http_ok=None, is_http_redirect=None, http_redirect_url=None, server_ip=None, load_time=None, response_headers=None, is_secure=None, security_details=None, elements=None, exec_results=None):
"""Constructor for the BrowserBotResponse class"""
self.url = url
self.content = content
self.mime_type = mime_type
self.title = title
self.is_error = is_error
self.is_timeout = is_timeout
self.error_message = error_message
self.http_status_code = http_status_code
self.http_status_message = http_status_message
self.is_http_ok = is_http_ok
self.is_http_redirect = is_http_redirect
self.http_redirect_url = http_redirect_url
self.server_ip = server_ip
self.load_time = load_time
self.response_headers = response_headers
self.is_secure = is_secure
self.security_details = security_details
self.elements = elements
self.exec_results = exec_results
@classmethod
def from_dictionary(cls, dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
url = dictionary.get('url')
content = dictionary.get('content')
mime_type = dictionary.get('mimeType')
title = dictionary.get('title')
is_error = dictionary.get('isError')
is_timeout = dictionary.get('isTimeout')
error_message = dictionary.get('errorMessage')
http_status_code = dictionary.get('httpStatusCode')
http_status_message = dictionary.get('httpStatusMessage')
is_http_ok = dictionary.get('isHttpOk')
is_http_redirect = dictionary.get('isHttpRedirect')
http_redirect_url = dictionary.get('httpRedirectUrl')
server_ip = dictionary.get('serverIp')
load_time = dictionary.get('loadTime')
response_headers = dictionary.get('responseHeaders')
is_secure = dictionary.get('isSecure')
security_details = dictionary.get('securityDetails')
elements = dictionary.get('elements')
exec_results = dictionary.get('execResults')
return cls(url, content, mime_type, title, is_error, is_timeout, error_message, http_status_code, http_status_message, is_http_ok, is_http_redirect, http_redirect_url, server_ip, load_time, response_headers, is_secure, security_details, elements, exec_results) |
# Tutorial: building lists with a loop vs. a list comprehension.
numbers = [0, 1, 2, 3, 4]
doubled_numbers = []
for num in numbers:
    doubled_numbers.append(num * 2)
print(doubled_numbers)
# -- List comprehension --
numbers = [0, 1, 2, 3, 4] # list(range(5)) is better
doubled_numbers = [num * 2 for num in numbers]
# [num * 2 for num in range(5)] would be even better.
print(doubled_numbers)
# -- You can add anything to the new list --
friend_ages = [22, 31, 35, 37]
age_strings = [f"My friend is {age} years old." for age in friend_ages]
print(age_strings)
# -- This includes things like --
names = ["Rolf", "Bob", "Jen"]
lower = [name.lower() for name in names]
# That is particularly useful for working with user input.
# By turning everything to lowercase, it's less likely we'll miss a match.
friend = input("Enter your friend name: ")
friends = ["Rolf", "Bob", "Jen", "Charlie", "Anne"]
friends_lower = [name.lower() for name in friends]
# Case-insensitive membership test against the lowercased list.
if friend.lower() in friends_lower:
print(f"I know {friend}!")
| numbers = [0, 1, 2, 3, 4]
doubled_numbers = []
for num in numbers:
doubled_numbers.append(num * 2)
print(doubled_numbers)
numbers = [0, 1, 2, 3, 4]
doubled_numbers = [num * 2 for num in numbers]
print(doubled_numbers)
friend_ages = [22, 31, 35, 37]
age_strings = [f'My friend is {age} years old.' for age in friend_ages]
print(age_strings)
names = ['Rolf', 'Bob', 'Jen']
lower = [name.lower() for name in names]
friend = input('Enter your friend name: ')
friends = ['Rolf', 'Bob', 'Jen', 'Charlie', 'Anne']
friends_lower = [name.lower() for name in friends]
if friend.lower() in friends_lower:
print(f'I know {friend}!') |
# internal library
def ceil_pow2(n):
    """Return the smallest x such that 2**x >= n (0 when n <= 1)."""
    x = 0
    while (1 << x) < n:
        x += 1
    return x
# internal library end
class segtree:
    """Segment tree over a monoid, modeled on the AC Library segtree.

    ``op`` is an associative binary operation and ``e()`` returns its
    identity element.  Supports point assignment and range products in
    O(log n).
    """
    def __init__(self, op, e, n=0, ary=None):
        """Build from either a length ``n`` (filled with e()) or ``ary``.

        BUG FIX: the mutable default argument ``ary=[]`` is replaced with a
        None sentinel; every existing call pattern behaves the same.
        """
        self.op = op
        self.e = e
        if ary is None:
            ary = []
        if n:
            ary = [e()] * n
        else:
            n = len(ary)
        self.n = n
        self.log = ceil_pow2(n)
        self.size = 1 << self.log
        # d[1] is the root; leaves live at d[size .. size+n-1], padded
        # out to a power of two with the identity element.
        self.d = [e()] * (2 * self.size)
        for i in range(n):
            self.d[self.size + i] = ary[i]
        for i in reversed(range(1, self.size)):
            self.update(i)
    def set(self, p, x):
        """Assign x to position p and recompute all ancestors."""
        p += self.size
        self.d[p] = x
        for i in range(1, self.log + 1):
            self.update(p >> i)
    def get(self, p):
        """Return the current value at position p."""
        return self.d[p + self.size]
    def prod(self, l, r):
        """Return op over the half-open range [l, r) (e() when l == r)."""
        sml = self.e()
        smr = self.e()
        l += self.size
        r += self.size
        while l < r:
            if l & 1:
                sml = self.op(sml, self.d[l])
                l += 1
            if r & 1:
                r -= 1
                smr = self.op(self.d[r], smr)
            l >>= 1
            r >>= 1
        return self.op(sml, smr)
    def all_prod(self):
        """Return op over the whole array (the root node)."""
        return self.d[1]
    def max_right(self, l, f):
        """Binary search: the largest r such that f(prod(l, r)) is true.

        Requires f(e()) to be true and f to be monotone over prefixes.
        """
        assert 0 <= l <= self.n
        if l == self.n:
            return self.n
        l += self.size
        sm = self.e()
        while True:
            while l % 2 == 0:
                l >>= 1
            if not f(self.op(sm, self.d[l])):
                # Descend to locate the exact boundary leaf.
                while l < self.size:
                    l *= 2
                    if f(self.op(sm, self.d[l])):
                        sm = self.op(sm, self.d[l])
                        l += 1
                return l - self.size
            sm = self.op(sm, self.d[l])
            l += 1
            if (l & -l) == l:
                break
        return self.n
    def min_left(self, r, f):
        """Binary search: the smallest l such that f(prod(l, r)) is true.

        Requires f(e()) to be true and f to be monotone over suffixes.
        BUG FIX: the descent previously never stepped to a child (missing
        ``r = 2 * r + 1``) and returned from inside the loop, producing
        wrong answers; the assert also rejected the valid input r == n.
        This now follows the AC Library reference implementation.
        """
        assert 0 <= r <= self.n
        if r == 0:
            return 0
        r += self.size
        sm = self.e()
        while True:
            r -= 1
            while r > 1 and (r & 1):
                r >>= 1
            if not f(self.op(self.d[r], sm)):
                while r < self.size:
                    r = 2 * r + 1
                    if f(self.op(self.d[r], sm)):
                        sm = self.op(self.d[r], sm)
                        r -= 1
                return r + 1 - self.size
            sm = self.op(self.d[r], sm)
            if (r & -r) == r:
                break
        return 0
    def update(self, k):
        """Recompute internal node k from its two children."""
        self.d[k] = self.op(self.d[2 * k], self.d[2 * k + 1])
def update(self, k):
self.d[k] = self.op(self.d[2 * k], self.d[2 * k + 1]) | def ceil_pow2(n):
x = 0
while 1 << x < n:
x += 1
return x
class Segtree:
def __init__(self, op, e, n=0, ary=[]):
self.op = op
self.e = e
if n:
ary = [e()] * n
else:
n = len(ary)
self.n = n
self.log = ceil_pow2(n)
self.size = 1 << self.log
self.d = [e()] * (2 * self.size)
for i in range(n):
self.d[self.size + i] = ary[i]
for i in reversed(range(1, self.size)):
self.update(i)
def set(self, p, x):
p += self.size
self.d[p] = x
for i in range(1, self.log + 1):
self.update(p >> i)
def get(self, p):
return self.d[p + self.size]
def prod(self, l, r):
sml = self.e()
smr = self.e()
l += self.size
r += self.size
while l < r:
if l & 1:
sml = self.op(sml, self.d[l])
l += 1
if r & 1:
r -= 1
smr = self.op(self.d[r], smr)
l >>= 1
r >>= 1
return self.op(sml, smr)
def all_prod(self):
return self.d[1]
def max_right(self, l, f):
if l == self.n:
return self.n
l += self.size
sm = self.e()
while 1:
while l % 2 == 0:
l >>= 1
if f(self.op(sm, self.d[l])) == 0:
while l < self.size:
l *= 2
if f(self.op(sm, self.d[l])):
sm = self.op(sm, self.d[l])
l += 1
return l - self.size
sm = self.op(sm, self.d[l])
l += 1
if l & -l == l:
break
return self.n
def min_left(self, r, f):
assert 0 <= r and r < self.n
if r == 0:
return 0
r += self.size
sm = self.e()
while 1:
r -= 1
while r > 1 and r & 1:
r >>= 1
if not f(self.op(self.d[r], sm)):
while r < self.size:
if f(self.op(self.d[r], sm)):
sm = self.op(sm, self.d[r])
r -= 1
return r + 1 - self.size
sm = self.op(self.d[r], sm)
if r & -r == r:
break
return 0
def update(self, k):
self.d[k] = self.op(self.d[2 * k], self.d[2 * k + 1]) |
class CompatibilityResult(object):
    """Outcome of checking one MergeRule against other rules."""

    def __init__(self, mergerule, incompatible_rule_class_names=None):
        """
        :param mergerule: MergeRule
        :param incompatible_rule_class_names: list of strings naming the
            rule classes this rule conflicts with (None/empty = compatible)
        """
        self._mergerule = mergerule
        # BUG FIX: the mutable default argument ([]) was shared across all
        # calls; a None sentinel avoids accidental cross-instance state.
        self._incompatible_rule_class_names = (
            [] if incompatible_rule_class_names is None
            else incompatible_rule_class_names
        )

    def rule(self):
        """Return the wrapped MergeRule."""
        return self._mergerule

    def rule_class_name(self):
        """Return the class name of the wrapped rule."""
        return self._mergerule.get_rule_class_name()

    def is_compatible(self):
        """True when no incompatibilities were recorded.

        BUG FIX: previously returned ``len(...) > 0`` — i.e. True exactly
        when incompatible rules existed, the inverse of the method's name.
        """
        return len(self._incompatible_rule_class_names) == 0

    def incompatible_rule_class_names(self):
        """Return the incompatible rule class names as a set."""
        return set(self._incompatible_rule_class_names)
| class Compatibilityresult(object):
def __init__(self, mergerule, incompatible_rule_class_names=[]):
"""
:param mergerule: MergeRule
:param incompatible_rule_class_names: list of strings
"""
self._mergerule = mergerule
self._incompatible_rule_class_names = incompatible_rule_class_names
def rule(self):
return self._mergerule
def rule_class_name(self):
return self._mergerule.get_rule_class_name()
def is_compatible(self):
return len(self._incompatible_rule_class_names) > 0
def incompatible_rule_class_names(self):
return set(self._incompatible_rule_class_names) |
class User:
    """A named user with an age; the class counts how many users exist."""

    # Class attribute shared by every instance.
    active_users = 0

    @classmethod
    def display_active(cls):
        """Human-readable summary of the active-user count."""
        return f"Total Active users are {cls.active_users}"

    def __init__(self, first_name, last_name, age):
        # Per-instance attributes.
        self.first_name = first_name
        self.last_name = last_name
        self.age = age
        # Creating an instance bumps the shared counter.
        User.active_users += 1

    def fullName(self):
        """First and last name separated by a single space."""
        return " ".join((self.first_name, self.last_name))

    def initials(self):
        """Dotted initials, e.g. 'Q.J.'."""
        return self.first_name[0] + "." + self.last_name[0] + "."

    def likes(self, thing):
        """Sentence stating that this user likes ``thing``."""
        return f"{self.first_name} likes {thing}"

    def isSenior(self):
        """True for users strictly older than 65."""
        return self.age > 65
# Demo: creating User instances bumps the shared active_users counter.
print('Active Users', User.active_users)
p = User('Qaidjohar', 'Jawadwala', 70)
q = User('Mustafa', 'Poona', 25)
print('Active Users', User.active_users)
r = User('Qaidjohar', 'Jawadwala', 70)
print('R Active Users', User.active_users)
s = User('Mustafa', 'Poona', 25)
print('Active Users', User.active_users)
# Class attributes are also reachable through an instance.
print('P Active Users', p.active_users)
print(User.display_active())
# BUG FIX: was `print(User.fullName(self))` -- `self` is undefined at
# module level (NameError); call the method on an instance instead.
print(p.fullName())
print(s.display_active())
| class User:
active_users = 0
@classmethod
def display_active(cls):
return f'Total Active users are {cls.active_users}'
def __init__(self, first_name, last_name, age):
self.first_name = first_name
self.last_name = last_name
self.age = age
User.active_users += 1
def full_name(self):
return f'{self.first_name} {self.last_name}'
def initials(self):
return f'{self.first_name[0]}.{self.last_name[0]}.'
def likes(self, thing):
return f'{self.first_name} likes {thing}'
def is_senior(self):
return self.age > 65
# Demo: creating User instances bumps the shared active_users counter.
print('Active Users', User.active_users)
# BUG FIX: the class defined above is `User`; `user(...)` raised NameError.
p = User('Qaidjohar', 'Jawadwala', 70)
q = User('Mustafa', 'Poona', 25)
print('Active Users', User.active_users)
r = User('Qaidjohar', 'Jawadwala', 70)
print('R Active Users', User.active_users)
s = User('Mustafa', 'Poona', 25)
print('Active Users', User.active_users)
print('P Active Users', p.active_users)
print(User.display_active())
# BUG FIX: `self` is undefined at module level, and this class names the
# method `full_name`; call it on an instance.
print(p.full_name())
print(s.display_active()) |
class TextStats:
    """Frequency statistics (symbols, bigrams, trigrams) for a text."""

    def _calc_symbol_freqs(self):
        """Count occurrences of every character (case-sensitive)."""
        self.symbol_freq = {}
        for c in self.text:
            # dict.get with a default replaces the explicit membership test.
            self.symbol_freq[c] = self.symbol_freq.get(c, 0) + 1

    @staticmethod
    def _is_valid_bigram(a, b):
        """Only count bigrams with 2 unique letters, as only those can
        be optimized. Consider ' a letter, as it tends to occur in common
        words, such as "don't". """
        return (a.isalpha() or a == "'") and \
               (b.isalpha() or b == "'") and a != b

    @staticmethod
    def _is_valid_trigram(a, b, c):
        """Only count trigrams with 3 unique letters, as only those can
        be optimized. Consider ' a letter, as it tends to occur in common
        words, such as "don't". """
        return (a.isalpha() or a == "'") and \
               (b.isalpha() or b == "'") and \
               (c.isalpha() or c == "'") and \
               a != b and b != c and c != a

    def _calc_bigrams(self):
        """Count valid (letter/apostrophe, distinct) lowercase bigrams."""
        self.bigrams = {}
        prev = ' '
        for c in self.text.lower():
            if self._is_valid_bigram(prev, c):
                bigram = (prev, c)
                self.bigrams[bigram] = self.bigrams.get(bigram, 0) + 1
            prev = c

    def _calc_trigrams(self):
        """Count valid (letter/apostrophe, pairwise-distinct) trigrams."""
        self.trigrams = {}
        prev2, prev1 = ' ', ' '
        for c in self.text.lower():
            if self._is_valid_trigram(prev2, prev1, c):
                trigram = (prev2, prev1, c)
                self.trigrams[trigram] = self.trigrams.get(trigram, 0) + 1
            prev2, prev1 = prev1, c

    def __init__(self, text):
        """Compute all statistics for ``text`` up front."""
        self.text = text
        self._calc_symbol_freqs()
        self._calc_bigrams()
        self._calc_trigrams()
class Textstats:
    """Frequency statistics (symbols, bigrams, trigrams) for a text.

    Attributes filled in by the constructor:
        text:        the original text, unmodified.
        symbol_freq: dict mapping each character (case-sensitive) to its count.
        bigrams:     dict mapping valid (a, b) letter pairs to their count.
        trigrams:    dict mapping valid (a, b, c) letter triples to their count.
    """

    def _calc_symbol_freqs(self):
        """Count every character of the raw (case-sensitive) text."""
        self.symbol_freq = {}
        for c in self.text:
            # dict.get replaces the explicit membership branch
            self.symbol_freq[c] = self.symbol_freq.get(c, 0) + 1

    @staticmethod
    def _is_valid_bigram(a, b):
        """Only count bigrams with 2 unique letters, as only those can be
        optimized. The apostrophe counts as a letter, as it tends to occur
        in common words, such as "don't".
        """
        def is_letter(ch):
            return ch.isalpha() or ch == "'"
        return is_letter(a) and is_letter(b) and a != b

    @staticmethod
    def _is_valid_trigram(a, b, c):
        """Only count trigrams with 3 unique letters, as only those can be
        optimized. The apostrophe counts as a letter, as it tends to occur
        in common words, such as "don't".
        """
        def is_letter(ch):
            return ch.isalpha() or ch == "'"
        return (is_letter(a) and is_letter(b) and is_letter(c)
                and a != b and b != c and c != a)

    def _calc_bigrams(self):
        """Count valid bigrams of the lowercased text into self.bigrams."""
        self.bigrams = {}
        prev = ' '
        for c in self.text.lower():
            if self._is_valid_bigram(prev, c):
                bigram = (prev, c)
                self.bigrams[bigram] = self.bigrams.get(bigram, 0) + 1
            prev = c

    def _calc_trigrams(self):
        """Count valid trigrams of the lowercased text into self.trigrams."""
        self.trigrams = {}
        prev2, prev1 = ' ', ' '
        for c in self.text.lower():
            if self._is_valid_trigram(prev2, prev1, c):
                trigram = (prev2, prev1, c)
                self.trigrams[trigram] = self.trigrams.get(trigram, 0) + 1
            prev2, prev1 = prev1, c

    def __init__(self, text):
        """Store *text* and precompute all frequency tables."""
        self.text = text
        self._calc_symbol_freqs()
        self._calc_bigrams()
        self._calc_trigrams()
test = {
'name': 'Problem 7',
'points': 3,
'suites': [
{
'cases': [
{
'code': r"""
>>> big_limit = 10
>>> meowstake_matches("wird", "wiry", big_limit)
1
>>> meowstake_matches("wird", "bird", big_limit)
1
>>> meowstake_matches("wird", "wir", big_limit)
1
>>> meowstake_matches("wird", "bwird", big_limit)
1
>>> meowstake_matches("speling", "spelling", big_limit)
1
>>> meowstake_matches("used", "use", big_limit)
1
>>> meowstake_matches("hash", "ash", big_limit)
1
>>> meowstake_matches("ash", "hash", big_limit)
1
>>> meowstake_matches("roses", "arose", big_limit) # roses -> aroses -> arose
2
>>> meowstake_matches("tesng", "testing", big_limit) # tesng -> testng -> testing
2
>>> meowstake_matches("rlogcul", "logical", big_limit) # rlogcul -> logcul -> logicul -> logical
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> small_words_list = ["spell", "nest", "test", "pest", "best", "bird", "wired",
... "abstraction", "abstract", "wire", "peeling", "gestate",
... "west", "spelling", "bastion"]
>>> autocorrect("speling", small_words_list, meowstake_matches, 10)
'spelling'
>>> autocorrect("abstrction", small_words_list, meowstake_matches, 10)
'abstraction'
>>> autocorrect("wird", small_words_list, meowstake_matches, 10)
'bird'
>>> autocorrect("gest", small_words_list, meowstake_matches, 10)
'nest'
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # ***Check that the recursion stops when the limit is reached***
>>> import trace, io
>>> from contextlib import redirect_stdout
>>> with io.StringIO() as buf, redirect_stdout(buf):
... trace.Trace(trace=True).runfunc(meowstake_matches, "someawe", "awesome", 3)
... output = buf.getvalue()
>>> len([line for line in output.split('\n') if 'funcname' in line]) < 1000
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('thong', 'thong', 100)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('place', 'wreat', 100)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('pray', 'okee', 100)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('cloit', 'cloit', 100)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('yond', 'snd', 100)
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('tb', 'tb', 100)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('gobi', 'gobi', 100)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('watap', 'woitap', 100)
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('baffy', 'btfi', k) > k for k in range(5)])
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('else', 'konak', k) > k for k in range(5)])
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('zygon', 'jzon', k) > k for k in range(5)])
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('lar', 'lar', 100)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('shop', 'wopd', 100)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('pc', 'pc', k) > k for k in range(2)])
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('sail', 'sail', 100)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('fiber', 'fbk', 100)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('doff', 'def', 100)
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('meile', 'mqeile', 100)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('donor', 'doinor', k) > k for k in range(6)])
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('meet', 'meeu', k) > k for k in range(4)])
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('tic', 'tih', k) > k for k in range(3)])
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('taft', 'hewer', k) > k for k in range(5)])
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('moorn', 'toxa', k) > k for k in range(5)])
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('hamal', 'hamal', k) > k for k in range(5)])
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('pridy', 'dance', 100)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('dekko', 'zbk', 100)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('julio', 'juio', k) > k for k in range(5)])
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('boist', 'spume', k) > k for k in range(5)])
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('jail', 'jaila', 100)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('cumin', 'goes', 100)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('civil', 'whose', k) > k for k in range(5)])
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('stead', 'ny', k) > k for k in range(5)])
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('mikie', 'mdiye', 100)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('utils', 'utils', k) > k for k in range(5)])
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('nuque', 'nuq', k) > k for k in range(5)])
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('chine', 'ziinx', k) > k for k in range(5)])
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('tour', 'erase', k) > k for k in range(5)])
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('ak', 'rose', 100)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('sawah', 'shape', k) > k for k in range(5)])
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('elb', 'logia', 100)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('noily', 'oibs', 100)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('fluid', 'grad', 100)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('titer', 'tskhteur', 100)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('shood', 'shood', 100)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('sher', 'xdhe', 100)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('dayal', 'qualm', 100)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('tenai', 'whata', 100)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('bow', 'how', 100)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('tony', 'togqq', k) > k for k in range(5)])
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('baby', 'ton', k) > k for k in range(4)])
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('seron', 'seron', 100)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('tame', 'tfme', k) > k for k in range(4)])
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('kissy', 'kisdsxk', 100)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('str', 'st', k) > k for k in range(3)])
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('enema', 'nemr', 100)
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('beden', 'beden', 100)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('coral', 'coral', 100)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('hack', 'rhack', 100)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('alan', 'alan', 100)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('aru', 'aru', 100)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('tail', 'taiil', 100)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('corps', 'ckcp', 100)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('kazi', 'kazi', 100)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('bone', 'bone', 100)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('dee', 'derv', k) > k for k in range(4)])
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('fuder', 'fuder', 100)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('harl', 'hhtar', 100)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('def', 'df', 100)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('moio', 'yomo', 100)
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('amnia', 'wna', k) > k for k in range(5)])
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('pair', 'pair', k) > k for k in range(4)])
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('peai', 'eabi', k) > k for k in range(4)])
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('pryse', 'prysvf', k) > k for k in range(6)])
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('amelu', 'samp', 100)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('weak', 'wk', 100)
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('atelo', 'atelo', 100)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('uc', 'kc', 100)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('strew', 'jaup', k) > k for k in range(5)])
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('dome', 'dume', k) > k for k in range(4)])
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('braze', 'sxaze', 100)
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('zaman', 'zadpamn', 100)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('twank', 'renne', 100)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('pinky', 'opiky', k) > k for k in range(5)])
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('spoke', 'spoke', k) > k for k in range(5)])
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('recto', 'recto', k) > k for k in range(5)])
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('ula', 'ula', 100)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('dame', 'froth', 100)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('grane', 'griae', 100)
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('cycad', 'cqcad', 100)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('creem', 'ashreem', 100)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('alky', 'alfy', k) > k for k in range(4)])
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('finds', 'fid', k) > k for k in range(5)])
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('argot', 'arxgot', k) > k for k in range(6)])
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('lc', 'roost', 100)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('mi', 'iran', 100)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('faded', 'fabehc', k) > k for k in range(6)])
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('slee', 'ble', k) > k for k in range(4)])
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> meowstake_matches('macro', 'macr', 100)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('bbs', 'bbj', k) > k for k in range(3)])
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> sum([meowstake_matches('roud', 'roud', k) > k for k in range(4)])
0
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> from cats import meowstake_matches, autocorrect
""",
'teardown': '',
'type': 'doctest'
}
]
}
| test = {'name': 'Problem 7', 'points': 3, 'suites': [{'cases': [{'code': '\n >>> big_limit = 10\n >>> meowstake_matches("wird", "wiry", big_limit)\n 1\n >>> meowstake_matches("wird", "bird", big_limit)\n 1\n >>> meowstake_matches("wird", "wir", big_limit)\n 1\n >>> meowstake_matches("wird", "bwird", big_limit)\n 1\n >>> meowstake_matches("speling", "spelling", big_limit)\n 1\n >>> meowstake_matches("used", "use", big_limit)\n 1\n >>> meowstake_matches("hash", "ash", big_limit)\n 1\n >>> meowstake_matches("ash", "hash", big_limit)\n 1\n >>> meowstake_matches("roses", "arose", big_limit) # roses -> aroses -> arose\n 2\n >>> meowstake_matches("tesng", "testing", big_limit) # tesng -> testng -> testing\n 2\n >>> meowstake_matches("rlogcul", "logical", big_limit) # rlogcul -> logcul -> logicul -> logical\n 3\n ', 'hidden': False, 'locked': False}, {'code': '\n >>> small_words_list = ["spell", "nest", "test", "pest", "best", "bird", "wired",\n ... "abstraction", "abstract", "wire", "peeling", "gestate",\n ... "west", "spelling", "bastion"]\n >>> autocorrect("speling", small_words_list, meowstake_matches, 10)\n \'spelling\'\n >>> autocorrect("abstrction", small_words_list, meowstake_matches, 10)\n \'abstraction\'\n >>> autocorrect("wird", small_words_list, meowstake_matches, 10)\n \'bird\'\n >>> autocorrect("gest", small_words_list, meowstake_matches, 10)\n \'nest\'\n ', 'hidden': False, 'locked': False}, {'code': '\n >>> # ***Check that the recursion stops when the limit is reached***\n >>> import trace, io\n >>> from contextlib import redirect_stdout\n >>> with io.StringIO() as buf, redirect_stdout(buf):\n ... trace.Trace(trace=True).runfunc(meowstake_matches, "someawe", "awesome", 3)\n ... 
output = buf.getvalue()\n >>> len([line for line in output.split(\'\\n\') if \'funcname\' in line]) < 1000\n True\n ', 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('thong', 'thong', 100)\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('place', 'wreat', 100)\n 5\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('pray', 'okee', 100)\n 4\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('cloit', 'cloit', 100)\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('yond', 'snd', 100)\n 2\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('tb', 'tb', 100)\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('gobi', 'gobi', 100)\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('watap', 'woitap', 100)\n 2\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('baffy', 'btfi', k) > k for k in range(5)])\n 3\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('else', 'konak', k) > k for k in range(5)])\n 5\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('zygon', 'jzon', k) > k for k in range(5)])\n 3\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('lar', 'lar', 100)\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('shop', 'wopd', 100)\n 3\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('pc', 'pc', k) > k for k in range(2)])\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('sail', 'sail', 100)\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('fiber', 'fbk', 100)\n 3\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('doff', 'def', 100)\n 2\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> 
meowstake_matches('meile', 'mqeile', 100)\n 1\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('donor', 'doinor', k) > k for k in range(6)])\n 1\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('meet', 'meeu', k) > k for k in range(4)])\n 1\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('tic', 'tih', k) > k for k in range(3)])\n 1\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('taft', 'hewer', k) > k for k in range(5)])\n 5\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('moorn', 'toxa', k) > k for k in range(5)])\n 4\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('hamal', 'hamal', k) > k for k in range(5)])\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('pridy', 'dance', 100)\n 5\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('dekko', 'zbk', 100)\n 4\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('julio', 'juio', k) > k for k in range(5)])\n 1\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('boist', 'spume', k) > k for k in range(5)])\n 5\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('jail', 'jaila', 100)\n 1\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('cumin', 'goes', 100)\n 5\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('civil', 'whose', k) > k for k in range(5)])\n 5\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('stead', 'ny', k) > k for k in range(5)])\n 5\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('mikie', 'mdiye', 100)\n 3\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('utils', 'utils', k) > k for k in range(5)])\n 0\n ", 'hidden': False, 'locked': False}, 
{'code': "\n >>> sum([meowstake_matches('nuque', 'nuq', k) > k for k in range(5)])\n 2\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('chine', 'ziinx', k) > k for k in range(5)])\n 3\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('tour', 'erase', k) > k for k in range(5)])\n 5\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('ak', 'rose', 100)\n 4\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('sawah', 'shape', k) > k for k in range(5)])\n 4\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('elb', 'logia', 100)\n 5\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('noily', 'oibs', 100)\n 3\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('fluid', 'grad', 100)\n 4\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('titer', 'tskhteur', 100)\n 4\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('shood', 'shood', 100)\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('sher', 'xdhe', 100)\n 3\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('dayal', 'qualm', 100)\n 4\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('tenai', 'whata', 100)\n 5\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('bow', 'how', 100)\n 1\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('tony', 'togqq', k) > k for k in range(5)])\n 3\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('baby', 'ton', k) > k for k in range(4)])\n 4\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('seron', 'seron', 100)\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('tame', 'tfme', k) > k for k in range(4)])\n 1\n ", 'hidden': False, 'locked': 
False}, {'code': "\n >>> meowstake_matches('kissy', 'kisdsxk', 100)\n 3\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('str', 'st', k) > k for k in range(3)])\n 1\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('enema', 'nemr', 100)\n 2\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('beden', 'beden', 100)\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('coral', 'coral', 100)\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('hack', 'rhack', 100)\n 1\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('alan', 'alan', 100)\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('aru', 'aru', 100)\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('tail', 'taiil', 100)\n 1\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('corps', 'ckcp', 100)\n 3\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('kazi', 'kazi', 100)\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('bone', 'bone', 100)\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('dee', 'derv', k) > k for k in range(4)])\n 2\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('fuder', 'fuder', 100)\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('harl', 'hhtar', 100)\n 3\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('def', 'df', 100)\n 1\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('moio', 'yomo', 100)\n 2\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('amnia', 'wna', k) > k for k in range(5)])\n 3\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('pair', 'pair', k) > k for k in range(4)])\n 0\n ", 'hidden': 
False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('peai', 'eabi', k) > k for k in range(4)])\n 2\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('pryse', 'prysvf', k) > k for k in range(6)])\n 2\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('amelu', 'samp', 100)\n 4\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('weak', 'wk', 100)\n 2\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('atelo', 'atelo', 100)\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('uc', 'kc', 100)\n 1\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('strew', 'jaup', k) > k for k in range(5)])\n 5\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('dome', 'dume', k) > k for k in range(4)])\n 1\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('braze', 'sxaze', 100)\n 2\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('zaman', 'zadpamn', 100)\n 3\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('twank', 'renne', 100)\n 4\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('pinky', 'opiky', k) > k for k in range(5)])\n 2\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('spoke', 'spoke', k) > k for k in range(5)])\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('recto', 'recto', k) > k for k in range(5)])\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('ula', 'ula', 100)\n 0\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('dame', 'froth', 100)\n 5\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('grane', 'griae', 100)\n 2\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('cycad', 'cqcad', 100)\n 1\n ", 
'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('creem', 'ashreem', 100)\n 3\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('alky', 'alfy', k) > k for k in range(4)])\n 1\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('finds', 'fid', k) > k for k in range(5)])\n 2\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('argot', 'arxgot', k) > k for k in range(6)])\n 1\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('lc', 'roost', 100)\n 5\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('mi', 'iran', 100)\n 4\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('faded', 'fabehc', k) > k for k in range(6)])\n 3\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('slee', 'ble', k) > k for k in range(4)])\n 2\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> meowstake_matches('macro', 'macr', 100)\n 1\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('bbs', 'bbj', k) > k for k in range(3)])\n 1\n ", 'hidden': False, 'locked': False}, {'code': "\n >>> sum([meowstake_matches('roud', 'roud', k) > k for k in range(4)])\n 0\n ", 'hidden': False, 'locked': False}], 'scored': True, 'setup': '\n >>> from cats import meowstake_matches, autocorrect\n ', 'teardown': '', 'type': 'doctest'}]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.