content stringlengths 7 1.05M | fixed_cases stringlengths 1 1.28M |
|---|---|
# -*- coding: utf-8 -*-
""" VITA Person Finder, Controllers
@author: nursix
@see: U{http://eden.sahanafoundation.org/wiki/BluePrintVITA}
"""
# web2py controller globals: the controller/function names of this request
prefix = request.controller
resourcename = request.function
# Bail out early if this module has been disabled in deployment settings
if prefix not in deployment_settings.modules:
    session.error = T("Module disabled!")
    redirect(URL(r=request, c="default", f="index"))
# -----------------------------------------------------------------------------
def shn_menu():
    """ Options menu """
    # Base menu: person search plus the missing-persons list/add entries
    response.menu_options = [
        [T("Search for a Person"), False, aURL(r=request, f="index")],
        [T("Missing Persons"), False, aURL(r=request, f="person"), [
            [T("List"), False, aURL(r=request, f="person")],
            [T("Add"), False, aURL(p="create", r=request, f="person", args="create")],
        ]]]
    # Append an "Open recent" entry when a person record id is in the session
    menu_selected = []
    if session.rcvars and "pr_person" in session.rcvars:
        person = db.pr_person
        query = (person.id == session.rcvars["pr_person"])
        record = db(query).select(person.id, limitby=(0, 1)).first()
        if record:
            name = shn_pr_person_represent(record.id)
            menu_selected.append(["%s: %s" % (T("Person"), name), False,
                                  URL(r=request, f="person", args=[record.id])])
    if menu_selected:
        menu_selected = [T("Open recent"), True, None, menu_selected]
        response.menu_options.append(menu_selected)
# Build the menu at import time (web2py runs controller files top to bottom)
shn_menu()
# -----------------------------------------------------------------------------
def index():
    """
    Module's Home Page: search view over missing persons.

    Delegates to the generic REST controller for pr/person, configured so
    that HTML requests without a record id fall through to the search
    method, and adds per-row "Missing"/"Found" action buttons.
    """
    # Module's nice name (fall back if the module is not configured)
    try:
        module_name = deployment_settings.modules[prefix].name_nice
    except:
        module_name = T("Missing Persons")
    # Override prefix and resourcename: this page always works on pr/person
    _prefix = "pr"
    resourcename = "person"
    # Choose table
    tablename = "%s_%s" % (_prefix, resourcename)
    table = db[tablename]
    # Configure redirection and list fields
    report_url = URL(r=request, c="pf", f=resourcename,
                     args=["[id]", "missing_report"])
    s3xrc.model.configure(table,
                          create_next=report_url,
                          list_fields=["id",
                                       "first_name",
                                       "middle_name",
                                       "last_name",
                                       "gender",
                                       "age_group",
                                       "missing"])
    # Pre-process
    def prep(r):
        """ Redirect to search/person view """
        if r.representation == "html":
            if not r.id:
                r.method = "search"
            else:
                redirect(URL(r=request, f=resourcename, args=[r.id]))
        return True
    # Post-process
    def postp(r, output):
        """ Custom action buttons """
        response.s3.actions = []
        # Button labels
        MISSING = str(T("Missing"))
        SEEN = str(T("Seen"))
        FOUND = str(T("Found"))
        DETAILS = str(T("Details"))
        if not r.component:
            open_button_label = DETAILS
            if auth.s3_logged_in():
                # Define URLs
                report_missing = str(URL(r=request, f=resourcename,
                                         args=["[id]", "missing_report"]))
                #report_seen = str(URL(r=request, f=resourcename,
                                      #args=["[id]", "presence"],
                                      #vars=dict(condition=vita.SEEN)))
                report_found = str(URL(r=request, f=resourcename,
                                       args=["[id]", "presence"],
                                       vars=dict(condition=vita.CONFIRMED)))
                # Set action buttons
                response.s3.actions = [
                    dict(label=MISSING, _class="action-btn", url=report_missing),
                    #dict(label=SEEN, _class="action-btn", url=report_seen),
                    dict(label=FOUND, _class="action-btn", url=report_found),
                ]
                # Is the current user reported missing?
                if isinstance(output, dict):
                    # s3_logged_in_person() yields a pr_person record id
                    # (it is used to index db.pr_person below)
                    person = s3_logged_in_person()
                    if person and db.pr_person[person].missing:
                        # FIX: person is a record id, not a Row -- the
                        # original "person.id" would raise AttributeError
                        myself = URL(r=request, f=resourcename,
                                     args=[person, "presence"],
                                     vars=dict(condition=vita.CONFIRMED))
                        output.update(myself=myself)
        else:
            open_button_label = UPDATE
        # Always have an Open-button
        linkto = r.resource.crud._linkto(r, update=True)("[id]")
        response.s3.actions.append(dict(label=open_button_label,
                                        _class="action-btn", url=linkto))
        return output
    # Set hooks
    response.s3.prep = prep
    response.s3.postp = postp
    if auth.s3_logged_in():
        add_btn = A(T("Add Person"),
                    _class="action-btn",
                    _href=URL(r=request, f="person", args="create"))
    else:
        add_btn = None
    # REST controller
    output = s3_rest_controller("pr", "person",
                                module_name=module_name,
                                add_btn=add_btn)
    # Set view, update menu and return output
    response.view = "pf/index.html"
    response.title = module_name
    shn_menu()
    return output
# -----------------------------------------------------------------------------
def person():
    """
    RESTful CRUD controller for missing persons (pr/person).

    Customizes the CRUD strings, limits list views to persons flagged as
    missing, pre-populates observer fields on presence/missing reports and
    redirects newly created persons to their missing-report tab.
    """
    prefix = "pr"
    tablename = "%s_%s" % (prefix, resourcename)
    table = db[tablename]
    s3.crud_strings[tablename].update(
        title_display = T("Missing Person Details"),
        title_list = T("Missing Persons Registry"),
        subtitle_list = T("Missing Persons"),
        label_list_button = T("List Missing Persons"),
        msg_list_empty = T("No Persons currently reported missing"))
    s3xrc.model.configure(db.pr_group_membership,
                          list_fields=["id",
                                       "group_id",
                                       "group_head",
                                       "description"])
    s3xrc.model.configure(table,
                          # Redirect to missing report when a new person has been added
                          create_next = URL(r=request, c="pf", f="person",
                                            args=["[id]", "missing_report"]),
                          list_fields=["id",
                                       "first_name",
                                       "middle_name",
                                       "last_name",
                                       "gender",
                                       "age_group",
                                       "missing"])
    def person_prep(r):
        """ Pre-processor: observer defaults and per-component field setup """
        # Pre-populate observer fields with the logged-in person
        person_id = s3_logged_in_person()
        # FIX: was "if person:", which tested the enclosing controller
        # *function object* (always truthy) instead of the logged-in id
        if person_id:
            db.pr_presence.observer.default = person_id
            db.pr_presence.observer.writable = False
            db.pr_presence.observer.comment = None
            db.pf_missing_report.observer.default = person_id
            db.pf_missing_report.observer.writable = False
            db.pf_missing_report.observer.comment = None
        # Copy the site-wide GIS config (id 1) as defaults for a new config
        if r.component_name == "config":
            _config = db.gis_config
            defaults = db(_config.id == 1).select(limitby=(0, 1)).first()
            for key in defaults.keys():
                if key not in ["id", "uuid", "mci", "update_record", "delete_record"]:
                    _config[key].default = defaults[key]
        # Pre-populate presence condition from URL vars
        elif r.component_name == "presence":
            condition = r.request.vars.get("condition", None)
            if condition:
                try:
                    condition = int(condition)
                except:
                    # Non-numeric condition: ignore it and show the plain form
                    pass
                else:
                    table = db.pr_presence
                    table.presence_condition.default = condition
                    table.presence_condition.readable = False
                    table.presence_condition.writable = False
                    if condition in vita.PERSISTANT_PRESENCE or \
                       condition in vita.ABSENCE:
                        s3xrc.model.configure(table,
                                              mark_required=["location_id", "shelter_id"])
                    table.orig_id.readable = False
                    table.orig_id.writable = False
                    table.dest_id.readable = False
                    table.dest_id.writable = False
                    table.observer.readable = False
                    table.observer.writable = False
        return True
    response.s3.prep = person_prep
    def person_postp(r, output):
        """ Post-processor: per-component action buttons """
        if r.interactive:
            if not r.component:
                label = READ
                linkto = URL(r=request, f="person", args=("[id]", "missing_report"))
            else:
                label = UPDATE
                linkto = s3xrc.crud._linkto(r)("[id]")
            response.s3.actions = [
                dict(label=str(label), _class="action-btn", url=str(linkto))]
            if not r.component:
                # Extra "Found" button to report a confirmed presence
                label = T("Found")
                linkto = URL(r=request, f="person",
                             args=("[id]", "presence"),
                             vars=dict(condition=vita.CONFIRMED))
                response.s3.actions.append(
                    dict(label=str(label), _class="action-btn", url=str(linkto)))
            elif r.component_name == "presence":
                # Presence log entries are added via the action buttons only
                if "showadd_btn" in output:
                    del output["showadd_btn"]
        return output
    response.s3.postp = person_postp
    # Disable missing flag in person (new records default to missing)
    db.pr_person.missing.readable = False
    db.pr_person.missing.writable = False
    db.pr_person.missing.default = True
    # Disable person_id in missing report (implied by the master record)
    db.pf_missing_report.person_id.readable = False
    db.pf_missing_report.person_id.writable = False
    # Show only missing persons in list views
    if len(request.args) == 0:
        response.s3.filter = (db.pr_person.missing == True)
    # Resource header and tab list
    pf_tabs = [(T("Missing Report"), "missing_report"),
               (T("Person Details"), None),
               (T("Physical Description"), "physical_description"),
               (T("Images"), "image"),
               (T("Identity"), "identity"),
               (T("Address"), "address"),
               (T("Contact Data"), "pe_contact"),
               (T("Presence Log"), "presence")]
    rheader = lambda r: shn_pr_rheader(r, tabs=pf_tabs)
    # REST controller
    output = s3_rest_controller("pr", resourcename, rheader=rheader)
    # Update menu and return output
    shn_menu()
    return output
# -----------------------------------------------------------------------------
def download():
    """ Download a file. """
    # Delegate to web2py's built-in response.download handler
    return response.download(request, db)
# -----------------------------------------------------------------------------
def tooltip():
    """ Ajax tooltips: select a field-specific tip view when requested """
    wants_tip = "formfield" in request.vars
    if wants_tip:
        response.view = "pr/ajaxtips/%s.html" % request.vars.formfield
    return dict()
# -----------------------------------------------------------------------------
| """ VITA Person Finder, Controllers
@author: nursix
@see: U{http://eden.sahanafoundation.org/wiki/BluePrintVITA}
"""
prefix = request.controller
resourcename = request.function
if prefix not in deployment_settings.modules:
session.error = t('Module disabled!')
redirect(url(r=request, c='default', f='index'))
def shn_menu():
""" Options menu """
response.menu_options = [[t('Search for a Person'), False, a_url(r=request, f='index')], [t('Missing Persons'), False, a_url(r=request, f='person'), [[t('List'), False, a_url(r=request, f='person')], [t('Add'), False, a_url(p='create', r=request, f='person', args='create')]]]]
menu_selected = []
if session.rcvars and 'pr_person' in session.rcvars:
person = db.pr_person
query = person.id == session.rcvars['pr_person']
record = db(query).select(person.id, limitby=(0, 1)).first()
if record:
name = shn_pr_person_represent(record.id)
menu_selected.append(['%s: %s' % (t('Person'), name), False, url(r=request, f='person', args=[record.id])])
if menu_selected:
menu_selected = [t('Open recent'), True, None, menu_selected]
response.menu_options.append(menu_selected)
shn_menu()
def index():
""" Module's Home Page """
try:
module_name = deployment_settings.modules[prefix].name_nice
except:
module_name = t('Missing Persons')
_prefix = 'pr'
resourcename = 'person'
tablename = '%s_%s' % (_prefix, resourcename)
table = db[tablename]
report_url = url(r=request, c='pf', f=resourcename, args=['[id]', 'missing_report'])
s3xrc.model.configure(table, create_next=report_url, list_fields=['id', 'first_name', 'middle_name', 'last_name', 'gender', 'age_group', 'missing'])
def prep(r):
""" Redirect to search/person view """
if r.representation == 'html':
if not r.id:
r.method = 'search'
else:
redirect(url(r=request, f=resourcename, args=[r.id]))
return True
def postp(r, output):
""" Custom action buttons """
response.s3.actions = []
missing = str(t('Missing'))
seen = str(t('Seen'))
found = str(t('Found'))
details = str(t('Details'))
if not r.component:
open_button_label = DETAILS
if auth.s3_logged_in():
report_missing = str(url(r=request, f=resourcename, args=['[id]', 'missing_report']))
report_found = str(url(r=request, f=resourcename, args=['[id]', 'presence'], vars=dict(condition=vita.CONFIRMED)))
response.s3.actions = [dict(label=MISSING, _class='action-btn', url=report_missing), dict(label=FOUND, _class='action-btn', url=report_found)]
if isinstance(output, dict):
person = s3_logged_in_person()
if person and db.pr_person[person].missing:
myself = url(r=request, f=resourcename, args=[person.id, 'presence'], vars=dict(condition=vita.CONFIRMED))
output.update(myself=myself)
else:
open_button_label = UPDATE
linkto = r.resource.crud._linkto(r, update=True)('[id]')
response.s3.actions.append(dict(label=open_button_label, _class='action-btn', url=linkto))
return output
response.s3.prep = prep
response.s3.postp = postp
if auth.s3_logged_in():
add_btn = a(t('Add Person'), _class='action-btn', _href=url(r=request, f='person', args='create'))
else:
add_btn = None
output = s3_rest_controller('pr', 'person', module_name=module_name, add_btn=add_btn)
response.view = 'pf/index.html'
response.title = module_name
shn_menu()
return output
def person():
""" RESTful CRUD controller """
prefix = 'pr'
tablename = '%s_%s' % (prefix, resourcename)
table = db[tablename]
s3.crud_strings[tablename].update(title_display=t('Missing Person Details'), title_list=t('Missing Persons Registry'), subtitle_list=t('Missing Persons'), label_list_button=t('List Missing Persons'), msg_list_empty=t('No Persons currently reported missing'))
s3xrc.model.configure(db.pr_group_membership, list_fields=['id', 'group_id', 'group_head', 'description'])
s3xrc.model.configure(table, create_next=url(r=request, c='pf', f='person', args=['[id]', 'missing_report']), list_fields=['id', 'first_name', 'middle_name', 'last_name', 'gender', 'age_group', 'missing'])
def person_prep(r):
person_id = s3_logged_in_person()
if person:
db.pr_presence.observer.default = person_id
db.pr_presence.observer.writable = False
db.pr_presence.observer.comment = None
db.pf_missing_report.observer.default = person_id
db.pf_missing_report.observer.writable = False
db.pf_missing_report.observer.comment = None
if r.component_name == 'config':
_config = db.gis_config
defaults = db(_config.id == 1).select(limitby=(0, 1)).first()
for key in defaults.keys():
if key not in ['id', 'uuid', 'mci', 'update_record', 'delete_record']:
_config[key].default = defaults[key]
elif r.component_name == 'presence':
condition = r.request.vars.get('condition', None)
if condition:
try:
condition = int(condition)
except:
pass
else:
table = db.pr_presence
table.presence_condition.default = condition
table.presence_condition.readable = False
table.presence_condition.writable = False
if condition in vita.PERSISTANT_PRESENCE or condition in vita.ABSENCE:
s3xrc.model.configure(table, mark_required=['location_id', 'shelter_id'])
table.orig_id.readable = False
table.orig_id.writable = False
table.dest_id.readable = False
table.dest_id.writable = False
table.observer.readable = False
table.observer.writable = False
return True
response.s3.prep = person_prep
def person_postp(r, output):
if r.interactive:
if not r.component:
label = READ
linkto = url(r=request, f='person', args=('[id]', 'missing_report'))
else:
label = UPDATE
linkto = s3xrc.crud._linkto(r)('[id]')
response.s3.actions = [dict(label=str(label), _class='action-btn', url=str(linkto))]
if not r.component:
label = t('Found')
linkto = url(r=request, f='person', args=('[id]', 'presence'), vars=dict(condition=vita.CONFIRMED))
response.s3.actions.append(dict(label=str(label), _class='action-btn', url=str(linkto)))
elif r.component_name == 'presence':
if 'showadd_btn' in output:
del output['showadd_btn']
return output
response.s3.postp = person_postp
db.pr_person.missing.readable = False
db.pr_person.missing.writable = False
db.pr_person.missing.default = True
db.pf_missing_report.person_id.readable = False
db.pf_missing_report.person_id.writable = False
if len(request.args) == 0:
response.s3.filter = db.pr_person.missing == True
pf_tabs = [(t('Missing Report'), 'missing_report'), (t('Person Details'), None), (t('Physical Description'), 'physical_description'), (t('Images'), 'image'), (t('Identity'), 'identity'), (t('Address'), 'address'), (t('Contact Data'), 'pe_contact'), (t('Presence Log'), 'presence')]
rheader = lambda r: shn_pr_rheader(r, tabs=pf_tabs)
output = s3_rest_controller('pr', resourcename, rheader=rheader)
shn_menu()
return output
def download():
""" Download a file. """
return response.download(request, db)
def tooltip():
""" Ajax tooltips """
if 'formfield' in request.vars:
response.view = 'pr/ajaxtips/%s.html' % request.vars.formfield
return dict() |
class Solution:
def checkIfPangram(self, sentence: str) -> bool:
words = set(sentence)
if (len(words) != 26):
return False
else:
return True | class Solution:
def check_if_pangram(self, sentence: str) -> bool:
words = set(sentence)
if len(words) != 26:
return False
else:
return True |
TEXT = '''
<div class="row">
<div class="col-md-12">
<hr/>
<div class="page-footer">
<p class="page-footer"><a href="/legal/privacypolicy" target="_blank">Privacy Policy <i class='fa fa-pencil' style='font-size:16px;color:red'></i></a></p>
<p class="page-footer">Copyright © theblueplanet.net 2019 all rights reserved, except content provided by third parties.</p>
<p class="page-footer">We use cookies to enhance and improve our services. By using this site you agree to our <a href="/legal/cookies" target="_blank">use of cookies</a>.</p></div>
</div>
</div>''' | text = '\n<div class="row">\n <div class="col-md-12">\n <hr/>\n <div class="page-footer">\n <p class="page-footer"><a href="/legal/privacypolicy" target="_blank">Privacy Policy <i class=\'fa fa-pencil\' style=\'font-size:16px;color:red\'></i></a></p>\n <p class="page-footer">Copyright © theblueplanet.net 2019 all rights reserved, except content provided by third parties.</p>\n <p class="page-footer">We use cookies to enhance and improve our services. By using this site you agree to our <a href="/legal/cookies" target="_blank">use of cookies</a>.</p></div>\n </div>\n</div>' |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `pyxq` package."""
def test_000_something():
    """Test something."""
    # Placeholder so the suite has at least one collectable test.
| """Tests for `pyxq` package."""
def test_000_something():
    """Test something."""
    # Placeholder so the suite has at least one collectable test.
class Dataset:
    """Plain record holding the pieces of one dataset configuration.

    All fields start as None; callers populate them after construction.
    """
    def __init__(self):
        # Initialize every public field to None in one pass.
        for attr in ("dataset", "groundtruth", "camera_file", "camera",
                     "quat", "init_pose", "rgb_image",
                     "pre_assoc_file_path", "descr"):
            setattr(self, attr, None)
| class Dataset:
def __init__(self):
self.dataset = None
self.groundtruth = None
self.camera_file = None
self.camera = None
self.quat = None
self.init_pose = None
self.rgb_image = None
self.pre_assoc_file_path = None
self.descr = None |
"""
Module: 'uhashlib' on LEGO EV3 v1.0.0
"""
# MCU: sysname=ev3, nodename=ev3, release=('v1.0.0',), version=('0.0.0',), machine=ev3
# Stubber: 1.3.2
class sha1:
''
def digest():
pass
def update():
pass
class sha256:
''
def digest():
pass
def update():
pass
| """
Module: 'uhashlib' on LEGO EV3 v1.0.0
"""
class Sha1:
    """Auto-generated stub for the SHA-1 hash class (no implementation)."""
    # NOTE: generated stub methods deliberately omit `self` and return None.
    def digest():
        pass
    def update():
        pass
class Sha256:
""""""
def digest():
pass
def update():
pass |
def policy(resource):
    """Return True when a MaxPasswordAge is configured and is 90 days or less."""
    max_age = resource['MaxPasswordAge']
    return max_age is not None and max_age <= 90
| def policy(resource):
if resource['MaxPasswordAge'] is None:
return False
return resource['MaxPasswordAge'] <= 90 |
class Solution(object):
def multiply(self, A, B):
"""
:type A: List[List[int]]
:type B: List[List[int]]
:rtype: List[List[int]]
"""
if A is None or B is None or len(A) == 0 or len(B) == 0:
return [[]]
m = len(A)
n = len(B[0])
res = [[0 for _ in range(n)] for _ in range(m)]
sparseB = [[] for _ in range(len(B))]
for i in range(len(B)):
for j in range(len(B[i])):
sparseB[i].append((j, B[i][j]))
for i in range(m):
for k in range(len(A[i])):
if A[i][k] != 0:
for p in range(len(sparseB[k])):
j, val = sparseB[k][p]
res[i][j] += A[i][k] * val
return res | class Solution(object):
def multiply(self, A, B):
"""
:type A: List[List[int]]
:type B: List[List[int]]
:rtype: List[List[int]]
"""
if A is None or B is None or len(A) == 0 or (len(B) == 0):
return [[]]
m = len(A)
n = len(B[0])
res = [[0 for _ in range(n)] for _ in range(m)]
sparse_b = [[] for _ in range(len(B))]
for i in range(len(B)):
for j in range(len(B[i])):
sparseB[i].append((j, B[i][j]))
for i in range(m):
for k in range(len(A[i])):
if A[i][k] != 0:
for p in range(len(sparseB[k])):
(j, val) = sparseB[k][p]
res[i][j] += A[i][k] * val
return res |
"""
Define the exception classes for aioharmony module.
"""
class HarmonyException(Exception):
    """Base class for all aioharmony errors."""


class HarmonyClient(HarmonyException):
    """Base class for errors raised by HarmonyClient."""


class TimeOut(HarmonyClient, TimeoutError):
    """Raised on timeouts; also catchable as the builtin TimeoutError."""
| """
Define the exception classes for aioharmony module.
"""
class Harmonyexception(Exception):
"""Top level Harmony Exception"""
class Harmonyclient(HarmonyException):
"""
Top level exception for HarmonyClient
"""
class Timeout(HarmonyClient, TimeoutError):
"""
Raised on timeouts
""" |
def cubicip(ipparams, position, etc=None):
    """
    This function fits the intra-pixel sensitivity effect using a 2D cubic.

    Parameters
    ----------
    ipparams: sequence of (at least) 10 coefficients, in order:
        a: cubic coefficient in y
        b: cubic coefficient in x
        c: coefficient of cross-term xy^2
        d: coefficient of cross-term yx^2
        e: quadratic coefficient in y
        f: quadratic coefficient in x
        g: coefficient of cross-term xy
        h: linear coefficient in y
        i: linear coefficient in x
        j: constant
    position: (y, x, q) pixel position; q (quadrant) is unused here
    etc: unused; kept for interface compatibility.
        FIX: default changed from the mutable [] to None -- a shared list
        default is the classic mutable-default-argument pitfall.

    Returns
    -------
    returns the flux values for the intra-pixel model

    Revisions
    ---------
    2008-07-08  Kevin Stevenson, UCF
                kevin218@knights.ucf.edu
                Original version
    """
    a = ipparams[0]
    b = ipparams[1]
    c = ipparams[2]
    d = ipparams[3]
    e = ipparams[4]
    f = ipparams[5]
    g = ipparams[6]
    h = ipparams[7]
    i = ipparams[8]
    j = ipparams[9]
    y, x, q = position
    return (a*y**3 + b*x**3 + c*y**2*x + d*y*x**2
            + e*y**2 + f*x**2 + g*y*x + h*y + i*x + j)
| def cubicip(ipparams, position, etc=[]):
"""
This function fits the intra-pixel sensitivity effect using a 2D cubic.
Parameters
----------
a: cubic coefficient in y
b: cubic coefficient in x
c: coefficient of cross-term xy^2
d: coefficient of cross-term yx^2
e: quadratic coefficient in y
f: quadratic coefficient in x
g: coefficient of cross-term xy
h: linear coefficient in y
i: linear coefficient in x
j: constant
Returns
-------
returns the flux values for the intra-pixel model
Revisions
---------
2008-07-08 Kevin Stevenson, UCF
kevin218@knights.ucf.edu
Original version
"""
a = ipparams[0]
b = ipparams[1]
c = ipparams[2]
d = ipparams[3]
e = ipparams[4]
f = ipparams[5]
g = ipparams[6]
h = ipparams[7]
i = ipparams[8]
j = ipparams[9]
(y, x, q) = position
return a * y ** 3 + b * x ** 3 + c * y ** 2 * x + d * y * x ** 2 + e * y ** 2 + f * x ** 2 + g * y * x + h * y + i * x + j |
"""
Initial jokes from stackoverflow - provided under CC BY-SA 3.0
http://stackoverflow.com/questions/234075/what-is-your-best-programmer
-joke?page=4&tab=votes#tab-top
Copied from pyjokes.
The purpose of this is so users without pyjokes pip installed can run
select_a_scrum_master.py
"""
scrum_chooser_jokes = [
"Complaining about the lack of smoking shelters, the nicotine addicted "
"Python programmers said there ought to be 'spaces for tabs'.",
"Ubuntu users are apt to get this joke.",
"Where should one store all the nerdy dad jokes? In a dada-base.",
"An SQL query goes into a bar, walks up to two tables and asks, 'Can I "
"join you?'",
"If you put a million monkeys at a million keyboards, one of them will "
"eventually write a Java program. The rest of them will write Perl.",
"I suggested holding a 'Python Object Oriented Programming Seminar', but "
"the acronym was unpopular.",
"How many programmers does it take to change a lightbulb? None, that's a "
"hardware problem.",
"What's the object-oriented way to become wealthy? Inheritance.",
"How many programmers does it take to change a lightbulb? None, they just "
"make darkness a standard.",
"Two bytes meet. The first byte asks, 'Are you ill?' The second byte "
"replies, 'No, just feeling a bit off.'",
"Two threads walk into a bar. The barkeeper looks up and yells, 'Hey, I "
"want don't any conditions race like time last!'",
"Old C programmers don't die, they're just cast into void.",
"Eight bytes walk into a bar. The bartender asks, 'Can I get you "
"anything?' 'Yeah,' replies the bytes. 'Make us a double.'",
"Why did the programmer quit his job? Because he didn't get arrays.",
"Why do Java programmers have to wear glasses? Because they don't see "
"sharp.",
"Software developers like to solve problems. If there are no problems "
"handily available, they will create their own.",
".NET was named .NET so that it wouldn't show up in a Unix directory "
"listing.",
"Hardware: The part of a computer that you can kick.",
"A programmer was found dead in the shower. Next to their body was a "
"bottle of shampoo with the instructions 'Lather, Rinse and Repeat'.",
"Optimist: The glass is half full. Pessimist: The glass is half empty. "
"Programmer: The glass is twice as large as necessary.",
"In C we had to code our own bugs. In C++ we can inherit them.",
"How come there is no obfuscated Perl contest? Because everyone would "
"win.",
"If you play a Windows CD backwards, you'll hear satanic chanting ... "
"worse still, if you play it forwards, it installs Windows.",
"How many programmers does it take to kill a cockroach? Two: one holds, "
"the other installs Windows on it.",
"What do you call a programmer from Finland? Nerdic.",
"What did the Java code say to the C code? A: You've got no class.",
"Why did Microsoft name their search engine BING? Because It's Not "
"Google.",
"Pirates go 'arg!', computer pirates go 'argv!'",
"Software salesmen and used-car salesmen differ in that the latter know "
"when they are lying.",
"Why do programmers confuse Halloween with Christmas? Because OCT 31 == "
"DEC 25.",
"How many Prolog programmers does it take to change a lightbulb? false.",
"Real programmers can write assembly code in any language.",
"Waiter: Would you like coffee or tea? Programmer: Yes.",
"If loving you is ROM I don't wanna read write.",
"A programmer walks into a foo...",
"A programmer walks into a bar and orders 1.38 root beers. The bartender "
"informs her it's a root beer float. She says 'Make it a double!'",
"Why are you always smiling? That's just my... regular expression.",
"ASCII stupid question, get a stupid ANSI.",
"A programmer had a problem. He thought to himself, 'I know, I'll solve it"
" with threads!'. has Now problems. two he",
"Why do sin and tan work? Just cos.",
"Java: Write once, run away.",
"I would tell you a joke about UDP, but you would never get it.",
"A QA engineer walks into a bar. Runs into a bar. Crawls into a bar. "
"Dances into a bar. Tiptoes into a bar. Rams a bar. Jumps into a bar.",
"My friend's in a band called '1023 Megabytes'... They haven't got a gig "
"yet!",
"I had a problem so I thought I'd use Java. Now I have a ProblemFactory.",
"QA Engineer walks into a bar. Orders a beer. Orders 0 beers. Orders "
"999999999 beers. Orders a lizard. Orders -1 beers. Orders a sfdeljknesv.",
"A product manager walks into a bar, asks for drink. Bartender says no, "
"but will consider adding later.",
"How do you generate a random string? Put a first year Computer Science "
"student in Vim and ask them to save and exit.",
"I've been using Vim for a long time now, mainly because I can't figure "
"out how to exit.",
"How do you know whether a person is a Vim user? Don't worry, they'll tell"
" you.",
"Waiter: He's choking! Is anyone a doctor? Programmer: I'm a Vim user.",
"3 Database Admins walked into a NoSQL bar. A little while later they "
"walked out because they couldn't find a table.",
"How to explain the movie Inception to a programmer? When you run a VM "
"inside another VM, inside another VM ... everything runs real slow!",
"There are only two hard problems in Computer Science: cache invalidation,"
" naming things and off-by-one-errors.",
"There are 10 types of people: those who understand binary and those who "
"don't.",
"There are 2 types of people: those who can extrapolate from incomplete "
"data sets...",
"There are II types of people: Those who understand Roman Numerals and "
"those who don't.",
"There are 10 types of people: those who understand hexadecimal and 15 "
"others.",
"There are 10 types of people: those who understand binary, those who "
"don't, and those who were expecting this joke to be in trinary.",
"There are 10 types of people: those who understand trinary, those who "
"don't, and those who have never heard of it.",
"What do you call eight hobbits? A hobbyte.",
"The best thing about a Boolean is even if you are wrong, you are only "
"off by a bit.",
"A good programmer is someone who always looks both ways before crossing "
"a one-way street.",
"There are two ways to write error-free programs; only the third one "
"works.",
"QAs consist of 55% water, 30% blood and 15% Jira tickets.",
"How many QAs does it take to change a lightbulb? They noticed that the "
"room was dark. They don't fix problems, they find them.",
"A programmer crashes a car at the bottom of a hill, a bystander asks what"
" happened, he says \"No idea. Let's push it back up and try again\".",
"What do you mean 911 is only for emergencies? I've got a merge conflict.",
"Writing PHP is like peeing in the swimming pool, everyone did it, but we"
" don't need to bring it up in public.",
"Why did the QA cross the road? To ruin everyone's day.",
"Number of days since I have encountered an array index error: -1.",
"Number of days since I have encountered an off-by-one error: 0.",
"Speed dating is useless. 5 minutes is not enough to properly explain the"
" benefits of the Unix philosophy.",
"Microsoft hold a bi-monthly internal \"productive week\" where they use "
"Google instead of Bing.",
"Schrodinger's attitude to web development: If I don't look at it in "
"Internet Explorer then there's a chance it looks fine.",
"Finding a good PHP developer is like looking for a needle in a haystack. "
"Or is it a hackstack in a needle?",
"Unix is user friendly. It's just very particular about who its friends "
"are.",
"The C language combines all the power of assembly language with all the "
"ease-of-use of assembly language.",
"What does 'Emacs' stand for? 'Exclusively used by middle aged computer "
"scientists.'",
"What does pyjokes have in common with Adobe Flash? It gets updated all "
"the time, but never gets any better.",
"Why does Waldo only wear stripes? Because he doesn't want to be spotted.",
"I went to a street where the houses were numbered 8k, 16k, 32k, 64k, "
"128k, 256k and 512k. It was a trip down Memory Lane.",
"!false, (It's funny because it's true)",
"['hip', 'hip'] (hip hip array!)",
]
# Program is only meant to be imported, not run directly.
# FIX: the guard compared against "main"; the run-as-script value of
# __name__ is "__main__", so the original condition could never be True.
if __name__ == "__main__":
    raise SystemExit
| """
Initial jokes from stackoverflow - provided under CC BY-SA 3.0
http://stackoverflow.com/questions/234075/what-is-your-best-programmer
-joke?page=4&tab=votes#tab-top
Copied from pyjokes.
The purpose of this is so users without pyjokes pip installed can run
select_a_scrum_master.py
"""
scrum_chooser_jokes = ["Complaining about the lack of smoking shelters, the nicotine addicted Python programmers said there ought to be 'spaces for tabs'.", 'Ubuntu users are apt to get this joke.', 'Where should one store all the nerdy dad jokes? In a dada-base.', "An SQL query goes into a bar, walks up to two tables and asks, 'Can I join you?'", 'If you put a million monkeys at a million keyboards, one of them will eventually write a Java program. The rest of them will write Perl.', "I suggested holding a 'Python Object Oriented Programming Seminar', but the acronym was unpopular.", "How many programmers does it take to change a lightbulb? None, that's a hardware problem.", "What's the object-oriented way to become wealthy? Inheritance.", 'How many programmers does it take to change a lightbulb? None, they just make darkness a standard.', "Two bytes meet. The first byte asks, 'Are you ill?' The second byte replies, 'No, just feeling a bit off.'", "Two threads walk into a bar. The barkeeper looks up and yells, 'Hey, I want don't any conditions race like time last!'", "Old C programmers don't die, they're just cast into void.", "Eight bytes walk into a bar. The bartender asks, 'Can I get you anything?' 'Yeah,' replies the bytes. 'Make us a double.'", "Why did the programmer quit his job? Because he didn't get arrays.", "Why do Java programmers have to wear glasses? Because they don't see sharp.", 'Software developers like to solve problems. If there are no problems handily available, they will create their own.', ".NET was named .NET so that it wouldn't show up in a Unix directory listing.", 'Hardware: The part of a computer that you can kick.', "A programmer was found dead in the shower. Next to their body was a bottle of shampoo with the instructions 'Lather, Rinse and Repeat'.", 'Optimist: The glass is half full. Pessimist: The glass is half empty. Programmer: The glass is twice as large as necessary.', 'In C we had to code our own bugs. 
In C++ we can inherit them.', 'How come there is no obfuscated Perl contest? Because everyone would win.', "If you play a Windows CD backwards, you'll hear satanic chanting ... worse still, if you play it forwards, it installs Windows.", 'How many programmers does it take to kill a cockroach? Two: one holds, the other installs Windows on it.', 'What do you call a programmer from Finland? Nerdic.', "What did the Java code say to the C code? A: You've got no class.", "Why did Microsoft name their search engine BING? Because It's Not Google.", "Pirates go 'arg!', computer pirates go 'argv!'", 'Software salesmen and used-car salesmen differ in that the latter know when they are lying.', 'Why do programmers confuse Halloween with Christmas? Because OCT 31 == DEC 25.', 'How many Prolog programmers does it take to change a lightbulb? false.', 'Real programmers can write assembly code in any language.', 'Waiter: Would you like coffee or tea? Programmer: Yes.', "If loving you is ROM I don't wanna read write.", 'A programmer walks into a foo...', "A programmer walks into a bar and orders 1.38 root beers. The bartender informs her it's a root beer float. She says 'Make it a double!'", "Why are you always smiling? That's just my... regular expression.", 'ASCII stupid question, get a stupid ANSI.', "A programmer had a problem. He thought to himself, 'I know, I'll solve it with threads!'. has Now problems. two he", 'Why do sin and tan work? Just cos.', 'Java: Write once, run away.', 'I would tell you a joke about UDP, but you would never get it.', 'A QA engineer walks into a bar. Runs into a bar. Crawls into a bar. Dances into a bar. Tiptoes into a bar. Rams a bar. Jumps into a bar.', "My friend's in a band called '1023 Megabytes'... They haven't got a gig yet!", "I had a problem so I thought I'd use Java. Now I have a ProblemFactory.", 'QA Engineer walks into a bar. Orders a beer. Orders 0 beers. Orders 999999999 beers. Orders a lizard. Orders -1 beers. 
Orders a sfdeljknesv.', 'A product manager walks into a bar, asks for drink. Bartender says no, but will consider adding later.', 'How do you generate a random string? Put a first year Computer Science student in Vim and ask them to save and exit.', "I've been using Vim for a long time now, mainly because I can't figure out how to exit.", "How do you know whether a person is a Vim user? Don't worry, they'll tell you.", "Waiter: He's choking! Is anyone a doctor? Programmer: I'm a Vim user.", "3 Database Admins walked into a NoSQL bar. A little while later they walked out because they couldn't find a table.", 'How to explain the movie Inception to a programmer? When you run a VM inside another VM, inside another VM ... everything runs real slow!', 'There are only two hard problems in Computer Science: cache invalidation, naming things and off-by-one-errors.', "There are 10 types of people: those who understand binary and those who don't.", 'There are 2 types of people: those who can extrapolate from incomplete data sets...', "There are II types of people: Those who understand Roman Numerals and those who don't.", 'There are 10 types of people: those who understand hexadecimal and 15 others.', "There are 10 types of people: those who understand binary, those who don't, and those who were expecting this joke to be in trinary.", "There are 10 types of people: those who understand trinary, those who don't, and those who have never heard of it.", 'What do you call eight hobbits? A hobbyte.', 'The best thing about a Boolean is even if you are wrong, you are only off by a bit.', 'A good programmer is someone who always looks both ways before crossing a one-way street.', 'There are two ways to write error-free programs; only the third one works.', 'QAs consist of 55% water, 30% blood and 15% Jira tickets.', "How many QAs does it take to change a lightbulb? They noticed that the room was dark. 
They don't fix problems, they find them.", 'A programmer crashes a car at the bottom of a hill, a bystander asks what happened, he says "No idea. Let\'s push it back up and try again".', "What do you mean 911 is only for emergencies? I've got a merge conflict.", "Writing PHP is like peeing in the swimming pool, everyone did it, but we don't need to bring it up in public.", "Why did the QA cross the road? To ruin everyone's day.", 'Number of days since I have encountered an array index error: -1.', 'Number of days since I have encountered an off-by-one error: 0.', 'Speed dating is useless. 5 minutes is not enough to properly explain the benefits of the Unix philosophy.', 'Microsoft hold a bi-monthly internal "productive week" where they use Google instead of Bing.', "Schrodinger's attitude to web development: If I don't look at it in Internet Explorer then there's a chance it looks fine.", 'Finding a good PHP developer is like looking for a needle in a haystack. Or is it a hackstack in a needle?', "Unix is user friendly. It's just very particular about who its friends are.", 'The C language combines all the power of assembly language with all the ease-of-use of assembly language.', "What does 'Emacs' stand for? 'Exclusively used by middle aged computer scientists.'", 'What does pyjokes have in common with Adobe Flash? It gets updated all the time, but never gets any better.', "Why does Waldo only wear stripes? Because he doesn't want to be spotted.", 'I went to a street where the houses were numbered 8k, 16k, 32k, 64k, 128k, 256k and 512k. It was a trip down Memory Lane.', "!false, (It's funny because it's true)", "['hip', 'hip'] (hip hip array!)"]
if __name__ == 'main':
raise SystemExit |
# =============================================================================
# Fog Metrics Utilities
# =============================================================================
#
# Miscellaneous utility functions used to compute metrics
#
# Container types whose `in` operator is already O(1); anything else is
# converted to a set before counting.
ACCEPTABLE_TYPES = (set, frozenset, dict)

def intersection_size(A, B):
    """Return the number of elements common to *A* and *B*.

    Inputs that are not sets/frozensets/dicts are first converted to sets,
    so duplicates in sequence inputs are counted once.
    """
    # Identical object: every element is shared.
    if A is B:
        return len(A)
    if not isinstance(A, ACCEPTABLE_TYPES):
        A = set(A)
    if not isinstance(B, ACCEPTABLE_TYPES):
        B = set(B)
    # Iterate over the smaller collection, probing the larger one.
    if len(A) > len(B):
        A, B = B, A
    return sum(1 for x in A if x in B)
| acceptable_types = (set, frozenset, dict)
def intersection_size(A, B):
if A is B:
return len(A)
if not isinstance(A, ACCEPTABLE_TYPES):
a = set(A)
if not isinstance(B, ACCEPTABLE_TYPES):
b = set(B)
if len(A) > len(B):
(a, b) = (B, A)
if len(A) == 0:
return 0
i = 0
for x in A:
if x in B:
i += 1
return i |
def test(r):
avg = round(sum(r) / len(r), 3)
hal = { "h": 0, "a": 0, "l": 0 }
for mark in r:
if mark > 8:
hal["h"] += 1
elif mark > 6:
hal["a"] += 1
else:
hal["l"] += 1
return [avg, hal, "They did well"] if hal["h"] == len(r) else [avg, hal]
| def test(r):
avg = round(sum(r) / len(r), 3)
hal = {'h': 0, 'a': 0, 'l': 0}
for mark in r:
if mark > 8:
hal['h'] += 1
elif mark > 6:
hal['a'] += 1
else:
hal['l'] += 1
return [avg, hal, 'They did well'] if hal['h'] == len(r) else [avg, hal] |
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes to printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
# ANSI escape-sequence building blocks.
CSI = '\033['   # Control Sequence Introducer
OSC = '\033]'   # Operating System Command
BEL = '\007'    # terminal bell, terminates the OSC title sequence

def code_to_chars(code):
    """Wrap a numeric SGR code into a complete ANSI escape sequence."""
    return CSI + str(code) + 'm'

def set_title(title):
    """Return the OSC sequence that sets the terminal window title."""
    return OSC + '2;' + title + BEL

def clear_screen(mode=2):
    """Return the CSI 'erase display' sequence (mode 2 = whole screen)."""
    return CSI + str(mode) + 'J'

def clear_line(mode=2):
    """Return the CSI 'erase line' sequence (mode 2 = whole line)."""
    return CSI + str(mode) + 'K'

# This function needs to be able to take ANY given ANSI Color String and return
# its value as the next incremented color code.
def increment_color(code):
    """ Takes a Given Color Code Defined in a Subclass of AnsiCodes,
    and returns the next modulating non-reseting color.
    :param code: ANSI Color Constant defined in either Fore, Back, or Style.
        These Color Constants come in the form '\033['+ X + 'm', where X is the actual integer that we want.
    :type code: str
    :return: Another ANSI Color Constant defined in the corresponding subclasses
    :rtype: str
    """
    # Strip the CSI prefix characters and the trailing 'm' to recover the
    # integer (lstrip removes the character set {'\033', '['}, not a prefix).
    color_num = int(code.lstrip(CSI).rstrip('m'))
    # Fore integers are 30-37 and 90-97 (39 = reset); Back integers are
    # 40-47 and 100-107 (49 = reset); Style integers are 1, 2 and 22
    # (0 = reset). Each group must keep cycling inside its own class, so the
    # range edges and reset values are handled explicitly.
    if color_num in (37, 47):
        # End of the standard Fore/Back range: jump to the light range (90/100).
        color_num += 53
    elif color_num in (97, 107):
        # End of the light range: wrap back to the start of the standard range.
        color_num -= 67
    elif color_num in (39, 49):
        # Reset values jump ahead to the start of the light range (90/100).
        # BUG FIX: the original test was `color_num != (39 and 49)`, where
        # `(39 and 49)` evaluates to 49, so only 49 was excluded and
        # Fore.RESET (39) was incremented into the Back range (40) instead.
        color_num += 51
    elif color_num >= 30:
        # Interior of a color range: simply step to the next color.
        color_num += 1
    elif color_num == 22:
        color_num = 1
    elif color_num == 1:
        color_num = 2
    elif color_num == 2:
        color_num = 22
    elif color_num == 0:
        color_num = 1
    else:
        # Preserved fall-through from the original chain for any other value.
        color_num += 51
    # Re-wrap the integer as a full escape sequence.
    return CSI + str(color_num) + 'm'
class AnsiCodes(object):
    """Base class for ANSI code holders.

    Subclasses declare numeric codes as class attributes; instantiating the
    subclass shadows each public attribute with the ready-to-print escape
    sequence produced by code_to_chars().
    """

    def __init__(self):
        # The subclasses declare class attributes which are plain numbers;
        # wrap each public one with the full ANSI escape sequence.
        public_names = (n for n in dir(self) if not n.startswith('_'))
        for attr_name in public_names:
            setattr(self, attr_name, code_to_chars(getattr(self, attr_name)))
class AnsiCursor(object):
    """Builders for CSI cursor-movement escape sequences."""

    def UP(self, n=1):
        # Move the cursor up n rows.
        return '%s%sA' % (CSI, n)

    def DOWN(self, n=1):
        # Move the cursor down n rows.
        return '%s%sB' % (CSI, n)

    def FORWARD(self, n=1):
        # Move the cursor right n columns.
        return '%s%sC' % (CSI, n)

    def BACK(self, n=1):
        # Move the cursor left n columns.
        return '%s%sD' % (CSI, n)

    def POS(self, x=1, y=1):
        # Move the cursor to column x, row y (1-based, row before column).
        return '%s%s;%sH' % (CSI, y, x)
class AnsiFore(AnsiCodes):
    # SGR foreground color codes (30-37); instantiation via AnsiCodes
    # replaces each number with its full escape sequence.
    BLACK = 30
    RED = 31
    GREEN = 32
    YELLOW = 33
    BLUE = 34
    MAGENTA = 35
    CYAN = 36
    WHITE = 37
    RESET = 39  # reset foreground to the terminal default
    # These are fairly well supported, but not part of the standard.
    LIGHTBLACK_EX = 90
    LIGHTRED_EX = 91
    LIGHTGREEN_EX = 92
    LIGHTYELLOW_EX = 93
    LIGHTBLUE_EX = 94
    LIGHTMAGENTA_EX = 95
    LIGHTCYAN_EX = 96
    LIGHTWHITE_EX = 97
class AnsiBack(AnsiCodes):
    # SGR background color codes (40-47); instantiation via AnsiCodes
    # replaces each number with its full escape sequence.
    BLACK = 40
    RED = 41
    GREEN = 42
    YELLOW = 43
    BLUE = 44
    MAGENTA = 45
    CYAN = 46
    WHITE = 47
    RESET = 49  # reset background to the terminal default
    # These are fairly well supported, but not part of the standard.
    LIGHTBLACK_EX = 100
    LIGHTRED_EX = 101
    LIGHTGREEN_EX = 102
    LIGHTYELLOW_EX = 103
    LIGHTBLUE_EX = 104
    LIGHTMAGENTA_EX = 105
    LIGHTCYAN_EX = 106
    LIGHTWHITE_EX = 107
class AnsiStyle(AnsiCodes):
    # SGR style codes (1, 2, 22); 0 resets all attributes.
    BRIGHT = 1
    DIM = 2
    NORMAL = 22
    RESET_ALL = 0
# Module-level singletons; instantiation (see AnsiCodes.__init__) turns each
# numeric constant into its full escape sequence, ready to print.
Fore = AnsiFore()
Back = AnsiBack()
Style = AnsiStyle()
Cursor = AnsiCursor()
| """
This module generates ANSI character codes to printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
"""
csi = '\x1b['
osc = '\x1b]'
bel = '\x07'
def code_to_chars(code):
return CSI + str(code) + 'm'
def set_title(title):
return OSC + '2;' + title + BEL
def clear_screen(mode=2):
return CSI + str(mode) + 'J'
def clear_line(mode=2):
return CSI + str(mode) + 'K'
def increment_color(code):
""" Takes a Given Color Code Defined in a Subclass of AnsiCodes,
and returns the next modulating non-reseting color.
:param code: ANSI Color Constant defined in either Fore, Back, or Style.
These Color Constants come in the form '\x1b['+ X + 'm', where X is the actual integer that we want.
:type code: str
:return: Another ANSI Color Constant defined in the corresponding subclasses
:rtype: str
"""
color_num = int(code.lstrip(CSI).rstrip('m'))
color_num = color_num + 53 if color_num == 47 or color_num == 37 else color_num - 67 if color_num == 107 or color_num == 97 else color_num + 1 if color_num >= 30 and color_num != (39 and 49) else 1 if color_num == 22 else 2 if color_num == 1 else 22 if color_num == 2 else 1 if color_num == 0 else color_num + 51
return CSI + str(color_num) + 'm'
class Ansicodes(object):
def __init__(self):
for name in dir(self):
if not name.startswith('_'):
value = getattr(self, name)
setattr(self, name, code_to_chars(value))
class Ansicursor(object):
def up(self, n=1):
return CSI + str(n) + 'A'
def down(self, n=1):
return CSI + str(n) + 'B'
def forward(self, n=1):
return CSI + str(n) + 'C'
def back(self, n=1):
return CSI + str(n) + 'D'
def pos(self, x=1, y=1):
return CSI + str(y) + ';' + str(x) + 'H'
class Ansifore(AnsiCodes):
black = 30
red = 31
green = 32
yellow = 33
blue = 34
magenta = 35
cyan = 36
white = 37
reset = 39
lightblack_ex = 90
lightred_ex = 91
lightgreen_ex = 92
lightyellow_ex = 93
lightblue_ex = 94
lightmagenta_ex = 95
lightcyan_ex = 96
lightwhite_ex = 97
class Ansiback(AnsiCodes):
black = 40
red = 41
green = 42
yellow = 43
blue = 44
magenta = 45
cyan = 46
white = 47
reset = 49
lightblack_ex = 100
lightred_ex = 101
lightgreen_ex = 102
lightyellow_ex = 103
lightblue_ex = 104
lightmagenta_ex = 105
lightcyan_ex = 106
lightwhite_ex = 107
class Ansistyle(AnsiCodes):
bright = 1
dim = 2
normal = 22
reset_all = 0
fore = ansi_fore()
back = ansi_back()
style = ansi_style()
cursor = ansi_cursor() |
print("import getmac")
def get_mac_address(interface=None, ip=None, ip6=None, hostname=None, network_request=True):
get_mac_address_content = f"{f'interface={interface}' if interface else ''}{f', ip={ip}' if ip else ''}{f', ip6={ip6}' if ip6 else ''}{f', hostname={hostname}' if hostname else ''}{f', network_request={network_request}' if network_request and not network_request == True else ''}"
print(f'getmac.get_mac_address({get_mac_address_content})')
return "00:00:00:00:00:00" | print('import getmac')
def get_mac_address(interface=None, ip=None, ip6=None, hostname=None, network_request=True):
get_mac_address_content = f"{(f'interface={interface}' if interface else '')}{(f', ip={ip}' if ip else '')}{(f', ip6={ip6}' if ip6 else '')}{(f', hostname={hostname}' if hostname else '')}{(f', network_request={network_request}' if network_request and (not network_request == True) else '')}"
print(f'getmac.get_mac_address({get_mac_address_content})')
return '00:00:00:00:00:00' |
# Inherit shared runtime/training settings from the repository-level base config.
_base_ = '../../../base.py'
# model settings
model = dict(
    type='Classification',
    pretrained=None,
    backbone=dict(
        type='ResNet',
        depth=50,
        in_channels=3,
        out_indices=[4],  # 0: conv-1, x: stage-x
        # TODO(cjrd) should we be using BN here???
        norm_cfg=dict(type='BN')),
    head=dict(
        type='ClsHead', with_avg_pool=True, in_channels=2048,
        num_classes=10))
# dataset settings
data_source_cfg = dict(
    type='ImageNet',
    memcached=False,
    mclient_path='/no/matter')  # memcached is disabled, so this path is unused
data_train_list = "data/imagenet/meta/train_1000.txt"
data_train_root = 'data/imagenet'
data_val_list = "data/imagenet/meta/val_1000.txt"
data_val_root = 'data/imagenet'
data_test_list = "data/imagenet/meta/test_1000.txt"
data_test_root = 'data/imagenet'
dataset_type = 'ClassificationDataset'
# Per-channel mean/std consumed by the Normalize transforms below.
img_norm_cfg = dict(mean=[0.5,0.6,0.7], std=[0.1,0.2,0.3])
train_pipeline = [
    dict(type='RandomResizedCrop', size=224),
    dict(type='RandomHorizontalFlip'),
    dict(type='ToTensor'),
    dict(type='Normalize', **img_norm_cfg),
]
# Evaluation uses deterministic resize + center crop instead of random crops.
test_pipeline = [
    dict(type='Resize', size=256),
    dict(type='CenterCrop', size=224),
    dict(type='ToTensor'),
    dict(type='Normalize', **img_norm_cfg),
]
data = dict(
    imgs_per_gpu=128,  # total 512
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_source=dict(
            list_file=data_train_list, root=data_train_root,
            **data_source_cfg),
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_source=dict(
            list_file=data_val_list, root=data_val_root, **data_source_cfg),
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_source=dict(
            list_file=data_test_list, root=data_test_root, **data_source_cfg),
        pipeline=test_pipeline))
# No data prefetching.
prefetch=False
| _base_ = '../../../base.py'
model = dict(type='Classification', pretrained=None, backbone=dict(type='ResNet', depth=50, in_channels=3, out_indices=[4], norm_cfg=dict(type='BN')), head=dict(type='ClsHead', with_avg_pool=True, in_channels=2048, num_classes=10))
data_source_cfg = dict(type='ImageNet', memcached=False, mclient_path='/no/matter')
data_train_list = 'data/imagenet/meta/train_1000.txt'
data_train_root = 'data/imagenet'
data_val_list = 'data/imagenet/meta/val_1000.txt'
data_val_root = 'data/imagenet'
data_test_list = 'data/imagenet/meta/test_1000.txt'
data_test_root = 'data/imagenet'
dataset_type = 'ClassificationDataset'
img_norm_cfg = dict(mean=[0.5, 0.6, 0.7], std=[0.1, 0.2, 0.3])
train_pipeline = [dict(type='RandomResizedCrop', size=224), dict(type='RandomHorizontalFlip'), dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)]
test_pipeline = [dict(type='Resize', size=256), dict(type='CenterCrop', size=224), dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)]
data = dict(imgs_per_gpu=128, workers_per_gpu=4, train=dict(type=dataset_type, data_source=dict(list_file=data_train_list, root=data_train_root, **data_source_cfg), pipeline=train_pipeline), val=dict(type=dataset_type, data_source=dict(list_file=data_val_list, root=data_val_root, **data_source_cfg), pipeline=test_pipeline), test=dict(type=dataset_type, data_source=dict(list_file=data_test_list, root=data_test_root, **data_source_cfg), pipeline=test_pipeline))
prefetch = False |
def dfs(node, explored):
    """Depth-first traversal; appends unvisited nodes to *explored*.

    Mutates and returns the *explored* list (insertion order = visit order).
    Nodes are expected to expose a ``neighbors`` iterable.
    """
    if node in explored:
        return explored
    explored.append(node)
    for neighbor in node.neighbors:
        dfs(neighbor, explored)
    return explored
def bfs(start, goal):
    """Breadth-first search; return the first path from *start* to *goal*.

    Returns the path as a list of nodes, or None when *goal* is unreachable.
    Nodes are expected to expose a ``neighbors`` iterable.
    """
    visited = []
    frontier = [[start]]  # queue of partial paths
    while frontier:
        path = frontier.pop(0)
        tail = path[-1]
        if tail in visited:
            continue
        for neighbor in tail.neighbors:
            extended = path + [neighbor]
            frontier.append(extended)
            if neighbor == goal:
                return extended
        visited.append(tail)
| def dfs(node, explored):
if node not in explored:
explored.append(node)
for n in node.neighbors:
dfs(n, explored)
return explored
def bfs(start, goal):
explored = []
paths = [[start]]
while len(paths) > 0:
path1 = paths.pop(0)
lastnode = path1[-1]
if lastnode not in explored:
for n in lastnode.neighbors:
newpath = list(path1)
newpath.append(n)
paths.append(newpath)
if n == goal:
return newpath
explored.append(lastnode) |
def counter(start=0):
    """Infinite generator yielding start, start + 1, start + 2, ..."""
    current = start
    while True:
        yield current
        current += 1

# Demonstration: each next() call advances the generator by one.
c = counter()
print(next(c))  # prints: 0
print(next(c))  # prints: 1
print(next(c))  # prints: 2
| def counter(start=0):
n = start
while True:
yield n
n += 1
c = counter()
print(next(c))
print(next(c))
print(next(c)) |
"""
Reindex the purchase records DataFrame to be indexed hierarchally, first by store,
then by person. Name these indexes 'Location'
and 'Name'. Then add a new entry.
"""
purchase_1 = pd.Series({'Name': 'Chris',
'Item Purchased': 'Dog Food',
'Cost': 22.50})
purchase_2 = pd.Series({'Name': 'Kevyn',
'Item Purchased': 'Kitty Litter',
'Cost': 2.50})
purchase_3 = pd.Series({'Name': 'Vinod',
'Item Purchased': 'Bird Seed',
'Cost': 5.00})
df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2'])
df = df.set_index([df.index, 'Name'])
df.index.names = ['Location', 'Name']
df = df.append(pd.Series(data={'Cost': 3.00, 'Item Purchased': 'Kitty Food'}, name=('Store 2', 'Kevyn')))
df
| """
Reindex the purchase records DataFrame to be indexed hierarchally, first by store,
then by person. Name these indexes 'Location'
and 'Name'. Then add a new entry.
"""
purchase_1 = pd.Series({'Name': 'Chris', 'Item Purchased': 'Dog Food', 'Cost': 22.5})
purchase_2 = pd.Series({'Name': 'Kevyn', 'Item Purchased': 'Kitty Litter', 'Cost': 2.5})
purchase_3 = pd.Series({'Name': 'Vinod', 'Item Purchased': 'Bird Seed', 'Cost': 5.0})
df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2'])
df = df.set_index([df.index, 'Name'])
df.index.names = ['Location', 'Name']
df = df.append(pd.Series(data={'Cost': 3.0, 'Item Purchased': 'Kitty Food'}, name=('Store 2', 'Kevyn')))
df |
"""
Use null statement as placeholder for future implementation
-----------------------------------------------------------
Python interpreter doesn't ignore pass. The difference between pass and comment,
that interpreter ignores comments. Pass is unll statement that means it is a
general palceholder for future implementation of functions, loops, classes.
Using pass help avoid the runtime errors and the code will be runable.
"""
print('This is the beginning of example')
# Using pass prevent running the for loop.
for i in range(4):
pass
print('\nEnd of examples') | """
Use null statement as placeholder for future implementation
-----------------------------------------------------------
Python interpreter doesn't ignore pass. The difference between pass and comment,
that interpreter ignores comments. Pass is unll statement that means it is a
general palceholder for future implementation of functions, loops, classes.
Using pass help avoid the runtime errors and the code will be runable.
"""
print('This is the beginning of example')
for i in range(4):
pass
print('\nEnd of examples') |
# list can store any type of data type.
temperatures = []
# add at the end.
temperatures.append(98.6)
temperatures.append(98.7)
print(temperatures)
er_temps = [102.2, 103.5]
# extends the original list.
temperatures.extend(er_temps)
print(temperatures)
# Concatenate: + builds a new list, leaving both operands unchanged.
primary_care_doctors = ["Dr. Scholls", "Dr. Pepper"]
er_doctor = ["Doug", "Susan"]
all_doctors = primary_care_doctors + er_doctor
print(all_doctors)
hello = ["How", "are", "you"]
# NOTE: extend() takes any iterable; a string is iterated character by
# character, so this appends 'h', 'e', 'l', 'l', 'o' as five elements.
hello.extend("hello")
print(hello)
| temperatures = []
temperatures.append(98.6)
temperatures.append(98.7)
print(temperatures)
er_temps = [102.2, 103.5]
temperatures.extend(er_temps)
print(temperatures)
primary_care_doctors = ['Dr. Scholls', 'Dr. Pepper']
er_doctor = ['Doug', 'Susan']
all_doctors = primary_care_doctors + er_doctor
print(all_doctors)
hello = ['How', 'are', 'you']
hello.extend('hello')
print(hello) |
class Solution:
    def traverse(self, l, dep):
        """Accumulate dep * value for every integer nested in *l* into self.sumdep."""
        for element in l:
            if element.isInteger():
                self.sumdep += dep * element.getInteger()
            else:
                self.traverse(element.getList(), dep + 1)

    def depthSum(self, nestedList: List[NestedInteger]) -> int:
        """Return the sum of each integer multiplied by its nesting depth (top level = 1)."""
        self.sumdep = 0
        self.traverse(nestedList, 1)
        return self.sumdep
| class Solution:
def traverse(self, l, dep):
for e in l:
if e.isInteger():
v = e.getInteger()
self.sumdep += dep * v
else:
self.traverse(e.getList(), dep + 1)
def depth_sum(self, nestedList: List[NestedInteger]) -> int:
self.sumdep = 0
self.traverse(nestedList, 1)
return self.sumdep |
class _DataPad:
    """Base class for media pads; validates the pad's declared data type."""

    # The only data types a pad is allowed to advertise.
    data_types = ['video', 'audio', 'subtitles', 'data']

    def __init__(self):
        self.__data_type = None   # set through the data_type property
        self.media_stream = None

    @property
    def data_type(self):
        return self.__data_type

    @data_type.setter
    def data_type(self, dt):
        # Reject anything outside the whitelist above.
        if dt in _DataPad.data_types:
            self.__data_type = dt
        else:
            raise ValueError('Data type {} isn\'t allowed'.format(dt))

class VideoDataPad(_DataPad):
    def __init__(self):
        super().__init__()
        self.data_type = 'video'

class AudioDataPad(_DataPad):
    def __init__(self):
        super().__init__()
        self.data_type = 'audio'

class SubtitleDataPad(_DataPad):
    def __init__(self):
        super().__init__()
        # BUG FIX: was 'subtitle', which is not in _DataPad.data_types, so
        # every SubtitleDataPad() raised ValueError in the property setter.
        self.data_type = 'subtitles'

class ArbitraryDataPad(_DataPad):
    def __init__(self):
        super().__init__()
        self.data_type = 'data'
class _InputNode:
    # Stub: construction from a media file or a template is not implemented yet.
    def __init__(self, media_file=None, template=None):
        pass
| class _Datapad:
data_types = ['video', 'audio', 'subtitles', 'data']
def __init__(self):
self.__data_type = None
self.media_stream = None
@property
def data_type(self):
return self.__data_type
@data_type.setter
def data_type(self, dt):
if dt in _DataPad.data_types:
self.__data_type = dt
else:
raise value_error("Data type {} isn't allowed".format(dt))
class Videodatapad(_DataPad):
def __init__(self):
super().__init__()
self.data_type = 'video'
class Audiodatapad(_DataPad):
def __init__(self):
super().__init__()
self.data_type = 'audio'
class Subtitledatapad(_DataPad):
def __init__(self):
super().__init__()
self.data_type = 'subtitle'
class Arbitrarydatapad(_DataPad):
def __init__(self):
super().__init__()
self.data_type = 'data'
class _Inputnode:
def __init__(self, media_file=None, template=None):
pass |
class Solution(object):
    def maxSubArray(self, nums):
        """Kadane's algorithm: return the maximum sum over all contiguous subarrays."""
        best = float('-inf')
        running = 0
        for value in nums:
            running += value
            # Track the best running sum seen so far.
            best = max(best, running)
            # A negative running sum can only hurt; restart the window.
            if running < 0:
                running = 0
        return best
| class Solution(object):
def max_sub_array(self, nums):
max_sum = float('-inf')
cur_sum = 0
for i in range(len(nums)):
cur_sum += nums[i]
if cur_sum > max_sum:
max_sum = cur_sum
if cur_sum < 0:
cur_sum = 0
return max_sum |
# Problem: https://www.hackerrank.com/challenges/s10-the-central-limit-theorem-3/problem
# Score: 30

# inputs
mean = 500   # population mean
std = 80     # population standard deviation
n = 100      # sample size
z = 1.96     # z-score for a 95% confidence interval

# Standard error of the sample mean: sigma / sqrt(n). The sample mean equals
# the population mean, so the redundant `mean = mean` self-assignment from the
# original was dropped.
std = std / n**(1/2)

# Find the 95% interval
print(round(mean - std * z, 2))
print(round(mean + std * z, 2))
| mean = 500
std = 80
n = 100
z = 1.96
mean = mean
std = std / n ** (1 / 2)
print(round(mean - std * z, 2))
print(round(mean + std * z, 2)) |
class Solution:
    def validateStackSequences(self, pushed, popped):
        """
        :type pushed: List[int]
        :type popped: List[int]
        :rtype: bool
        """
        # Simulate the stack: greedily pop whenever the top matches the next
        # expected popped value, otherwise push the next pushed value.
        simulated = []
        push_i = 0
        pop_i = 0
        while push_i < len(pushed) or pop_i < len(popped):
            if simulated and pop_i < len(popped) and simulated[-1] == popped[pop_i]:
                simulated.pop()
                pop_i += 1
            elif push_i < len(pushed):
                simulated.append(pushed[push_i])
                push_i += 1
            else:
                # Nothing left to push and the top doesn't match: impossible.
                return False
        return True
if __name__ == "__main__":
print(Solution().validateStackSequences([1, 2, 3, 4, 5], [4, 5, 3, 2, 1]))
print(Solution().validateStackSequences([1, 2, 3, 4, 5], [4, 3, 5, 1, 2]))
| class Solution:
def validate_stack_sequences(self, pushed, popped):
"""
:type pushed: List[int]
:type popped: List[int]
:rtype: bool
"""
stack = []
(pushed_index, popped_index) = (0, 0)
while pushed_index < len(pushed) or popped_index < len(popped):
if len(stack) > 0 and popped_index < len(popped) and (stack[-1] == popped[popped_index]):
stack.pop()
popped_index += 1
elif pushed_index < len(pushed):
stack.append(pushed[pushed_index])
pushed_index += 1
else:
return False
return True
if __name__ == '__main__':
print(solution().validateStackSequences([1, 2, 3, 4, 5], [4, 5, 3, 2, 1]))
print(solution().validateStackSequences([1, 2, 3, 4, 5], [4, 3, 5, 1, 2])) |
# Portions Copyright (c) 2005 Nokia Corporation
#
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20010701
# max code word in this release
MAXREPEAT = 65535
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
    """Exception raised by the sre engine (accessible as sre.error)."""
    pass
# operators
FAILURE = "failure"
SUCCESS = "success"
ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
# positions
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
# categories
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
OPCODES = [
# failure=0 success=1 (just because it looks better that way :-)
FAILURE, SUCCESS,
ANY, ANY_ALL,
ASSERT, ASSERT_NOT,
AT,
BRANCH,
CALL,
CATEGORY,
CHARSET, BIGCHARSET,
GROUPREF, GROUPREF_IGNORE,
IN, IN_IGNORE,
INFO,
JUMP,
LITERAL, LITERAL_IGNORE,
MARK,
MAX_UNTIL,
MIN_UNTIL,
NOT_LITERAL, NOT_LITERAL_IGNORE,
NEGATE,
RANGE,
REPEAT,
REPEAT_ONE,
SUBPATTERN
]
ATCODES = [
AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
AT_UNI_NON_BOUNDARY
]
CHCODES = [
CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
    """Map each item of *list* to its position: makedict([a, b]) -> {a: 0, b: 1}.

    For duplicate items, the last occurrence wins (same as the original loop).
    NOTE: the parameter name shadows the builtin `list`; it is kept unchanged
    for backward compatibility with existing callers.
    """
    return {item: index for index, item in enumerate(list)}
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode locale
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
| """Internal support module for sre"""
magic = 20010701
maxrepeat = 65535
class Error(Exception):
pass
failure = 'failure'
success = 'success'
any = 'any'
any_all = 'any_all'
assert = 'assert'
assert_not = 'assert_not'
at = 'at'
bigcharset = 'bigcharset'
branch = 'branch'
call = 'call'
category = 'category'
charset = 'charset'
groupref = 'groupref'
groupref_ignore = 'groupref_ignore'
in = 'in'
in_ignore = 'in_ignore'
info = 'info'
jump = 'jump'
literal = 'literal'
literal_ignore = 'literal_ignore'
mark = 'mark'
max_repeat = 'max_repeat'
max_until = 'max_until'
min_repeat = 'min_repeat'
min_until = 'min_until'
negate = 'negate'
not_literal = 'not_literal'
not_literal_ignore = 'not_literal_ignore'
range = 'range'
repeat = 'repeat'
repeat_one = 'repeat_one'
subpattern = 'subpattern'
at_beginning = 'at_beginning'
at_beginning_line = 'at_beginning_line'
at_beginning_string = 'at_beginning_string'
at_boundary = 'at_boundary'
at_non_boundary = 'at_non_boundary'
at_end = 'at_end'
at_end_line = 'at_end_line'
at_end_string = 'at_end_string'
at_loc_boundary = 'at_loc_boundary'
at_loc_non_boundary = 'at_loc_non_boundary'
at_uni_boundary = 'at_uni_boundary'
at_uni_non_boundary = 'at_uni_non_boundary'
category_digit = 'category_digit'
category_not_digit = 'category_not_digit'
category_space = 'category_space'
category_not_space = 'category_not_space'
category_word = 'category_word'
category_not_word = 'category_not_word'
category_linebreak = 'category_linebreak'
category_not_linebreak = 'category_not_linebreak'
category_loc_word = 'category_loc_word'
category_loc_not_word = 'category_loc_not_word'
category_uni_digit = 'category_uni_digit'
category_uni_not_digit = 'category_uni_not_digit'
category_uni_space = 'category_uni_space'
category_uni_not_space = 'category_uni_not_space'
category_uni_word = 'category_uni_word'
category_uni_not_word = 'category_uni_not_word'
category_uni_linebreak = 'category_uni_linebreak'
category_uni_not_linebreak = 'category_uni_not_linebreak'
opcodes = [FAILURE, SUCCESS, ANY, ANY_ALL, ASSERT, ASSERT_NOT, AT, BRANCH, CALL, CATEGORY, CHARSET, BIGCHARSET, GROUPREF, GROUPREF_IGNORE, IN, IN_IGNORE, INFO, JUMP, LITERAL, LITERAL_IGNORE, MARK, MAX_UNTIL, MIN_UNTIL, NOT_LITERAL, NOT_LITERAL_IGNORE, NEGATE, RANGE, REPEAT, REPEAT_ONE, SUBPATTERN]
atcodes = [AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY, AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING, AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY, AT_UNI_NON_BOUNDARY]
chcodes = [CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE, CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD, CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD, CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT, CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD, CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK, CATEGORY_UNI_NOT_LINEBREAK]
def makedict(list):
    """Map each element of *list* to its index: ['a', 'b'] -> {'a': 0, 'b': 1}.

    On duplicates the last occurrence wins, matching dict-comprehension
    semantics.  (The parameter name shadows the builtin `list`; kept for
    interface compatibility.)
    """
    return {item: index for index, item in enumerate(list)}
opcodes = makedict(OPCODES)
atcodes = makedict(ATCODES)
chcodes = makedict(CHCODES)
op_ignore = {GROUPREF: GROUPREF_IGNORE, IN: IN_IGNORE, LITERAL: LITERAL_IGNORE, NOT_LITERAL: NOT_LITERAL_IGNORE}
at_multiline = {AT_BEGINNING: AT_BEGINNING_LINE, AT_END: AT_END_LINE}
at_locale = {AT_BOUNDARY: AT_LOC_BOUNDARY, AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY}
at_unicode = {AT_BOUNDARY: AT_UNI_BOUNDARY, AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY}
ch_locale = {CATEGORY_DIGIT: CATEGORY_DIGIT, CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT, CATEGORY_SPACE: CATEGORY_SPACE, CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE, CATEGORY_WORD: CATEGORY_LOC_WORD, CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD, CATEGORY_LINEBREAK: CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK}
ch_unicode = {CATEGORY_DIGIT: CATEGORY_UNI_DIGIT, CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT, CATEGORY_SPACE: CATEGORY_UNI_SPACE, CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE, CATEGORY_WORD: CATEGORY_UNI_WORD, CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD, CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK, CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK}
sre_flag_template = 1
sre_flag_ignorecase = 2
sre_flag_locale = 4
sre_flag_multiline = 8
sre_flag_dotall = 16
sre_flag_unicode = 32
sre_flag_verbose = 64
sre_flag_debug = 128
sre_info_prefix = 1
sre_info_literal = 2
sre_info_charset = 4 |
##-------------------------------------------------------------------
"""
Given a binary tree, return the level order traversal of
its nodes' values. (ie, from left to right, level by level).
For example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its level order traversal as:
[
[3],
[9,20],
[15,7]
]
##-------------------------------------------------------------------
"""
def level_order(root):
    """Breadth-first traversal of a binary tree.

    Returns the node values grouped by level, left to right, e.g.
    [[3], [9, 20], [15, 7]].  An empty tree yields [].
    """
    if not root:
        return []
    levels = []
    frontier = [root]
    while frontier:
        levels.append([node.val for node in frontier])
        # Children of the current level, in left-to-right order.
        frontier = [child for node in frontier
                    for child in (node.left, node.right) if child]
    return levels
| """
Given a binary tree, return the level order traversal of
its nodes' values. (ie, from left to right, level by level).
For example:
Given binary tree [3,9,20,null,null,15,7],
3
/ 9 20
/ 15 7
return its level order traversal as:
[
[3],
[9,20],
[15,7]
]
##-------------------------------------------------------------------
"""
def level_order(root):
ans = []
if not root:
return ans
level = [root]
while level:
current = []
new_level = []
for node in level:
current.append(node.val)
if node.left:
new_level.append(node.left)
if node.right:
new_level.append(node.right)
level = new_level
ans.append(current)
return ans |
def pytest_addoption(parser):
    """Register the integration-test command-line options on the pytest parser."""
    option_defaults = [
        ("--core_url", "localhost:6565"),
        ("--serving_url", "localhost:6566"),
        ("--jobcontroller_url", "localhost:6570"),
        ("--allow_dirty", "False"),
        ("--gcs_path", "gs://feast-templocation-kf-feast/"),
        ("--enable_auth", "False"),
        ("--kafka_brokers", "localhost:9092"),
    ]
    for flag, default in option_defaults:
        parser.addoption(flag, action="store", default=default)
| def pytest_addoption(parser):
parser.addoption('--core_url', action='store', default='localhost:6565')
parser.addoption('--serving_url', action='store', default='localhost:6566')
parser.addoption('--jobcontroller_url', action='store', default='localhost:6570')
parser.addoption('--allow_dirty', action='store', default='False')
parser.addoption('--gcs_path', action='store', default='gs://feast-templocation-kf-feast/')
parser.addoption('--enable_auth', action='store', default='False')
parser.addoption('--kafka_brokers', action='store', default='localhost:9092') |
class Cab:
    """A cab characterised by kilometres driven, body type and model year."""

    def __init__(self, kms, type_of_cab, year):
        self.__kms = kms                  # kilometres driven
        self.__type_of_cab = type_of_cab  # body type label
        self.__year = year                # model year

    def get_kms(self):
        """Return the kilometres driven.

        returns:
            int: self.__kms
        """
        return self.__kms

    def get_type_of_car(self):
        """Return the type of the cab.

        (Method name kept as `get_type_of_car` for interface compatibility.)

        returns:
            str: self.__type_of_cab
        """
        return self.__type_of_cab

    def get_year(self):
        """Return the model year.

        returns:
            int: self.__year
        """
        return self.__year

    def __gt__(self, other):
        # Cabs are ordered by model year only.
        return self.__year > other.get_year()

    def __eq__(self, other):
        # Equal when both year and body type match; kilometres are ignored.
        return self.__year == other.get_year() and self.__type_of_cab == other.get_type_of_car()

    def __repr__(self):
        # BUG FIX: __repr__ must return a string.  The original printed the
        # text and implicitly returned None, so repr(cab) raised TypeError.
        return f"kms: {self.__kms} type of car {self.__type_of_cab} year : {self.__year}"
class Sedan(Cab):
    """Sedan cab billed at a flat 2.5 per kilometre."""

    def __init__(self, kms, type_of_cab, year):
        Cab.__init__(self, kms, type_of_cab, year)
        self.__price_per_km = 2.5  # sedan tariff

    def calculate_fare(self):
        """Return the trip fare: kilometres driven times the per-km rate.

        returns:
            float: Cab.get_kms(self) * self.__price_per_km
        """
        rate = self.__price_per_km
        return Cab.get_kms(self) * rate
class Hatchback(Cab):
    """Hatchback cab billed at a flat 2.2 per kilometre."""

    def __init__(self, kms, type_of_cab, year):
        Cab.__init__(self, kms, type_of_cab, year)
        self.__price_per_km = 2.2  # hatchback tariff

    def calculate_fare(self):
        """Return the trip fare: kilometres driven times the per-km rate.

        returns:
            float: Cab.get_kms(self) * self.__price_per_km
        """
        rate = self.__price_per_km
        return Cab.get_kms(self) * rate
| class Cab:
def __init__(self, kms, type_of_cab, year):
self.__kms = kms
self.__type_of_cab = type_of_cab
self.__year = year
def get_kms(self):
"""
Returns kms
Parameters:
self
returns:
int: return self.__hms
"""
return self.__kms
def get_type_of_car(self):
"""
Returns type of car
Parameters:
self
returns:
int: return self.__type_of_car
"""
return self.__type_of_cab
def get_year(self):
"""
Returns year
Parameters:
self
returns:
int: return self.__year
"""
return self.__year
def __gt__(self, other):
return self.__year > other.get_year()
def __eq__(self, other):
return self.__year == other.get_year() and self.__type_of_cab == other.get_type_of_car()
def __repr__(self):
print(f'kms: {self.__kms} type of car {self.__type_of_cab} year : {self.__year}')
class Sedan(Cab):
def __init__(self, kms, type_of_cab, year):
Cab.__init__(self, kms, type_of_cab, year)
self.__price_per_km = 2.5
def calculate_fare(self):
"""
Calculation of Fare
Parameters:
self
returns:
int: return Cab.get_kms(self) * self.__price_per_km
"""
return Cab.get_kms(self) * self.__price_per_km
class Hatchback(Cab):
def __init__(self, kms, type_of_cab, year):
Cab.__init__(self, kms, type_of_cab, year)
self.__price_per_km = 2.2
def calculate_fare(self):
"""
Calculation of Fare
Parameters:
self
returns:
int: return Cab.get_kms(self) * self.__price_per_km
"""
return Cab.get_kms(self) * self.__price_per_km |
"""
Fourth algorithm function for application.
Fill in your own algorithmic code here.
"""
def calculate4(self):
    """Calculate ROI and other params given historical stock data using ?? algorithm.

    Placeholder: no algorithm is implemented yet, so this always returns False.
    """
    return False
| """
Fourth algorithm function for application.
Fill in your own algorithmic code here.
"""
def calculate4(self):
"""Calculate ROI and other params given historical stock data using ?? algorithm."""
return False |
def count_change(money, coins):
    """Count the distinct ways to make *money* from the given denominations.

    Order does not matter: 1+2 and 2+1 are the same way of making 3.

    Parameters:
        money (int): target amount, >= 0
        coins (list[int]): available denominations (need not be sorted)
    Returns:
        int: number of combinations (1 for money == 0, 0 if unreachable)
    """
    # BUG FIX: the original never incremented `result` (its equality test sat
    # inside a `while sum(cache) < money` loop so it could never fire) and
    # fell off the end returning None.  Standard bottom-up DP instead:
    # ways[a] = number of ways to form amount `a` with the coins seen so far.
    ways = [1] + [0] * money
    for coin in coins:
        for amount in range(coin, money + 1):
            ways[amount] += ways[amount - coin]
    return ways[money]
| def count_change(money, coins):
coins.sort()
result = 0
cache = [coins[0]]
for (i, coin) in enumerate(coins):
while sum(cache) < money:
if sum(cache) == money:
result += 1
cache.append(coin) |
# -*- coding: utf-8 -*-
def main():
    # Read n intervals (s_i, t_i) from stdin, sort by start point, and count
    # how many times the sweep finds a strict gap: a new group starts whenever
    # the largest end seen so far lies strictly before the next start.
    # NOTE(review): assumes n >= 1 -- time[0] raises IndexError for n == 0.
    n = int(input())
    st = [0 for _ in range(n)]  # placeholder list; every slot is overwritten below
    for i in range(n):
        si, ti = map(int, input().split())
        st[i] = (si, ti)
    # Sort by start point; end points are folded in during the sweep.
    time = sorted(st, key=lambda x: x[0])
    ans = 1
    last = time[0][1]
    for j in range(1, n):
        # Largest end among intervals 0..j-1.
        last = max(last, time[j - 1][1])
        if last < time[j][0]:
            # Interval j starts after everything so far has ended.
            ans += 1
    print(ans)
if __name__ == '__main__':
    main()
| def main():
n = int(input())
st = [0 for _ in range(n)]
for i in range(n):
(si, ti) = map(int, input().split())
st[i] = (si, ti)
time = sorted(st, key=lambda x: x[0])
ans = 1
last = time[0][1]
for j in range(1, n):
last = max(last, time[j - 1][1])
if last < time[j][0]:
ans += 1
print(ans)
if __name__ == '__main__':
main() |
class AzEntity(object):
    """Base class for Azurelib objects"""

    def __init__(self, service, name):
        """Creates an Azurelib AzEntity

        Args:
            service: either a azure.storage.TableService
                or a azure.storage.QueueService
            name: entity's name. May be None
        """
        super(AzEntity, self).__init__()
        self._service = service
        self.select(name)

    def select(self, name):
        """Sets the entity's name. Required for
        operations on live Azure instances"""
        if name:
            # Strip underscores and dots from the name before storing it
            # (presumably characters the Azure service rejects -- confirm).
            for char in ("_", "."):
                name = name.replace(char, "")
        self._name = name

    def get_name(self):
        """Returns the entity's name"""
        return self._name

    def get_service(self):
        """Returns the entity's service"""
        return self._service
| class Azentity(object):
"""Base class for Azurelib objects"""
def __init__(self, service, name):
"""Creates an Azurelib AzEntity
Args:
service: either a azure.storage.TableService
or a azure.storage.QueueService
name: entity's name. May be None
"""
super(AzEntity, self).__init__()
self._service = service
self.select(name)
def select(self, name):
"""Sets the entity's name. Required for
operations on live Azure instances"""
if name:
name = name.replace('_', '')
name = name.replace('.', '')
self._name = name
def get_name(self):
"""Returns the entity's name"""
return self._name
def get_service(self):
"""Returns the entity's service"""
return self._service |
ALL_IM_SIZE = (72, 208, 208) # Resize to 72, 256, 256 then crop to 208
CROP = 24
N_CLASSES = 5
ROI_ORDER = ['SpinalCord', 'Lung_R', 'Lung_L', 'Heart', 'Esophagus']
PIXEL_SPACING = 0.975625 # Normalize to this value
INPLANE_SIZE = 512
SLICE_THICKNESS = 3 | all_im_size = (72, 208, 208)
crop = 24
n_classes = 5
roi_order = ['SpinalCord', 'Lung_R', 'Lung_L', 'Heart', 'Esophagus']
pixel_spacing = 0.975625
inplane_size = 512
slice_thickness = 3 |
def molecule_iterator_filter(
    molecule_iterator,
    min_fragments=None,
    max_fragments=None,
    min_mapping_qual=None,
    max_mapping_qual=None,
    min_ivt_duplicates=None,
    max_ivt_duplicates=None,
    both_pairs_mapped=None,
    min_span=None,
    max_span=None
):
    """ Filter iterable with molecules

    molecule_iterator (iterable) : molecules to filter from
    min_fragments (int) : minimum amount of fragments associated to molecule
    max_fragments (int) : maximum amount of fragments associated to molecule
    min_mapping_qual(int) : minimum maximum mapping quality for a single associated fragment
    max_mapping_qual(int) : maximum maximum mapping quality for a single associated fragment
    min_ivt_duplicates(int) : minimum amount of in vitro transcription copies
    max_ivt_duplicates(int) : maximum amount of in vitro transcription copies
    both_pairs_mapped(bool) : molecule should have at least one fragment with both pairs mapped
    min_span(int) : minimum amount of bases aligned with reference
    max_span(int) : maximum amount of bases aligned with reference

    Yields:
        molecules that pass every supplied constraint (None means no constraint)
    """
    for molecule in molecule_iterator:
        if min_fragments is not None and len(molecule) < min_fragments:
            continue
        if max_fragments is not None and len(molecule) > max_fragments:
            continue
        if min_mapping_qual is not None and molecule.get_max_mapping_qual() < min_mapping_qual:
            continue
        if max_mapping_qual is not None and molecule.get_max_mapping_qual() > max_mapping_qual:
            continue
        if min_ivt_duplicates is not None and len(molecule.get_rt_reactions()) < min_ivt_duplicates:
            continue
        # BUG FIX: the original guarded this upper-bound test with
        # `min_ivt_duplicates is not None`, so max_ivt_duplicates was either
        # silently ignored or compared against None (TypeError) unless the
        # lower bound happened to be supplied too.
        if max_ivt_duplicates is not None and len(molecule.get_rt_reactions()) > max_ivt_duplicates:
            continue
        # BUG FIX: the original tested `is not None`, so both_pairs_mapped=False
        # behaved exactly like True.  False now means "no constraint".
        if both_pairs_mapped:
            if not any(fragment.has_R1() and fragment.has_R2() for fragment in molecule):
                continue
        if min_span is not None and molecule.get_safely_aligned_length() < min_span:
            continue
        if max_span is not None and molecule.get_safely_aligned_length() > max_span:
            continue
        yield molecule
| def molecule_iterator_filter(molecule_iterator, min_fragments=None, max_fragments=None, min_mapping_qual=None, max_mapping_qual=None, min_ivt_duplicates=None, max_ivt_duplicates=None, both_pairs_mapped=None, min_span=None, max_span=None):
""" Filter iterable with molecules
molecule_iterator (iterable) : molecules to filter from
min_fragments (int) : minimum amount of fragments associated to molecule
max_fragments (int) : maximum amount of fragments associated to molecule
min_mapping_qual(int) : minimum maximum mapping quality for a single associated fragment
max_mapping_qual(int) : maximum maximum mapping quality for a single associated fragment
min_ivt_duplicates(int) : minimum amount of in vitro transcription copies
max_ivt_duplicates(int) : maximum amount of in vitro transcription copies
both_pairs_mapped(bool) : molecule should have at least one fragment with both pairs mapped
min_span(int) : minimum amount of bases aligned with reference
max_span : maximum amount of bases aligned with reference
"""
for molecule in molecule_iterator:
if min_fragments is not None and len(molecule) < min_fragments:
continue
if max_fragments is not None and len(molecule) > max_fragments:
continue
if min_mapping_qual is not None and molecule.get_max_mapping_qual() < min_mapping_qual:
continue
if max_mapping_qual is not None and molecule.get_max_mapping_qual() > max_mapping_qual:
continue
if min_ivt_duplicates is not None and len(molecule.get_rt_reactions()) < min_ivt_duplicates:
continue
if min_ivt_duplicates is not None and len(molecule.get_rt_reactions()) > max_ivt_duplicates:
continue
if both_pairs_mapped is not None:
found_both = False
for fragment in molecule:
if fragment.has_R1() and fragment.has_R2():
found_both = True
if not found_both:
continue
if min_span is not None and molecule.get_safely_aligned_length() < min_span:
continue
if max_span is not None and molecule.get_safely_aligned_length() > max_span:
continue
yield molecule |
""" Symlet 12 wavelet """
class Symlet12:
"""
Properties
----------
near symmetric, orthogonal, biorthogonal
All values are from http://wavelets.pybytes.com/wavelet/sym12/
"""
__name__ = "Symlet Wavelet 12"
__motherWaveletLength__ = 24 # length of the mother wavelet
__transformWaveletLength__ = 2 # minimum wavelength of input signal
# decomposition filter
# low-pass
decompositionLowFilter = [
0.00011196719424656033,
-1.1353928041541452e-05,
-0.0013497557555715387,
0.00018021409008538188,
0.007414965517654251,
-0.0014089092443297553,
-0.024220722675013445,
0.0075537806116804775,
0.04917931829966084,
-0.03584883073695439,
-0.022162306170337816,
0.39888597239022,
0.7634790977836572,
0.46274103121927235,
-0.07833262231634322,
-0.17037069723886492,
0.01530174062247884,
0.05780417944550566,
-0.0026043910313322326,
-0.014589836449234145,
0.00030764779631059454,
0.002350297614183465,
-1.8158078862617515e-05,
-0.0001790665869750869,
]
# high-pass
decompositionHighFilter = [
0.0001790665869750869,
-1.8158078862617515e-05,
-0.002350297614183465,
0.00030764779631059454,
0.014589836449234145,
-0.0026043910313322326,
-0.05780417944550566,
0.01530174062247884,
0.17037069723886492,
-0.07833262231634322,
-0.46274103121927235,
0.7634790977836572,
-0.39888597239022,
-0.022162306170337816,
0.03584883073695439,
0.04917931829966084,
-0.0075537806116804775,
-0.024220722675013445,
0.0014089092443297553,
0.007414965517654251,
-0.00018021409008538188,
-0.0013497557555715387,
1.1353928041541452e-05,
0.00011196719424656033,
]
# reconstruction filters
# low pass
reconstructionLowFilter = [
-0.0001790665869750869,
-1.8158078862617515e-05,
0.002350297614183465,
0.00030764779631059454,
-0.014589836449234145,
-0.0026043910313322326,
0.05780417944550566,
0.01530174062247884,
-0.17037069723886492,
-0.07833262231634322,
0.46274103121927235,
0.7634790977836572,
0.39888597239022,
-0.022162306170337816,
-0.03584883073695439,
0.04917931829966084,
0.0075537806116804775,
-0.024220722675013445,
-0.0014089092443297553,
0.007414965517654251,
0.00018021409008538188,
-0.0013497557555715387,
-1.1353928041541452e-05,
0.00011196719424656033,
]
# high-pass
reconstructionHighFilter = [
0.00011196719424656033,
1.1353928041541452e-05,
-0.0013497557555715387,
-0.00018021409008538188,
0.007414965517654251,
0.0014089092443297553,
-0.024220722675013445,
-0.0075537806116804775,
0.04917931829966084,
0.03584883073695439,
-0.022162306170337816,
-0.39888597239022,
0.7634790977836572,
-0.46274103121927235,
-0.07833262231634322,
0.17037069723886492,
0.01530174062247884,
-0.05780417944550566,
-0.0026043910313322326,
0.014589836449234145,
0.00030764779631059454,
-0.002350297614183465,
-1.8158078862617515e-05,
0.0001790665869750869,
]
| """ Symlet 12 wavelet """
class Symlet12:
"""
Properties
----------
near symmetric, orthogonal, biorthogonal
All values are from http://wavelets.pybytes.com/wavelet/sym12/
"""
__name__ = 'Symlet Wavelet 12'
__mother_wavelet_length__ = 24
__transform_wavelet_length__ = 2
decomposition_low_filter = [0.00011196719424656033, -1.1353928041541452e-05, -0.0013497557555715387, 0.00018021409008538188, 0.007414965517654251, -0.0014089092443297553, -0.024220722675013445, 0.0075537806116804775, 0.04917931829966084, -0.03584883073695439, -0.022162306170337816, 0.39888597239022, 0.7634790977836572, 0.46274103121927235, -0.07833262231634322, -0.17037069723886492, 0.01530174062247884, 0.05780417944550566, -0.0026043910313322326, -0.014589836449234145, 0.00030764779631059454, 0.002350297614183465, -1.8158078862617515e-05, -0.0001790665869750869]
decomposition_high_filter = [0.0001790665869750869, -1.8158078862617515e-05, -0.002350297614183465, 0.00030764779631059454, 0.014589836449234145, -0.0026043910313322326, -0.05780417944550566, 0.01530174062247884, 0.17037069723886492, -0.07833262231634322, -0.46274103121927235, 0.7634790977836572, -0.39888597239022, -0.022162306170337816, 0.03584883073695439, 0.04917931829966084, -0.0075537806116804775, -0.024220722675013445, 0.0014089092443297553, 0.007414965517654251, -0.00018021409008538188, -0.0013497557555715387, 1.1353928041541452e-05, 0.00011196719424656033]
reconstruction_low_filter = [-0.0001790665869750869, -1.8158078862617515e-05, 0.002350297614183465, 0.00030764779631059454, -0.014589836449234145, -0.0026043910313322326, 0.05780417944550566, 0.01530174062247884, -0.17037069723886492, -0.07833262231634322, 0.46274103121927235, 0.7634790977836572, 0.39888597239022, -0.022162306170337816, -0.03584883073695439, 0.04917931829966084, 0.0075537806116804775, -0.024220722675013445, -0.0014089092443297553, 0.007414965517654251, 0.00018021409008538188, -0.0013497557555715387, -1.1353928041541452e-05, 0.00011196719424656033]
reconstruction_high_filter = [0.00011196719424656033, 1.1353928041541452e-05, -0.0013497557555715387, -0.00018021409008538188, 0.007414965517654251, 0.0014089092443297553, -0.024220722675013445, -0.0075537806116804775, 0.04917931829966084, 0.03584883073695439, -0.022162306170337816, -0.39888597239022, 0.7634790977836572, -0.46274103121927235, -0.07833262231634322, 0.17037069723886492, 0.01530174062247884, -0.05780417944550566, -0.0026043910313322326, 0.014589836449234145, 0.00030764779631059454, -0.002350297614183465, -1.8158078862617515e-05, 0.0001790665869750869] |
# Complete the rotLeft function below.
def go():
    """Demo driver: rotate [1, 2, 3] left by one position."""
    a = [1, 2, 3]
    d = 1
    result = rotLeft(a, d)
    return result


def rotLeft(a, d):
    """Return list *a* rotated left by *d* positions.

    BUG FIX: the original popped the single element at index d and appended
    it, which is not a rotation (e.g. [1, 2, 3] with d=1 produced [1, 3, 2]
    instead of [2, 3, 1]).  A left rotation moves the first d elements to
    the back.  Handles d >= len(a) and the empty list.
    """
    if not a:
        return a
    d %= len(a)
    return a[d:] + a[:d]
| def go():
a = [1, 2, 3]
d = 1
result = rot_left(a, d)
return result
def rot_left(a, d):
value = a.pop(d)
a.append(value)
return a |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
class FKeySupplier:
    """Hands out small non-negative integer keys, recycling returned ones."""

    def __init__(self):
        self.__returnedKeys = []  # keys handed back while larger keys stay live
        self.__maxKey = -1        # largest key currently handed out

    def __str__(self):
        return "[" + ", ".join(str(key) for key in self.GetKeyGenerator()) + "]"

    def NextKey(self):
        """Return the next available key, preferring recycled ones."""
        if self.__returnedKeys:
            return self.__returnedKeys.pop()
        self.__maxKey = self.__maxKey + 1
        return self.__maxKey

    # Assumes you return a key that was given out.
    def ReturnKey(self, key):
        """Give *key* back to the pool so it can be handed out again."""
        if key != self.__maxKey:
            self.__returnedKeys.append(key)
            return
        # Returning the current maximum: shrink it past any keys that were
        # already returned so the live range stays compact.
        self.__maxKey = self.__maxKey - 1
        while self.__maxKey in self.__returnedKeys:
            self.__returnedKeys.remove(self.__maxKey)
            self.__maxKey = self.__maxKey - 1

    def GetKeyGenerator(self):
        """Yield every key currently handed out, in ascending order."""
        for key in range(self.__maxKey + 1):
            if key not in self.__returnedKeys:
                yield key
| class Fkeysupplier:
def __init__(self):
self.__returnedKeys = []
self.__maxKey = -1
def __str__(self):
string = '['
for key in self.GetKeyGenerator():
string = string + str(key) + ', '
if len(string) > 1:
string = string[:-2]
string = string + ']'
return string
def next_key(self):
if len(self.__returnedKeys) == 0:
self.__maxKey = self.__maxKey + 1
return self.__maxKey
else:
return self.__returnedKeys.pop()
def return_key(self, key):
if key == self.__maxKey:
self.__maxKey = self.__maxKey - 1
while self.__returnedKeys.count(self.__maxKey) != 0:
self.__returnedKeys.remove(self.__maxKey)
self.__maxKey = self.__maxKey - 1
else:
self.__returnedKeys.append(key)
def get_key_generator(self):
for key in range(0, self.__maxKey + 1):
if self.__returnedKeys.count(key) == 0:
yield key |
__all__ = ['GreetTheWorld']
class GreetTheWorld:
    """Tiny demo class that greets using the name it was constructed with."""

    def __init__(self, name: str):
        self._name = name

    def say_hello(self):
        """Print the canonical greeting for this instance."""
        greeting = f'Hello from GreetTheWorld: {self._name}'
        print(greeting)
| __all__ = ['GreetTheWorld']
class Greettheworld:
def __init__(self, name: str):
self._name = name
def say_hello(self):
print(f'Hello from GreetTheWorld: {self._name}') |
# Package version as a tuple of string components.
VERSION = ('0', '2', '4')


def get_version():
    """Return the version as a dotted string, e.g. '0.2.4'."""
    return ".".join(VERSION)
| version = ('0', '2', '4')
def get_version():
return '.'.join(VERSION) |
# Pool of fear phrases, presumably sampled/displayed elsewhere -- confirm usage.
# NOTE(review): several entries look misspelled ("lonliness", "seperation",
# "mutulation", "repationships").  They are left as-is because downstream code
# may match on these exact strings; verify before correcting.
fears = [
    "the unknown",
    "stepping up",
    "going outside",
    "getting old",
    "being a coward",
    "intimacy",
    "the public",
    "lonliness",
    "death",
    "losing autonomy",
    "seperation",
    "mutulation",
    "repationships",
    "enclosed spaces",
    "letting out",
    "sharing emotions",
    "being judged"
] | fears = ['the unknown', 'stepping up', 'going outside', 'getting old', 'being a coward', 'intimacy', 'the public', 'lonliness', 'death', 'losing autonomy', 'seperation', 'mutulation', 'repationships', 'enclosed spaces', 'letting out', 'sharing emotions', 'being judged'] |
print("--------------------------start--------------------------")
diction={}
d=1
#Function to parse the bracketed problem statement and put the intermediate results(could be terminal or non terminal) into dictionary.
def parse(expression):
    """Parse a fully-parenthesised boolean expression string into nested lists.

    Side effects: every parenthesised sub-expression of length 1-3 is stored
    in the module-level dict `diction` under a fresh key "T<d>", and the
    module-level counter `d` is advanced.  NOTE(review): relies on those
    globals existing and is therefore not reentrant.
    """
    def _helper(iter):  # parameter shadows the builtin `iter`; kept as-is
        items = []
        global d
        for item in iter:
            if item == '(':
                # Recurse for the sub-expression; closeparen reports whether
                # the matching ')' was actually consumed.
                result, closeparen = _helper(iter)
                if not closeparen:
                    raise ValueError("Bad expression: Please check if you missed the parentheses")
                diction["T"+str(d)] = result;
                # Short sub-results are replaced by their "T<n>" placeholder
                # so later passes can look them up in `diction`.
                if 4 > len(result) > 0 :
                    result="T"+str(d)
                    d=d+1
                items.append(result)
            elif item == ')':
                return items, True
            else:
                # Plain character (operand or operator): keep verbatim.
                items.append(item)
        return items, False
    return _helper(iter(expression))[0]
problem_stmt="-(((p)&(-(q)))|((p)|(r)))"
#Please uncomment one of the following problem and comment above for checking the output.
#Also don't give any spaces or If you want we can write trimming code for handling this condition
#problem_stmt="((p)|(-(q)))"
#problem_stmt="((p)|(q))"
#problem_stmt="-((p)&(q))"
#problem_stmt="(((p)|(q))|(r))"
#problem_stmt="(((p)&(q))|((p)&(r)))|((q)&(r))"
#problem_stmt="(((p)&((q)|(r)))|((q)|(r)))"
problem = parse(problem_stmt)
#adding last/root element
diction["T"+str(d)]=problem;
#print 'Debugging point#1 : '+str(d)
# now performing flipping of operators over the and/or over the 3 length elements
final_dict={}
j=0;
for i in range(len(diction)):
#print 'Debugging point#2 : '+str(i+1) +' = ' + ' '.join(diction["T"+str(i+1)])
j=j+1
#if len=3 that means involvement of binary operator
if len(diction["T"+str(i+1)])==3 :
#print 'Debugging point#3 : '+''.join(diction[diction["T"+str(i+1)][2]])
# used join in following statement because a single element of dictionary is treated as list
a=''.join(diction[diction["T"+str(i+1)][0]])
b=''.join(diction[diction["T"+str(i+1)][2]])
#resetting the non-terminal symbols with terminal symbols
if len(a)>1:
diction["T"+str(i+1)][0]= '('+a+')'
else :
diction["T"+str(i+1)][0]= a
if len(b)>1:
diction["T"+str(i+1)][2]= '('+b+')'
else :
diction["T"+str(i+1)][2]= b
final_dict[j]= ''.join(diction["T"+str(i+1)][0]) +'&'+''.join(diction["T"+str(i+1)][2])
j=j+1
final_dict[j]= ''.join(diction["T"+str(i+1)][0]) +'|'+''.join(diction["T"+str(i+1)][2])
#if len=2 that means involvement of unary operator
elif len(diction["T"+str(i+1)])==2 :
#since here a would be an operator
a=''.join(diction["T"+str(i+1)][0])
#since here b would be a non terminal symbol
b=''.join(diction[diction["T"+str(i+1)][1]])
#resetting the non-terminal symbols with terminal symbols
diction["T"+str(i+1)][1]= '('+b+')'
final_dict[j]=a+'('+b+')'
else :
if len(diction) !=i+1:
final_dict[j]= ''.join(diction["T"+str(i+1)])
#print str(j)+'Debugging point#4 : '+''.join(final_dict[j])
print ("Your problem statement : " + problem_stmt)
print ("Your plausible ALT solutions are as followed : ")
for k in range(len(final_dict)):
print (str(k+1) +' = ' + ''.join(final_dict[k+1]))
print ("--------------------------end--------------------------") | print('--------------------------start--------------------------')
diction = {}
d = 1
def parse(expression):
def _helper(iter):
items = []
global d
for item in iter:
if item == '(':
(result, closeparen) = _helper(iter)
if not closeparen:
raise value_error('Bad expression: Please check if you missed the parentheses')
diction['T' + str(d)] = result
if 4 > len(result) > 0:
result = 'T' + str(d)
d = d + 1
items.append(result)
elif item == ')':
return (items, True)
else:
items.append(item)
return (items, False)
return _helper(iter(expression))[0]
problem_stmt = '-(((p)&(-(q)))|((p)|(r)))'
problem = parse(problem_stmt)
diction['T' + str(d)] = problem
final_dict = {}
j = 0
for i in range(len(diction)):
j = j + 1
if len(diction['T' + str(i + 1)]) == 3:
a = ''.join(diction[diction['T' + str(i + 1)][0]])
b = ''.join(diction[diction['T' + str(i + 1)][2]])
if len(a) > 1:
diction['T' + str(i + 1)][0] = '(' + a + ')'
else:
diction['T' + str(i + 1)][0] = a
if len(b) > 1:
diction['T' + str(i + 1)][2] = '(' + b + ')'
else:
diction['T' + str(i + 1)][2] = b
final_dict[j] = ''.join(diction['T' + str(i + 1)][0]) + '&' + ''.join(diction['T' + str(i + 1)][2])
j = j + 1
final_dict[j] = ''.join(diction['T' + str(i + 1)][0]) + '|' + ''.join(diction['T' + str(i + 1)][2])
elif len(diction['T' + str(i + 1)]) == 2:
a = ''.join(diction['T' + str(i + 1)][0])
b = ''.join(diction[diction['T' + str(i + 1)][1]])
diction['T' + str(i + 1)][1] = '(' + b + ')'
final_dict[j] = a + '(' + b + ')'
elif len(diction) != i + 1:
final_dict[j] = ''.join(diction['T' + str(i + 1)])
print('Your problem statement : ' + problem_stmt)
print('Your plausible ALT solutions are as followed : ')
for k in range(len(final_dict)):
print(str(k + 1) + ' = ' + ''.join(final_dict[k + 1]))
print('--------------------------end--------------------------') |
# implementation of the trie data structure
class TrieNode:
    """A character trie: string keys map to arbitrary values, one char per node."""

    def __init__(self):
        self.children = {}  # first character -> child TrieNode
        self.char = '*'     # character this node represents ('*' for a root)
        self.value = None   # payload stored at this node, if any
        self.end = False    # True when a key terminates at this node

    def put(self, key, value):
        """Store *value* under *key*.  Returns True, or False for an empty key."""
        if not key:
            print("Error: Empty Key")
            return False
        head, tail = key[0], key[1:]
        node = self.children.setdefault(head, TrieNode())
        node.char = head
        if tail:
            node.put(tail, value)
        else:
            node.value = value
            node.end = True
        return True

    def get(self, key):
        """Return the value stored under *key*, or None when absent/empty."""
        if not key:
            print("Error: Empty Key")
            return None
        node = self.children.get(key[0])
        if node is None:
            return None
        tail = key[1:]
        if tail:
            return node.get(tail)
        return node.value if node.end else None

    def query(self, top_lvl_key, keys):
        """Look up *top_lvl_key*, then follow *keys* through nested .get() calls."""
        if not top_lvl_key:
            print('Error: empty top level key')
            return None
        val = self.get(top_lvl_key)
        if val is None:
            return None
        for key in keys:
            if not key:
                print('Error: query secondary key path contains empty key')
                return None
            try:
                val = val.get(key)
            except (AttributeError, TypeError):
                # Intermediate value does not support .get(); path is invalid.
                return None
        return val

    def delete(self, key):
        """Remove the value stored under *key* (nodes are kept).

        Returns the removed value, or None when the key was absent/empty."""
        if not key:
            print("Error: Empty Key")
            return None
        node = self.children.get(key[0])
        if node is None:
            return None
        tail = key[1:]
        if tail:
            return node.delete(tail)
        if node.end:
            node.end = False
            removed, node.value = node.value, None
            return removed
        return None
| class Trienode:
def __init__(self):
self.children = {}
self.char = '*'
self.value = None
self.end = False
def put(self, key, value):
if len(key) == 0:
print('Error: Empty Key')
return False
head = key[0]
if head in self.children:
current_node = self.children[head]
else:
current_node = trie_node()
current_node.char = head
self.children[head] = current_node
if len(key) > 1:
tail = key[1:]
current_node.put(tail, value)
else:
current_node.value = value
current_node.end = True
return True
def get(self, key):
if len(key) == 0:
print('Error: Empty Key')
return None
head = key[0]
if head in self.children:
current_node = self.children[head]
else:
return None
val = None
if len(key) > 1:
tail = key[1:]
val = current_node.get(tail)
elif current_node.end:
val = current_node.value
return val
def query(self, top_lvl_key, keys):
if len(top_lvl_key) == 0:
print('Error: empty top level key')
return None
val = self.get(top_lvl_key)
if val is None:
return None
if len(keys) == 0:
return val
else:
for key in keys:
if len(key) == 0:
print('Error: query secondary key path contains empty key')
return None
try:
val = val.get(key)
except (AttributeError, TypeError):
return None
return val
def delete(self, key):
if len(key) == 0:
print('Error: Empty Key')
return None
head = key[0]
if head in self.children:
current_node = self.children[head]
else:
return None
val = None
if len(key) > 1:
tail = key[1:]
val = current_node.delete(tail)
elif current_node.end:
current_node.end = False
val = current_node.value
current_node.value = None
return val |
if __name__ == '__main__':
    # "Designer Door Mat" pattern: N rows tall, M columns wide, read from one
    # line of stdin.  Presumably N is odd and M == 3 * N as in the original
    # HackerRank problem -- not validated here.
    N, M = map(int, input().split())
    # Top half: a growing ".|." motif centred with dashes.
    for i in range(1, N, 2):
        print((i * ".|.").center(M, "-"))
    # Centre row.
    print("welcome".upper().center(M, "-"))
    # Bottom half mirrors the top.
    for i in range(N-2, -1, -2):
        print((i * ".|.").center(M, "-"))
| if __name__ == '__main__':
(n, m) = map(int, input().split())
for i in range(1, N, 2):
print((i * '.|.').center(M, '-'))
print('welcome'.upper().center(M, '-'))
for i in range(N - 2, -1, -2):
print((i * '.|.').center(M, '-')) |
# -*- coding:utf-8 -*-
# https://leetcode.com/problems/restore-ip-addresses/description/
class Solution(object):
    # NOTE(review): uses `xrange`, so this snippet is Python 2 code.
    def restoreIpAddresses(self, s):
        """
        :type s: str
        :rtype: List[str]

        Return every valid dotted-quad IP address that can be formed by
        inserting three dots into the digit string *s*.
        """
        ret = []
        def f(s, address):
            # `address` accumulates up to four octet strings (first three
            # carry a trailing '.').
            if not s or len(address) == 4:
                # Record a result only when the digits run out exactly as the
                # fourth octet completes (short-circuit used as an if).
                not s and len(address) == 4 and ret.append(''.join(address))
                return
            # An octet beginning with '0' must be exactly "0"; otherwise try
            # 1- to 3-digit prefixes.
            bg, ed = 1, 1 if s[0] == '0' else min(3, len(s))
            for i in xrange(bg, ed + 1):
                digit = s[:i]
                if 0 <= int(digit) <= 255:
                    if len(address) < 3:
                        address.append(digit + '.')
                    else:
                        address.append(digit)
                    f(s[i:], address)
                    # Backtrack before trying a longer prefix.
                    address.pop()
        f(s, [])
return ret | class Solution(object):
def restore_ip_addresses(self, s):
"""
:type s: str
:rtype: List[str]
"""
ret = []
def f(s, address):
if not s or len(address) == 4:
not s and len(address) == 4 and ret.append(''.join(address))
return
(bg, ed) = (1, 1 if s[0] == '0' else min(3, len(s)))
for i in xrange(bg, ed + 1):
digit = s[:i]
if 0 <= int(digit) <= 255:
if len(address) < 3:
address.append(digit + '.')
else:
address.append(digit)
f(s[i:], address)
address.pop()
f(s, [])
return ret |
# Split a restaurant bill (plus tip) evenly across the party.
print("Welcome to the tip calculator.")
bill = float(input("What was the total bill? $"))
tip = int(input("What percentage tip would you like to give? 10, 12, or 15? "))
total_people = int(input("How many people to split the bill? "))
tip_amount = bill * tip / 100
payment = (bill + tip_amount) / total_people
print(f"Each person should pay: ${payment:.2f}")
bill = float(input('What was the total bill? $'))
tip = int(input('What percentage tip would you like to give? 10, 12, or 15? '))
total_people = int(input('How many people to split the bill? '))
total_tip = bill * (tip / 100)
total_bill = bill + total_tip
payment = total_bill / total_people
print(f'Each person should pay: ${payment:.2f}') |
class LyricsNotFound(Exception):
    """Raised when no lyrics can be found for a song."""
    pass
class ElasticSearchConnectionError(Exception):
    """Raised when a connection to Elasticsearch fails."""
    pass
class InvalidRepository(Exception):
    """Raised when a repository reference is malformed or unknown."""
    pass
class ArtistNotFound(Exception):
    """Raised when an artist lookup returns no result."""
    pass
class ConfigError(Exception):
    """Raised when configuration is missing or invalid."""
    pass
| class Lyricsnotfound(Exception):
pass
class Elasticsearchconnectionerror(Exception):
pass
class Invalidrepository(Exception):
pass
class Artistnotfound(Exception):
pass
class Configerror(Exception):
pass |
# Echo each entered word together with its length; the word 'stop'
# terminates the loop (an equivalent finite form would be
# `while word != 'stop'`).
while True:
    word = input("Enter an word: ")
    if word == "stop":
        break
    print(word, len(word))
| while True:
word = input('Enter an word: ')
if word == 'stop':
break
print(word, len(word)) |
def math():
    """Read an integer n and print 'Ho' n times, space-separated, then '!'.

    NOTE(review): the name shadows the stdlib ``math`` module if it is
    ever imported in this file.
    """
    n = int(input())
    # join() produces the same "Ho Ho ... Ho!" line (just "!" for n == 0)
    # without tracking the last-iteration separator by hand.
    print(" ".join(["Ho"] * n) + "!")


if __name__ == '__main__':
    math()
| def math():
n = int(input())
for i in range(n):
if i == n - 1:
print('Ho', end='')
else:
print('Ho', end=' ')
print('!')
if __name__ == '__main__':
math() |
load(":actions.bzl", "nim_compile")
def _nim_binary_impl(ctx):
# Declare an output file for the main package and compile it from srcs.
executable = ctx.actions.declare_file(ctx.label.name)
nim_exe= ctx.executable._nim
nim_compile(
ctx,
nim_exe = nim_exe,
projectfile = ctx.file.projectfile,
srcs = ctx.files.srcs,
out = executable,
)
# Return the DefaultInfo provider. This tells Bazel what files should be
# built when someone asks to build a nim_binary rule. It also says which
# one is executable (in this case, there's only one).
return [
DefaultInfo(
files = depset([executable]),
executable = executable,
)
]
nim_binary = rule(
_nim_binary_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".nim"],
doc = "Source files to compile for this package",
),
"projectfile": attr.label(
allow_single_file = True,
doc = "Nim 'projectfile' to compile for this package",
mandatory = True,
),
"_nim": attr.label(
allow_single_file = True,
default = "@nim_prebuilt//:exe",
executable = True,
cfg = "exec",
)
},
doc = "Builds an executable program from Nim-lang source code",
executable = True,
) | load(':actions.bzl', 'nim_compile')
def _nim_binary_impl(ctx):
executable = ctx.actions.declare_file(ctx.label.name)
nim_exe = ctx.executable._nim
nim_compile(ctx, nim_exe=nim_exe, projectfile=ctx.file.projectfile, srcs=ctx.files.srcs, out=executable)
return [default_info(files=depset([executable]), executable=executable)]
nim_binary = rule(_nim_binary_impl, attrs={'srcs': attr.label_list(allow_files=['.nim'], doc='Source files to compile for this package'), 'projectfile': attr.label(allow_single_file=True, doc="Nim 'projectfile' to compile for this package", mandatory=True), '_nim': attr.label(allow_single_file=True, default='@nim_prebuilt//:exe', executable=True, cfg='exec')}, doc='Builds an executable program from Nim-lang source code', executable=True) |
#!/usr/bin/env python3
# Project Euler #6: difference between the square of the sum and the
# sum of the squares of the first 100 natural numbers.
sumsq = sum(i * i for i in range(1, 101))
sqsum = sum(range(1, 101)) ** 2
print(sqsum - sumsq)
| sumsq = sum([i ** 2 for i in range(1, 101)])
sqsum = sum(range(1, 101)) ** 2
print(sqsum - sumsq) |
class HostOperationSystemVersion(object):
    """Scalar accessor: resolves a host index name to its OS type string."""
    def read_get(self, name, idx_name, unity_client):
        # Delegates to the Unity client; idx_name identifies the host.
        return unity_client.get_host_os_type(idx_name)
class HostOperationSystemVersionColumn(object):
    """Companion column accessor: enumerates the available host indices."""
    def get_idx(self, name, idx, unity_client):
        return unity_client.get_hosts()
| class Hostoperationsystemversion(object):
def read_get(self, name, idx_name, unity_client):
return unity_client.get_host_os_type(idx_name)
class Hostoperationsystemversioncolumn(object):
def get_idx(self, name, idx, unity_client):
return unity_client.get_hosts() |
# Read k (initial list holds 1..k) and m (number of removal rounds).
k=int(input())
m=int(input())
list1=[int(i) for i in range(1,k+1)]
for i in range(m):
    # Each round collects the multiples of 'action' up to
    # len(list1)-action, then removes them from the list BY VALUE.
    action=int(input())
    list2=[]
    for e in range(0,len(list1)+1-action,action):
        if e==0:
            # range() starts at 0, which is not a valid list value; skip it.
            pass
        else:
            list2.append(e)
    for c in list2:
        # NOTE(review): list.remove() raises ValueError if a collected
        # multiple was already removed in an earlier round — presumably the
        # input guarantees this cannot happen; verify against the problem.
        list1.remove(c)
    list1.sort()
for i in list1:
print(i) | k = int(input())
m = int(input())
list1 = [int(i) for i in range(1, k + 1)]
for i in range(m):
action = int(input())
list2 = []
for e in range(0, len(list1) + 1 - action, action):
if e == 0:
pass
else:
list2.append(e)
for c in list2:
list1.remove(c)
list1.sort()
for i in list1:
print(i) |
"""Tests the routes for the Errors module."""
def test_404(app, client):
    """Check if the application can route to a 404."""
    with app.app_context():
        resp = client.get('/unknown_page')
        body = str(resp.data)
        assert resp.status_code == 404
        assert 'Page Not Found' in body
def test_500(app, client):
    """Check if the application can route to a 500."""
    with app.app_context():
        resp = client.get('/err500')
        body = str(resp.data)
        assert resp.status_code == 500
        assert 'An Error Has Occurred' in body
| """Tests the routes for the Errors module."""
def test_404(app, client):
"""Check if the application can route to a 404."""
with app.app_context():
response = client.get('/unknown_page')
assert response.status_code == 404
assert 'Page Not Found' in str(response.data)
def test_500(app, client):
"""Check if the application can route to a 500."""
with app.app_context():
response = client.get('/err500')
assert response.status_code == 500
assert 'An Error Has Occurred' in str(response.data) |
class GenericTreeNode:
    """Node of a first-child / next-sibling (left-child right-sibling) tree."""
    def __init__(self, data=None, firstChild=None, nextSibling=None):
        # data: node payload; firstChild: leftmost child;
        # nextSibling: next node on the same level.
        self.data = data
        self.firstChild = firstChild
        self.nextSibling = nextSibling
def initializeGenericTree() -> GenericTreeNode:
    """Build the sample tree: root 1 -> first child 2; 2's sibling is 3
    (which has child 4), and 3's sibling is 5."""
    leaf4 = GenericTreeNode(4)
    leaf5 = GenericTreeNode(5)
    node3 = GenericTreeNode(3, firstChild=leaf4, nextSibling=leaf5)
    node2 = GenericTreeNode(2, nextSibling=node3)
    return GenericTreeNode(1, firstChild=node2)
def initializeGenericTreeForSiblings() -> GenericTreeNode:
    """Build the sibling-demo tree: root 2 -> child 3; 3 has child 4 and
    sibling 5."""
    node3 = GenericTreeNode(
        3,
        firstChild=GenericTreeNode(4),
        nextSibling=GenericTreeNode(5),
    )
    return GenericTreeNode(2, firstChild=node3)
| class Generictreenode:
def __init__(self, data=None, firstChild=None, nextSibling=None):
self.data = data
self.firstChild = firstChild
self.nextSibling = nextSibling
def initialize_generic_tree() -> GenericTreeNode:
c = generic_tree_node(4)
e = generic_tree_node(5)
a = generic_tree_node(3, firstChild=c, nextSibling=e)
b = generic_tree_node(2, nextSibling=a)
d = generic_tree_node(1, firstChild=b)
return d
def initialize_generic_tree_for_siblings() -> GenericTreeNode:
c = generic_tree_node(4)
d = generic_tree_node(5)
a = generic_tree_node(3, firstChild=c, nextSibling=d)
b = generic_tree_node(2, firstChild=a)
return b |
'''
Descripttion:
version:
Author: HuSharp
Date: 2021-02-22 12:12:48
LastEditors: HuSharp
LastEditTime: 2021-02-23 23:16:08
@Email: 8211180515@csu.edu.cn
'''
class A:
    """Base of a small inheritance/MRO exercise (B and C subclass it)."""
    z = -1
    def f(self, x):
        # Constructs a B seeded with x-1 (B is defined below).
        return B(x-1)
class B(A):
    """Subclass whose constructor calls self.f(y) while y is truthy.

    self.f resolves through the MRO: for plain B instances it is A.f
    (building another B with y-1); C below overrides f.
    """
    n = 4
    def __init__(self, y):
        if y:
            self.z = self.f(y)
        else:
            self.z = C(y+1)
class C(B):
def f(self, x):
return x | """
Descripttion:
version:
Author: HuSharp
Date: 2021-02-22 12:12:48
LastEditors: HuSharp
LastEditTime: 2021-02-23 23:16:08
@Email: 8211180515@csu.edu.cn
"""
class A:
z = -1
def f(self, x):
return b(x - 1)
class B(A):
n = 4
def __init__(self, y):
if y:
self.z = self.f(y)
else:
self.z = c(y + 1)
class C(B):
def f(self, x):
return x |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.i18n
class LocaleItem(object):
"""
Const Class
These are not used with the API but with an OOo internal wrapper class that caches the contents of an instance of LocaleDataItem and uses these values to access it's members for faster access.
Whenever locale data items were added these values and the wrapper class would have to be adjusted to give the application an easier access.
.. deprecated::
Class is deprecated.
See Also:
`API LocaleItem <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1i18n_1_1LocaleItem.html>`_
"""
__ooo_ns__: str = 'com.sun.star.i18n'
__ooo_full_ns__: str = 'com.sun.star.i18n.LocaleItem'
__ooo_type_name__: str = 'const'
DATE_SEPARATOR = 0
THOUSAND_SEPARATOR = 1
DECIMAL_SEPARATOR = 2
TIME_SEPARATOR = 3
TIME_100SEC_SEPARATOR = 4
LIST_SEPARATOR = 5
SINGLE_QUOTATION_START = 6
SINGLE_QUOTATION_END = 7
DOUBLE_QUOTATION_START = 8
DOUBLE_QUOTATION_END = 9
MEASUREMENT_SYSTEM = 10
TIME_AM = 11
TIME_PM = 12
LONG_DATE_DAY_OF_WEEK_SEPARATOR = 13
LONG_DATE_DAY_SEPARATOR = 14
LONG_DATE_MONTH_SEPARATOR = 15
LONG_DATE_YEAR_SEPARATOR = 16
COUNT = 17
"""
count of items available
"""
DECIMAL_SEPARATOR_ALTERNATIVE = 17
COUNT2 = 18
"""
count of items available
"""
__all__ = ['LocaleItem']
| class Localeitem(object):
"""
Const Class
These are not used with the API but with an OOo internal wrapper class that caches the contents of an instance of LocaleDataItem and uses these values to access it's members for faster access.
Whenever locale data items were added these values and the wrapper class would have to be adjusted to give the application an easier access.
.. deprecated::
Class is deprecated.
See Also:
`API LocaleItem <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1i18n_1_1LocaleItem.html>`_
"""
__ooo_ns__: str = 'com.sun.star.i18n'
__ooo_full_ns__: str = 'com.sun.star.i18n.LocaleItem'
__ooo_type_name__: str = 'const'
date_separator = 0
thousand_separator = 1
decimal_separator = 2
time_separator = 3
time_100_sec_separator = 4
list_separator = 5
single_quotation_start = 6
single_quotation_end = 7
double_quotation_start = 8
double_quotation_end = 9
measurement_system = 10
time_am = 11
time_pm = 12
long_date_day_of_week_separator = 13
long_date_day_separator = 14
long_date_month_separator = 15
long_date_year_separator = 16
count = 17
'\n count of items available\n '
decimal_separator_alternative = 17
count2 = 18
'\n count of items available\n '
__all__ = ['LocaleItem'] |
def print_full_name(a, b):
    """Print the greeting 'Hello <a> <b>! You just delved into python.'
    for first name *a* and last name *b*."""
    # Punctuate the last name before printing.
    b=b+'!'
    print("Hello",a,b, "You just delved into python.")
| def print_full_name(a, b):
b = b + '!'
print('Hello', a, b, 'You just delved into python.') |
class Element:
    """Singly linked list node holding *value* and a *next* pointer."""
    def __init__(self, value):
        self.value = value
        self.next = None
class LinkedList:
    """Minimal singly linked list of Element-like nodes (value/next attrs)."""

    def __init__(self, head=None):
        self.head = head

    def append(self, new_element):
        """Link new_element at the tail (becomes head if the list is empty)."""
        if self.head is None:
            self.head = new_element
            return
        current = self.head
        while current.next:
            current = current.next
        current.next = new_element

    def get_position(self, position):
        """Return the node at 1-indexed *position*, or None when the list is
        shorter than *position*.

        Replaces the original bare ``except`` (which silently swallowed any
        error) with an explicit end-of-list check.
        """
        current = self.head
        for _ in range(1, position):
            if current is None:
                return None
            current = current.next
        return current

    def insert_first(self, new_element):
        """Push new_element onto the front of the list."""
        new_element.next = self.head
        self.head = new_element

    def delete_first(self):
        """Pop and return the head node, or None when the list is empty.

        Explicit empty check instead of the original bare ``except``.
        """
        popped = self.head
        if popped is not None:
            self.head = popped.next
        return popped
class Stack(LinkedList):
    """LIFO stack backed by a LinkedList.

    NOTE(review): this class both inherits from LinkedList and wraps a
    separate LinkedList in self.linkedlist; the inherited methods operate
    on an unset self.head and are effectively dead — consider dropping the
    inheritance and keeping only the composition.
    """
    def __init__(self, top=None):
        self.linkedlist = LinkedList(top)
    def push(self, new_element):
        """Place new_element on top of the stack."""
        self.linkedlist.insert_first(new_element)
    def pop(self):
        """Remove and return the top node, or None when empty."""
        return self.linkedlist.delete_first()
| class Element:
def __init__(self, value):
self.value = value
self.next = None
class Linkedlist:
def __init__(self, head=None):
self.head = head
def append(self, new_element):
current = self.head
if self.head:
while current.next:
current = current.next
current.next = new_element
else:
self.head = new_element
def get_position(self, position):
current_element = self.head
try:
for _ in range(1, position):
current_element = current_element.next
return current_element
except:
return None
def insert_first(self, new_element):
current = self.head
self.head = new_element
new_element.next = current
def delete_first(self):
try:
poped_item = self.head
self.head = self.head.next
return poped_item
except:
return None
class Stack(LinkedList):
def __init__(self, top=None):
self.linkedlist = linked_list(top)
def push(self, new_element):
self.linkedlist.insert_first(new_element)
def pop(self):
return self.linkedlist.delete_first() |
"""
Vol I Lab __: GMRES
Name:
Date:
"""
#Problem 1: Implement the following function
def gmres(A, b, x0, k=100, tol=1e-8):
'''Calculate approximate solution of Ax=b using GMRES algorithm.
INPUTS:
A - Callable function that calculates Ax for any input vector x.
b - A NumPy array of length m.
x0 - An arbitrary initial guess.
k - Maximum number of iterations of the GMRES algorithm. Defaults to 100.
tol - Stop iterating if the residual is less than 'tol'. Defaults to 1e-8.
RETURN:
Return (y, res) where 'y' is an approximate solution to Ax=b and 'res'
is the residual.
'''
raise NotImplementedError("Problem 1 incomplete.")
#Problem 2: Implement the following two functions
def plot_gmres(A, b, x0, tol=1e-8):
'''Use the GMRES algorithm to approximate the solution to Ax=b. Plot the
eigenvalues of A and the convergence of the algorithm.
INPUTS:
A - A 2-D NumPy array of shape mxm.
b - A 1-D NumPy array of length m.
x0 - An arbitrary initial guess.
tol - Stop iterating and create the desired plots when the residual is
less than 'tol'. Defaults to 1e-8.
OUTPUT:
Follow the GMRES algorithm until the residual is less than tol, for a
maximum of m iterations. Then create the two following plots (subplots
of a single figure):
1. Plot the eigenvalues of A in the complex plane.
2. Plot the convergence of the GMRES algorithm by plotting the
iteration number on the x-axis and the residual on the y-axis.
Use a log scale on the y-axis.
'''
raise NotImplementedError("Problem 2 incomplete.")
def make_plots(m=200):
'''Create the matrix An defined in problem 2 in the manual
for n = -4, -2, -0, 2, 4. Call plot_gmres on each, with b
a vector of ones, and an initial guess x0 a vector of zeros.
Print a statement explaining how the convergence relates to
the eigenvalues.
'''
raise NotImplementedError("make_plots not yet implemented.")
#Problem 3: Implement the following two functions
def gmres_k(Amul, b, x0, k=5, tol=1E-8, restarts=50):
'''Use the GMRES(k) algorithm to approximate the solution to Ax=b.
INPUTS:
A - A Callable function that calculates Ax for any vector x.
b - A NumPy array.
x0 - An arbitrary initial guess.
k - Maximum number of iterations of the GMRES algorithm before
restarting. Defaults to 100.
tol - Stop iterating if the residual is less than 'tol'. Defaults
to 1E-8.
restarts - Maximum number of restarts. Defaults to 50.
OUTPUT:
Return (y, res) where 'y' is an approximate solution to Ax=b and 'res'
is the residual.
'''
raise NotImplementedError("Problem 3 incomplete.")
def time_gmres(m=200):
'''Time the gmres and gmres_k functions on each of the matrices
from problem 2. Let x0 be a vector of zeros or anything you like.
The results might be more dramatic with an x0 of larger magnitude.
Print your results. What do you observe?
'''
raise NotImplementedError("time_gmres not yet implemented.")
| """
Vol I Lab __: GMRES
Name:
Date:
"""
def gmres(A, b, x0, k=100, tol=1e-08):
"""Calculate approximate solution of Ax=b using GMRES algorithm.
INPUTS:
A - Callable function that calculates Ax for any input vector x.
b - A NumPy array of length m.
x0 - An arbitrary initial guess.
k - Maximum number of iterations of the GMRES algorithm. Defaults to 100.
tol - Stop iterating if the residual is less than 'tol'. Defaults to 1e-8.
RETURN:
Return (y, res) where 'y' is an approximate solution to Ax=b and 'res'
is the residual.
"""
raise not_implemented_error('Problem 1 incomplete.')
def plot_gmres(A, b, x0, tol=1e-08):
"""Use the GMRES algorithm to approximate the solution to Ax=b. Plot the
eigenvalues of A and the convergence of the algorithm.
INPUTS:
A - A 2-D NumPy array of shape mxm.
b - A 1-D NumPy array of length m.
x0 - An arbitrary initial guess.
tol - Stop iterating and create the desired plots when the residual is
less than 'tol'. Defaults to 1e-8.
OUTPUT:
Follow the GMRES algorithm until the residual is less than tol, for a
maximum of m iterations. Then create the two following plots (subplots
of a single figure):
1. Plot the eigenvalues of A in the complex plane.
2. Plot the convergence of the GMRES algorithm by plotting the
iteration number on the x-axis and the residual on the y-axis.
Use a log scale on the y-axis.
"""
raise not_implemented_error('Problem 2 incomplete.')
def make_plots(m=200):
"""Create the matrix An defined in problem 2 in the manual
for n = -4, -2, -0, 2, 4. Call plot_gmres on each, with b
a vector of ones, and an initial guess x0 a vector of zeros.
Print a statement explaining how the convergence relates to
the eigenvalues.
"""
raise not_implemented_error('make_plots not yet implemented.')
def gmres_k(Amul, b, x0, k=5, tol=1e-08, restarts=50):
"""Use the GMRES(k) algorithm to approximate the solution to Ax=b.
INPUTS:
A - A Callable function that calculates Ax for any vector x.
b - A NumPy array.
x0 - An arbitrary initial guess.
k - Maximum number of iterations of the GMRES algorithm before
restarting. Defaults to 100.
tol - Stop iterating if the residual is less than 'tol'. Defaults
to 1E-8.
restarts - Maximum number of restarts. Defaults to 50.
OUTPUT:
Return (y, res) where 'y' is an approximate solution to Ax=b and 'res'
is the residual.
"""
raise not_implemented_error('Problem 3 incomplete.')
def time_gmres(m=200):
"""Time the gmres and gmres_k functions on each of the matrices
from problem 2. Let x0 be a vector of zeros or anything you like.
The results might be more dramatic with an x0 of larger magnitude.
Print your results. What do you observe?
"""
raise not_implemented_error('time_gmres not yet implemented.') |
"""
This class Environment sets up the universe for the robot.
"""
class Environment:
    """Environment class: holds the robot's view of detected objects."""
    def __init__(self):
        # Most recently detected object(s); populated elsewhere.
        # NOTE(review): the original annotation `[CoherentItem]` is a list
        # literal — for attribute targets that expression is evaluated at
        # runtime, so it would NameError unless CoherentItem is imported.
        # The quoted annotation states the intent without evaluation;
        # confirm CoherentItem's import at file top.
        self._detected_obj: "list[CoherentItem] | None" = None
| """
This class Environment sets up the universe for the robot.
"""
class Environment:
"""Environment class
"""
def __init__(self):
self._detected_obj: [CoherentItem] = None |
def c12_config():
return {
"data_table": "default",
"diag_table": "default",
"experiment_name": "default",
"forcing": "gs://vcm-fv3config/data/base_forcing/v1.1/",
"orographic_forcing": "gs://vcm-fv3config/data/orographic_data/v1.0",
"initial_conditions": "gs://vcm-fv3config/data/initial_conditions/gfs_c12_example/v1.0",
"namelist": {
"amip_interp_nml": {
"data_set": "reynolds_oi",
"date_out_of_range": "climo",
"interp_oi_sst": True,
"no_anom_sst": False,
"use_ncep_ice": False,
"use_ncep_sst": True,
},
"atmos_model_nml": {
"blocksize": 24,
"chksum_debug": False,
"dycore_only": False,
"fdiag": 0.0,
"fhmax": 1024.0,
"fhmaxhf": -1.0,
"fhout": 0.25,
"fhouthf": 0.0,
},
"cires_ugwp_nml": {
"knob_ugwp_azdir": [2, 4, 4, 4],
"knob_ugwp_doaxyz": 1,
"knob_ugwp_doheat": 1,
"knob_ugwp_dokdis": 0,
"knob_ugwp_effac": [1, 1, 1, 1],
"knob_ugwp_ndx4lh": 4,
"knob_ugwp_solver": 2,
"knob_ugwp_source": [1, 1, 1, 0],
"knob_ugwp_stoch": [0, 0, 0, 0],
"knob_ugwp_version": 0,
"knob_ugwp_wvspec": [1, 32, 32, 32],
"launch_level": 55,
},
"coupler_nml": {
"atmos_nthreads": 1,
"calendar": "julian",
"current_date": [2016, 8, 1, 0, 0, 0],
"days": 0,
"dt_atmos": 900,
"dt_ocean": 900,
"hours": 0,
"memuse_verbose": True,
"minutes": 30,
"months": 0,
"ncores_per_node": 32,
"seconds": 0,
"use_hyper_thread": True,
},
"diag_manager_nml": {"prepend_date": False},
"external_ic_nml": {
"checker_tr": False,
"filtered_terrain": True,
"gfs_dwinds": True,
"levp": 64,
"nt_checker": 0,
},
"fms_io_nml": {
"checksum_required": False,
"max_files_r": 100,
"max_files_w": 100,
},
"fms_nml": {
"clock_grain": "ROUTINE",
"domains_stack_size": 3000000,
"print_memory_usage": False,
},
"fv_core_nml": {
"a_imp": 1.0,
"adjust_dry_mass": False,
"beta": 0.0,
"consv_am": False,
"consv_te": 1.0,
"d2_bg": 0.0,
"d2_bg_k1": 0.16,
"d2_bg_k2": 0.02,
"d4_bg": 0.15,
"d_con": 1.0,
"d_ext": 0.0,
"dddmp": 0.2,
"delt_max": 0.002,
"dnats": 1,
"do_sat_adj": True,
"do_vort_damp": True,
"dwind_2d": False,
"external_ic": True,
"fill": True,
"fv_debug": False,
"fv_sg_adj": 900,
"gfs_phil": False,
"hord_dp": 6,
"hord_mt": 6,
"hord_tm": 6,
"hord_tr": 8,
"hord_vt": 6,
"hydrostatic": False,
"io_layout": [1, 1],
"k_split": 1,
"ke_bg": 0.0,
"kord_mt": 10,
"kord_tm": -10,
"kord_tr": 10,
"kord_wz": 10,
"layout": [1, 1],
"make_nh": True,
"mountain": False,
"n_split": 6,
"n_sponge": 4,
"na_init": 1,
"ncep_ic": False,
"nggps_ic": True,
"no_dycore": False,
"nord": 2,
"npx": 13,
"npy": 13,
"npz": 63,
"ntiles": 6,
"nudge": False,
"nudge_qv": True,
"nwat": 6,
"p_fac": 0.1,
"phys_hydrostatic": False,
"print_freq": 3,
"range_warn": True,
"reset_eta": False,
"rf_cutoff": 800.0,
"rf_fast": False,
"tau": 5.0,
"use_hydro_pressure": False,
"vtdm4": 0.06,
"warm_start": False,
"z_tracer": True,
},
"fv_grid_nml": {},
"gfdl_cloud_microphysics_nml": {
"c_cracw": 0.8,
"c_paut": 0.5,
"c_pgacs": 0.01,
"c_psaci": 0.05,
"ccn_l": 300.0,
"ccn_o": 100.0,
"const_vg": False,
"const_vi": False,
"const_vr": False,
"const_vs": False,
"de_ice": False,
"do_qa": True,
"do_sedi_heat": False,
"dw_land": 0.16,
"dw_ocean": 0.1,
"fast_sat_adj": True,
"fix_negative": True,
"icloud_f": 1,
"mono_prof": True,
"mp_time": 450.0,
"prog_ccn": False,
"qi0_crt": 8e-05,
"qi_lim": 1.0,
"ql_gen": 0.001,
"ql_mlt": 0.001,
"qs0_crt": 0.001,
"rad_graupel": True,
"rad_rain": True,
"rad_snow": True,
"rh_inc": 0.3,
"rh_inr": 0.3,
"rh_ins": 0.3,
"rthresh": 1e-05,
"sedi_transport": False,
"tau_g2v": 900.0,
"tau_i2s": 1000.0,
"tau_l2v": [225.0],
"tau_v2l": 150.0,
"use_ccn": True,
"use_ppm": False,
"vg_max": 12.0,
"vi_max": 1.0,
"vr_max": 12.0,
"vs_max": 2.0,
"z_slope_ice": True,
"z_slope_liq": True,
},
"gfs_physics_nml": {
"cal_pre": False,
"cdmbgwd": [3.5, 0.25],
"cnvcld": False,
"cnvgwd": True,
"debug": False,
"dspheat": True,
"fhcyc": 24.0,
"fhlwr": 3600.0,
"fhswr": 3600.0,
"fhzero": 0.25,
"hybedmf": True,
"iaer": 111,
"ialb": 1,
"ico2": 2,
"iems": 1,
"imfdeepcnv": 2,
"imfshalcnv": 2,
"imp_physics": 11,
"isol": 2,
"isot": 1,
"isubc_lw": 2,
"isubc_sw": 2,
"ivegsrc": 1,
"ldiag3d": False,
"lwhtr": True,
"ncld": 5,
"nst_anl": True,
"pdfcld": False,
"pre_rad": False,
"prslrd0": 0.0,
"random_clds": False,
"redrag": True,
"shal_cnv": True,
"swhtr": True,
"trans_trac": True,
"use_ufo": True,
},
"interpolator_nml": {"interp_method": "conserve_great_circle"},
"nam_stochy": {"lat_s": 96, "lon_s": 192, "ntrunc": 94},
"namsfc": {
"fabsl": 99999,
"faisl": 99999,
"faiss": 99999,
"fnabsc": "grb/global_mxsnoalb.uariz.t1534.3072.1536.rg.grb",
"fnacna": "",
"fnaisc": "grb/CFSR.SEAICE.1982.2012.monthly.clim.grb",
"fnalbc": "grb/global_snowfree_albedo.bosu.t1534.3072.1536.rg.grb",
"fnalbc2": "grb/global_albedo4.1x1.grb",
"fnglac": "grb/global_glacier.2x2.grb",
"fnmskh": "grb/seaice_newland.grb",
"fnmxic": "grb/global_maxice.2x2.grb",
"fnslpc": "grb/global_slope.1x1.grb",
"fnsmcc": "grb/global_soilmgldas.t1534.3072.1536.grb",
"fnsnoa": "",
"fnsnoc": "grb/global_snoclim.1.875.grb",
"fnsotc": "grb/global_soiltype.statsgo.t1534.3072.1536.rg.grb",
"fntg3c": "grb/global_tg3clim.2.6x1.5.grb",
"fntsfa": "",
"fntsfc": "grb/RTGSST.1982.2012.monthly.clim.grb",
"fnvegc": "grb/global_vegfrac.0.144.decpercent.grb",
"fnvetc": "grb/global_vegtype.igbp.t1534.3072.1536.rg.grb",
"fnvmnc": "grb/global_shdmin.0.144x0.144.grb",
"fnvmxc": "grb/global_shdmax.0.144x0.144.grb",
"fnzorc": "igbp",
"fsicl": 99999,
"fsics": 99999,
"fslpl": 99999,
"fsmcl": [99999, 99999, 99999],
"fsnol": 99999,
"fsnos": 99999,
"fsotl": 99999,
"ftsfl": 99999,
"ftsfs": 90,
"fvetl": 99999,
"fvmnl": 99999,
"fvmxl": 99999,
"ldebug": False,
},
},
}
| def c12_config():
return {'data_table': 'default', 'diag_table': 'default', 'experiment_name': 'default', 'forcing': 'gs://vcm-fv3config/data/base_forcing/v1.1/', 'orographic_forcing': 'gs://vcm-fv3config/data/orographic_data/v1.0', 'initial_conditions': 'gs://vcm-fv3config/data/initial_conditions/gfs_c12_example/v1.0', 'namelist': {'amip_interp_nml': {'data_set': 'reynolds_oi', 'date_out_of_range': 'climo', 'interp_oi_sst': True, 'no_anom_sst': False, 'use_ncep_ice': False, 'use_ncep_sst': True}, 'atmos_model_nml': {'blocksize': 24, 'chksum_debug': False, 'dycore_only': False, 'fdiag': 0.0, 'fhmax': 1024.0, 'fhmaxhf': -1.0, 'fhout': 0.25, 'fhouthf': 0.0}, 'cires_ugwp_nml': {'knob_ugwp_azdir': [2, 4, 4, 4], 'knob_ugwp_doaxyz': 1, 'knob_ugwp_doheat': 1, 'knob_ugwp_dokdis': 0, 'knob_ugwp_effac': [1, 1, 1, 1], 'knob_ugwp_ndx4lh': 4, 'knob_ugwp_solver': 2, 'knob_ugwp_source': [1, 1, 1, 0], 'knob_ugwp_stoch': [0, 0, 0, 0], 'knob_ugwp_version': 0, 'knob_ugwp_wvspec': [1, 32, 32, 32], 'launch_level': 55}, 'coupler_nml': {'atmos_nthreads': 1, 'calendar': 'julian', 'current_date': [2016, 8, 1, 0, 0, 0], 'days': 0, 'dt_atmos': 900, 'dt_ocean': 900, 'hours': 0, 'memuse_verbose': True, 'minutes': 30, 'months': 0, 'ncores_per_node': 32, 'seconds': 0, 'use_hyper_thread': True}, 'diag_manager_nml': {'prepend_date': False}, 'external_ic_nml': {'checker_tr': False, 'filtered_terrain': True, 'gfs_dwinds': True, 'levp': 64, 'nt_checker': 0}, 'fms_io_nml': {'checksum_required': False, 'max_files_r': 100, 'max_files_w': 100}, 'fms_nml': {'clock_grain': 'ROUTINE', 'domains_stack_size': 3000000, 'print_memory_usage': False}, 'fv_core_nml': {'a_imp': 1.0, 'adjust_dry_mass': False, 'beta': 0.0, 'consv_am': False, 'consv_te': 1.0, 'd2_bg': 0.0, 'd2_bg_k1': 0.16, 'd2_bg_k2': 0.02, 'd4_bg': 0.15, 'd_con': 1.0, 'd_ext': 0.0, 'dddmp': 0.2, 'delt_max': 0.002, 'dnats': 1, 'do_sat_adj': True, 'do_vort_damp': True, 'dwind_2d': False, 'external_ic': True, 'fill': True, 'fv_debug': False, 'fv_sg_adj': 900, 'gfs_phil': 
False, 'hord_dp': 6, 'hord_mt': 6, 'hord_tm': 6, 'hord_tr': 8, 'hord_vt': 6, 'hydrostatic': False, 'io_layout': [1, 1], 'k_split': 1, 'ke_bg': 0.0, 'kord_mt': 10, 'kord_tm': -10, 'kord_tr': 10, 'kord_wz': 10, 'layout': [1, 1], 'make_nh': True, 'mountain': False, 'n_split': 6, 'n_sponge': 4, 'na_init': 1, 'ncep_ic': False, 'nggps_ic': True, 'no_dycore': False, 'nord': 2, 'npx': 13, 'npy': 13, 'npz': 63, 'ntiles': 6, 'nudge': False, 'nudge_qv': True, 'nwat': 6, 'p_fac': 0.1, 'phys_hydrostatic': False, 'print_freq': 3, 'range_warn': True, 'reset_eta': False, 'rf_cutoff': 800.0, 'rf_fast': False, 'tau': 5.0, 'use_hydro_pressure': False, 'vtdm4': 0.06, 'warm_start': False, 'z_tracer': True}, 'fv_grid_nml': {}, 'gfdl_cloud_microphysics_nml': {'c_cracw': 0.8, 'c_paut': 0.5, 'c_pgacs': 0.01, 'c_psaci': 0.05, 'ccn_l': 300.0, 'ccn_o': 100.0, 'const_vg': False, 'const_vi': False, 'const_vr': False, 'const_vs': False, 'de_ice': False, 'do_qa': True, 'do_sedi_heat': False, 'dw_land': 0.16, 'dw_ocean': 0.1, 'fast_sat_adj': True, 'fix_negative': True, 'icloud_f': 1, 'mono_prof': True, 'mp_time': 450.0, 'prog_ccn': False, 'qi0_crt': 8e-05, 'qi_lim': 1.0, 'ql_gen': 0.001, 'ql_mlt': 0.001, 'qs0_crt': 0.001, 'rad_graupel': True, 'rad_rain': True, 'rad_snow': True, 'rh_inc': 0.3, 'rh_inr': 0.3, 'rh_ins': 0.3, 'rthresh': 1e-05, 'sedi_transport': False, 'tau_g2v': 900.0, 'tau_i2s': 1000.0, 'tau_l2v': [225.0], 'tau_v2l': 150.0, 'use_ccn': True, 'use_ppm': False, 'vg_max': 12.0, 'vi_max': 1.0, 'vr_max': 12.0, 'vs_max': 2.0, 'z_slope_ice': True, 'z_slope_liq': True}, 'gfs_physics_nml': {'cal_pre': False, 'cdmbgwd': [3.5, 0.25], 'cnvcld': False, 'cnvgwd': True, 'debug': False, 'dspheat': True, 'fhcyc': 24.0, 'fhlwr': 3600.0, 'fhswr': 3600.0, 'fhzero': 0.25, 'hybedmf': True, 'iaer': 111, 'ialb': 1, 'ico2': 2, 'iems': 1, 'imfdeepcnv': 2, 'imfshalcnv': 2, 'imp_physics': 11, 'isol': 2, 'isot': 1, 'isubc_lw': 2, 'isubc_sw': 2, 'ivegsrc': 1, 'ldiag3d': False, 'lwhtr': True, 'ncld': 5, 'nst_anl': 
True, 'pdfcld': False, 'pre_rad': False, 'prslrd0': 0.0, 'random_clds': False, 'redrag': True, 'shal_cnv': True, 'swhtr': True, 'trans_trac': True, 'use_ufo': True}, 'interpolator_nml': {'interp_method': 'conserve_great_circle'}, 'nam_stochy': {'lat_s': 96, 'lon_s': 192, 'ntrunc': 94}, 'namsfc': {'fabsl': 99999, 'faisl': 99999, 'faiss': 99999, 'fnabsc': 'grb/global_mxsnoalb.uariz.t1534.3072.1536.rg.grb', 'fnacna': '', 'fnaisc': 'grb/CFSR.SEAICE.1982.2012.monthly.clim.grb', 'fnalbc': 'grb/global_snowfree_albedo.bosu.t1534.3072.1536.rg.grb', 'fnalbc2': 'grb/global_albedo4.1x1.grb', 'fnglac': 'grb/global_glacier.2x2.grb', 'fnmskh': 'grb/seaice_newland.grb', 'fnmxic': 'grb/global_maxice.2x2.grb', 'fnslpc': 'grb/global_slope.1x1.grb', 'fnsmcc': 'grb/global_soilmgldas.t1534.3072.1536.grb', 'fnsnoa': '', 'fnsnoc': 'grb/global_snoclim.1.875.grb', 'fnsotc': 'grb/global_soiltype.statsgo.t1534.3072.1536.rg.grb', 'fntg3c': 'grb/global_tg3clim.2.6x1.5.grb', 'fntsfa': '', 'fntsfc': 'grb/RTGSST.1982.2012.monthly.clim.grb', 'fnvegc': 'grb/global_vegfrac.0.144.decpercent.grb', 'fnvetc': 'grb/global_vegtype.igbp.t1534.3072.1536.rg.grb', 'fnvmnc': 'grb/global_shdmin.0.144x0.144.grb', 'fnvmxc': 'grb/global_shdmax.0.144x0.144.grb', 'fnzorc': 'igbp', 'fsicl': 99999, 'fsics': 99999, 'fslpl': 99999, 'fsmcl': [99999, 99999, 99999], 'fsnol': 99999, 'fsnos': 99999, 'fsotl': 99999, 'ftsfl': 99999, 'ftsfs': 90, 'fvetl': 99999, 'fvmnl': 99999, 'fvmxl': 99999, 'ldebug': False}}} |
class ValidatorVector:
    """Data descriptor validating that an attribute is a list, optionally of
    the fixed length given by the owner instance's ``n`` attribute.

    Bug fixed: the original stored the value on the descriptor itself
    (``self.value``), so every instance of the owning class shared a single
    vector. The value is now kept per-instance via ``__set_name__``.

    Raises:
        TypeError: if the assigned value is not a list, or its length
            differs from ``obj.n`` when ``obj.n`` is not None.
    """

    def __set_name__(self, owner, name):
        # Private per-instance slot name, derived from the attribute name.
        self._name = '_' + name

    def __get__(self, obj, objtype=None):
        if obj is None:
            # Class-level access returns the descriptor itself.
            return self
        return getattr(obj, self._name, None)

    def __set__(self, obj, value):
        if value is not None:
            if not isinstance(value, list):
                raise TypeError(f'Expected {value!r} to be an list')
            if obj.n is not None and len(value) != obj.n:
                raise TypeError("Vector should be of length " + str(obj.n))
        setattr(obj, self._name, value)
| class Validatorvector:
value = None
def __get__(self, obj, objtype=None):
return self.value
def __set__(self, obj, value):
if value is not None:
if not isinstance(value, list):
raise type_error(f'Expected {value!r} to be an list')
if obj.n is not None:
if len(value) != obj.n:
raise type_error('Vector should be of length ' + str(obj.n))
self.value = value |
#
# Collective Knowledge (dealing with table)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
##############################################################################
# Initialize module
def init(i):
    """Module initialization hook invoked by the CK kernel.

    Input:  {}

    Output: {
              return - return code = 0, if successful
                                   > 0, if error
              (error) - error text if return > 0
            }
    """
    return {'return': 0}
##############################################################################
# draw table
def draw(i):
    """Render a two-dimensional table as plain text or HTML.

    Input:  {
              table - table to draw [[],[],[]...], [[],[],[]...] ...]
              (out) - txt (default) or html
            }

    Output: {
              return - return code = 0, if successful
                                   > 0, if error
              (error) - error text if return > 0
              string - output
            }
    """
    o=i.get('out','')
    table=i.get('table',[])
    s=''
    if len(table)>0:
        # Column count is taken from the first row; all rows are assumed
        # to be at least that wide.
        lx=len(table[0])
        lwidth=[]
        for l in range(0, lx):
            lwidth.append(-1)
        if o=='txt':
            # First pass: find the widest cell of every column.
            for t in table:
                for l in range(0, lx):
                    sx=str(t[l])
                    lw=lwidth[l]
                    if lw==-1 or len(sx)>lw:
                        lwidth[l]=len(sx)
            # Second pass: left-justify each cell with two-space padding.
            for t in table:
                for l in range(0, lx):
                    sx=str(t[l])
                    lw=lwidth[l]
                    s+=sx.ljust(lw+2)
                s+='\n'
        else:
            s='<html>\n'
            s+=' <body>\n'
            s+=' <table border="1">\n'
            for t in table:
                s+=' <tr>\n'
                for l in range(0, lx):
                    sx=str(t[l])
                    s+=' <td>'+sx+'</td>\n'
                s+=' </tr>\n'
            s+=' </table>\n'
            s+=' </body>\n'
            # Bug fix: the document was closed with a second opening
            # '<html>' tag, producing malformed HTML.
            s+='</html>\n'
    return {'return':0, 'string':s}
##############################################################################
# prepare table (in HTML and LaTex)
def prepare(i):
    """Prepare a table in both HTML and LaTeX form.

    Input: {
              table
              table_header
              (table_custom)
              (table_style)
              (header_style)
              (header_element_style)
              (element_style)
              (row_style)
              (html_before_table)
              (html_after_table)
              (tex_before_table)
              (tex_after_table)
              (record_html) - file (with path) to record produced HTML
              (record_tex) - file (with path) to record produced TEX
            }
    Output: {
              return - return code = 0, if successful
                                   > 0, if error
              (error) - error text if return > 0
              html - prepared HTML
              tex - prepared LaTeX
            }
    """
    table=i['table']
    table_header=i['table_header']
    # NOTE(review): table_custom is indexed positionally (table_custom[ix]
    # guarded by ix < len(...)), so a list is expected despite the {}
    # default — confirm callers never pass a dict keyed by row index.
    table_custom=i.get('table_custom',{})
    h=i.get('html_before_table','') # HTML accumulator
    t=i.get('tex_before_table','') # LaTeX accumulator
    ts=i.get('table_style','')
    hs=i.get('header_style','')
    rs=i.get('row_style','')
    hes=i.get('header_element_style','')
    es=i.get('element_style','')
    # Preparing Header
    h+='<table '+ts+'>\n'
    t+=' \\begin{tabular}{|'
    for x in table_header:
        t+=x.get('tex','')+'|'
    t+='}\n'
    h+=' <tr '+hs+'>\n'
    t+=' \\hline\n'
    t+=' '
    first=True
    for x in table_header:
        n=x.get('name','')
        if first:
            first=False
        else:
            t+=' & '
        t+='\\textbf{'+n+'}'
        if x.get('html_change_space','')=='yes':
            # NOTE(review): replace(' ', ' ') is a no-op as written; the
            # original most likely substituted '&nbsp;' and was mangled by
            # HTML rendering — confirm against upstream.
            n=n.replace(' ',' ')
        h+=' <td '+hes+'>\n'
        h+=' <b>'+n+'</b>\n'
        h+=' </td>\n'
    h+=' </tr>\n'
    t+=' \\\\ \n'
    # Preparing table (one <tr>/LaTeX row per input row)
    for ix in range(0, len(table)):
        x=table[ix]
        cx={}
        if ix<len(table_custom):
            cx=table_custom[ix]
        rs1=cx.get('row_style','')
        h+=' <tr '+rs+' '+rs1+'>\n'
        t+=' \\hline\n'
        t+=' '
        first=True
        for iy in range(0, len(x)):
            st={}
            if iy<len(table_header):
                st=table_header[iy]
            # Per-cell HTML override, falling back to the raw cell value.
            y=cx.get('field_'+str(iy)+'_html','')
            if y=='':
                y=str(x[iy])
            if st.get('html_change_space','')=='yes':
                # NOTE(review): same suspected '&nbsp;' mangling as above.
                y=y.replace(' ',' ')
                y=y.replace('\\newline',' ')
            h+=' <td '+es+'>\n'
            if st.get('html_before','')!='':
                h+=' '+st['html_before']
            h+=' '+str(y)+'\n'
            if st.get('html_after','')!='':
                h+=' '+st['html_after']
            h+=' </td>\n'
            # Per-cell LaTeX override, falling back to the raw cell value.
            z=cx.get('field_'+str(iy)+'_tex','')
            if z=='':
                z=str(x[iy])
            if first:
                first=False
            else:
                t+=' & '
            if st.get('tex_before','')!='':
                t+=st['tex_before']
            t+=' '+str(z)+' '
            if st.get('tex_after','')!='':
                t+=st['tex_after']
        t+='\\\\\n'
        h+=' </tr>\n'
    t+=" \\hline\n"
    # Finalizing
    h+='</table>\n'
    h+=i.get('html_after_table','')
    t+=' \\end{tabular}'
    t+=' '+i.get('tex_after_table','')
    # Check if record
    # NOTE(review): the CK save_text_file return code `r` is never checked,
    # so write failures pass silently — confirm whether that is acceptable.
    if i.get('record_html','')!='':
        r=ck.save_text_file({'text_file':i['record_html'], 'string':h})
    if i.get('record_tex','')!='':
        r=ck.save_text_file({'text_file':i['record_tex'], 'string':t})
    return {'return':0, 'html':h, 'tex':t}
| cfg = {}
work = {}  # Will be updated by CK (temporal data)
ck = None  # Will be updated by CK (initialized CK kernel)
def init(i):
    """Module initialization hook invoked by the CK kernel.

    Input: {}
    Output: {
              return - return code = 0, if successful
                                   > 0, if error
              (error) - error text if return > 0
            }
    """
    return {'return': 0}
def draw(i):
    """Render a two-dimensional table as plain text or HTML.

    Input: {
              table - table to draw [[],[],[]...], [[],[],[]...] ...]
              (out) - txt (default) or html
            }
    Output: {
              return - return code = 0, if successful
                                   > 0, if error
              (error) - error text if return > 0
              string - output
            }
    """
    o = i.get('out', '')
    table = i.get('table', [])
    s = ''
    if len(table) > 0:
        # Column count is taken from the first row.
        lx = len(table[0])
        lwidth = []
        for l in range(0, lx):
            lwidth.append(-1)
        if o == 'txt':
            # First pass: measure the widest cell of every column.
            for t in table:
                for l in range(0, lx):
                    sx = str(t[l])
                    lw = lwidth[l]
                    if lw == -1 or len(sx) > lw:
                        lwidth[l] = len(sx)
            # Second pass: left-justify each cell with two-space padding.
            for t in table:
                for l in range(0, lx):
                    sx = str(t[l])
                    lw = lwidth[l]
                    s += sx.ljust(lw + 2)
                s += '\n'
        else:
            s = '<html>\n'
            s += ' <body>\n'
            s += ' <table border="1">\n'
            for t in table:
                s += ' <tr>\n'
                for l in range(0, lx):
                    sx = str(t[l])
                    s += ' <td>' + sx + '</td>\n'
                s += ' </tr>\n'
            s += ' </table>\n'
            s += ' </body>\n'
            # Bug fix: the closing tag was a second opening '<html>' tag.
            s += '</html>\n'
    return {'return': 0, 'string': s}
def prepare(i):
    """Prepare a table in both HTML and LaTeX form.

    Input: {
              table
              table_header
              (table_custom)
              (table_style)
              (header_style)
              (header_element_style)
              (element_style)
              (row_style)
              (html_before_table)
              (html_after_table)
              (tex_before_table)
              (tex_after_table)
              (record_html) - file (with path) to record produced HTML
              (record_tex) - file (with path) to record produced TEX
            }
    Output: {
              return - return code = 0, if successful
                                   > 0, if error
              (error) - error text if return > 0
              html - prepared HTML
              tex - prepared LaTeX
            }
    """
    table = i['table']
    table_header = i['table_header']
    # NOTE(review): table_custom is indexed positionally below, so a list
    # is expected despite the {} default — confirm caller expectations.
    table_custom = i.get('table_custom', {})
    h = i.get('html_before_table', '')  # HTML accumulator
    t = i.get('tex_before_table', '')   # LaTeX accumulator
    ts = i.get('table_style', '')
    hs = i.get('header_style', '')
    rs = i.get('row_style', '')
    hes = i.get('header_element_style', '')
    es = i.get('element_style', '')
    # Header row
    h += '<table ' + ts + '>\n'
    t += ' \\begin{tabular}{|'
    for x in table_header:
        t += x.get('tex', '') + '|'
    t += '}\n'
    h += ' <tr ' + hs + '>\n'
    t += ' \\hline\n'
    t += ' '
    first = True
    for x in table_header:
        n = x.get('name', '')
        if first:
            first = False
        else:
            t += ' & '
        t += '\\textbf{' + n + '}'
        if x.get('html_change_space', '') == 'yes':
            # NOTE(review): replace(' ', ' ') is a no-op as written; the
            # original most likely substituted '&nbsp;' and was mangled —
            # confirm against upstream.
            n = n.replace(' ', ' ')
        h += ' <td ' + hes + '>\n'
        h += ' <b>' + n + '</b>\n'
        h += ' </td>\n'
    h += ' </tr>\n'
    t += ' \\\\ \n'
    # Body rows (one <tr>/LaTeX row per input row)
    for ix in range(0, len(table)):
        x = table[ix]
        cx = {}
        if ix < len(table_custom):
            cx = table_custom[ix]
        rs1 = cx.get('row_style', '')
        h += ' <tr ' + rs + ' ' + rs1 + '>\n'
        t += ' \\hline\n'
        t += ' '
        first = True
        for iy in range(0, len(x)):
            st = {}
            if iy < len(table_header):
                st = table_header[iy]
            # Per-cell HTML override, falling back to the raw cell value.
            y = cx.get('field_' + str(iy) + '_html', '')
            if y == '':
                y = str(x[iy])
            if st.get('html_change_space', '') == 'yes':
                # NOTE(review): same suspected '&nbsp;' mangling as above.
                y = y.replace(' ', ' ')
                y = y.replace('\\newline', ' ')
            h += ' <td ' + es + '>\n'
            if st.get('html_before', '') != '':
                h += ' ' + st['html_before']
            h += ' ' + str(y) + '\n'
            if st.get('html_after', '') != '':
                h += ' ' + st['html_after']
            h += ' </td>\n'
            # Per-cell LaTeX override, falling back to the raw cell value.
            z = cx.get('field_' + str(iy) + '_tex', '')
            if z == '':
                z = str(x[iy])
            if first:
                first = False
            else:
                t += ' & '
            if st.get('tex_before', '') != '':
                t += st['tex_before']
            t += ' ' + str(z) + ' '
            if st.get('tex_after', '') != '':
                t += st['tex_after']
        t += '\\\\\n'
        h += ' </tr>\n'
    t += ' \\hline\n'
    # Finalize and optionally record via the CK kernel (return code of
    # save_text_file is not checked — NOTE(review): silent write failures).
    h += '</table>\n'
    h += i.get('html_after_table', '')
    t += ' \\end{tabular}'
    t += ' ' + i.get('tex_after_table', '')
    if i.get('record_html', '') != '':
        r = ck.save_text_file({'text_file': i['record_html'], 'string': h})
    if i.get('record_tex', '') != '':
        r = ck.save_text_file({'text_file': i['record_tex'], 'string': t})
    return {'return': 0, 'html': h, 'tex': t}
# Copyright (c) 2017 Cable Television Laboratories, Inc. ("CableLabs")
# and others. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Project:
    """
    SNAPS domain class for Projects. Should contain attributes that
    are shared amongst cloud providers
    """

    def __init__(self, name, project_id, domain_id=None):
        """
        Constructor
        :param name: the project's name
        :param project_id: the project's id
        :param domain_id: the project's domain id
        """
        self.name = name
        self.id = project_id
        self.domain_id = domain_id

    def __eq__(self, other):
        # NOTE(review): equality intentionally(?) ignores domain_id —
        # projects with the same name/id in different domains compare
        # equal; confirm that is the desired semantics.
        return (self.name, self.id) == (other.name, other.id)
class Domain:
    """
    SNAPS domain class for OpenStack Keystone v3+ domains.
    """

    def __init__(self, name, domain_id=None):
        """
        Constructor
        :param name: the domain's name
        :param domain_id: the domain's id
        """
        self.name = name
        self.id = domain_id

    def __eq__(self, other):
        # Two domains match when both name and id match.
        return (self.name, self.id) == (other.name, other.id)
class ComputeQuotas:
    """
    SNAPS domain class for holding project quotas for compute services
    """

    def __init__(self, nova_quotas=None, **kwargs):
        """
        Constructor
        :param nova_quotas: the OS nova quota object; when given, its
                            fields take precedence over keyword arguments
        """
        if nova_quotas:
            self.metadata_items = nova_quotas.metadata_items
            self.cores = nova_quotas.cores  # aka. VCPUs
            self.instances = nova_quotas.instances
            self.injected_files = nova_quotas.injected_files
            self.injected_file_content_bytes = \
                nova_quotas.injected_file_content_bytes
            self.ram = nova_quotas.ram
            self.fixed_ips = nova_quotas.fixed_ips
            self.key_pairs = nova_quotas.key_pairs
        else:
            self.metadata_items = kwargs.get('metadata_items')
            self.cores = kwargs.get('cores')  # aka. VCPUs
            self.instances = kwargs.get('instances')
            self.injected_files = kwargs.get('injected_files')
            self.injected_file_content_bytes = kwargs.get(
                'injected_file_content_bytes')
            self.ram = kwargs.get('ram')
            self.fixed_ips = kwargs.get('fixed_ips')
            self.key_pairs = kwargs.get('key_pairs')

    def __eq__(self, other):
        # Bug fix: 'ram' is assigned in __init__ but was omitted from the
        # comparison, so two quota sets differing only in RAM compared
        # equal.
        return (self.metadata_items == other.metadata_items and
                self.cores == other.cores and
                self.instances == other.instances and
                self.injected_files == other.injected_files and
                self.injected_file_content_bytes ==
                other.injected_file_content_bytes and
                self.ram == other.ram and
                self.fixed_ips == other.fixed_ips and
                self.key_pairs == other.key_pairs)
class NetworkQuotas:
    """
    SNAPS domain class for holding project quotas for networking services
    """

    def __init__(self, **neutron_quotas):
        """
        Constructor
        :param neutron_quotas: the OS network quota values; every key is
                               required (a missing one raises KeyError)
        """
        self.security_group = neutron_quotas['security_group']
        self.security_group_rule = neutron_quotas['security_group_rule']
        self.floatingip = neutron_quotas['floatingip']
        self.network = neutron_quotas['network']
        self.port = neutron_quotas['port']
        self.router = neutron_quotas['router']
        self.subnet = neutron_quotas['subnet']

    def __eq__(self, other):
        # Compare all quota fields as tuples for readability.
        mine = (self.security_group, self.security_group_rule,
                self.floatingip, self.network, self.port, self.router,
                self.subnet)
        theirs = (other.security_group, other.security_group_rule,
                  other.floatingip, other.network, other.port, other.router,
                  other.subnet)
        return mine == theirs
| class Project:
"""
SNAPS domain class for Projects. Should contain attributes that
are shared amongst cloud providers
"""
    def __init__(self, name, project_id, domain_id=None):
        """
        Constructor
        :param name: the project's name
        :param project_id: the project's id
        :param domain_id: the project's domain id
        """
        self.name = name
        self.id = project_id
        self.domain_id = domain_id
    def __eq__(self, other):
        # NOTE(review): equality ignores domain_id — two projects with the
        # same name/id in different domains compare equal; confirm intended.
        return self.name == other.name and self.id == other.id
class Domain:
    """
    SNAPS domain class for OpenStack Keystone v3+ domains.
    """

    def __init__(self, name, domain_id=None):
        """
        Constructor
        :param name: the domain's name
        :param domain_id: the domain's id
        """
        self.name = name
        self.id = domain_id

    def __eq__(self, other):
        # Two domains match when both name and id match.
        return (self.name, self.id) == (other.name, other.id)
class Computequotas:
    """
    SNAPS domain class for holding project quotas for compute services
    """

    def __init__(self, nova_quotas=None, **kwargs):
        """
        Constructor
        :param nova_quotas: the OS nova quota object; when given, its
                            fields take precedence over keyword arguments
        """
        if nova_quotas:
            self.metadata_items = nova_quotas.metadata_items
            self.cores = nova_quotas.cores  # aka. VCPUs
            self.instances = nova_quotas.instances
            self.injected_files = nova_quotas.injected_files
            self.injected_file_content_bytes = nova_quotas.injected_file_content_bytes
            self.ram = nova_quotas.ram
            self.fixed_ips = nova_quotas.fixed_ips
            self.key_pairs = nova_quotas.key_pairs
        else:
            self.metadata_items = kwargs.get('metadata_items')
            self.cores = kwargs.get('cores')  # aka. VCPUs
            self.instances = kwargs.get('instances')
            self.injected_files = kwargs.get('injected_files')
            self.injected_file_content_bytes = kwargs.get('injected_file_content_bytes')
            self.ram = kwargs.get('ram')
            self.fixed_ips = kwargs.get('fixed_ips')
            self.key_pairs = kwargs.get('key_pairs')

    def __eq__(self, other):
        # Bug fix: 'ram' is assigned in __init__ but was omitted from the
        # comparison, so quotas differing only in RAM compared equal.
        return (self.metadata_items == other.metadata_items and
                self.cores == other.cores and
                self.instances == other.instances and
                self.injected_files == other.injected_files and
                self.injected_file_content_bytes ==
                other.injected_file_content_bytes and
                self.ram == other.ram and
                self.fixed_ips == other.fixed_ips and
                self.key_pairs == other.key_pairs)
class Networkquotas:
    """
    SNAPS domain class for holding project quotas for networking services
    """

    def __init__(self, **neutron_quotas):
        """
        Constructor
        :param neutron_quotas: the OS network quota values; every key is
                               required (a missing one raises KeyError)
        """
        self.security_group = neutron_quotas['security_group']
        self.security_group_rule = neutron_quotas['security_group_rule']
        self.floatingip = neutron_quotas['floatingip']
        self.network = neutron_quotas['network']
        self.port = neutron_quotas['port']
        self.router = neutron_quotas['router']
        self.subnet = neutron_quotas['subnet']

    def __eq__(self, other):
        # Compare all quota fields as tuples for readability.
        mine = (self.security_group, self.security_group_rule,
                self.floatingip, self.network, self.port, self.router,
                self.subnet)
        theirs = (other.security_group, other.security_group_rule,
                  other.floatingip, other.network, other.port, other.router,
                  other.subnet)
        return mine == theirs
def variableName(name):
    """Return True when *name* is a valid identifier-style variable name.

    Valid names are non-empty, do not start with a digit, and contain
    only alphanumeric characters and underscores.
    """
    # Bug fix: an empty string previously raised IndexError on name[0];
    # treat it as invalid instead.
    if not name:
        return False
    if name[0].isdigit():
        return False
    for c in name:
        if not c.isalnum() and c != '_':
            return False
    return True


def variable_name(name):
    """snake_case alias of variableName with identical semantics."""
    return variableName(name)
    # NOTE(review): this indented fragment is the body of a duplicated
    # `variable_name` definition whose `def` line was fused onto the
    # previous line by the dataset's column separator — the file needs
    # manual repair here.
    if name[0].isdigit():
        return False
    for c in name:
        if not c.isalnum() and (not c == '_'):
            return False
    return True
# Engine toggles
deepspeech = True       # Use Mozilla DeepSpeech
google = True           # Use Google Cloud API Speech to Text
remove_wav_dir = True   # Delete the intermediate wav directory afterwards

# Input locations
data_dir = './data/'
input_dir = data_dir + 'audio_examples/'
google_credentials = data_dir + 'project_0123.json'  # Replace with your json file from:
'''
https://console.cloud.google.com/apis/credentials/serviceaccountkey?_ga=2.135125617.-1992684214.1559066420
More info here:
https://cloud.google.com/docs/authentication/production
'''
# These will be created automatically:
wav_dir = input_dir[:-1] + '_wavs/'
deepspeech_models = data_dir + 'deepspeech-0.5.0-models/'
output_dir = data_dir + 'transcriptions/'
| deepspeech = True
google = True  # use Google Cloud Speech-to-Text as well
remove_wav_dir = True  # delete the intermediate wav directory afterwards
data_dir = './data/'
input_dir = data_dir + 'audio_examples/'
google_credentials = data_dir + 'project_0123.json'  # service-account json (see URL below)
'\nhttps://console.cloud.google.com/apis/credentials/serviceaccountkey?_ga=2.135125617.-1992684214.1559066420\nMore info here:\nhttps://cloud.google.com/docs/authentication/production\n'
# Derived paths, created automatically:
wav_dir = input_dir[:-1] + '_wavs/'
deepspeech_models = data_dir + 'deepspeech-0.5.0-models/'
output_dir = data_dir + 'transcriptions/'
# Literal 2x8 matrix of sample values; a bare expression statement, so the
# result is discarded at import time — presumably dataset residue.
[
    [
        0.0036339,
        0.00360887,
        0.00275196,
        0.0019008,
        0.00174623,
        0.00346563,
        0.00081923,
        0.00105703,
    ],
    [
        0.00380165,
        0.00361068,
        0.0027048,
        0.00190774,
        0.00170164,
        0.00382391,
        0.00086244,
        0.00106248,
    ],
]
| [[0.0036339, 0.00360887, 0.00275196, 0.0019008, 0.00174623, 0.00346563, 0.00081923, 0.00105703], [0.00380165, 0.00361068, 0.0027048, 0.00190774, 0.00170164, 0.00382391, 0.00086244, 0.00106248]] |
# Distribution metadata for the `tes` package (consumed by setup tooling).
__title__ = 'tes'
__description__ = 'AlfaStrakhovanie TES API Python SDK'
__url__ = ''
__version__ = '0.3.1'
__author__ = 'Sergey Popinevskiy'
__author_email__ = 'sergey.popinevskiy@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2020 Sergey Popinevskiy'
| __title__ = 'tes'
# Distribution metadata for the `tes` package (duplicate column).
__description__ = 'AlfaStrakhovanie TES API Python SDK'
__url__ = ''
__version__ = '0.3.1'
__author__ = 'Sergey Popinevskiy'
__author_email__ = 'sergey.popinevskiy@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2020 Sergey Popinevskiy'
inp = input('Enter a sentence: ')  # sentence to encode
old = 'abcdefghijklmnopqrstuvwxyz'  # lowercase lookup source
new = 'zyxwvutsrqponmlkjihgfedcba'  # lowercase cipher target (reversed alphabet)
oldcaps = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'  # uppercase lookup source
newcaps = 'ZYXWVUTSRQPONMLKJIHGFEDCBA'  # uppercase cipher target
# Atbash cipher: every letter maps to its mirror in the alphabet; output is
# echoed character by character on a single line (end='').
for i in inp:
    if i.isupper():  # uppercase letters map via the uppercase tables
        index = oldcaps.index(i)
        print(newcaps[index], end='')
    elif i.islower():  # lowercase letters map via the lowercase tables
        index = old.index(i)
        print(new[index], end='')
    else:  # digits, spaces and punctuation pass through unchanged
print(i, end='') # So change nothing, print the character and don't start a new line. | inp = input('Enter a sentence: ')
old = 'abcdefghijklmnopqrstuvwxyz'  # lowercase lookup source
new = 'zyxwvutsrqponmlkjihgfedcba'  # lowercase cipher target (reversed alphabet)
oldcaps = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'  # uppercase lookup source
newcaps = 'ZYXWVUTSRQPONMLKJIHGFEDCBA'  # uppercase cipher target
# Atbash cipher: each letter maps to its mirror; echoed on one line.
for i in inp:
    if i.isupper():  # uppercase letters map via the uppercase tables
        index = oldcaps.index(i)
        print(newcaps[index], end='')
    elif i.islower():  # lowercase letters map via the lowercase tables
        index = old.index(i)
        print(new[index], end='')
    else:  # non-letters pass through unchanged
        print(i, end='')
# Alpaca login info (paper-trading endpoint).
# NOTE(review): real credentials should not be committed to the repo —
# prefer loading these from environment variables.
API_URL='https://paper-api.alpaca.markets'
API_KEY=''
API_SECRET=''
# Postgres login info
DB_HOST = 'localhost'
DB_USER = 'postgres'
DB_PASS = 'password'
DB_NAME = 'stocks'
# AlphaVantage login info
ALPHA_API_KEY = ''
| api_url = 'https://paper-api.alpaca.markets'
api_key = ''  # Alpaca key id — fill in (avoid committing real values)
api_secret = ''  # Alpaca secret
# Postgres login info
db_host = 'localhost'
db_user = 'postgres'
db_pass = 'password'
db_name = 'stocks'
# AlphaVantage login info
alpha_api_key = ''
"""Contains email templates for Epicast and helper functions for using them.
Email categories include:
- `alerts`: custom, one-time `notifications`-type email for a manual user group
- `notifications`: for all users, says that the CDC published new data
- `reminders`: for users with missing forecasts, a reminder that the deadline
is soon
"""
class EpicastEmails:
"""Templating for Epicast emails."""
class Template:
"""Namespace for email template text."""
# TODO: update to literal template strings after upgrade to Python 3.6+
# see https://www.python.org/dev/peps/pep-0498/
# a tag which precedes the subject in all emails
SUBJECT_TAG = '[Crowdcast]'
# the unsubscribe section embedded in all emails
UNSUBSCRIBE = {
'text': '''
----------
[This is an automated message. To edit your email preferences or to
stop receiving these emails, follow the unsubscribe link below.]
Unsubscribe: https://delphi.cmu.edu/crowdcast/preferences.php?user=%s
''',
'html': '''
<hr>
<p style="color: #666; font-size: 0.8em;">
[This is an automated message. To edit your email preferences or to
stop receiving these emails, click the unsubscribe link below.]
<br>
<a href="https://delphi.cmu.edu/crowdcast/preferences.php?user=%s">
Unsubscribe</a>
</p>
''',
}
# placeholder for one-off emails sent on special occasions
ALERT = {
'subject': 'Crowdcast Needs Your Help',
'text': '''
Dear %s,
[alert text here]
-The DELPHI Team
''',
'html': '''
<p>
Dear %s,
</p><p>
[alert text here]
</p><p>
-The DELPHI Team
</p>
''',
}
# the optional scoring section embedded in weekly notifications
SCORE = {
'text': '''
Your overall score is: %d (ranked #%d)
Note: To be listed on the leaderboards, simply enter your initials on
the preferences page at
https://delphi.cmu.edu/crowdcast/preferences.php?user=%s
You can find the leaderboards at
https://delphi.cmu.edu/crowdcast/scores.php
''',
'html': '''
<p>
Your overall score is: %d (<i>ranked #%d</i>)
<br>
Note: To be listed on the <a
href="https://delphi.cmu.edu/crowdcast/scores.php">leaderboards</a>,
simply enter your initials on the preferences page <a
href="https://delphi.cmu.edu/crowdcast/preferences.php?user=%s">
here</a>.
</p>
''',
}
# the weekly "new data available" notification
NOTIFICATION = {
'subject': 'New Data Available (Deadline: Monday 10 AM)',
'text': '''
Dear %s,
The CDC has released another week of influenza-like-illness (ILI)
surveillance data. A new round of covid19-related forecasting is now
underway, and we need your forecasts! We are asking you to please
submit your forecasts by 10:00 AM (ET) this coming Monday. Thank you so
much for your support and cooperation!
To login and submit your forecasts, visit
https://delphi.cmu.edu/crowdcast/
and enter your User ID: %s
{SCORE}
Thank you again for your participation, and good luck on your
forecasts!
Happy Forecasting!
-The DELPHI Team
''',
'html': '''
<p>
Dear %s,
</p><p>
The CDC has released another week of influenza-like-illness (ILI)
surveillance data. A new round of covid19-related forecasting is now
underway, and we need your forecasts! We are asking you to please
submit your forecasts by <b>10:00 AM (ET)</b> this coming Monday.
Thank you so much for your support and cooperation!
</p><p>
To login and submit your forecasts, click <a
href="https://delphi.cmu.edu/crowdcast/launch.php?user=%s">here</a>
or visit https://delphi.cmu.edu/crowdcast/ and enter your User ID: %s
</p>{SCORE}<p>
Thank you again for your participation, and good luck on your
forecasts!
</p><p>
Happy Forecasting!
<br>
-The DELPHI Team
</p>
''',
}
# the weekly "forecast due soon" reminder
REMINDER = {
'subject': 'Forecasts Needed (Deadline: Monday 10AM)',
'text': '''
Dear %s,
This is just a friendly reminder that your influenza-like-illness (ILI)
forecasts are due by 10:00AM (ET) on Monday. Thank you so much for your
support and cooperation!
To login and submit your forecasts, visit
https://delphi.cmu.edu/crowdcast and enter your User ID: %s.
Happy Forecasting!
-The DELPHI Team
''',
'html': '''
<p>
Dear %s,
</p><p>
This is just a friendly reminder that your influenza-like-illness
(ILI) forecasts are due by <b>10:00AM (ET) on Monday</b>. Thank you
so much for your support and cooperation!
</p><p>
To login and submit your forecasts, click <a
href="https://delphi.cmu.edu/crowdcast/launch.php?user=%s">here</a>
or visit https://delphi.cmu.edu/crowdcast/ and enter your User ID: %s
</p><p>
Happy Forecasting!
<br>
-The DELPHI Team
</p>
''',
}
@staticmethod
def prepare(text):
"""Trim surrounding whitespace and use network-style CRLF line endings."""
return '\r\n'.join([line.strip() for line in text.split('\n')]).strip()
    @staticmethod
    def compose(
        user_id,
        subject,
        text_template,
        html_template,
        text_values,
        html_values):
      """Create final subject and body from templates and values.

      Both templates get the unsubscribe footer appended, so each value
      tuple implicitly gains a trailing user_id for the footer's %s.
      """
      # Tagged subject, e.g. "[Crowdcast] <subject>".
      final_subject = EpicastEmails.Template.SUBJECT_TAG + ' ' + subject
      # Plain-text part: footer + user id for its %s placeholder.
      text_template += EpicastEmails.Template.UNSUBSCRIBE['text']
      text_values += (user_id,)
      final_text = EpicastEmails.prepare(text_template) % text_values
      # HTML part: same treatment, then wrapped in a minimal document.
      html_template += EpicastEmails.Template.UNSUBSCRIBE['html']
      html_values += (user_id,)
      temp_html = EpicastEmails.prepare(html_template) % html_values
      final_html = '<html><body>' + temp_html + '</body></html>'
      return final_subject, final_text, final_html
@staticmethod
def get_alert(user_id, user_name):
"""Fill out and return the alert email."""
template = EpicastEmails.Template.ALERT
values = (user_name,)
return EpicastEmails.compose(
user_id,
template['subject'],
template['text'],
template['html'],
values,
values)
@staticmethod
def get_notification(
user_id, user_name, last_score, last_rank, total_score, total_rank):
"""Fill out and return the notification email."""
template = EpicastEmails.Template.NOTIFICATION
subject = template['subject']
text = template['text']
html = template['html']
text_values = (user_name, user_id)
html_values = (user_name, user_id, user_id)
if last_score > 0:
# include the embedded scoring section
text = text.replace('{SCORE}', EpicastEmails.Template.SCORE['text'])
html = html.replace('{SCORE}', EpicastEmails.Template.SCORE['html'])
score_values = (total_score, total_rank, user_id)
text_values += score_values
html_values += score_values
else:
# omit the embedded scoring section
text = text.replace('{SCORE}', '')
html = html.replace('{SCORE}', '')
return EpicastEmails.compose(
user_id, subject, text, html, text_values, html_values)
@staticmethod
def get_reminder(user_id, user_name):
"""Fill out and return the reminder email."""
template = EpicastEmails.Template.REMINDER
text_values = (user_name, user_id)
html_values = (user_name, user_id, user_id)
return EpicastEmails.compose(
user_id,
template['subject'],
template['text'],
template['html'],
text_values,
html_values)
| """Contains email templates for Epicast and helper functions for using them.
Email categories include:
- `alerts`: custom, one-time `notifications`-type email for a manual user group
- `notifications`: for all users, says that the CDC published new data
- `reminders`: for users with missing forecasts, a reminder that the deadline
is soon
"""
class Epicastemails:
"""Templating for Epicast emails."""
class Template:
"""Namespace for email template text."""
subject_tag = '[Crowdcast]'
unsubscribe = {'text': '\n ----------\n\n [This is an automated message. To edit your email preferences or to\n stop receiving these emails, follow the unsubscribe link below.]\n\n Unsubscribe: https://delphi.cmu.edu/crowdcast/preferences.php?user=%s\n ', 'html': '\n <hr>\n <p style="color: #666; font-size: 0.8em;">\n [This is an automated message. To edit your email preferences or to\n stop receiving these emails, click the unsubscribe link below.]\n <br>\n <a href="https://delphi.cmu.edu/crowdcast/preferences.php?user=%s">\n Unsubscribe</a>\n </p>\n '}
alert = {'subject': 'Crowdcast Needs Your Help', 'text': '\n Dear %s,\n\n [alert text here]\n\n -The DELPHI Team\n ', 'html': '\n <p>\n Dear %s,\n </p><p>\n [alert text here]\n </p><p>\n -The DELPHI Team\n </p>\n '}
score = {'text': '\n Your overall score is: %d (ranked #%d)\n\n Note: To be listed on the leaderboards, simply enter your initials on\n the preferences page at\n https://delphi.cmu.edu/crowdcast/preferences.php?user=%s\n\n You can find the leaderboards at\n https://delphi.cmu.edu/crowdcast/scores.php\n ', 'html': '\n <p>\n Your overall score is: %d (<i>ranked #%d</i>)\n <br>\n Note: To be listed on the <a\n href="https://delphi.cmu.edu/crowdcast/scores.php">leaderboards</a>,\n simply enter your initials on the preferences page <a\n href="https://delphi.cmu.edu/crowdcast/preferences.php?user=%s">\n here</a>.\n </p>\n '}
notification = {'subject': 'New Data Available (Deadline: Monday 10 AM)', 'text': '\n Dear %s,\n\n The CDC has released another week of influenza-like-illness (ILI)\n surveillance data. A new round of covid19-related forecasting is now\n underway, and we need your forecasts! We are asking you to please\n submit your forecasts by 10:00 AM (ET) this coming Monday. Thank you so\n much for your support and cooperation!\n\n To login and submit your forecasts, visit\n https://delphi.cmu.edu/crowdcast/\n and enter your User ID: %s\n\n {SCORE}\n\n Thank you again for your participation, and good luck on your\n forecasts!\n\n Happy Forecasting!\n -The DELPHI Team\n ', 'html': '\n <p>\n Dear %s,\n </p><p>\n The CDC has released another week of influenza-like-illness (ILI)\n surveillance data. A new round of covid19-related forecasting is now\n underway, and we need your forecasts! We are asking you to please\n submit your forecasts by <b>10:00 AM (ET)</b> this coming Monday.\n Thank you so much for your support and cooperation!\n </p><p>\n To login and submit your forecasts, click <a\n href="https://delphi.cmu.edu/crowdcast/launch.php?user=%s">here</a>\n or visit https://delphi.cmu.edu/crowdcast/ and enter your User ID: %s\n </p>{SCORE}<p>\n Thank you again for your participation, and good luck on your\n forecasts!\n </p><p>\n Happy Forecasting!\n <br>\n -The DELPHI Team\n </p>\n '}
reminder = {'subject': 'Forecasts Needed (Deadline: Monday 10AM)', 'text': '\n Dear %s,\n\n This is just a friendly reminder that your influenza-like-illness (ILI)\n forecasts are due by 10:00AM (ET) on Monday. Thank you so much for your\n support and cooperation!\n\n To login and submit your forecasts, visit\n https://delphi.cmu.edu/crowdcast and enter your User ID: %s.\n\n Happy Forecasting!\n\n -The DELPHI Team\n ', 'html': '\n <p>\n Dear %s,\n </p><p>\n This is just a friendly reminder that your influenza-like-illness\n (ILI) forecasts are due by <b>10:00AM (ET) on Monday</b>. Thank you\n so much for your support and cooperation!\n </p><p>\n To login and submit your forecasts, click <a\n href="https://delphi.cmu.edu/crowdcast/launch.php?user=%s">here</a>\n or visit https://delphi.cmu.edu/crowdcast/ and enter your User ID: %s\n </p><p>\n Happy Forecasting!\n <br>\n -The DELPHI Team\n </p>\n '}
@staticmethod
def prepare(text):
"""Trim surrounding whitespace and use network-style CRLF line endings."""
return '\r\n'.join([line.strip() for line in text.split('\n')]).strip()
@staticmethod
def compose(user_id, subject, text_template, html_template, text_values, html_values):
"""Create final subject and body from templates and values."""
final_subject = EpicastEmails.Template.SUBJECT_TAG + ' ' + subject
text_template += EpicastEmails.Template.UNSUBSCRIBE['text']
text_values += (user_id,)
final_text = EpicastEmails.prepare(text_template) % text_values
html_template += EpicastEmails.Template.UNSUBSCRIBE['html']
html_values += (user_id,)
temp_html = EpicastEmails.prepare(html_template) % html_values
final_html = '<html><body>' + temp_html + '</body></html>'
return (final_subject, final_text, final_html)
@staticmethod
def get_alert(user_id, user_name):
"""Fill out and return the alert email."""
template = EpicastEmails.Template.ALERT
values = (user_name,)
return EpicastEmails.compose(user_id, template['subject'], template['text'], template['html'], values, values)
@staticmethod
def get_notification(user_id, user_name, last_score, last_rank, total_score, total_rank):
"""Fill out and return the notification email."""
template = EpicastEmails.Template.NOTIFICATION
subject = template['subject']
text = template['text']
html = template['html']
text_values = (user_name, user_id)
html_values = (user_name, user_id, user_id)
if last_score > 0:
text = text.replace('{SCORE}', EpicastEmails.Template.SCORE['text'])
html = html.replace('{SCORE}', EpicastEmails.Template.SCORE['html'])
score_values = (total_score, total_rank, user_id)
text_values += score_values
html_values += score_values
else:
text = text.replace('{SCORE}', '')
html = html.replace('{SCORE}', '')
return EpicastEmails.compose(user_id, subject, text, html, text_values, html_values)
@staticmethod
def get_reminder(user_id, user_name):
"""Fill out and return the reminder email."""
template = EpicastEmails.Template.REMINDER
text_values = (user_name, user_id)
html_values = (user_name, user_id, user_id)
return EpicastEmails.compose(user_id, template['subject'], template['text'], template['html'], text_values, html_values) |
# simulation nodes tree
class Tree:
    """Container for the three node maps used by the simulation."""

    def __init__(self):
        self.imp_nds = {}    # import nodes
        self.sim_nds = {}    # simulation nodes
        self.cache_nds = {}  # cache nodes
| class Tree:
    def __init__(self):
        self.imp_nds = {}    # import nodes
        self.sim_nds = {}    # simulation nodes
        self.cache_nds = {}  # cache nodes
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class EULAConfiguration(object):
    """Implementation of the 'EULA Configuration.' model.

    Specifies the End User License Agreement acceptance information.

    Attributes:
        license_key (string): Specifies the license key.
        signed_by_user (string): Specifies the login account name for the
            Cohesity user who accepted the End User License Agreement.
        signed_time (long|int): Specifies the time that the End User License
            Agreement was accepted.
        signed_version (long|int): Specifies the version of the End User
            License Agreement that was accepted.
    """

    # Mapping from Model property names to API property names.
    _names = {
        "license_key": 'licenseKey',
        "signed_by_user": 'signedByUser',
        "signed_time": 'signedTime',
        "signed_version": 'signedVersion'
    }

    def __init__(self,
                 license_key=None,
                 signed_by_user=None,
                 signed_time=None,
                 signed_version=None):
        """Constructor for the EULAConfiguration class"""
        self.license_key = license_key
        self.signed_by_user = signed_by_user
        self.signed_time = signed_time
        self.signed_version = signed_version

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance from a deserialized API response dictionary.

        Args:
            dictionary (dict): keys must match the API property names.

        Returns:
            EULAConfiguration or None: None when *dictionary* is None.
        """
        if dictionary is None:
            return None
        return cls(dictionary.get('licenseKey'),
                   dictionary.get('signedByUser'),
                   dictionary.get('signedTime'),
                   dictionary.get('signedVersion'))
| class Eulaconfiguration(object):
"""Implementation of the 'EULA Configuration.' model.
Specifies the End User License Agreement acceptance information.
Attributes:
license_key (string): Specifies the license key.
signed_by_user (string): Specifies the login account name for the
Cohesity user who accepted the End User License Agreement.
signed_time (long|int): Specifies the time that the End User License
Agreement was accepted.
signed_version (long|int): Specifies the version of the End User
License Agreement that was accepted.
"""
_names = {'license_key': 'licenseKey', 'signed_by_user': 'signedByUser', 'signed_time': 'signedTime', 'signed_version': 'signedVersion'}
def __init__(self, license_key=None, signed_by_user=None, signed_time=None, signed_version=None):
"""Constructor for the EULAConfiguration class"""
self.license_key = license_key
self.signed_by_user = signed_by_user
self.signed_time = signed_time
self.signed_version = signed_version
@classmethod
def from_dictionary(cls, dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
license_key = dictionary.get('licenseKey')
signed_by_user = dictionary.get('signedByUser')
signed_time = dictionary.get('signedTime')
signed_version = dictionary.get('signedVersion')
return cls(license_key, signed_by_user, signed_time, signed_version) |
# hh.ru vacancy search: endpoint URL plus the query-string parameter names
# the scraper sends to it.
URL_JOB_SEARCH_HHRU = 'https://hh.ru/search/vacancy'
PARAM_HHRU_QUERY = 'text'        # free-text search terms
PARAM_HHRU_EXP = 'experience'    # required-experience filter
PARAM_HHRU_AREA = 'area'         # region/city filter
PARAM_HHRU_PAGE = 'page'         # result page index -- presumably 0-based, confirm against site
VACANCIES_PER_PAGE = 50          # page size assumed by callers -- TODO confirm hh.ru still uses 50
| url_job_search_hhru = 'https://hh.ru/search/vacancy'
param_hhru_query = 'text'
param_hhru_exp = 'experience'
param_hhru_area = 'area'
param_hhru_page = 'page'
vacancies_per_page = 50 |
#
# Roommate Finder v0.9
#
# This query is intended to find pairs of roommates. It almost works!
# There's something not quite right about it, though. Find and fix the bug.
#
# Roommate pairs: two distinct people sharing the same building AND room.
# Bug fixes vs. the original query:
#   * "a.id != b.id" listed every pair twice (once per ordering); "a.id < b.id"
#     keeps exactly one row per pair.
#   * "group by a.room" collapsed distinct pairs that happened to share a room
#     number across different buildings; it is removed.
QUERY = '''
select a.id, b.id, a.building, a.room
    from residences as a, residences as b
    where a.building = b.building
    and a.room = b.room
    and a.id < b.id
    order by a.building, a.room;
'''
#
# To see the complete residences table, uncomment this query and press "Test Run":
#
# QUERY = "select id, building, room from residences;"
#
| query = '\nselect a.id, b.id, a.building, a.room\n from residences as a, residences as b\n where a.building = b.building\n and a.room = b.room\n and a.id != b.id\n group by a.room\n order by a.building, a.room;\n' |
"""
Doubly Linked Lists have references to the next and previous elements
"""
class ListElement:
    """Node of a doubly linked list.

    Bug fixes vs. the original:
      * ``__init__`` stored ``prev_item`` into ``self.next_item`` and vice
        versa (swapped assignments);
      * ``prepend`` delegated to ``append``, walking forward instead of
        backward, so prepended items ended up at the tail.
    """

    def __init__(self, data, prev_item=None, next_item=None):
        self.data = data
        self.prev_item = prev_item
        self.next_item = next_item

    def append(self, data):
        """Insert *data* at the tail of the list (walks forward)."""
        if self.next_item:
            self.next_item.append(data)
        else:
            self.next_item = ListElement(data, prev_item=self)

    def prepend(self, data):
        """Insert *data* at the head of the list (walks backward)."""
        if self.prev_item:
            self.prev_item.prepend(data)
        else:
            self.prev_item = ListElement(data, next_item=self)
| """
Doubly Linked Lists have references to the next and previous elements
"""
class Listelement:
def __init__(self, data, prev_item=None, next_item=None):
self.data = data
self.next_item = prev_item
self.prev_item = next_item
def append(self, data):
if self.next_item:
self.next_item.append(data)
else:
self.next_item = list_element(data, prev_item=self)
def prepend(self, data):
if self.prev_item:
self.prev_item.append(data)
else:
self.prev_item = list_element(data, next_item=self) |
# Demo script: one example per built-in Python data type, printing the value
# and its type.

# Integer
age = 22
print(age)
print(type(age))
# Float
grade = 9.8
print(grade)
print(type(grade))
# Boolean -- tuple unpacking assigns both names in one statement
is_published, offline = True, False
print(is_published, offline)
print(type(is_published), type(offline))
# String
movie_name = 'Nobody'
print(movie_name)
print(type(movie_name))
# List (ordered, mutable; may mix types and nest)
mixed = [1, 2, 3.2, 4, 5.4, True, 'Hello', ['a', 'b', 'c']]
print(mixed)
print(type(mixed))
# Dictionary (insertion-ordered on Python 3.7+)
movie_details = {
    'name': 'Nobody',
    'year': 2019,
    'director': 'Me',
    'cast': ['a', 'b', 'c']
}
print(movie_details)
print(type(movie_details))
# Tuple (ordered, immutable)
mixed_tuple = (1, 2, 3.2, 4, 5.4, True, 'Hello', ['a', 'b', 'c'])
print(mixed_tuple)
print(type(mixed_tuple))
# Set (unordered, unindexed, no duplicates) -- print order is not deterministic
mixed_set = {1, 2, 3.2, 4, 5.4, "Python", 'Hello'}
print(mixed_set)
print(type(mixed_set))
| age = 22
print(age)
print(type(age))
grade = 9.8
print(grade)
print(type(grade))
(is_published, offline) = (True, False)
print(is_published, offline)
print(type(is_published), type(offline))
movie_name = 'Nobody'
print(movie_name)
print(type(movie_name))
mixed = [1, 2, 3.2, 4, 5.4, True, 'Hello', ['a', 'b', 'c']]
print(mixed)
print(type(mixed))
movie_details = {'name': 'Nobody', 'year': 2019, 'director': 'Me', 'cast': ['a', 'b', 'c']}
print(movie_details)
print(type(movie_details))
mixed_tuple = (1, 2, 3.2, 4, 5.4, True, 'Hello', ['a', 'b', 'c'])
print(mixed_tuple)
print(type(mixed_tuple))
mixed_set = {1, 2, 3.2, 4, 5.4, 'Python', 'Hello'}
print(mixed_set)
print(type(mixed_set)) |
# """
# This is Sea's API interface.
# You should not implement it, or speculate about its implementation
# """
class Sea(object):
    """Judge-provided API stub (see header comment above) -- do not implement."""

    def hasShips(self, topRight: 'Point', bottomLeft: 'Point') -> bool:
        # Presumably reports whether any ship lies in the rectangle spanned by
        # bottomLeft..topRight (inclusive) -- confirm with the problem statement.
        # Intentionally left unimplemented; supplied by the judging environment.
        pass
class Point(object):
    """Plain 2-D coordinate with integer x/y components."""

    def __init__(self, x: int, y: int):
        self.x, self.y = x, y
class Solution(object):
    """Count ships by recursively subdividing the search rectangle into
    four quadrants, pruning any quadrant the sea reports as empty."""

    def countShips(self, sea: 'Sea', topRight: 'Point', bottomLeft: 'Point') -> int:
        tr_x, tr_y = topRight.x, topRight.y
        bl_x, bl_y = bottomLeft.x, bottomLeft.y
        # Degenerate or ship-free rectangle contributes nothing.
        if bl_x > tr_x or bl_y > tr_y or not sea.hasShips(topRight, bottomLeft):
            return 0
        # Single cell known to contain a ship.
        if (bl_x, bl_y) == (tr_x, tr_y):
            return 1
        mid_x = (bl_x + tr_x) >> 1
        mid_y = (bl_y + tr_y) >> 1
        # Four non-overlapping quadrants: TL, TR, BR, BL.
        quadrants = (
            (Point(mid_x, tr_y), Point(bl_x, mid_y + 1)),
            (Point(tr_x, tr_y), Point(mid_x + 1, mid_y + 1)),
            (Point(tr_x, mid_y), Point(mid_x + 1, bl_y)),
            (Point(mid_x, mid_y), Point(bl_x, bl_y)),
        )
        total = 0
        for quad_tr, quad_bl in quadrants:
            total += self.countShips(sea, quad_tr, quad_bl)
            # Early exit at 10 -- apparently the known upper bound on ships
            # (confirm with the problem statement).
            if total == 10:
                break
        return total
| class Sea(object):
def has_ships(self, topRight: 'Point', bottomLeft: 'Point') -> bool:
pass
class Point(object):
def __init__(self, x: int, y: int):
self.x = x
self.y = y
class Solution(object):
def count_ships(self, sea: 'Sea', topRight: 'Point', bottomLeft: 'Point') -> int:
top_right_x = topRight.x
top_right_y = topRight.y
bottom_left_x = bottomLeft.x
bottom_left_y = bottomLeft.y
if bottom_left_x > top_right_x or bottom_left_y > top_right_y or (not sea.hasShips(topRight, bottomLeft)):
return 0
if bottom_left_x == top_right_x and bottom_left_y == top_right_y:
return 1
mid_x = bottom_left_x + top_right_x >> 1
mid_y = bottom_left_y + top_right_y >> 1
blocks = [[point(mid_x, top_right_y), point(bottom_left_x, mid_y + 1)], [point(top_right_x, top_right_y), point(mid_x + 1, mid_y + 1)], [point(top_right_x, mid_y), point(mid_x + 1, bottom_left_y)], [point(mid_x, mid_y), point(bottom_left_x, bottom_left_y)]]
res = 0
for block in blocks:
top_right_coord = block[0]
bottom_left_coord = block[1]
res += self.countShips(sea, top_right_coord, bottom_left_coord)
if res == 10:
return res
return res |
# Base node class
class Node:
    """Single trie node: one child slot per lowercase letter plus an
    end-of-word flag."""

    def __init__(self):
        self.children = [None] * 26  # indexed by ord(ch) - ord('a')
        self.isend = False           # True when a stored word ends at this node


# Trie class
class Trie:
    """Prefix tree over lowercase a-z strings."""

    def __init__(self):
        self.__root = Node()

    def __len__(self):
        """
        returns length of the trie

        Returns:
            [int]: [number of words stored in the trie]
        """
        return len(self.search_by_prefix(''))

    def __str__(self):
        """
        returns all values in the trie

        Returns:
            [string]: [all stored words, one per line]
        """
        ll = self.search_by_prefix('')
        string = ""
        for i in ll:
            string += i
            string += "\n"
        return string

    def chartoint(self, character):
        """
        returns the child-slot index of a lowercase character

        Returns:
            [int]: [0..25 offset of the character from 'a']
        """
        return ord(character) - ord('a')

    def remove(self, string):
        """
        removes an item from the trie

        Walks the path for *string*; if any link is missing, or the final
        node is not a word end, prints a message and leaves the trie
        untouched.  Bug fix: the original cleared ``isend`` on whatever
        node the walk stopped at even when the word was absent, which
        could silently delete a different stored word (e.g. removing
        "appx" deleted "app").

        Args:
            string ([string]): [string to be removed]
        """
        ptr = self.__root
        for ch in string:
            i = self.chartoint(ch)
            if ptr.children[i] is None:
                print("Keyword does not exist in trie")
                return
            ptr = ptr.children[i]
        if ptr.isend is not True:
            print("Keyword does not exist in trie")
            return
        ptr.isend = False

    def insert(self, string):
        """
        inserts a string into the trie

        Args:
            string ([string]): [string to be inserted into the trie]
        """
        ptr = self.__root
        for ch in string:
            i = self.chartoint(ch)
            if ptr.children[i] is None:
                ptr.children[i] = Node()
            ptr = ptr.children[i]
        ptr.isend = True

    def search(self, string):
        """
        searches for a string inside the trie

        Args:
            string ([string]): [string to search for]

        Returns:
            [bool]: [if the string is found in the trie or not]
        """
        ptr = self.__root
        for ch in string:
            i = self.chartoint(ch)
            if ptr.children[i] is None:
                return False
            ptr = ptr.children[i]
        return ptr.isend is True

    def __getall(self, ptr, key, key_list):
        """
        collects every stored word under *ptr* into *key_list*

        Args:
            ptr ([Node]): [subtree root]
            key ([string]): [prefix accumulated so far]
            key_list ([list]): [output accumulator]
        """
        if ptr is None:
            # Defensive: unreachable from current callers, which only
            # recurse into non-None children.
            key_list.append(key)
            return
        if ptr.isend is True:
            key_list.append(key)
        for i in range(26):
            if ptr.children[i] is not None:
                self.__getall(ptr.children[i], key + chr(ord('a') + i), key_list)

    def search_by_prefix(self, key):
        """
        gets all strings starting with a prefix

        Args:
            key ([string]): [prefix to search by]

        Returns:
            [list]: [all stored words starting with key, or None when the
            prefix path does not exist -- note __len__/__str__ rely on the
            empty prefix always matching]
        """
        ptr = self.__root
        key_list = []
        for ch in key:
            i = self.chartoint(ch)
            if ptr.children[i] is None:
                return None
            ptr = ptr.children[i]
        self.__getall(ptr, key, key_list)
        return key_list
| class Node:
def __init__(self):
self.children = [None] * 26
self.isend = False
class Trie:
def __init__(self):
self.__root = node()
def __len__(self):
"""
returns length of the trie
Returns:
[int]: [length of the trie]
"""
return len(self.search_by_prefix(''))
def __str__(self):
"""
returns all values in the trie
Returns:
[string]: [all values in the trie]
"""
ll = self.search_by_prefix('')
string = ''
for i in ll:
string += i
string += '\n'
return string
def chartoint(self, character):
"""
returns the unicode value of a character
Returns:
[int]: [integer value of the character]
"""
return ord(character) - ord('a')
def remove(self, string):
"""
removes an item from the trie
Args:
string ([string]): [string to be removed]
Raises:
ValueError: [handles if the keyword does not exist in the trie]
ValueError: [handles if the keyword does not exist in the trie]
"""
ptr = self.__root
length = len(string)
exists = True
for idx in range(length):
i = self.chartoint(string[idx])
if ptr.children[i] is not None:
ptr = ptr.children[i]
else:
exists = False
if ptr.isend is not True:
exists = False
if exists == False:
print('Keyword does not exist in trie')
ptr.isend = False
return
def insert(self, string):
"""
inserts a string into the trie
Args:
string ([string]): [string to be inserted into the trie]
"""
ptr = self.__root
length = len(string)
for idx in range(length):
i = self.chartoint(string[idx])
if ptr.children[i] is not None:
ptr = ptr.children[i]
else:
ptr.children[i] = node()
ptr = ptr.children[i]
ptr.isend = True
def search(self, string):
"""
searches for a string inside the trie
Args:
string ([string]): [string to search for]
Returns:
[bool]: [if the string is found in the trie or not]
"""
ptr = self.__root
length = len(string)
for idx in range(length):
i = self.chartoint(string[idx])
if ptr.children[i] is not None:
ptr = ptr.children[i]
else:
return False
if ptr.isend is not True:
return False
return True
def __getall(self, ptr, key, key_list):
"""
gets all values given arguments
Args:
ptr ([type])
key ([type])
key_list ([list])
"""
if ptr is None:
key_list.append(key)
return
if ptr.isend is True:
key_list.append(key)
for i in range(26):
if ptr.children[i] is not None:
self.__getall(ptr.children[i], key + chr(ord('a') + i), key_list)
def search_by_prefix(self, key):
"""
gets all strings starting with a prefix
Args:
key ([string]): [key to search by]
Returns:
[list]: [list with all strings starting with key]
"""
ptr = self.__root
key_list = []
length = len(key)
for idx in range(length):
i = self.chartoint(key[idx])
if ptr.children[i] is not None:
ptr = ptr.children[i]
else:
return None
self.__getall(ptr, key, key_list)
return key_list |
# Read a ", "-separated sequence from stdin and map each distinct element to
# its Unicode code point (first occurrence wins).
sequence = input().split(", ")
my_dict = {}
for el in sequence:
    # NOTE(review): ord() requires a single character -- a multi-character
    # element would raise TypeError; confirm the expected input format.
    ascii_number = ord(el)
    if el not in my_dict:
        my_dict[el] = ascii_number
print(my_dict) | sequence = input().split(', ')
my_dict = {}
for el in sequence:
ascii_number = ord(el)
if el not in my_dict:
my_dict[el] = ascii_number
print(my_dict) |
# @desc Can a triangle be made by these sides?
# @desc By Jake '24
def form_triangle(side1, side2, side3):
    """Triangle inequality: each pair of sides must out-sum the third."""
    pairs = (
        (side1, side2, side3),
        (side1, side3, side2),
        (side2, side3, side1),
    )
    return all(a + b > c for a, b, c in pairs)
def main():
    """Exercise form_triangle on a handful of sample side triples."""
    samples = ((10, 5, 25), (3, 4, 5), (6, 2, 7), (12, 6, 5), (8, 19, 20))
    for sides in samples:
        print(form_triangle(*sides))


if __name__ == '__main__':
    main()
| def form_triangle(side1, side2, side3):
if side1 + side2 > side3 and side1 + side3 > side2 and (side2 + side3 > side1):
return True
else:
return False
def main():
print(form_triangle(10, 5, 25))
print(form_triangle(3, 4, 5))
print(form_triangle(6, 2, 7))
print(form_triangle(12, 6, 5))
print(form_triangle(8, 19, 20))
if __name__ == '__main__':
main() |
def main():
    """Run both puzzle parts (Advent of Code 2019 day 2 style Intcode).

    Opcodes: 1 = add, 2 = multiply, 99 = halt.
    Part one asks: what value is at position 0 after the program halts?
    """
    part_one()
    part_two()
def _run_intcode(numbers):
    """Execute the Intcode program in *numbers*, mutating it in place.

    Opcodes: 1 = add, 2 = multiply, 99 = halt; operand and result slots
    are position-mode addresses.  Any other opcode prints "Error!" and
    stops.  Extracted because part_one and part_two previously contained
    two verbatim copies of this interpreter loop.
    """
    i = 0
    while i < len(numbers):
        opcode = numbers[i]
        if opcode == 99:
            break
        lhs = numbers[numbers[i + 1]]
        rhs = numbers[numbers[i + 2]]
        if opcode == 1:
            numbers[numbers[i + 3]] = lhs + rhs
        elif opcode == 2:
            numbers[numbers[i + 3]] = lhs * rhs
        else:
            print("Error!")
            break
        i += 4


def part_one():
    """Run input.txt with the noun=12 / verb=2 restore and print position 0."""
    print("Part One")
    with open("input.txt", "r") as input_file:
        for line in input_file:
            numbers = list(map(int, line.split(",")))
            # Restore the "1202 program alarm" state.
            numbers[1] = 12
            numbers[2] = 2
            _run_intcode(numbers)
            print(numbers[0], numbers)


def part_two():
    """Brute-force the noun/verb pair (0..99) producing output 19690720."""
    print("Part Two")
    with open("input.txt", "r") as input_file:
        for line in input_file:
            for i_noun in range(0, 100):
                for i_verb in range(0, 100):
                    numbers = list(map(int, line.split(",")))
                    numbers[1] = i_noun
                    numbers[2] = i_verb
                    _run_intcode(numbers)
                    if numbers[0] == 19690720:
                        print(numbers[0], " = 100 * ", i_noun, " * ", i_verb, numbers)
if __name__ == "__main__":
main()
| def main():
part_one()
part_two()
def part_one():
print('Part One')
with open('input.txt', 'r') as input_file:
for line in input_file:
numbers = list(map(int, line.split(',')))
numbers[1] = 12
numbers[2] = 2
i = 0
while i < len(numbers):
n = numbers[i]
if n == 99:
break
n1 = numbers[numbers[i + 1]]
n2 = numbers[numbers[i + 2]]
if n == 1:
result = n1 + n2
elif n == 2:
result = n1 * n2
else:
print('Error!')
break
numbers[numbers[i + 3]] = result
i += 4
print(numbers[0], numbers)
def part_two():
print('Part Two')
with open('input.txt', 'r') as input_file:
for line in input_file:
for i_noun in range(0, 100):
for i_verb in range(0, 100):
numbers = list(map(int, line.split(',')))
numbers[1] = i_noun
numbers[2] = i_verb
i = 0
while i < len(numbers):
n = numbers[i]
if n == 99:
break
noun = numbers[numbers[i + 1]]
verb = numbers[numbers[i + 2]]
if n == 1:
result = noun + verb
elif n == 2:
result = noun * verb
else:
print('Error!')
break
numbers[numbers[i + 3]] = result
i += 4
if numbers[0] == 19690720:
print(numbers[0], ' = 100 * ', i_noun, ' * ', i_verb, numbers)
if __name__ == '__main__':
main() |
# Written by Mayank
# This program is used to first parse the dataset files for training
# and testing and then compile them into dataparsed.cpp file
# from where we can load the data into sgx enclave.


def _read_tokens(path):
    """Return the whitespace-separated tokens of *path*.

    Replaces six copy-pasted readline loops.  Also fixes two defects of
    the original: it compared lengths with ``is 0`` (identity, not
    equality), and ``curline[:-1]`` chopped the last data character when
    the file's final line had no trailing newline.
    """
    with open(path, "r") as f:
        return f.read().split()


def _array_decl(name, tokens):
    """Render one C array definition: ``int name[len] = {a, b, ...};``."""
    return "int %s[%d] = {%s};\n" % (name, len(tokens), ", ".join(tokens))


# (input file, emitted C array name) for every dataset we embed, in the
# order the original script processed them.
_DATASETS = [
    ("mnist_data_8_AC", "dataAC"),
    ("mnist_data_8_BD", "dataBD"),
    ("mnist_labels_8_AC", "labelAC"),
    ("mnist_labels_8_BD", "labelBD"),
    ("mnist_data_8_samples", "datasample"),
    ("mnist_labels_8_samples", "labelsample"),
]

# Emit dataparsed.cpp: one int array per dataset.  ("w" mode already
# truncates, so the original seek(0)/truncate() calls were redundant.)
lenlist = []
with open("dataparsed.cpp", "w") as cppfile:
    cppfile.write("#include \"dataparsed.h\"\n")
    for path, cname in _DATASETS:
        tokens = _read_tokens(path)
        lenlist.append(len(tokens))
        cppfile.write(_array_decl(cname, tokens))
print("Contents of data files compiled and written to dataparsed.cpp")

# Emit dataparsed.h with matching extern declarations (no trailing newline
# after the last extern, matching the original output).
with open("dataparsed.h", "w") as cppheader:
    externs = "\n".join(
        "extern int %s[%d];" % (cname, n)
        for (_, cname), n in zip(_DATASETS, lenlist)
    )
    cppheader.write(
        "#include <string>\n#include <stdio.h>\n#include <stdlib.h>\n" + externs
    )
print("Header file written")
| accdata_ac = ''
accdata_bd = ''
acclabel_ac = ''
acclabel_bd = ''
accdatasample = ''
acclabelsample = ''
file = open('mnist_data_8_AC', 'r')
while True:
curline = file.readline()
if len(curline) is 0:
break
curline = curline[:-1]
if len(curline) > 0:
accdata_ac += curline + ' '
file.close()
file = open('mnist_data_8_BD', 'r')
while True:
curline = file.readline()
if len(curline) is 0:
break
curline = curline[:-1]
if len(curline) > 0:
accdata_bd += curline + ' '
file.close()
file = open('mnist_labels_8_AC', 'r')
while True:
curline = file.readline()
if len(curline) is 0:
break
curline = curline[:-1]
if len(curline) > 0:
acclabel_ac += curline + ' '
file.close()
file = open('mnist_labels_8_BD', 'r')
while True:
curline = file.readline()
if len(curline) is 0:
break
curline = curline[:-1]
if len(curline) > 0:
acclabel_bd += curline + ' '
file.close()
file = open('mnist_data_8_samples', 'r')
while True:
curline = file.readline()
if len(curline) is 0:
break
curline = curline[:-1]
if len(curline) > 0:
accdatasample += curline + ' '
file.close()
file = open('mnist_labels_8_samples', 'r')
while True:
curline = file.readline()
if len(curline) is 0:
break
curline = curline[:-1]
if len(curline) > 0:
acclabelsample += curline + ' '
file.close()
split_data_ac = accdataAC.split()
split_data_bd = accdataBD.split()
split_label_ac = acclabelAC.split()
split_label_bd = acclabelBD.split()
split_datasample = accdatasample.split()
split_labelsample = acclabelsample.split()
cppfile = open('dataparsed.cpp', 'w')
cppfile.seek(0)
cppfile.truncate()
boilerplate = '#include "dataparsed.h"\n'
cppfile.write(boilerplate)
lenlist = []
declaration_full = ''
curlen = len(split_dataAC)
lenlist.append(curlen)
declaration_full = declaration_full + 'int dataAC[' + str(curlen) + '] = {'
for i in range(curlen):
if i == curlen - 1:
declaration_full = declaration_full + split_dataAC[i]
else:
declaration_full = declaration_full + split_dataAC[i] + ', '
declaration_full = declaration_full + '};\n'
cppfile.write(declaration_full)
declaration_full = ''
curlen = len(split_dataBD)
lenlist.append(curlen)
declaration_full = declaration_full + 'int dataBD[' + str(curlen) + '] = {'
for i in range(curlen):
if i == curlen - 1:
declaration_full = declaration_full + split_dataBD[i]
else:
declaration_full = declaration_full + split_dataBD[i] + ', '
declaration_full = declaration_full + '};\n'
cppfile.write(declaration_full)
declaration_full = ''
curlen = len(split_labelAC)
lenlist.append(curlen)
declaration_full = declaration_full + 'int labelAC[' + str(curlen) + '] = {'
for i in range(curlen):
if i == curlen - 1:
declaration_full = declaration_full + split_labelAC[i]
else:
declaration_full = declaration_full + split_labelAC[i] + ', '
declaration_full = declaration_full + '};\n'
cppfile.write(declaration_full)
declaration_full = ''
curlen = len(split_labelBD)
lenlist.append(curlen)
declaration_full = declaration_full + 'int labelBD[' + str(curlen) + '] = {'
for i in range(curlen):
if i == curlen - 1:
declaration_full = declaration_full + split_labelBD[i]
else:
declaration_full = declaration_full + split_labelBD[i] + ', '
declaration_full = declaration_full + '};\n'
cppfile.write(declaration_full)
declaration_full = ''
curlen = len(split_datasample)
lenlist.append(curlen)
declaration_full = declaration_full + 'int datasample[' + str(curlen) + '] = {'
for i in range(curlen):
if i == curlen - 1:
declaration_full = declaration_full + split_datasample[i]
else:
declaration_full = declaration_full + split_datasample[i] + ', '
declaration_full = declaration_full + '};\n'
cppfile.write(declaration_full)
declaration_full = ''
curlen = len(split_labelsample)
lenlist.append(curlen)
declaration_full = declaration_full + 'int labelsample[' + str(curlen) + '] = {'
for i in range(curlen):
if i == curlen - 1:
declaration_full = declaration_full + split_labelsample[i]
else:
declaration_full = declaration_full + split_labelsample[i] + ', '
declaration_full = declaration_full + '};\n'
cppfile.write(declaration_full)
print('Contents of data files compiled and written to dataparsed.cpp')
cppfile.close()
cppheader = open('dataparsed.h', 'w')
cppheader.seek(0)
cppheader.truncate()
headerinfo = '#include <string>\n#include <stdio.h>\n#include <stdlib.h>\nextern int dataAC[' + str(lenlist[0]) + '];\nextern int dataBD[' + str(lenlist[1]) + '];\nextern int labelAC[' + str(lenlist[2]) + '];\nextern int labelBD[' + str(lenlist[3]) + '];\nextern int datasample[' + str(lenlist[4]) + '];\nextern int labelsample[' + str(lenlist[5]) + '];'
cppheader.write(headerinfo)
cppheader.close()
print('Header file written') |
# We're going to use a custom timer, so we don't actually have to do anything
# in these functions.
def top():  # NOTE: the line numbers of every def below are baked into expected_output_* -- do not add or remove lines here
    mid1()
    mid2()
    mid3(5)
    C1.samename()
    C2.samename()
def mid1():  # calls bot twice directly and mid2 five times
    bot()
    for i in range(5):
        mid2()
    bot()
def mid2():  # single bot call
    bot()
def bot():  # leaf function
    pass
def mid3(x):  # mutually recursive with mid4; recursion depth x
    if x > 0:
        mid4(x)
def mid4(x):
    mid3(x - 1)
class C1(object):  # C1/C2 carry same-named methods; expected output disambiguates them by line number (samename:38 / samename:44)
    @staticmethod
    def samename():
        pass
class C2(object):
    @staticmethod
    def samename():
        pass
# Expected callgrind-format output when profiling top() under Python 2.
# '<filename>' is substituted with this module's path; the numeric line
# fields (5, 13, 20, 24, 28, 33, 38, 44) encode the def positions above.
expected_output_py2 = """event: ns : Nanoseconds
events: ns
summary: 59000
fl=<filename>
fn=top
5 6000
cfl=<filename>
cfn=mid1
calls=1 13
5 27000
cfl=<filename>
cfn=mid2
calls=1 20
5 3000
cfl=<filename>
cfn=mid3
calls=1 28
5 21000
cfl=<filename>
cfn=samename:38
calls=1 38
5 1000
cfl=<filename>
cfn=samename:44
calls=1 44
5 1000
fl=<filename>
fn=mid1
13 9000
cfl=<filename>
cfn=mid2
calls=5 20
13 15000
cfl=<filename>
cfn=bot
calls=2 24
13 2000
cfl=~
cfn=<range>
calls=1 0
13 1000
fl=<filename>
fn=mid2
20 12000
cfl=<filename>
cfn=bot
calls=6 24
20 6000
fl=<filename>
fn=bot
24 8000
fl=<filename>
fn=mid3
28 11000
cfl=<filename>
cfn=mid4
calls=5 33
28 19000
fl=<filename>
fn=mid4
33 10000
cfl=<filename>
cfn=mid3
calls=5 28
33 17000
fl=<filename>
fn=samename:38
38 1000
fl=<filename>
fn=samename:44
44 1000
fl=~
fn=<method 'disable' of '_lsprof.Profiler' objects>
0 1000
fl=~
fn=<range>
0 1000
""".replace('<filename>', top.__code__.co_filename)
# Python 3 variant of the expected output: no builtin <range> entries and
# slightly different totals; same baked-in def line numbers as above.
expected_output_py3 = """event: ns : Nanoseconds
events: ns
summary: 57000
fl=<filename>
fn=top
5 6000
cfl=<filename>
cfn=mid1
calls=1 13
5 25000
cfl=<filename>
cfn=mid2
calls=1 20
5 3000
cfl=<filename>
cfn=mid3
calls=1 28
5 21000
cfl=<filename>
cfn=samename:38
calls=1 38
5 1000
cfl=<filename>
cfn=samename:44
calls=1 44
5 1000
fl=<filename>
fn=mid1
13 8000
cfl=<filename>
cfn=mid2
calls=5 20
13 15000
cfl=<filename>
cfn=bot
calls=2 24
13 2000
fl=<filename>
fn=mid2
20 12000
cfl=<filename>
cfn=bot
calls=6 24
20 6000
fl=<filename>
fn=bot
24 8000
fl=<filename>
fn=mid3
28 11000
cfl=<filename>
cfn=mid4
calls=5 33
28 19000
fl=<filename>
fn=mid4
33 10000
cfl=<filename>
cfn=mid3
calls=5 28
33 17000
fl=<filename>
fn=samename:38
38 1000
fl=<filename>
fn=samename:44
44 1000
fl=~
fn=<method 'disable' of '_lsprof.Profiler' objects>
0 1000
""".replace('<filename>', __file__)
| def top():
mid1()
mid2()
mid3(5)
C1.samename()
C2.samename()
def mid1():
bot()
for i in range(5):
mid2()
bot()
def mid2():
bot()
def bot():
pass
def mid3(x):
if x > 0:
mid4(x)
def mid4(x):
mid3(x - 1)
class C1(object):
@staticmethod
def samename():
pass
class C2(object):
@staticmethod
def samename():
pass
expected_output_py2 = "event: ns : Nanoseconds\nevents: ns\nsummary: 59000\nfl=<filename>\nfn=top\n5 6000\ncfl=<filename>\ncfn=mid1\ncalls=1 13\n5 27000\ncfl=<filename>\ncfn=mid2\ncalls=1 20\n5 3000\ncfl=<filename>\ncfn=mid3\ncalls=1 28\n5 21000\ncfl=<filename>\ncfn=samename:38\ncalls=1 38\n5 1000\ncfl=<filename>\ncfn=samename:44\ncalls=1 44\n5 1000\n\nfl=<filename>\nfn=mid1\n13 9000\ncfl=<filename>\ncfn=mid2\ncalls=5 20\n13 15000\ncfl=<filename>\ncfn=bot\ncalls=2 24\n13 2000\ncfl=~\ncfn=<range>\ncalls=1 0\n13 1000\n\nfl=<filename>\nfn=mid2\n20 12000\ncfl=<filename>\ncfn=bot\ncalls=6 24\n20 6000\n\nfl=<filename>\nfn=bot\n24 8000\n\nfl=<filename>\nfn=mid3\n28 11000\ncfl=<filename>\ncfn=mid4\ncalls=5 33\n28 19000\n\nfl=<filename>\nfn=mid4\n33 10000\ncfl=<filename>\ncfn=mid3\ncalls=5 28\n33 17000\n\nfl=<filename>\nfn=samename:38\n38 1000\n\nfl=<filename>\nfn=samename:44\n44 1000\n\nfl=~\nfn=<method 'disable' of '_lsprof.Profiler' objects>\n0 1000\n\nfl=~\nfn=<range>\n0 1000\n\n".replace('<filename>', top.__code__.co_filename)
expected_output_py3 = "event: ns : Nanoseconds\nevents: ns\nsummary: 57000\nfl=<filename>\nfn=top\n5 6000\ncfl=<filename>\ncfn=mid1\ncalls=1 13\n5 25000\ncfl=<filename>\ncfn=mid2\ncalls=1 20\n5 3000\ncfl=<filename>\ncfn=mid3\ncalls=1 28\n5 21000\ncfl=<filename>\ncfn=samename:38\ncalls=1 38\n5 1000\ncfl=<filename>\ncfn=samename:44\ncalls=1 44\n5 1000\n\nfl=<filename>\nfn=mid1\n13 8000\ncfl=<filename>\ncfn=mid2\ncalls=5 20\n13 15000\ncfl=<filename>\ncfn=bot\ncalls=2 24\n13 2000\n\nfl=<filename>\nfn=mid2\n20 12000\ncfl=<filename>\ncfn=bot\ncalls=6 24\n20 6000\n\nfl=<filename>\nfn=bot\n24 8000\n\nfl=<filename>\nfn=mid3\n28 11000\ncfl=<filename>\ncfn=mid4\ncalls=5 33\n28 19000\n\nfl=<filename>\nfn=mid4\n33 10000\ncfl=<filename>\ncfn=mid3\ncalls=5 28\n33 17000\n\nfl=<filename>\nfn=samename:38\n38 1000\n\nfl=<filename>\nfn=samename:44\n44 1000\n\nfl=~\nfn=<method 'disable' of '_lsprof.Profiler' objects>\n0 1000\n\n".replace('<filename>', __file__) |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu May 18 11:06:15 2017
@author: tomislav
"""
def sliding_window(image, stepSize=20, windowSize=(200, 200)):
    """Yield (x, y, window) tuples sliding a windowSize box over *image*.

    *image* is indexed as image[rows, cols] (e.g. a numpy array); windows
    near the right/bottom edge may be smaller than windowSize, since the
    range bound allows the origin to start within one stepSize of the edge
    and slicing clips at the image boundary.

    Fix: ``xrange`` is Python-2-only; ``range`` iterates identically here
    and also runs on Python 3.
    """
    win_w, win_h = windowSize
    for y in range(0, image.shape[0] - win_h + stepSize, stepSize):
        for x in range(0, image.shape[1] - win_w + stepSize, stepSize):
            yield (x, y, image[y:y + win_h, x:x + win_w])
| """
Created on Thu May 18 11:06:15 2017
@author: tomislav
"""
def sliding_window(image, stepSize=20, windowSize=(200, 200)):
for y in xrange(0, image.shape[0] - windowSize[1] + stepSize, stepSize):
for x in xrange(0, image.shape[1] - windowSize[0] + stepSize, stepSize):
yield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]]) |
"""
0784. Letter Case Permutation
Medium
Given a string S, we can transform every letter individually to be lowercase or uppercase to create another string.
Return a list of all possible strings we could create. You can return the output in any order.
Example 1:
Input: S = "a1b2"
Output: ["a1b2","a1B2","A1b2","A1B2"]
Example 2:
Input: S = "3z4"
Output: ["3z4","3Z4"]
Example 3:
Input: S = "12345"
Output: ["12345"]
Example 4:
Input: S = "0"
Output: ["0"]
Constraints:
S will be a string with length between 1 and 12.
S will consist only of letters or digits.
"""
class Solution:
    def letterCasePermutation(self, S: str) -> List[str]:
        """Return every string obtainable from S by re-casing its letters.

        Each non-letter contributes a single choice; each letter expands
        into its lower- and upper-case forms (lowercase variant first),
        processed left to right.
        """
        variants = [""]
        for ch in S:
            choices = (ch.lower(), ch.upper()) if ch.isalpha() else (ch,)
            variants = [prefix + c for prefix in variants for c in choices]
        return variants
0784. Letter Case Permutation
Medium
Given a string S, we can transform every letter individually to be lowercase or uppercase to create another string.
Return a list of all possible strings we could create. You can return the output in any order.
Example 1:
Input: S = "a1b2"
Output: ["a1b2","a1B2","A1b2","A1B2"]
Example 2:
Input: S = "3z4"
Output: ["3z4","3Z4"]
Example 3:
Input: S = "12345"
Output: ["12345"]
Example 4:
Input: S = "0"
Output: ["0"]
Constraints:
S will be a string with length between 1 and 12.
S will consist only of letters or digits.
"""
class Solution:
def letter_case_permutation(self, S: str) -> List[str]:
res = ['']
for s in S:
if s.isalpha():
res = [i + j for i in res for j in [s.lower(), s.upper()]]
else:
res = [i + s for i in res]
return res |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 14 19:08:07 2018
@author: Yilin Liu
"""
class Parameter(object):
    """Simple value container for four solver settings."""

    def __init__(self, re, it, co, no):
        """Store the four settings under descriptive attribute names."""
        self.regularization = re
        self.most_iter_num = it
        self.convergence = co
        self.nonconvexity = no

    def print_value(self):
        """Print the four stored settings on one space-separated line."""
        values = (self.regularization, self.most_iter_num,
                  self.convergence, self.nonconvexity)
        print(*values)
Created on Sat Jul 14 19:08:07 2018
@author: Yilin Liu
"""
class Parameter(object):
def __init__(self, re, it, co, no):
self.regularization = re
self.most_iter_num = it
self.convergence = co
self.nonconvexity = no
def print_value(self):
print(self.regularization, self.most_iter_num, self.convergence, self.nonconvexity) |
# **Please read this problem entirely!!** The majority of this problem consists of learning how to read code, which is an incredibly useful and important skill. At the end, you will implement a short function. Be sure to take your time on this problem - it may seem easy, but reading someone else's code can be challenging and this is an important exercise.
#
#
# Representing hands
# A hand is the set of letters held by a player during the game. The player is initially dealt a set of random letters. For example, the player could start out with the following hand: a, q, l, m, u, i, l. In our program, a hand will be represented as a dictionary: the keys are (lowercase) letters and the values are the number of times the particular letter is repeated in that hand. For example, the above hand would be represented as:
#
# hand = {'a':1, 'q':1, 'l':2, 'm':1, 'u':1, 'i':1}
# Notice how the repeated letter 'l' is represented. Remember that with a dictionary, the usual way to access a value is hand['a'], where 'a' is the key we want to find. However, this only works if the key is in the dictionary; otherwise, we get a KeyError. To avoid this, we can use the call hand.get('a',0). This is the "safe" way to access a value if we are not sure the key is in the dictionary. d.get(key,default) returns the value for key if key is in the dictionary d, else default. If default is not given, it returns None, so that this method never raises a KeyError. For example:
#
# >>> hand['e']
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# KeyError: 'e'
# >>> hand.get('e', 0)
# 0
# Converting words into dictionary representation
# One useful function we've defined for you is getFrequencyDict, defined near the top of ps4a.py. When given a string of letters as an input, it returns a dictionary where the keys are letters and the values are the number of times that letter is represented in the input string. For example:
#
# >>> getFrequencyDict("hello")
# {'h': 1, 'e': 1, 'l': 2, 'o': 1}
# As you can see, this is the same kind of dictionary we use to represent hands.
#
# Displaying a hand
# Given a hand represented as a dictionary, we want to display it in a user-friendly way. We have provided the implementation for this in the displayHand function. Take a few minutes right now to read through this function carefully and understand what it does and how it works.
#
# Generating a random hand
# The hand a player is dealt is a set of letters chosen at random. We provide you with the implementation of a function that generates this random hand, dealHand. The function takes as input a positive integer n, and returns a new object, a hand containing n lowercase letters. Again, take a few minutes (right now!) to read through this function carefully and understand what it does and how it works.
#
# Removing letters from a hand (you implement this)
# The player starts with a hand, a set of letters. As the player spells out words, letters from this set are used up. For example, the player could start out with the following hand: a, q, l, m, u, i, l. The player could choose to spell the word quail . This would leave the following letters in the player's hand: l, m. Your task is to implement the function updateHand, which takes in two inputs - a hand and a word (string). updateHand uses letters from the hand to spell the word, and then returns a copy of the hand, containing only the letters remaining. For example:
#
# >>> hand = {'a':1, 'q':1, 'l':2, 'm':1, 'u':1, 'i':1}
# >>> displayHand(hand) # Implemented for you
# a q l l m u i
# >>> hand = updateHand(hand, 'quail') # You implement this function!
# >>> hand
# {'a':0, 'q':0, 'l':1, 'm':1, 'u':0, 'i':0}
# >>> displayHand(hand)
# l m
# Implement the updateHand function. Make sure this function has no side effects: i.e., it must not mutate the hand passed in. Before pasting your function definition here, be sure you've passed the appropriate tests in test_ps4a.py.
hand = {'a':1, 'q':1, 'l':2, 'm':1, 'u':1, 'i':1}
def updateHand(hand, word):
    """
    Return a copy of `hand` with the letters of `word` used up.

    Assumes `hand` holds at least as many of each letter as `word`
    requires.  The input dictionary is never mutated; counts that drop
    to zero remain in the returned dictionary (matching the spec's
    example output).

    word: string
    hand: dictionary (string -> int)
    returns: dictionary (string -> int)
    """
    remaining = dict(hand)  # shallow copy keeps the caller's hand intact
    for letter in word:
        remaining[letter] = remaining.get(letter, 0) - 1
    return remaining
print(updateHand(hand, 'quail'))
| hand = {'a': 1, 'q': 1, 'l': 2, 'm': 1, 'u': 1, 'i': 1}
def update_hand(hand, word):
"""
Assumes that 'hand' has all the letters in word.
In other words, this assumes that however many times
a letter appears in 'word', 'hand' has at least as
many of that letter in it.
Updates the hand: uses up the letters in the given word
and returns the new hand, without those letters in it.
Has no side effects: does not modify hand.
word: string
hand: dictionary (string -> int)
returns: dictionary (string -> int)
"""
hand_clone = hand.copy()
for c in word:
hand_clone[c] = hand_clone.get(c, 0) - 1
return hand_clone
print(update_hand(hand, 'quail')) |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
    """Map the test package to the data files bundled with it."""
    data_files = {"pypit.tests": ["files/*"]}
    return data_files
| def get_package_data():
return {'pypit.tests': ['files/*']} |
"""
sm.sendNext("want some item?")
sm.giveAndEquip(1382000)
sm.dispose()
""" | """
sm.sendNext("want some item?")
sm.giveAndEquip(1382000)
sm.dispose()
""" |
class Item:
    """A reviewable item node in the review graph."""
    def __init__(self, id, assignment_id = None, inherent = None, max_inherent = None):
        # NOTE: parameter `id` shadows the builtin; kept for interface stability.
        self.id = id
        self.assignment_id = assignment_id
        # inherent / max_inherent: presumably the item's (maximum) intrinsic
        # score - TODO confirm against the code that populates them.
        self.inherent = inherent
        self.max_inherent = max_inherent
        # User object -> Review object, filled by Graph.add_review.
        self.reviews = {}
class User:
    """A reviewer node in the review graph."""
    def __init__(self, name):
        self.name = name
        # NOTE(review): a per-user `users` set looks unused in this chunk and
        # may be a copy-paste from Graph - confirm before relying on it.
        self.users = set()
        # Item object -> Review object, filled by Graph.add_review.
        self.reviews = {}
class Review:
    """A single review: identifier, grade, and an optional extra feature."""
    def __init__(self, review_id = None, grade = None, extra_informative_feature = None):
        # Bug fix: the original assigned the builtin `id` function here,
        # silently discarding the `review_id` argument.
        self.review_id = review_id
        self.grade = grade
        self.extra_informative_feature = extra_informative_feature
class Graph:
    def __init__(self):
        # All Item objects registered in the graph.
        self.items = set()
        # All User objects registered in the graph.
        self.users = set()
        # (User, Item) -> Review; mirrors item.reviews and user.reviews.
        self.reviews = {}
        # NOTE(review): nothing in this chunk populates this list -
        # confirm its role against the rest of the file.
        self.items_with_ground_truth = []
        # user name -> User object.
        self.user_dict = {}
        # item id -> Item object.
        self.item_dict = {}
def add_item(self, item_id, inherent = None, max_inherent = None, assignment_id = None):
item = Item(id = item_id, inherent= inherent, max_inherent= max_inherent, assignment_id= assignment_id)
self.item_dict[item_id] = item
self.items = self.items | {item}
def add_user(self, user_name):
user = User(user_name)
self.user_dict[user_name] = user
self.users = self.users | {user}
    def get_user(self, user_name):
        """Return the User registered under `user_name`, or None if unknown."""
        return self.user_dict.get(user_name)
    def get_item(self, item_id):
        """Return the Item registered under `item_id`, or None if unknown."""
        return self.item_dict.get(item_id)
def has_voted(self, user_name,item_id):
if not user_name in self.user_dict or not item_id in self.item_dict:
return False
if (self.get_user(user_name), self.get_item(item_id)) in self.reviews:
return True
else:
return False
def get_no_of_votes(self, item_id):
if not item_id in self.item_dict:
return 0
return len(self.get_item(item_id).reviews)
    def add_review(self, user_name, item_id, review, assignment_id = None):
        """
        Add a review linking `user_name` to `item_id`.

        The review is stored redundantly: in the graph-level
        `self.reviews` dict keyed by (user, item) as well as in the
        per-item `item.reviews` and per-user `user.reviews` dicts,
        trading memory for lookup convenience.

        Unknown users/items are created on the fly; `assignment_id` is
        only used when the item has to be created here.
        """
        # Create User/Item objects on first sight of the name/id.
        if not user_name in self.user_dict:
            self.add_user(user_name)
        if not item_id in self.item_dict:
            self.add_item(item_id, assignment_id= assignment_id)
        # Resolve the registered objects for the given name/id.
        user = self.get_user(user_name)
        item = self.get_item(item_id)
        # Record the review in the per-item and per-user indexes.
        item.reviews[user] = review
        user.reviews[item] = review
self.reviews[(user, item)] = review | class Item:
def __init__(self, id, assignment_id=None, inherent=None, max_inherent=None):
self.id = id
self.assignment_id = assignment_id
self.inherent = inherent
self.max_inherent = max_inherent
self.reviews = {}
class User:
    """A reviewer node in the review graph (fixed-column copy)."""
    def __init__(self, name):
        self.name = name
        # NOTE(review): a per-user `users` set looks unused in this chunk and
        # may be a copy-paste from Graph - confirm before relying on it.
        self.users = set()
        # Item object -> Review object, filled by Graph.add_review.
        self.reviews = {}
class Review:
    """A single review: identifier, grade, and an optional extra feature."""
    def __init__(self, review_id=None, grade=None, extra_informative_feature=None):
        # Bug fix: the original assigned the builtin `id` function here,
        # silently discarding the `review_id` argument.
        self.review_id = review_id
        self.grade = grade
        self.extra_informative_feature = extra_informative_feature
class Graph:
def __init__(self):
self.items = set()
self.users = set()
self.reviews = {}
self.items_with_ground_truth = []
self.user_dict = {}
self.item_dict = {}
def add_item(self, item_id, inherent=None, max_inherent=None, assignment_id=None):
item = item(id=item_id, inherent=inherent, max_inherent=max_inherent, assignment_id=assignment_id)
self.item_dict[item_id] = item
self.items = self.items | {item}
def add_user(self, user_name):
user = user(user_name)
self.user_dict[user_name] = user
self.users = self.users | {user}
def get_user(self, user_name):
return self.user_dict.get(user_name)
def get_item(self, item_id):
return self.item_dict.get(item_id)
def has_voted(self, user_name, item_id):
if not user_name in self.user_dict or not item_id in self.item_dict:
return False
if (self.get_user(user_name), self.get_item(item_id)) in self.reviews:
return True
else:
return False
def get_no_of_votes(self, item_id):
if not item_id in self.item_dict:
return 0
return len(self.get_item(item_id).reviews)
def add_review(self, user_name, item_id, review, assignment_id=None):
"""
Adds a review to the graph.
It inserts the review to the generic dictionary of reviews
but also to the item.reviews and user.reviews dictionaries.
There is redundancy of information but enhances accessibility.
"""
if not user_name in self.user_dict:
self.add_user(user_name)
if not item_id in self.item_dict:
self.add_item(item_id, assignment_id=assignment_id)
user = self.get_user(user_name)
item = self.get_item(item_id)
item.reviews[user] = review
user.reviews[item] = review
self.reviews[user, item] = review |
description = 'LakeShore 340 cryo controller'
group = 'optional'
includes = ['alias_T']
tango_base = 'tango://miractrl.mira.frm2:10000/mira/'
devices = dict(
T_ls2 = device('nicos.devices.entangle.TemperatureController',
description = 'temperature regulation',
tangodevice = tango_base + 'ls2/ls_control1',
pollinterval = 0.7,
maxage = 2,
),
T_ls2_A = device('nicos.devices.entangle.Sensor',
description = 'sensor A',
tangodevice = tango_base + 'ls2/ls_sensor1',
pollinterval = 0.7,
maxage = 2,
),
T_ls2_B = device('nicos.devices.entangle.Sensor',
description = 'sensor B',
tangodevice = tango_base + 'ls2/ls_sensor2',
pollinterval = 0.7,
maxage = 2,
),
T_ls2_C = device('nicos.devices.entangle.Sensor',
description = 'sensor C',
tangodevice = tango_base + 'ls2/ls_sensor3',
pollinterval = 0.7,
maxage = 2,
),
T_ls2_D = device('nicos.devices.entangle.Sensor',
description = 'sensor D',
tangodevice = tango_base + 'ls2/ls_sensor4',
pollinterval = 0.7,
maxage = 2,
),
)
alias_config = {
'T': {'T_ls2': 180}, # lower than default T_ccr5
'Ts': {'T_ls2_A': 60, 'T_ls2_B': 50},
}
| description = 'LakeShore 340 cryo controller'
group = 'optional'
includes = ['alias_T']
tango_base = 'tango://miractrl.mira.frm2:10000/mira/'
devices = dict(T_ls2=device('nicos.devices.entangle.TemperatureController', description='temperature regulation', tangodevice=tango_base + 'ls2/ls_control1', pollinterval=0.7, maxage=2), T_ls2_A=device('nicos.devices.entangle.Sensor', description='sensor A', tangodevice=tango_base + 'ls2/ls_sensor1', pollinterval=0.7, maxage=2), T_ls2_B=device('nicos.devices.entangle.Sensor', description='sensor B', tangodevice=tango_base + 'ls2/ls_sensor2', pollinterval=0.7, maxage=2), T_ls2_C=device('nicos.devices.entangle.Sensor', description='sensor C', tangodevice=tango_base + 'ls2/ls_sensor3', pollinterval=0.7, maxage=2), T_ls2_D=device('nicos.devices.entangle.Sensor', description='sensor D', tangodevice=tango_base + 'ls2/ls_sensor4', pollinterval=0.7, maxage=2))
alias_config = {'T': {'T_ls2': 180}, 'Ts': {'T_ls2_A': 60, 'T_ls2_B': 50}} |
class ActivePetUpdateRequestPacket:
    """Packet requesting an update of the player's active pet.

    Serialized as one byte (commandType) followed by one 32-bit int
    (instanceId); `read` mirrors `write` in the same order.
    """
    def __init__(self):
        # Packet type tag used to identify this message.
        self.type = "ACTIVEPETUPDATEREQUEST"
        # Which pet command is issued; code meanings are not visible
        # here - confirm against the protocol definition.
        self.commandType = 0
        # Target pet instance id.
        self.instanceId = 0
    def write(self, writer):
        """Serialize this packet onto `writer`."""
        writer.writeByte(self.commandType)
        writer.writeInt32(self.instanceId)
    def read(self, reader):
        """Populate this packet from `reader` (mirror of `write`)."""
        self.commandType = reader.readByte()
        self.instanceId = reader.readInt32()
| class Activepetupdaterequestpacket:
def __init__(self):
self.type = 'ACTIVEPETUPDATEREQUEST'
self.commandType = 0
self.instanceId = 0
def write(self, writer):
writer.writeByte(self.commandType)
writer.writeInt32(self.instanceId)
def read(self, reader):
self.commandType = reader.readByte()
self.instanceId = reader.readInt32() |
### create tuple of 3 elements:
point3d = (4, 0, 3)
print(point3d[0], point3d[1], point3d[2])
# 4 0 3
### negative indices count from the end:
print(point3d[-1], point3d[-2], point3d[-3])
# 3 0 4
### retrieve tuple items
address = ('Bulgaria', 'Sofia', 'Nezabravka str', 14)
country = address[0]
town = address[1]
street = address[2]
street_num = address[3]
print(country, town, street, street_num)
# Bulgaria Sofia Nezabravka str 14
### change a tuple item (tuples are immutable - uncommenting raises):
# address[0] = "France"
# TypeError: 'tuple' object does not support item assignment
### create tuple with 3 elements:
ada_birth_date = (10, "December", 1815)
# retrieve tuple elements:
ada_birth_day = ada_birth_date[0]
ada_birth_month = ada_birth_date[1]
ada_birth_year = ada_birth_date[2]
print("Ada is born on {} {} in {}".format(ada_birth_month, ada_birth_day, ada_birth_year))
# Ada is born on December 10 in 1815
| point3d = (4, 0, 3)
print(point3d[0], point3d[1], point3d[2])
print(point3d[-1], point3d[-2], point3d[-3])
address = ('Bulgaria', 'Sofia', 'Nezabravka str', 14)
country = address[0]
town = address[1]
street = address[2]
street_num = address[3]
print(country, town, street, street_num)
ada_birth_date = (10, 'December', 1815)
ada_birth_day = ada_birth_date[0]
ada_birth_month = ada_birth_date[1]
ada_birth_year = ada_birth_date[2]
print('Ada is born on {} {} in {}'.format(ada_birth_month, ada_birth_day, ada_birth_year)) |
__author__ = "Jason Lin"
__email__ = "jason40418@yahoo.com.tw"
__license__ = "MIT"
__version__ = "1.0.0"
| __author__ = 'Jason Lin'
__email__ = 'jason40418@yahoo.com.tw'
__license__ = 'MIT'
__version__ = '1.0.0' |
# -*- coding: utf-8 -*-
# Scrapy settings for scraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'scraper'
SPIDER_MODULES = ['scraper.spiders']
NEWSPIDER_MODULE = 'scraper.spiders'
JOBDIR = 'jobs/crunchbase'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# Firefox
# USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0'
# Edge
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 6
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 1
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Upgrade-Insecure-Requests': '1',
# 'Connection': 'keep-alive',
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'scraper.middlewares.ScraperSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'scraper.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'scraper.pipelines.ScraperPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
HTTPCACHE_ENABLED = True
HTTPCACHE_EXPIRATION_SECS = 0
HTTPCACHE_DIR = 'httpcache'
HTTPCACHE_IGNORE_HTTP_CODES = [416]
HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
##########################################################
# Uncomment the following if using scrapy-rotating-proxies
##########################################################
# DOWNLOADER_MIDDLEWARES = {
# 'rotating_proxies.middlewares.RotatingProxyMiddleware': 610,
# 'rotating_proxies.middlewares.BanDetectionMiddleware': 620,
# }
# def load_lines(path):
# with open(path, 'rb') as f:
# return [line.strip() for line in
# f.read().decode('utf8').splitlines()
# if line.strip()]
# ROTATING_PROXY_LIST = load_lines('proxies.txt')
############################################
## Uncomment the following if using Scrapoxy
############################################
CONCURRENT_REQUESTS_PER_DOMAIN = 1
RETRY_TIMES = 3
# PROXY
PROXY = 'http://127.0.0.1:8888/?noconnect'
# SCRAPOXY
API_SCRAPOXY = 'http://127.0.0.1:8889/api'
API_SCRAPOXY_PASSWORD = 'password'
# BLACKLISTING
BLACKLIST_HTTP_STATUS_CODES = [ 500, 416 ]
DOWNLOADER_MIDDLEWARES = {
'scrapoxy.downloadmiddlewares.proxy.ProxyMiddleware': 100,
'scrapoxy.downloadmiddlewares.wait.WaitMiddleware': 101,
'scrapoxy.downloadmiddlewares.scale.ScaleMiddleware': 102,
'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
'scrapoxy.downloadmiddlewares.blacklist.BlacklistDownloaderMiddleware': 950,
}
| bot_name = 'scraper'
spider_modules = ['scraper.spiders']
newspider_module = 'scraper.spiders'
jobdir = 'jobs/crunchbase'
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393'
robotstxt_obey = False
download_delay = 6
concurrent_requests_per_domain = 1
item_pipelines = {'scraper.pipelines.ScraperPipeline': 300}
httpcache_enabled = True
httpcache_expiration_secs = 0
httpcache_dir = 'httpcache'
httpcache_ignore_http_codes = [416]
httpcache_storage = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
concurrent_requests_per_domain = 1
retry_times = 3
proxy = 'http://127.0.0.1:8888/?noconnect'
api_scrapoxy = 'http://127.0.0.1:8889/api'
api_scrapoxy_password = 'password'
blacklist_http_status_codes = [500, 416]
downloader_middlewares = {'scrapoxy.downloadmiddlewares.proxy.ProxyMiddleware': 100, 'scrapoxy.downloadmiddlewares.wait.WaitMiddleware': 101, 'scrapoxy.downloadmiddlewares.scale.ScaleMiddleware': 102, 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None, 'scrapoxy.downloadmiddlewares.blacklist.BlacklistDownloaderMiddleware': 950} |
class TotoInterface:
    """Interface stub: concrete subclasses must override `machin`."""

    def machin(self):
        """Abstract hook; always raises until a subclass provides it."""
        raise NotImplementedError
| class Totointerface:
def machin(self):
raise NotImplementedError |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
DEPS = [
'build/file',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/step',
'recipe_engine/time',
]
| deps = ['build/file', 'recipe_engine/path', 'recipe_engine/properties', 'recipe_engine/step', 'recipe_engine/time'] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.