content stringlengths 7 1.05M | fixed_cases stringlengths 1 1.28M |
|---|---|
#
# PySNMP MIB module ZYXEL-OUT-OF-BAND-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ZYXEL-OUT-OF-BAND-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:45:06 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ModuleIdentity, Counter32, Unsigned32, iso, ObjectIdentity, IpAddress, TimeTicks, Gauge32, Integer32, Bits, NotificationType, MibIdentifier, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Counter32", "Unsigned32", "iso", "ObjectIdentity", "IpAddress", "TimeTicks", "Gauge32", "Integer32", "Bits", "NotificationType", "MibIdentifier", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
esMgmt, = mibBuilder.importSymbols("ZYXEL-ES-SMI", "esMgmt")
zyxelOutOfBand = ModuleIdentity((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 58))
if mibBuilder.loadTexts: zyxelOutOfBand.setLastUpdated('201207010000Z')
if mibBuilder.loadTexts: zyxelOutOfBand.setOrganization('Enterprise Solution ZyXEL')
zyxelOutOfBandIpSetup = MibIdentifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 58, 1))
zyOutOfBandIpAddress = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 58, 1, 1), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyOutOfBandIpAddress.setStatus('current')
zyOutOfBandSubnetMask = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 58, 1, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyOutOfBandSubnetMask.setStatus('current')
zyOutOfBandGateway = MibScalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 58, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zyOutOfBandGateway.setStatus('current')
mibBuilder.exportSymbols("ZYXEL-OUT-OF-BAND-MIB", zyOutOfBandGateway=zyOutOfBandGateway, zyOutOfBandSubnetMask=zyOutOfBandSubnetMask, zyxelOutOfBand=zyxelOutOfBand, zyOutOfBandIpAddress=zyOutOfBandIpAddress, PYSNMP_MODULE_ID=zyxelOutOfBand, zyxelOutOfBandIpSetup=zyxelOutOfBandIpSetup)
| (integer, object_identifier, octet_string) = mibBuilder.importSymbols('ASN1', 'Integer', 'ObjectIdentifier', 'OctetString')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(value_size_constraint, value_range_constraint, constraints_intersection, constraints_union, single_value_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ValueSizeConstraint', 'ValueRangeConstraint', 'ConstraintsIntersection', 'ConstraintsUnion', 'SingleValueConstraint')
(module_compliance, notification_group) = mibBuilder.importSymbols('SNMPv2-CONF', 'ModuleCompliance', 'NotificationGroup')
(module_identity, counter32, unsigned32, iso, object_identity, ip_address, time_ticks, gauge32, integer32, bits, notification_type, mib_identifier, counter64, mib_scalar, mib_table, mib_table_row, mib_table_column) = mibBuilder.importSymbols('SNMPv2-SMI', 'ModuleIdentity', 'Counter32', 'Unsigned32', 'iso', 'ObjectIdentity', 'IpAddress', 'TimeTicks', 'Gauge32', 'Integer32', 'Bits', 'NotificationType', 'MibIdentifier', 'Counter64', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn')
(textual_convention, display_string) = mibBuilder.importSymbols('SNMPv2-TC', 'TextualConvention', 'DisplayString')
(es_mgmt,) = mibBuilder.importSymbols('ZYXEL-ES-SMI', 'esMgmt')
zyxel_out_of_band = module_identity((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 58))
if mibBuilder.loadTexts:
zyxelOutOfBand.setLastUpdated('201207010000Z')
if mibBuilder.loadTexts:
zyxelOutOfBand.setOrganization('Enterprise Solution ZyXEL')
zyxel_out_of_band_ip_setup = mib_identifier((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 58, 1))
zy_out_of_band_ip_address = mib_scalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 58, 1, 1), ip_address()).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
zyOutOfBandIpAddress.setStatus('current')
zy_out_of_band_subnet_mask = mib_scalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 58, 1, 2), ip_address()).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
zyOutOfBandSubnetMask.setStatus('current')
zy_out_of_band_gateway = mib_scalar((1, 3, 6, 1, 4, 1, 890, 1, 15, 3, 58, 1, 3), ip_address()).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
zyOutOfBandGateway.setStatus('current')
mibBuilder.exportSymbols('ZYXEL-OUT-OF-BAND-MIB', zyOutOfBandGateway=zyOutOfBandGateway, zyOutOfBandSubnetMask=zyOutOfBandSubnetMask, zyxelOutOfBand=zyxelOutOfBand, zyOutOfBandIpAddress=zyOutOfBandIpAddress, PYSNMP_MODULE_ID=zyxelOutOfBand, zyxelOutOfBandIpSetup=zyxelOutOfBandIpSetup) |
""" Class description goes here. """
__author__ = 'Alex Barcelo <alex.barcelo@bsc.es>'
__copyright__ = '2015 Barcelona Supercomputing Center (BSC-CNS)'
class DataClaySerializable(object):
__slots__ = ()
| """ Class description goes here. """
__author__ = 'Alex Barcelo <alex.barcelo@bsc.es>'
__copyright__ = '2015 Barcelona Supercomputing Center (BSC-CNS)'
class Dataclayserializable(object):
__slots__ = () |
# -*- coding: utf-8 -*-
YEAR_CHOICES = (
('', '---------'),
('1', 'Freshman'),
('2', 'Sophmore'),
('3', 'Junior'),
('4', 'Senior'),
)
GRADUATE_DEGREE = (
('M.S.', 'M.S.'),
('Ph.D', 'Ph.D'),
('M.D.', 'M.D.'),
('Other', 'Other'),
)
GRANTS_PROCESS_STAGES = (
('', '---------'),
('Pre-Award (Application Process)', 'Pre-Award (Application Process)'),
(
'Post-Award (Award Acceptance/Grant Management)',
'Post-Award (Award Acceptance/Grant Management)',
),
('Both', 'Both'),
)
UNDERGRADUATE_DEGREE = (
("Bachelor's degree", "Bachelor's degree"),
("Associate's degree/certificate", "Associate's degree/certificate"),
)
WSGC_SCHOOL = (
('Alverno College', 'Alverno College'),
('Carthage College', 'Carthage College'),
('Chief Dull Knife College', 'Chief Dull Knife College'),
('College of Menominee Nation', 'College of Menominee Nation'),
('Colorado School of Mines', 'Colorado School of Mines'),
('Concordia University', 'Concordia University'),
('Lawrence University', 'Lawrence University'),
('Leech Lake Tribal College', 'Leech Lake Tribal College'),
('Little Big Horn College', 'Little Big Horn College'),
('Marquette University', 'Marquette University'),
('Medical College of Wisconsin', 'Medical College of Wisconsin'),
('Milwaukee School of Engineering', 'Milwaukee School of Engineering'),
('Moraine Park Technical College', 'Moraine Park Technical College'),
('Northern Arizona University', 'Northern Arizona University'),
('Northwest Indian College', 'Northwest Indian College'),
('Ripon College', 'Ripon College'),
('St. Norbert College', 'St. Norbert College'),
('Turtle Mountain Community College', 'Turtle Mountain Community College'),
('University of Alaska-Fairbanks', 'University of Alaska-Fairbanks'),
('University of California-Los Angeles', 'University of California-Los Angeles'),
('UW Fox Valley', 'UW Fox Valley'),
('UW Green Bay', 'UW Green Bay'),
('UW LaCrosse', 'UW LaCrosse'),
('UW Madison', 'UW Madison'),
('UW Milwaukee', 'UW Milwaukee'),
('UW Oshkosh', 'UW Oshkosh'),
('UW Parkside', 'UW Parkside'),
('UW Platteville', 'UW Platteville'),
('UW River Falls', 'UW River Falls'),
('UW Sheboygan', 'UW Sheboygan'),
('UW Stevens Point', 'UW Stevens Point'),
('UW Stout', 'UW Stout'),
('UW Superior', 'UW Superior'),
('UW Washington County', 'UW Washington County'),
('UW Whitewater', 'UW Whitewater'),
('Utah State University-Eastern Blanding', 'Utah State University-Eastern Blanding'),
('Western Technical College', 'Western Technical College'),
('Wisconsin Lutheran College', 'Wisconsin Lutheran College'),
('Other', 'Other'),
)
MAJORS = (
('Aeronautical Engineering', 'Aeronautical Engineering'),
('Aerospace Engineering', 'Aerospace Engineering'),
('Applied Physics', 'Applied Physics'),
('Astronomy', 'Astronomy'),
('Astrophysics', 'Astrophysics'),
('Atmoshperic Sciences', 'Atmoshperic Sciences'),
('Biochemistry', 'Biochemistry'),
('Biology', 'Biology'),
('Biomedical Engineering', 'Biomedical Engineering'),
('Biomedical Science', 'Biomedical Science'),
('Biophysics', 'Biophysics'),
('Biotechnology', 'Biotechnology'),
('Chemical Engineering', 'Chemical Engineering'),
('Chemistry', 'Chemistry'),
('Civil Engineering', 'Civil Engineering'),
('Computer Engineering', 'Computer Engineering'),
('Computer Science', 'Computer Science'),
('Electrical Engineering', 'Electrical Engineering'),
('Environmental Science', 'Environmental Science'),
('Environmental Studies', 'Environmental Studies'),
('Geography', 'Geography'),
('Geology', 'Geology'),
('Geophysics', 'Geophysics'),
('Geoscience', 'Geoscience'),
('Industrial Engineering', 'Industrial Engineering'),
('Kinesiology', 'Kinesiology'),
('Mathematics', 'Mathematics'),
('Mechanical Engineering', 'Mechanical Engineering'),
('Meteorology', 'Meteorology'),
('Microbiology', 'Microbiology'),
('Molecular and Cell Biology', 'Molecular and Cell Biology'),
(
'Molecular and Environmental Plant Science',
'Molecular and Environmental Plant Science',
),
('Neuroscience', 'Neuroscience'),
('Nuclear Engineering', 'Nuclear Engineering'),
('Oceanography', 'Oceanography'),
('Other', 'Other'),
('Physics', 'Physics'),
('Statistics', 'Statistics'),
('Systems Engineering', 'Systems Engineering'),
)
PROGRAM_CHOICES = (
('AerospaceOutreach', 'Aerospace Outreach'),
('ClarkGraduateFellowship', 'Dr. Laurel Salton Clark Memorial Research Fellowship'),
('CollegiateRocketCompetition', 'Collegiate Rocket Competition'),
('EarlyStageInvestigator', 'Early-Stage Investigator'),
('FirstNationsRocketCompetition', 'First Nations Rocket Competition'),
('GraduateFellowship', 'WSGC Graduate and Professional Research Fellowship'),
('HighAltitudeBalloonLaunch', 'High Altitude Balloon Launch'),
('HighAltitudeBalloonPayload', 'High Altitude Balloon Payload'),
('HigherEducationInitiatives', 'Higher Education Initiatives'),
('IndustryInternship', 'Industry Internship'),
('MidwestHighPoweredRocketCompetition', 'Midwest High Powered Rocket Competition'),
('NasaCompetition', 'NASA Competition'),
('ProfessionalProgramStudent', 'Professional Program Student'),
('ResearchInfrastructure', 'Research Infrastructure'),
('RocketLaunchTeam', 'Rocket Launch Team'),
('SpecialInitiatives', 'Special Initiatives'),
('StemBridgeScholarship', 'STEM Bridge Scholarship'),
('UndergraduateResearch', 'Undergraduate Research Fellowship'),
('UndergraduateScholarship', 'Undergraduate Scholarship'),
(
'UnmannedAerialVehiclesResearchScholarship',
'Unmanned Aerial Vehicles Research Scholarship',
),
('WomenInAviationScholarship', 'Women in Aviation Scholarship'),
)
| year_choices = (('', '---------'), ('1', 'Freshman'), ('2', 'Sophmore'), ('3', 'Junior'), ('4', 'Senior'))
graduate_degree = (('M.S.', 'M.S.'), ('Ph.D', 'Ph.D'), ('M.D.', 'M.D.'), ('Other', 'Other'))
grants_process_stages = (('', '---------'), ('Pre-Award (Application Process)', 'Pre-Award (Application Process)'), ('Post-Award (Award Acceptance/Grant Management)', 'Post-Award (Award Acceptance/Grant Management)'), ('Both', 'Both'))
undergraduate_degree = (("Bachelor's degree", "Bachelor's degree"), ("Associate's degree/certificate", "Associate's degree/certificate"))
wsgc_school = (('Alverno College', 'Alverno College'), ('Carthage College', 'Carthage College'), ('Chief Dull Knife College', 'Chief Dull Knife College'), ('College of Menominee Nation', 'College of Menominee Nation'), ('Colorado School of Mines', 'Colorado School of Mines'), ('Concordia University', 'Concordia University'), ('Lawrence University', 'Lawrence University'), ('Leech Lake Tribal College', 'Leech Lake Tribal College'), ('Little Big Horn College', 'Little Big Horn College'), ('Marquette University', 'Marquette University'), ('Medical College of Wisconsin', 'Medical College of Wisconsin'), ('Milwaukee School of Engineering', 'Milwaukee School of Engineering'), ('Moraine Park Technical College', 'Moraine Park Technical College'), ('Northern Arizona University', 'Northern Arizona University'), ('Northwest Indian College', 'Northwest Indian College'), ('Ripon College', 'Ripon College'), ('St. Norbert College', 'St. Norbert College'), ('Turtle Mountain Community College', 'Turtle Mountain Community College'), ('University of Alaska-Fairbanks', 'University of Alaska-Fairbanks'), ('University of California-Los Angeles', 'University of California-Los Angeles'), ('UW Fox Valley', 'UW Fox Valley'), ('UW Green Bay', 'UW Green Bay'), ('UW LaCrosse', 'UW LaCrosse'), ('UW Madison', 'UW Madison'), ('UW Milwaukee', 'UW Milwaukee'), ('UW Oshkosh', 'UW Oshkosh'), ('UW Parkside', 'UW Parkside'), ('UW Platteville', 'UW Platteville'), ('UW River Falls', 'UW River Falls'), ('UW Sheboygan', 'UW Sheboygan'), ('UW Stevens Point', 'UW Stevens Point'), ('UW Stout', 'UW Stout'), ('UW Superior', 'UW Superior'), ('UW Washington County', 'UW Washington County'), ('UW Whitewater', 'UW Whitewater'), ('Utah State University-Eastern Blanding', 'Utah State University-Eastern Blanding'), ('Western Technical College', 'Western Technical College'), ('Wisconsin Lutheran College', 'Wisconsin Lutheran College'), ('Other', 'Other'))
majors = (('Aeronautical Engineering', 'Aeronautical Engineering'), ('Aerospace Engineering', 'Aerospace Engineering'), ('Applied Physics', 'Applied Physics'), ('Astronomy', 'Astronomy'), ('Astrophysics', 'Astrophysics'), ('Atmoshperic Sciences', 'Atmoshperic Sciences'), ('Biochemistry', 'Biochemistry'), ('Biology', 'Biology'), ('Biomedical Engineering', 'Biomedical Engineering'), ('Biomedical Science', 'Biomedical Science'), ('Biophysics', 'Biophysics'), ('Biotechnology', 'Biotechnology'), ('Chemical Engineering', 'Chemical Engineering'), ('Chemistry', 'Chemistry'), ('Civil Engineering', 'Civil Engineering'), ('Computer Engineering', 'Computer Engineering'), ('Computer Science', 'Computer Science'), ('Electrical Engineering', 'Electrical Engineering'), ('Environmental Science', 'Environmental Science'), ('Environmental Studies', 'Environmental Studies'), ('Geography', 'Geography'), ('Geology', 'Geology'), ('Geophysics', 'Geophysics'), ('Geoscience', 'Geoscience'), ('Industrial Engineering', 'Industrial Engineering'), ('Kinesiology', 'Kinesiology'), ('Mathematics', 'Mathematics'), ('Mechanical Engineering', 'Mechanical Engineering'), ('Meteorology', 'Meteorology'), ('Microbiology', 'Microbiology'), ('Molecular and Cell Biology', 'Molecular and Cell Biology'), ('Molecular and Environmental Plant Science', 'Molecular and Environmental Plant Science'), ('Neuroscience', 'Neuroscience'), ('Nuclear Engineering', 'Nuclear Engineering'), ('Oceanography', 'Oceanography'), ('Other', 'Other'), ('Physics', 'Physics'), ('Statistics', 'Statistics'), ('Systems Engineering', 'Systems Engineering'))
program_choices = (('AerospaceOutreach', 'Aerospace Outreach'), ('ClarkGraduateFellowship', 'Dr. Laurel Salton Clark Memorial Research Fellowship'), ('CollegiateRocketCompetition', 'Collegiate Rocket Competition'), ('EarlyStageInvestigator', 'Early-Stage Investigator'), ('FirstNationsRocketCompetition', 'First Nations Rocket Competition'), ('GraduateFellowship', 'WSGC Graduate and Professional Research Fellowship'), ('HighAltitudeBalloonLaunch', 'High Altitude Balloon Launch'), ('HighAltitudeBalloonPayload', 'High Altitude Balloon Payload'), ('HigherEducationInitiatives', 'Higher Education Initiatives'), ('IndustryInternship', 'Industry Internship'), ('MidwestHighPoweredRocketCompetition', 'Midwest High Powered Rocket Competition'), ('NasaCompetition', 'NASA Competition'), ('ProfessionalProgramStudent', 'Professional Program Student'), ('ResearchInfrastructure', 'Research Infrastructure'), ('RocketLaunchTeam', 'Rocket Launch Team'), ('SpecialInitiatives', 'Special Initiatives'), ('StemBridgeScholarship', 'STEM Bridge Scholarship'), ('UndergraduateResearch', 'Undergraduate Research Fellowship'), ('UndergraduateScholarship', 'Undergraduate Scholarship'), ('UnmannedAerialVehiclesResearchScholarship', 'Unmanned Aerial Vehicles Research Scholarship'), ('WomenInAviationScholarship', 'Women in Aviation Scholarship')) |
legacy_dummy_settings = {
"name": "Rosalind Franklin",
"version": 42,
"steps_per_mm": "M92 X80.00 Y80.00 Z400 A400 B768 C768",
"gantry_steps_per_mm": {"X": 80.00, "Y": 80.00, "Z": 400, "A": 400},
"acceleration": {"X": 3, "Y": 2, "Z": 15, "A": 15, "B": 2, "C": 2},
"z_retract_distance": 2,
"tip_length": 999,
"left_mount_offset": [-34, 0, 0],
"serial_speed": 888,
"default_current": {"X": 1, "Y": 2, "Z": 3, "A": 4, "B": 5, "C": 6},
"low_current": {"X": 1, "Y": 2, "Z": 3, "A": 4, "B": 5, "C": 6},
"high_current": {"X": 1, "Y": 2, "Z": 3, "A": 4, "B": 5, "C": 6},
"default_max_speed": {"X": 1, "Y": 2, "Z": 3, "A": 4, "B": 5, "C": 6},
"default_pipette_configs": {
"homePosition": 220,
"maxTravel": 30,
"stepsPerMM": 768,
},
"log_level": "NADA",
}
migrated_dummy_settings = {
"name": "Rosalind Franklin",
"version": 4,
"gantry_steps_per_mm": {"X": 80.0, "Y": 80.0, "Z": 400.0, "A": 400.0},
"acceleration": {"X": 3, "Y": 2, "Z": 15, "A": 15, "B": 2, "C": 2},
"z_retract_distance": 2,
"left_mount_offset": [-34, 0, 0],
"serial_speed": 888,
"default_current": {
"default": {"X": 1.25, "Y": 1.25, "Z": 0.5, "A": 0.5, "B": 0.05, "C": 0.05},
"2.1": {"X": 1, "Y": 2, "Z": 3, "A": 4, "B": 5, "C": 6},
},
"low_current": {
"default": {"X": 0.7, "Y": 0.7, "Z": 0.1, "A": 0.1, "B": 0.05, "C": 0.05},
"2.1": {"X": 1, "Y": 2, "Z": 3, "A": 4, "B": 5, "C": 6},
},
"high_current": {
"default": {"X": 1.25, "Y": 1.25, "Z": 0.5, "A": 0.5, "B": 0.05, "C": 0.05},
"2.1": {"X": 1, "Y": 2, "Z": 3, "A": 4, "B": 5, "C": 6},
},
"default_max_speed": {"X": 1, "Y": 2, "Z": 3, "A": 4, "B": 5, "C": 6},
"default_pipette_configs": {
"homePosition": 220,
"maxTravel": 30,
"stepsPerMM": 768,
},
"log_level": "NADA",
}
new_dummy_settings = {
"name": "Marie Curie",
"version": 4,
"gantry_steps_per_mm": {"X": 80.0, "Y": 80.0, "Z": 400.0, "A": 400.0},
"acceleration": {"X": 3, "Y": 2, "Z": 15, "A": 15, "B": 2, "C": 2},
"z_retract_distance": 2,
"left_mount_offset": [-34, 0, 0],
"serial_speed": 888,
"default_current": {
"default": {"X": 1.25, "Y": 1.25, "Z": 0.8, "A": 0.8, "B": 0.05, "C": 0.05},
"2.1": {"X": 1, "Y": 2, "Z": 3, "A": 4, "B": 5, "C": 6},
},
"low_current": {
"default": {"X": 0.7, "Y": 0.7, "Z": 0.7, "A": 0.7, "B": 0.7, "C": 0.7},
"2.1": {"X": 1, "Y": 2, "Z": 3, "A": 4, "B": 5, "C": 6},
},
"high_current": {
"default": {"X": 0.7, "Y": 0.7, "Z": 0.7, "A": 0.7, "B": 0.7, "C": 0.7},
"2.1": {"X": 1, "Y": 2, "Z": 3, "A": 4, "B": 5, "C": 6},
},
"default_max_speed": {"X": 1, "Y": 2, "Z": 3, "A": 4, "B": 5, "C": 6},
"default_pipette_configs": {
"homePosition": 220,
"maxTravel": 30,
"stepsPerMM": 768,
},
"log_level": "NADA",
}
| legacy_dummy_settings = {'name': 'Rosalind Franklin', 'version': 42, 'steps_per_mm': 'M92 X80.00 Y80.00 Z400 A400 B768 C768', 'gantry_steps_per_mm': {'X': 80.0, 'Y': 80.0, 'Z': 400, 'A': 400}, 'acceleration': {'X': 3, 'Y': 2, 'Z': 15, 'A': 15, 'B': 2, 'C': 2}, 'z_retract_distance': 2, 'tip_length': 999, 'left_mount_offset': [-34, 0, 0], 'serial_speed': 888, 'default_current': {'X': 1, 'Y': 2, 'Z': 3, 'A': 4, 'B': 5, 'C': 6}, 'low_current': {'X': 1, 'Y': 2, 'Z': 3, 'A': 4, 'B': 5, 'C': 6}, 'high_current': {'X': 1, 'Y': 2, 'Z': 3, 'A': 4, 'B': 5, 'C': 6}, 'default_max_speed': {'X': 1, 'Y': 2, 'Z': 3, 'A': 4, 'B': 5, 'C': 6}, 'default_pipette_configs': {'homePosition': 220, 'maxTravel': 30, 'stepsPerMM': 768}, 'log_level': 'NADA'}
migrated_dummy_settings = {'name': 'Rosalind Franklin', 'version': 4, 'gantry_steps_per_mm': {'X': 80.0, 'Y': 80.0, 'Z': 400.0, 'A': 400.0}, 'acceleration': {'X': 3, 'Y': 2, 'Z': 15, 'A': 15, 'B': 2, 'C': 2}, 'z_retract_distance': 2, 'left_mount_offset': [-34, 0, 0], 'serial_speed': 888, 'default_current': {'default': {'X': 1.25, 'Y': 1.25, 'Z': 0.5, 'A': 0.5, 'B': 0.05, 'C': 0.05}, '2.1': {'X': 1, 'Y': 2, 'Z': 3, 'A': 4, 'B': 5, 'C': 6}}, 'low_current': {'default': {'X': 0.7, 'Y': 0.7, 'Z': 0.1, 'A': 0.1, 'B': 0.05, 'C': 0.05}, '2.1': {'X': 1, 'Y': 2, 'Z': 3, 'A': 4, 'B': 5, 'C': 6}}, 'high_current': {'default': {'X': 1.25, 'Y': 1.25, 'Z': 0.5, 'A': 0.5, 'B': 0.05, 'C': 0.05}, '2.1': {'X': 1, 'Y': 2, 'Z': 3, 'A': 4, 'B': 5, 'C': 6}}, 'default_max_speed': {'X': 1, 'Y': 2, 'Z': 3, 'A': 4, 'B': 5, 'C': 6}, 'default_pipette_configs': {'homePosition': 220, 'maxTravel': 30, 'stepsPerMM': 768}, 'log_level': 'NADA'}
new_dummy_settings = {'name': 'Marie Curie', 'version': 4, 'gantry_steps_per_mm': {'X': 80.0, 'Y': 80.0, 'Z': 400.0, 'A': 400.0}, 'acceleration': {'X': 3, 'Y': 2, 'Z': 15, 'A': 15, 'B': 2, 'C': 2}, 'z_retract_distance': 2, 'left_mount_offset': [-34, 0, 0], 'serial_speed': 888, 'default_current': {'default': {'X': 1.25, 'Y': 1.25, 'Z': 0.8, 'A': 0.8, 'B': 0.05, 'C': 0.05}, '2.1': {'X': 1, 'Y': 2, 'Z': 3, 'A': 4, 'B': 5, 'C': 6}}, 'low_current': {'default': {'X': 0.7, 'Y': 0.7, 'Z': 0.7, 'A': 0.7, 'B': 0.7, 'C': 0.7}, '2.1': {'X': 1, 'Y': 2, 'Z': 3, 'A': 4, 'B': 5, 'C': 6}}, 'high_current': {'default': {'X': 0.7, 'Y': 0.7, 'Z': 0.7, 'A': 0.7, 'B': 0.7, 'C': 0.7}, '2.1': {'X': 1, 'Y': 2, 'Z': 3, 'A': 4, 'B': 5, 'C': 6}}, 'default_max_speed': {'X': 1, 'Y': 2, 'Z': 3, 'A': 4, 'B': 5, 'C': 6}, 'default_pipette_configs': {'homePosition': 220, 'maxTravel': 30, 'stepsPerMM': 768}, 'log_level': 'NADA'} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# LeetCode 1832. Check if the Sentence Is Pangram
# https://leetcode.com/problems/check-if-the-sentence-is-pangram/
class CheckIfTheSentenceIsPangram:
def checkIfPangram(self, sentence: str) -> bool:
return len(set(sentence)) == 26
if __name__ == "__main__":
pass
# EOF
| class Checkifthesentenceispangram:
def check_if_pangram(self, sentence: str) -> bool:
return len(set(sentence)) == 26
if __name__ == '__main__':
pass |
#!/usr/bin/env python
# Justin's shot at optimizing QAOA parameters
def optimize_obj(obj_val, params=None):
beta = 0.5
gamma = 0.7
return (
beta, gamma, obj_val(beta, gamma)
) # return some optimization trace. It will eventually go into optimize.optimize_modularity, so it should at least contain optimal angles
| def optimize_obj(obj_val, params=None):
beta = 0.5
gamma = 0.7
return (beta, gamma, obj_val(beta, gamma)) |
__title__ = 'evernote2'
__description__ = 'another Evernote SDK for Python'
__url__ = 'https://github.com/JackonYang/evernote2'
__version__ = '1.0.0'
# __build__ = 0x022500
__author__ = 'Jackon Yang'
__author_email__ = 'i@jackon.me'
__license__ = 'BSD'
__copyright__ = 'Copyright 2020 Jackon Yang'
| __title__ = 'evernote2'
__description__ = 'another Evernote SDK for Python'
__url__ = 'https://github.com/JackonYang/evernote2'
__version__ = '1.0.0'
__author__ = 'Jackon Yang'
__author_email__ = 'i@jackon.me'
__license__ = 'BSD'
__copyright__ = 'Copyright 2020 Jackon Yang' |
lw = np.linspace(.5, 15, 8)
for i in xrange(8):
plt.plot(x, i*y, colors[i], linewidth=lw[i])
plt.ylim([-1, 8])
plt.show() | lw = np.linspace(0.5, 15, 8)
for i in xrange(8):
plt.plot(x, i * y, colors[i], linewidth=lw[i])
plt.ylim([-1, 8])
plt.show() |
# -*- coding: utf-8 -*-
def main():
a = int(input())
b = int(input())
c = int(input())
d = int(input())
diff = a * b - c * d
print('DIFERENCA =', diff)
if __name__ == '__main__':
main() | def main():
a = int(input())
b = int(input())
c = int(input())
d = int(input())
diff = a * b - c * d
print('DIFERENCA =', diff)
if __name__ == '__main__':
main() |
class Solution:
# Optimised Row by Row (Accepted), O(n) time and space, where n = total elems in pascal triangle
def generate(self, numRows: int) -> List[List[int]]:
n, res = 1, [[1]]
while n < numRows:
n += 1
l = [1]*n
for i in range((n-1)//2):
l[i+1] = l[n-2-i] = res[n-2][i] + res[n-2][i+1]
res.append(l)
return res
# Map (Top Voted), O(n) time and space
def generate(self, numRows):
res = [[1]]
for i in range(1, numRows):
res.append(
list(map(lambda x, y: x+y, res[-1] + [0], [0] + res[-1])))
return res
# 4 Liner (Top Voted), O(n) time and space
def generate(self, numRows):
pascal = [[1]*(i+1) for i in range(numRows)]
for i in range(numRows):
for j in range(1, i):
pascal[i][j] = pascal[i-1][j-1] + pascal[i-1][j]
return pascal
| class Solution:
def generate(self, numRows: int) -> List[List[int]]:
(n, res) = (1, [[1]])
while n < numRows:
n += 1
l = [1] * n
for i in range((n - 1) // 2):
l[i + 1] = l[n - 2 - i] = res[n - 2][i] + res[n - 2][i + 1]
res.append(l)
return res
def generate(self, numRows):
res = [[1]]
for i in range(1, numRows):
res.append(list(map(lambda x, y: x + y, res[-1] + [0], [0] + res[-1])))
return res
def generate(self, numRows):
pascal = [[1] * (i + 1) for i in range(numRows)]
for i in range(numRows):
for j in range(1, i):
pascal[i][j] = pascal[i - 1][j - 1] + pascal[i - 1][j]
return pascal |
# Python 3
testcases = int(input().strip())
for test in range(testcases):
string = input().strip()
ascii_string = [ord(c) for c in string]
length = len(string)
funny = True
for i in range(1, length):
if abs(ascii_string[i] - ascii_string[i - 1]) != abs(ascii_string[length - i - 1] - ascii_string[length - i]):
funny = False
break
if funny:
print('Funny')
else:
print('Not Funny')
| testcases = int(input().strip())
for test in range(testcases):
string = input().strip()
ascii_string = [ord(c) for c in string]
length = len(string)
funny = True
for i in range(1, length):
if abs(ascii_string[i] - ascii_string[i - 1]) != abs(ascii_string[length - i - 1] - ascii_string[length - i]):
funny = False
break
if funny:
print('Funny')
else:
print('Not Funny') |
#!/usr/bin/env python
# CHATBOT PARAMETERS:
# CLIENT PARAMETERS:
CLIENTBUFFERSIZE = 64 # buffer size
# SERVER PARAMETERS:
SERVERBUFFERSIZE = 64 # buffer size
HOST = "127.0.0.1"
PORT = 9000
# wireprotocol PARAMETERS:
# (probably best not to change DELIM)
| clientbuffersize = 64
serverbuffersize = 64
host = '127.0.0.1'
port = 9000 |
def maxSlidingWindow(nums, k):
ans = []
queue = []
for i, v in enumerate(nums):
# corner case, when front element is outside the window
if queue and queue[0] == i - k:
queue.pop(0)
# pop all elements smaller than new element to be added
# so after the new element is added, maximum is at queue front
while queue and nums[queue[-1]] < v:
queue.pop()
queue.append(i)
# when i reaches k - 1, there are k elements in window
# from now on, append sliding max in every step
if i + 1 >= k:
ans.append(nums[queue[0]])
return ans
maxSlidingWindow([1,3,-1,-3,5,3,6,7], 3) | def max_sliding_window(nums, k):
ans = []
queue = []
for (i, v) in enumerate(nums):
if queue and queue[0] == i - k:
queue.pop(0)
while queue and nums[queue[-1]] < v:
queue.pop()
queue.append(i)
if i + 1 >= k:
ans.append(nums[queue[0]])
return ans
max_sliding_window([1, 3, -1, -3, 5, 3, 6, 7], 3) |
# init for sext package
""" Setuptools extensions that can be shared across projects
Typical use for these routines is as a git subtree merge
For example::
# Add a remote pointing to repository
git remote add nisext git://github.com/nipy/nisext.git
git fetch nisext
# Label nisext history as merged
git merge -s ours --no-commit nisext/master
# Read nisext contents as nisext subdirectory
git read-tree --prefix=nisext/ -u nisext/master
git commit -m "Merge nisext project as subtree"
Then you would typically add a makefile target like::
# Update nisext subtree from remote
update-nisext:
git fetch nisext
git merge --squash -s subtree --no-commit nisext/master
and commit when you have changes you want. This allows you to keep the nisext
tree updated from the upstream repository, but the tree will be there and ready
for someone without this machinery or remote.
"""
| """ Setuptools extensions that can be shared across projects
Typical use for these routines is as a git subtree merge
For example::
# Add a remote pointing to repository
git remote add nisext git://github.com/nipy/nisext.git
git fetch nisext
# Label nisext history as merged
git merge -s ours --no-commit nisext/master
# Read nisext contents as nisext subdirectory
git read-tree --prefix=nisext/ -u nisext/master
git commit -m "Merge nisext project as subtree"
Then you would typically add a makefile target like::
# Update nisext subtree from remote
update-nisext:
git fetch nisext
git merge --squash -s subtree --no-commit nisext/master
and commit when you have changes you want. This allows you to keep the nisext
tree updated from the upstream repository, but the tree will be there and ready
for someone without this machinery or remote.
""" |
#!/usr/bin/env python3
#
# SPDX-FileCopyrightText: (c) 2020 Tristan Gingold <tgingold@free.fr>
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO:
# * gen verilog/vhdl netlist
# * gen config (adjust powers)
# * read info from LEF
config_sky130_fd_hd = {
'dff': {'name': 'sky130_fd_sc_hd__dfxtp_4', 'width': 19 * 460,
'input': 'D', 'output': 'Q', 'clock': 'CLK'},
'cdly15_1': {'name': 'sky130_fd_sc_hd__clkdlybuf4s15_1', 'width': 8 * 460,
'input': 'A', 'output': 'X'},
'cdly15_2': {'name': 'sky130_fd_sc_hd__clkdlybuf4s15_2', 'width': 9 * 460,
'input': 'A', 'output': 'X'},
'cdly18_1': {'name': 'sky130_fd_sc_hd__clkdlybuf4s18_1', 'width': 8 * 460,
'input': 'A', 'output': 'X'},
'cdly18_2': {'name': 'sky130_fd_sc_hd__clkdlybuf4s18_1', 'width': 8 * 460,
'input': 'A', 'output': 'X'},
'cdly25_1': {'name': 'sky130_fd_sc_hd__clkdlybuf4s25_1', 'width': 8 * 460,
'input': 'A', 'output': 'X'},
'cdly25_2': {'name': 'sky130_fd_sc_hd__clkdlybuf4s25_2', 'width': 8 * 460,
'input': 'A', 'output': 'X'},
'cdly50_1': {'name': 'sky130_fd_sc_hd__clkdlybuf4s50_1', 'width': 8 * 460,
'input': 'A', 'output': 'X'},
'cdly50_2': {'name': 'sky130_fd_sc_hd__clkdlybuf4s50_2', 'width': 9 * 460,
'input': 'A', 'output': 'X'},
'cbuf_1': {'name': 'sky130_fd_sc_hd__clkbuf_1', 'width': 3 * 460,
'input': 'A', 'output': 'X'},
'cbuf_2': {'name': 'sky130_fd_sc_hd__clkbuf_2', 'width': 4 * 460,
'input': 'A', 'output': 'X'},
'cbuf_4': {'name': 'sky130_fd_sc_hd__clkbuf_4', 'width': 6 * 460,
'input': 'A', 'output': 'X'},
'cbuf_8': {'name': 'sky130_fd_sc_hd__clkbuf_2', 'width': 11 * 460,
'input': 'A', 'output': 'X'},
'cbuf_16': {'name': 'sky130_fd_sc_hd__clkbuf_16', 'width': 20 * 460,
'input': 'A', 'output': 'X'},
'cinv_1': {'name': 'sky130_fd_sc_hd__clkinv_1', 'width': 3 * 460,
'input': 'A', 'output': 'Y'},
'cinv_2': {'name': 'sky130_fd_sc_hd__clkinv_2', 'width': 4 * 460,
'input': 'A', 'output': 'Y'},
'inv_1': {'name': 'sky130_fd_sc_hd__inv_1', 'width': 3 * 460,
'input': 'A', 'output': 'Y'},
'mux2': {'name': 'sky130_fd_sc_hd__mux2_1', 'width': 9 * 460,
'in0': 'A0', 'in1': 'A1', 'sel': 'S', 'output': 'X'},
'decap': {'name': 'sky130_fd_sc_hd__decap_3', 'width': 3 * 460},
'tap': {'name': 'sky130_fd_sc_hd__tapvpwrvgnd_1', 'width': 1 * 460},
'fill1': {'name': 'sky130_fd_sc_hd__fill_1', 'width': 1 * 460},
'fill2': {'name': 'sky130_fd_sc_hd__fill_2', 'width': 2 * 460},
'fill4': {'name': 'sky130_fd_sc_hd__fill_4', 'width': 4 * 460},
'fill8': {'name': 'sky130_fd_sc_hd__fill_8', 'width': 8 * 460},
}
config_sky130_fd_hs = {
'dff': {'name': 'sky130_fd_sc_hs__dfxtp_4', 'width': 20 * 480,
'input': 'D', 'output': 'Q', 'clock': 'CLK'},
'dly4_1': {'name': 'sky130_fd_sc_hs__dlygate4sd1_1', 'width': 8 * 480,
'input': 'A', 'output': 'X'},
'cdinv_1': {'name': 'sky130_fd_sc_hs__clkdlyinv3sd1_1', 'width': 6 * 480,
'input': 'A', 'output': 'Y'},
'mux2': {'name': 'sky130_fd_sc_hs__mux2_1', 'width': 9 * 480,
'in0': 'A0', 'in1': 'A1', 'sel': 'S', 'output': 'X'},
'decap': {'name': 'sky130_fd_sc_hs__decap_4', 'width': 4 * 480},
'tap': {'name': 'sky130_fd_sc_hs__tapvpwrvgnd_1', 'width': 1 * 480},
'fill1': {'name': 'sky130_fd_sc_hs__fill_1', 'width': 1 * 480},
'fill2': {'name': 'sky130_fd_sc_hs__fill_2', 'width': 2 * 480},
'fill4': {'name': 'sky130_fd_sc_hs__fill_4', 'width': 4 * 480},
'fill8': {'name': 'sky130_fd_sc_hs__fill_8', 'width': 8 * 480},
}
config_sky130_fd_ls = {
'dff': {'name': 'sky130_fd_sc_ls__dfxtp_4', 'width': 20 * 480,
'input': 'D', 'output': 'Q', 'clock': 'CLK'},
'dly4_1': {'name': 'sky130_fd_sc_ls__dlygate4sd1_1', 'width': 8 * 480,
'input': 'A', 'output': 'X'},
'cdinv_1': {'name': 'sky130_fd_sc_ls__clkdlyinv3sd1_1', 'width': 6 * 480,
'input': 'A', 'output': 'Y'},
'mux2': {'name': 'sky130_fd_sc_ls__mux2_1', 'width': 9 * 480,
'in0': 'A0', 'in1': 'A1', 'sel': 'S', 'output': 'X'},
'decap': {'name': 'sky130_fd_sc_ls__decap_4', 'width': 4 * 480},
'tap': {'name': 'sky130_fd_sc_ls__tapvpwrvgnd_1', 'width': 1 * 480},
'fill1': {'name': 'sky130_fd_sc_ls__fill_1', 'width': 1 * 480},
'fill2': {'name': 'sky130_fd_sc_ls__fill_2', 'width': 2 * 480},
'fill4': {'name': 'sky130_fd_sc_ls__fill_4', 'width': 4 * 480},
'fill8': {'name': 'sky130_fd_sc_ls__fill_8', 'width': 8 * 480},
}
config_sky130_fd_ms = {
'dff': {'name': 'sky130_fd_sc_ms__dfxtp_4', 'width': 20 * 480,
'input': 'D', 'output': 'Q', 'clock': 'CLK'},
'dly4_1': {'name': 'sky130_fd_sc_ms__dlygate4sd1_1', 'width': 8 * 480,
'input': 'A', 'output': 'X'},
'cdinv_1': {'name': 'sky130_fd_sc_ms__clkdlyinv3sd1_1', 'width': 6 * 480,
'input': 'A', 'output': 'Y'},
'mux2': {'name': 'sky130_fd_sc_ms__mux2_1', 'width': 9 * 480,
'in0': 'A0', 'in1': 'A1', 'sel': 'S', 'output': 'X'},
'decap': {'name': 'sky130_fd_sc_ms__decap_4', 'width': 4 * 480},
'tap': {'name': 'sky130_fd_sc_ms__tapvpwrvgnd_1', 'width': 1 * 480},
'fill1': {'name': 'sky130_fd_sc_ms__fill_1', 'width': 1 * 480},
'fill2': {'name': 'sky130_fd_sc_ms__fill_2', 'width': 2 * 480},
'fill4': {'name': 'sky130_fd_sc_ms__fill_4', 'width': 4 * 480},
'fill8': {'name': 'sky130_fd_sc_ms__fill_8', 'width': 8 * 480},
}
config_sky130_osu_18T_hs = {
'dff': {'name': 'sky130_osu_sc_18T_hs__dff_1', 'width': 66 * 110,
'input': 'D', 'output': 'Q', 'clock': 'CK'},
'buf_1': {'name': 'sky130_osu_sc_18T_hs__buf_1', 'width': 13 * 110,
'input': 'A', 'output': 'Y'},
'mux2': {'name': 'sky130_osu_sc_18T_hs__mux2_1', 'width': 25 * 110,
'in0': 'A0', 'in1': 'A1', 'sel': 'S0', 'output': 'Y'},
'decap': {'name': 'sky130_osu_sc_18T_hs__decap_1', 'width': 9 * 110},
'fill1': {'name': 'sky130_osu_sc_18T_hs__fill_1', 'width': 1 * 110},
'fill2': {'name': 'sky130_osu_sc_18T_hs__fill_2', 'width': 2 * 110},
'fill4': {'name': 'sky130_osu_sc_18T_hs__fill_4', 'width': 4 * 110},
'fill8': {'name': 'sky130_osu_sc_18T_hs__fill_8', 'width': 8 * 110},
'fill16': {'name': 'sky130_osu_sc_18T_hs__fill_16', 'width': 16 * 110},
'fill32': {'name': 'sky130_osu_sc_18T_hs__fill_32', 'width': 32 * 110},
}
# with/height: from the site size in tech LEF.
# Tracks: layer: (HPITCH, VPITCH, WD)
# pins: layer used to place pins
TECHS = {
'fd_hd': {'cells': config_sky130_fd_hd, 'width': 460, 'height': 2720,
'tracks': {'li1': (460, 340, 170),
'met1': (340, 340, 140),
'met2': (460, 460, 140),
'met3': (680, 680, 300),
'met4': (920, 920, 300),
'met5': (3400, 3400, 1600)},
'site': 'unithd',
'pins': ('met2', 'met3'),
'libname': 'sky130_fd_sc_hd'},
'fd_hs': {'cells': config_sky130_fd_hs, 'width': 480, 'height': 3330,
'tracks': {'li1': (480, 370, 170),
'met1': (370, 370, 140),
'met2': (480, 480, 140),
'met3': (740, 740, 300),
'met4': (960, 960, 300),
'met5': (3330, 3330, 1600)},
'site': 'unit',
'pins': ('met2', 'met3'),
'libname': 'sky130_fd_sc_hs'},
'fd_ls': {'cells': config_sky130_fd_ls, 'width': 480, 'height': 3330,
'tracks': {'li1': (480, 480, 170),
'met1': (370, 370, 140),
'met2': (480, 480, 140),
'met3': (740, 740, 300),
'met4': (960, 960, 300),
'met5': (3330, 3330, 1600)},
'site': 'unit',
'pins': ('met2', 'met3'),
'libname': 'sky130_fd_sc_ls'},
'fd_ms': {'cells': config_sky130_fd_ms, 'width': 480, 'height': 3330,
'tracks': {'li1': (480, 480, 170),
'met1': (370, 370, 140),
'met2': (480, 480, 140),
'met3': (740, 740, 300),
'met4': (960, 960, 300),
'met5': (3330, 3330, 1600)},
'site': 'unit',
'pins': ('met2', 'met3'),
'libname': 'sky130_fd_sc_ms'},
'osu_18T_hs': {'cells': config_sky130_osu_18T_hs, 'width': 110, 'height': 6660,
'tracks': {'met1': (370, 370, 140),
'met2': (480, 480, 140),
'met3': (740, 740, 300),
'met4': (960, 960, 300),
'met5': (3330, 3330, 1600)},
'site': '18T',
'pins': ('met2', 'met3'),
'libname': 'sky130_osu_sc_18T_hs'},
}
class GenDef:
def __init__(self, tech, name):
self.name = name
self.tech = TECHS[tech]
self.pintech = TECHS['fd_hd']
self.row_width = self.tech['width']
self.row_height = self.tech['height']
self.cells = self.tech['cells']
self.hmargin = 12 * self.row_width # = 5520
self.vmargin = 2 * self.row_height
self.nrow = 0 # Number of rows
self.rowl = 0 # Length of rows
self.rows = []
self.nets = []
self.pins = []
self.ppow = None # power name (for hdl output)
self.pgnd = None
self.components = []
self.build_fillers()
def build_rows(self, nrow):
self.nrow = nrow
for i in range(self.nrow):
r = {'comps': [], 'width': 0,
'x': self.hmargin, 'y': self.vmargin + i * self.row_height,
'orientation': "FS" if i % 2 == 0 else "N"}
self.rows.append(r)
class Net:
def __init__(self, name):
self.name = name
self.conn = []
def add_net(self, name):
n = GenDef.Net(name)
self.nets.append(n)
return n
class Pin:
def __init__(self, name, io):
self.name = name
self.dir = io
self.net = None
self.place = None
self.offset = None
self.layer = None
def add_pin(self, name, io):
"""Add a pin, return the corresponding net"""
assert io in "IO"
n = self.add_net(name)
p = GenDef.Pin(name, io)
p.net = n
self.pins.append(p)
n.conn.append((None, p))
return p
def place_pin(self, pin, orient, offset):
assert pin.place is None, "pin already placed"
assert orient in "NSEW"
pin.place = orient
offset += self.hmargin if orient in "NS" else self.vmargin
# Adjust pin position: put it on the grid
idx = 0 if orient in "NS" else 1
pin.layer = self.pintech['pins'][idx]
pitch = self.pintech['tracks'][pin.layer][idx]
offset -= pitch // 2
offset = (offset // pitch) * pitch
offset += pitch // 2
pin.offset = offset
class Component:
def __init__(self, name, model):
self.name = name
self.model = model
self.flip = False
self.conns = []
def add_component(self, name, model):
comp = GenDef.Component(name, model)
self.components.append(comp)
return comp
def place_component(self, comp, row):
assert row >= 0
self.rows[row]['comps'].append(comp)
self.rows[row]['width'] += comp.model['width']
def connect(self, net, inst, port):
net.conn.append((inst, port))
if inst is not None:
inst.conns.append({'port': port, 'net': net})
def build_fillers(self):
fillers = [v for k, v in self.cells.items() if k.startswith('fill')]
self.fillers = sorted(fillers,
key=lambda key: key['width'], reverse=True)
self.fill_label = 0
def _add_fill(self, row, comp):
c = self.add_component('FILL_{}'.format(self.fill_label), comp)
self.place_component(c, row)
self.fill_label += 1
def pad_rows(self):
"""Add fillers so that all rows have the same length"""
wd = max([r['width'] for r in self.rows])
tap = self.cells.get('tap')
for i, r in enumerate(self.rows):
for f in self.fillers:
while r['width'] + f['width'] <= wd:
# Also add taps in case of very long fill.
if (tap and f is self.fillers[0]
and r['width'] + f['width'] + tap['width'] <= wd):
self._add_fill(i, tap)
self._add_fill(i, f)
assert r['width'] == wd
def row_add_fill(self, row, wd):
wd *= self.row_width
for f in self.fillers:
if wd == 0:
break
fw = f['width']
while wd >= fw:
self._add_fill(row, f)
wd -= fw
def build_tap_decap(self, row, idx):
# tap
if 'tap' in self.cells:
tap = self.add_component('tap{}_{}'.format(row, idx),
self.cells['tap'])
self.place_component(tap, row)
# decap
decap = self.add_component('decap{}_{}'.format(row, idx),
self.cells['decap'])
self.place_component(decap, row)
def compute_size(self):
self.rowl = max(r['width'] for r in self.rows) // self.row_width
self.x_size = self.rowl * self.row_width + 2 * self.hmargin
self.y_size = self.nrow * self.row_height + 2 * self.vmargin
def set_power_pin(self, ppow, pgnd):
self.ppow = ppow
self.pgnd = pgnd
def disp_def_hdr(self, f):
print("VERSION 5.8 ;", file=f)
print('DIVIDERCHAR "/" ;', file=f)
print('BUSBITCHARS "[]" ;', file=f)
print('DESIGN {} ;'.format(self.name), file=f)
print('UNITS DISTANCE MICRONS 1000 ;', file=f)
print('DIEAREA ( 0 0 ) ( {} {} ) ;'.format(
self.x_size, self.y_size), file=f)
def disp_def_row(self, f):
for i in range(self.nrow):
r = self.rows[i]
print("ROW ROW_{} {} {} {} {} DO {} BY 1 STEP {} 0 ;".format(
i, self.tech['site'], r['x'], r['y'], r['orientation'],
self.rowl, self.row_width),
file=f)
def disp_def_tracks(self, f):
for layer, (xpitch, ypitch, wd) in self.tech['tracks'].items():
print("TRACKS X {} DO {} STEP {} LAYER {} ;".format(
xpitch // 2,
(self.x_size + xpitch // 2) // xpitch, xpitch, layer),
file=f)
print("TRACKS Y {} DO {} STEP {} LAYER {} ;".format(
ypitch // 2,
(self.y_size + ypitch // 2) // ypitch, ypitch, layer),
file=f)
def disp_def_components(self, f):
ncomps = sum([len(r['comps']) for r in self.rows])
print('COMPONENTS {} ;'.format(ncomps), file=f)
for r in self.rows:
x = r['x']
y = r['y']
orient = r['orientation']
for c in r['comps']:
print(' - {} {}'.format(c.name, c.model['name']),
end='', file=f)
if c.flip:
if orient[0] == 'F':
corient = orient[1:]
else:
corient = 'F' + orient
else:
corient = orient
print(' + FIXED ( {} {} ) {}'.format(
x, y, corient), end='', file=f)
x += c.model['width']
print(' ;', file=f)
print('END COMPONENTS', file=f)
def disp_def_pins(self, f):
print('PINS {} ;'.format(len(self.pins)), file=f)
for p in self.pins:
print(' - {} + NET {}'.format(p.name, p.net.name),
end='', file=f)
print(' + DIRECTION {}'.format(
{'I': 'INPUT', 'O': 'OUTPUT'}[p.dir]), end='', file=f)
print(' + USE SIGNAL', end='', file=f)
idx = 0 if p.place in "NS" else 1
pinwd = self.pintech['tracks'][p.layer][2]
pinpitch = self.pintech['tracks'][p.layer][idx]
corepitch = self.tech['tracks'][p.layer][idx]
corewd = self.tech['tracks'][p.layer][2]
if p.place in "NS":
# In general: met2
pinln = pinwd
if p.place == 'S':
y = pinwd
else:
y = self.y_size - pinwd
print(' + PLACED ( {} {} ) {} '.format(
p.offset, y, p.place), end='', file=f)
print(' + LAYER {} ( {} {} ) ( {} {} )'.format(
p.layer,
-pinwd, -pinln, pinwd, pinln), end='', file=f)
elif p.place in "EW":
# In general: met3
if p.place == 'W':
x = pinwd
else:
x = self.x_size - pinwd
print(' + PLACED ( {} {} ) N '.format(
x, p.offset), end='', file=f)
if corepitch != pinpitch:
pinln = pinpitch + pinwd
else:
pinln = pinwd
print(' + LAYER {} ( {} {} ) ( {} {} )'.format(
p.layer,
-pinwd, -pinwd, pinwd, pinln), end='', file=f)
print(' ;', file=f)
print('END PINS', file=f)
def disp_def_nets(self, f):
print('NETS {} ;'.format(len(self.nets)), file=f)
for n in self.nets:
print(' - {}'.format(n.name), end='', file=f)
for inst, port in n.conn:
if inst is None:
# This is a pin.
print(' ( PIN {} )'.format(port.name), end='', file=f)
else:
# This is an instance
print(' ( {} {} )'.format(
inst.name, inst.model[port]), end='', file=f)
print(file=f)
print(' + USE SIGNAL ;', file=f)
print('END NETS', file=f)
def disp_def(self, filename):
with open(filename, 'w') as f:
self.disp_def_hdr(f)
self.disp_def_row(f)
self.disp_def_tracks(f)
self.disp_def_components(f)
self.disp_def_pins(f)
self.disp_def_nets(f)
print('END DESIGN', file=f)
def write_config(self, filename):
with open(filename, 'w') as f:
print('set ::env(STD_CELL_LIBRARY) "{}"'.format(
self.tech['libname']), file=f)
print(file=f)
# Horizontal lines must agree with the parent
pdn_hpitch = 153180 # From configuration/floorplan.tcl
pdn_hoffset = 90 + self.row_height
if self.y_size < pdn_hpitch // 2:
print('Design is too small: height={}, power pitch={}'.format(
self.y_size, pdn_hpitch))
pdn_vpitch = 153600
if self.x_size > pdn_vpitch:
# Align
vpitch = (pdn_vpitch // self.row_width) * self.row_width
else:
vpitch = (self.rowl // 2) * self.row_width
print('set ::env(FP_PDN_VOFFSET) 0', file=f)
print('set ::env(FP_PDN_VPITCH) {}'.format(vpitch / 1000), file=f)
print('set ::env(FP_PDN_HOFFSET) {}'.format(
pdn_hoffset / 1000), file=f)
print('set ::env(FP_PDN_HPITCH) {}'.format(
pdn_hpitch / 1000), file=f)
print(file=f)
print('set ::env(FP_SIZING) absolute', file=f)
print('set ::env(DIE_AREA) "0 0 {} {}"'.format(
self.x_size / 1000, self.y_size / 1000), file=f)
def _add_net_name(self, dct, name, obj):
b = name.find('[')
if b == -1:
idx = None
else:
# This is part of a bus.
idx = int(name[b + 1:-1])
name = name[:b]
if name in dct:
dct[name][idx] = obj
else:
dct[name] = {idx: obj}
def write_verilog_range(self, f, key):
if key[0] is not None:
assert min(key) == 0
assert max(key) == len(key) - 1
f.write(" [{}:0]".format(len(key) - 1))
def write_verilog(self, f):
# 1. gather input-outputs
pins = {}
for p in self.pins:
self._add_net_name(pins, p.name, p)
f.write("module {} (\n".format(self.name))
for i, name in enumerate(sorted(pins.keys())):
p = pins[name]
k = list(p.keys())
first = p[k[0]]
if i != 0:
f.write(",\n")
f.write(" {}".format({'I': 'input', 'O': 'output'}[first.dir]))
self.write_verilog_range(f, k)
f.write(" {}".format(name))
f.write(");\n")
# 2. gather wires
wires = {}
for n in self.nets:
self._add_net_name(wires, n.name, n)
for name in sorted(wires.keys()):
w = wires[name]
k = list(w.keys())
f.write(" wire")
self.write_verilog_range(f, k)
f.write(" {};\n".format(name))
# 3. write cells
for c in self.components:
if not c.conns:
# Discard components without connections (fill, taps...)
continue
f.write(" {} {}(".format(c.model['name'], c.name))
for i, conn in enumerate(c.conns):
if i != 0:
f.write(", ")
f.write(".{}({})".format(c.model[conn['port']],
conn['net'].name))
f.write(");\n")
f.write("endmodule\n")
def write_vhdl_component(self, f):
pins = {}
for p in self.pins:
self._add_net_name(pins, p.name, p)
f.write(" component {} is\n".format(self.name))
f.write(" port (\n")
for i, name in enumerate(sorted(pins.keys())):
p = pins[name]
k = list(p.keys())
first = p[k[0]]
if i != 0:
f.write(";\n")
f.write(" {}: {}".format(
name, {'I': 'in ', 'O': 'out'}[first.dir]))
if k[0] is not None:
assert min(k) == 0
assert max(k) == len(k) - 1
f.write(" std_logic_vector({} downto 0)".format(len(k) - 1))
else:
f.write(" std_logic")
if self.ppow:
f.write(";\n")
f.write(" \\{}\\: std_logic".format(self.ppow))
if self.pgnd:
f.write(";\n")
f.write(" \\{}\\: std_logic".format(self.pgnd))
f.write(");\n")
f.write(" end component;\n")
def write_magic_net(self, f):
print(' Netlist File', file=f)
for n in self.nets:
# print(' {}'.format(n.name), file=f)
print(file=f)
for inst, port in n.conn:
if inst is None:
# This is a pin.
print('{}'.format(port.name), file=f)
else:
# This is an instance
print('{}/{}'.format(
inst.name, inst.model[port]), file=f)
| config_sky130_fd_hd = {'dff': {'name': 'sky130_fd_sc_hd__dfxtp_4', 'width': 19 * 460, 'input': 'D', 'output': 'Q', 'clock': 'CLK'}, 'cdly15_1': {'name': 'sky130_fd_sc_hd__clkdlybuf4s15_1', 'width': 8 * 460, 'input': 'A', 'output': 'X'}, 'cdly15_2': {'name': 'sky130_fd_sc_hd__clkdlybuf4s15_2', 'width': 9 * 460, 'input': 'A', 'output': 'X'}, 'cdly18_1': {'name': 'sky130_fd_sc_hd__clkdlybuf4s18_1', 'width': 8 * 460, 'input': 'A', 'output': 'X'}, 'cdly18_2': {'name': 'sky130_fd_sc_hd__clkdlybuf4s18_1', 'width': 8 * 460, 'input': 'A', 'output': 'X'}, 'cdly25_1': {'name': 'sky130_fd_sc_hd__clkdlybuf4s25_1', 'width': 8 * 460, 'input': 'A', 'output': 'X'}, 'cdly25_2': {'name': 'sky130_fd_sc_hd__clkdlybuf4s25_2', 'width': 8 * 460, 'input': 'A', 'output': 'X'}, 'cdly50_1': {'name': 'sky130_fd_sc_hd__clkdlybuf4s50_1', 'width': 8 * 460, 'input': 'A', 'output': 'X'}, 'cdly50_2': {'name': 'sky130_fd_sc_hd__clkdlybuf4s50_2', 'width': 9 * 460, 'input': 'A', 'output': 'X'}, 'cbuf_1': {'name': 'sky130_fd_sc_hd__clkbuf_1', 'width': 3 * 460, 'input': 'A', 'output': 'X'}, 'cbuf_2': {'name': 'sky130_fd_sc_hd__clkbuf_2', 'width': 4 * 460, 'input': 'A', 'output': 'X'}, 'cbuf_4': {'name': 'sky130_fd_sc_hd__clkbuf_4', 'width': 6 * 460, 'input': 'A', 'output': 'X'}, 'cbuf_8': {'name': 'sky130_fd_sc_hd__clkbuf_2', 'width': 11 * 460, 'input': 'A', 'output': 'X'}, 'cbuf_16': {'name': 'sky130_fd_sc_hd__clkbuf_16', 'width': 20 * 460, 'input': 'A', 'output': 'X'}, 'cinv_1': {'name': 'sky130_fd_sc_hd__clkinv_1', 'width': 3 * 460, 'input': 'A', 'output': 'Y'}, 'cinv_2': {'name': 'sky130_fd_sc_hd__clkinv_2', 'width': 4 * 460, 'input': 'A', 'output': 'Y'}, 'inv_1': {'name': 'sky130_fd_sc_hd__inv_1', 'width': 3 * 460, 'input': 'A', 'output': 'Y'}, 'mux2': {'name': 'sky130_fd_sc_hd__mux2_1', 'width': 9 * 460, 'in0': 'A0', 'in1': 'A1', 'sel': 'S', 'output': 'X'}, 'decap': {'name': 'sky130_fd_sc_hd__decap_3', 'width': 3 * 460}, 'tap': {'name': 'sky130_fd_sc_hd__tapvpwrvgnd_1', 'width': 1 * 460}, 'fill1': {'name': 'sky130_fd_sc_hd__fill_1', 'width': 1 * 460}, 'fill2': {'name': 'sky130_fd_sc_hd__fill_2', 'width': 2 * 460}, 'fill4': {'name': 'sky130_fd_sc_hd__fill_4', 'width': 4 * 460}, 'fill8': {'name': 'sky130_fd_sc_hd__fill_8', 'width': 8 * 460}}
config_sky130_fd_hs = {'dff': {'name': 'sky130_fd_sc_hs__dfxtp_4', 'width': 20 * 480, 'input': 'D', 'output': 'Q', 'clock': 'CLK'}, 'dly4_1': {'name': 'sky130_fd_sc_hs__dlygate4sd1_1', 'width': 8 * 480, 'input': 'A', 'output': 'X'}, 'cdinv_1': {'name': 'sky130_fd_sc_hs__clkdlyinv3sd1_1', 'width': 6 * 480, 'input': 'A', 'output': 'Y'}, 'mux2': {'name': 'sky130_fd_sc_hs__mux2_1', 'width': 9 * 480, 'in0': 'A0', 'in1': 'A1', 'sel': 'S', 'output': 'X'}, 'decap': {'name': 'sky130_fd_sc_hs__decap_4', 'width': 4 * 480}, 'tap': {'name': 'sky130_fd_sc_hs__tapvpwrvgnd_1', 'width': 1 * 480}, 'fill1': {'name': 'sky130_fd_sc_hs__fill_1', 'width': 1 * 480}, 'fill2': {'name': 'sky130_fd_sc_hs__fill_2', 'width': 2 * 480}, 'fill4': {'name': 'sky130_fd_sc_hs__fill_4', 'width': 4 * 480}, 'fill8': {'name': 'sky130_fd_sc_hs__fill_8', 'width': 8 * 480}}
config_sky130_fd_ls = {'dff': {'name': 'sky130_fd_sc_ls__dfxtp_4', 'width': 20 * 480, 'input': 'D', 'output': 'Q', 'clock': 'CLK'}, 'dly4_1': {'name': 'sky130_fd_sc_ls__dlygate4sd1_1', 'width': 8 * 480, 'input': 'A', 'output': 'X'}, 'cdinv_1': {'name': 'sky130_fd_sc_ls__clkdlyinv3sd1_1', 'width': 6 * 480, 'input': 'A', 'output': 'Y'}, 'mux2': {'name': 'sky130_fd_sc_ls__mux2_1', 'width': 9 * 480, 'in0': 'A0', 'in1': 'A1', 'sel': 'S', 'output': 'X'}, 'decap': {'name': 'sky130_fd_sc_ls__decap_4', 'width': 4 * 480}, 'tap': {'name': 'sky130_fd_sc_ls__tapvpwrvgnd_1', 'width': 1 * 480}, 'fill1': {'name': 'sky130_fd_sc_ls__fill_1', 'width': 1 * 480}, 'fill2': {'name': 'sky130_fd_sc_ls__fill_2', 'width': 2 * 480}, 'fill4': {'name': 'sky130_fd_sc_ls__fill_4', 'width': 4 * 480}, 'fill8': {'name': 'sky130_fd_sc_ls__fill_8', 'width': 8 * 480}}
config_sky130_fd_ms = {'dff': {'name': 'sky130_fd_sc_ms__dfxtp_4', 'width': 20 * 480, 'input': 'D', 'output': 'Q', 'clock': 'CLK'}, 'dly4_1': {'name': 'sky130_fd_sc_ms__dlygate4sd1_1', 'width': 8 * 480, 'input': 'A', 'output': 'X'}, 'cdinv_1': {'name': 'sky130_fd_sc_ms__clkdlyinv3sd1_1', 'width': 6 * 480, 'input': 'A', 'output': 'Y'}, 'mux2': {'name': 'sky130_fd_sc_ms__mux2_1', 'width': 9 * 480, 'in0': 'A0', 'in1': 'A1', 'sel': 'S', 'output': 'X'}, 'decap': {'name': 'sky130_fd_sc_ms__decap_4', 'width': 4 * 480}, 'tap': {'name': 'sky130_fd_sc_ms__tapvpwrvgnd_1', 'width': 1 * 480}, 'fill1': {'name': 'sky130_fd_sc_ms__fill_1', 'width': 1 * 480}, 'fill2': {'name': 'sky130_fd_sc_ms__fill_2', 'width': 2 * 480}, 'fill4': {'name': 'sky130_fd_sc_ms__fill_4', 'width': 4 * 480}, 'fill8': {'name': 'sky130_fd_sc_ms__fill_8', 'width': 8 * 480}}
config_sky130_osu_18_t_hs = {'dff': {'name': 'sky130_osu_sc_18T_hs__dff_1', 'width': 66 * 110, 'input': 'D', 'output': 'Q', 'clock': 'CK'}, 'buf_1': {'name': 'sky130_osu_sc_18T_hs__buf_1', 'width': 13 * 110, 'input': 'A', 'output': 'Y'}, 'mux2': {'name': 'sky130_osu_sc_18T_hs__mux2_1', 'width': 25 * 110, 'in0': 'A0', 'in1': 'A1', 'sel': 'S0', 'output': 'Y'}, 'decap': {'name': 'sky130_osu_sc_18T_hs__decap_1', 'width': 9 * 110}, 'fill1': {'name': 'sky130_osu_sc_18T_hs__fill_1', 'width': 1 * 110}, 'fill2': {'name': 'sky130_osu_sc_18T_hs__fill_2', 'width': 2 * 110}, 'fill4': {'name': 'sky130_osu_sc_18T_hs__fill_4', 'width': 4 * 110}, 'fill8': {'name': 'sky130_osu_sc_18T_hs__fill_8', 'width': 8 * 110}, 'fill16': {'name': 'sky130_osu_sc_18T_hs__fill_16', 'width': 16 * 110}, 'fill32': {'name': 'sky130_osu_sc_18T_hs__fill_32', 'width': 32 * 110}}
techs = {'fd_hd': {'cells': config_sky130_fd_hd, 'width': 460, 'height': 2720, 'tracks': {'li1': (460, 340, 170), 'met1': (340, 340, 140), 'met2': (460, 460, 140), 'met3': (680, 680, 300), 'met4': (920, 920, 300), 'met5': (3400, 3400, 1600)}, 'site': 'unithd', 'pins': ('met2', 'met3'), 'libname': 'sky130_fd_sc_hd'}, 'fd_hs': {'cells': config_sky130_fd_hs, 'width': 480, 'height': 3330, 'tracks': {'li1': (480, 370, 170), 'met1': (370, 370, 140), 'met2': (480, 480, 140), 'met3': (740, 740, 300), 'met4': (960, 960, 300), 'met5': (3330, 3330, 1600)}, 'site': 'unit', 'pins': ('met2', 'met3'), 'libname': 'sky130_fd_sc_hs'}, 'fd_ls': {'cells': config_sky130_fd_ls, 'width': 480, 'height': 3330, 'tracks': {'li1': (480, 480, 170), 'met1': (370, 370, 140), 'met2': (480, 480, 140), 'met3': (740, 740, 300), 'met4': (960, 960, 300), 'met5': (3330, 3330, 1600)}, 'site': 'unit', 'pins': ('met2', 'met3'), 'libname': 'sky130_fd_sc_ls'}, 'fd_ms': {'cells': config_sky130_fd_ms, 'width': 480, 'height': 3330, 'tracks': {'li1': (480, 480, 170), 'met1': (370, 370, 140), 'met2': (480, 480, 140), 'met3': (740, 740, 300), 'met4': (960, 960, 300), 'met5': (3330, 3330, 1600)}, 'site': 'unit', 'pins': ('met2', 'met3'), 'libname': 'sky130_fd_sc_ms'}, 'osu_18T_hs': {'cells': config_sky130_osu_18T_hs, 'width': 110, 'height': 6660, 'tracks': {'met1': (370, 370, 140), 'met2': (480, 480, 140), 'met3': (740, 740, 300), 'met4': (960, 960, 300), 'met5': (3330, 3330, 1600)}, 'site': '18T', 'pins': ('met2', 'met3'), 'libname': 'sky130_osu_sc_18T_hs'}}
class Gendef:
def __init__(self, tech, name):
self.name = name
self.tech = TECHS[tech]
self.pintech = TECHS['fd_hd']
self.row_width = self.tech['width']
self.row_height = self.tech['height']
self.cells = self.tech['cells']
self.hmargin = 12 * self.row_width
self.vmargin = 2 * self.row_height
self.nrow = 0
self.rowl = 0
self.rows = []
self.nets = []
self.pins = []
self.ppow = None
self.pgnd = None
self.components = []
self.build_fillers()
def build_rows(self, nrow):
self.nrow = nrow
for i in range(self.nrow):
r = {'comps': [], 'width': 0, 'x': self.hmargin, 'y': self.vmargin + i * self.row_height, 'orientation': 'FS' if i % 2 == 0 else 'N'}
self.rows.append(r)
class Net:
def __init__(self, name):
self.name = name
self.conn = []
def add_net(self, name):
n = GenDef.Net(name)
self.nets.append(n)
return n
class Pin:
def __init__(self, name, io):
self.name = name
self.dir = io
self.net = None
self.place = None
self.offset = None
self.layer = None
def add_pin(self, name, io):
"""Add a pin, return the corresponding net"""
assert io in 'IO'
n = self.add_net(name)
p = GenDef.Pin(name, io)
p.net = n
self.pins.append(p)
n.conn.append((None, p))
return p
def place_pin(self, pin, orient, offset):
assert pin.place is None, 'pin already placed'
assert orient in 'NSEW'
pin.place = orient
offset += self.hmargin if orient in 'NS' else self.vmargin
idx = 0 if orient in 'NS' else 1
pin.layer = self.pintech['pins'][idx]
pitch = self.pintech['tracks'][pin.layer][idx]
offset -= pitch // 2
offset = offset // pitch * pitch
offset += pitch // 2
pin.offset = offset
class Component:
def __init__(self, name, model):
self.name = name
self.model = model
self.flip = False
self.conns = []
def add_component(self, name, model):
comp = GenDef.Component(name, model)
self.components.append(comp)
return comp
def place_component(self, comp, row):
assert row >= 0
self.rows[row]['comps'].append(comp)
self.rows[row]['width'] += comp.model['width']
def connect(self, net, inst, port):
net.conn.append((inst, port))
if inst is not None:
inst.conns.append({'port': port, 'net': net})
def build_fillers(self):
fillers = [v for (k, v) in self.cells.items() if k.startswith('fill')]
self.fillers = sorted(fillers, key=lambda key: key['width'], reverse=True)
self.fill_label = 0
def _add_fill(self, row, comp):
c = self.add_component('FILL_{}'.format(self.fill_label), comp)
self.place_component(c, row)
self.fill_label += 1
def pad_rows(self):
"""Add fillers so that all rows have the same length"""
wd = max([r['width'] for r in self.rows])
tap = self.cells.get('tap')
for (i, r) in enumerate(self.rows):
for f in self.fillers:
while r['width'] + f['width'] <= wd:
if tap and f is self.fillers[0] and (r['width'] + f['width'] + tap['width'] <= wd):
self._add_fill(i, tap)
self._add_fill(i, f)
assert r['width'] == wd
def row_add_fill(self, row, wd):
wd *= self.row_width
for f in self.fillers:
if wd == 0:
break
fw = f['width']
while wd >= fw:
self._add_fill(row, f)
wd -= fw
def build_tap_decap(self, row, idx):
if 'tap' in self.cells:
tap = self.add_component('tap{}_{}'.format(row, idx), self.cells['tap'])
self.place_component(tap, row)
decap = self.add_component('decap{}_{}'.format(row, idx), self.cells['decap'])
self.place_component(decap, row)
def compute_size(self):
self.rowl = max((r['width'] for r in self.rows)) // self.row_width
self.x_size = self.rowl * self.row_width + 2 * self.hmargin
self.y_size = self.nrow * self.row_height + 2 * self.vmargin
def set_power_pin(self, ppow, pgnd):
self.ppow = ppow
self.pgnd = pgnd
def disp_def_hdr(self, f):
print('VERSION 5.8 ;', file=f)
print('DIVIDERCHAR "/" ;', file=f)
print('BUSBITCHARS "[]" ;', file=f)
print('DESIGN {} ;'.format(self.name), file=f)
print('UNITS DISTANCE MICRONS 1000 ;', file=f)
print('DIEAREA ( 0 0 ) ( {} {} ) ;'.format(self.x_size, self.y_size), file=f)
def disp_def_row(self, f):
for i in range(self.nrow):
r = self.rows[i]
print('ROW ROW_{} {} {} {} {} DO {} BY 1 STEP {} 0 ;'.format(i, self.tech['site'], r['x'], r['y'], r['orientation'], self.rowl, self.row_width), file=f)
def disp_def_tracks(self, f):
for (layer, (xpitch, ypitch, wd)) in self.tech['tracks'].items():
print('TRACKS X {} DO {} STEP {} LAYER {} ;'.format(xpitch // 2, (self.x_size + xpitch // 2) // xpitch, xpitch, layer), file=f)
print('TRACKS Y {} DO {} STEP {} LAYER {} ;'.format(ypitch // 2, (self.y_size + ypitch // 2) // ypitch, ypitch, layer), file=f)
def disp_def_components(self, f):
ncomps = sum([len(r['comps']) for r in self.rows])
print('COMPONENTS {} ;'.format(ncomps), file=f)
for r in self.rows:
x = r['x']
y = r['y']
orient = r['orientation']
for c in r['comps']:
print(' - {} {}'.format(c.name, c.model['name']), end='', file=f)
if c.flip:
if orient[0] == 'F':
corient = orient[1:]
else:
corient = 'F' + orient
else:
corient = orient
print(' + FIXED ( {} {} ) {}'.format(x, y, corient), end='', file=f)
x += c.model['width']
print(' ;', file=f)
print('END COMPONENTS', file=f)
def disp_def_pins(self, f):
print('PINS {} ;'.format(len(self.pins)), file=f)
for p in self.pins:
print(' - {} + NET {}'.format(p.name, p.net.name), end='', file=f)
print(' + DIRECTION {}'.format({'I': 'INPUT', 'O': 'OUTPUT'}[p.dir]), end='', file=f)
print(' + USE SIGNAL', end='', file=f)
idx = 0 if p.place in 'NS' else 1
pinwd = self.pintech['tracks'][p.layer][2]
pinpitch = self.pintech['tracks'][p.layer][idx]
corepitch = self.tech['tracks'][p.layer][idx]
corewd = self.tech['tracks'][p.layer][2]
if p.place in 'NS':
pinln = pinwd
if p.place == 'S':
y = pinwd
else:
y = self.y_size - pinwd
print(' + PLACED ( {} {} ) {} '.format(p.offset, y, p.place), end='', file=f)
print(' + LAYER {} ( {} {} ) ( {} {} )'.format(p.layer, -pinwd, -pinln, pinwd, pinln), end='', file=f)
elif p.place in 'EW':
if p.place == 'W':
x = pinwd
else:
x = self.x_size - pinwd
print(' + PLACED ( {} {} ) N '.format(x, p.offset), end='', file=f)
if corepitch != pinpitch:
pinln = pinpitch + pinwd
else:
pinln = pinwd
print(' + LAYER {} ( {} {} ) ( {} {} )'.format(p.layer, -pinwd, -pinwd, pinwd, pinln), end='', file=f)
print(' ;', file=f)
print('END PINS', file=f)
def disp_def_nets(self, f):
print('NETS {} ;'.format(len(self.nets)), file=f)
for n in self.nets:
print(' - {}'.format(n.name), end='', file=f)
for (inst, port) in n.conn:
if inst is None:
print(' ( PIN {} )'.format(port.name), end='', file=f)
else:
print(' ( {} {} )'.format(inst.name, inst.model[port]), end='', file=f)
print(file=f)
print(' + USE SIGNAL ;', file=f)
print('END NETS', file=f)
def disp_def(self, filename):
with open(filename, 'w') as f:
self.disp_def_hdr(f)
self.disp_def_row(f)
self.disp_def_tracks(f)
self.disp_def_components(f)
self.disp_def_pins(f)
self.disp_def_nets(f)
print('END DESIGN', file=f)
def write_config(self, filename):
with open(filename, 'w') as f:
print('set ::env(STD_CELL_LIBRARY) "{}"'.format(self.tech['libname']), file=f)
print(file=f)
pdn_hpitch = 153180
pdn_hoffset = 90 + self.row_height
if self.y_size < pdn_hpitch // 2:
print('Design is too small: height={}, power pitch={}'.format(self.y_size, pdn_hpitch))
pdn_vpitch = 153600
if self.x_size > pdn_vpitch:
vpitch = pdn_vpitch // self.row_width * self.row_width
else:
vpitch = self.rowl // 2 * self.row_width
print('set ::env(FP_PDN_VOFFSET) 0', file=f)
print('set ::env(FP_PDN_VPITCH) {}'.format(vpitch / 1000), file=f)
print('set ::env(FP_PDN_HOFFSET) {}'.format(pdn_hoffset / 1000), file=f)
print('set ::env(FP_PDN_HPITCH) {}'.format(pdn_hpitch / 1000), file=f)
print(file=f)
print('set ::env(FP_SIZING) absolute', file=f)
print('set ::env(DIE_AREA) "0 0 {} {}"'.format(self.x_size / 1000, self.y_size / 1000), file=f)
def _add_net_name(self, dct, name, obj):
b = name.find('[')
if b == -1:
idx = None
else:
idx = int(name[b + 1:-1])
name = name[:b]
if name in dct:
dct[name][idx] = obj
else:
dct[name] = {idx: obj}
def write_verilog_range(self, f, key):
if key[0] is not None:
assert min(key) == 0
assert max(key) == len(key) - 1
f.write(' [{}:0]'.format(len(key) - 1))
def write_verilog(self, f):
pins = {}
for p in self.pins:
self._add_net_name(pins, p.name, p)
f.write('module {} (\n'.format(self.name))
for (i, name) in enumerate(sorted(pins.keys())):
p = pins[name]
k = list(p.keys())
first = p[k[0]]
if i != 0:
f.write(',\n')
f.write(' {}'.format({'I': 'input', 'O': 'output'}[first.dir]))
self.write_verilog_range(f, k)
f.write(' {}'.format(name))
f.write(');\n')
wires = {}
for n in self.nets:
self._add_net_name(wires, n.name, n)
for name in sorted(wires.keys()):
w = wires[name]
k = list(w.keys())
f.write(' wire')
self.write_verilog_range(f, k)
f.write(' {};\n'.format(name))
for c in self.components:
if not c.conns:
continue
f.write(' {} {}('.format(c.model['name'], c.name))
for (i, conn) in enumerate(c.conns):
if i != 0:
f.write(', ')
f.write('.{}({})'.format(c.model[conn['port']], conn['net'].name))
f.write(');\n')
f.write('endmodule\n')
def write_vhdl_component(self, f):
pins = {}
for p in self.pins:
self._add_net_name(pins, p.name, p)
f.write(' component {} is\n'.format(self.name))
f.write(' port (\n')
for (i, name) in enumerate(sorted(pins.keys())):
p = pins[name]
k = list(p.keys())
first = p[k[0]]
if i != 0:
f.write(';\n')
f.write(' {}: {}'.format(name, {'I': 'in ', 'O': 'out'}[first.dir]))
if k[0] is not None:
assert min(k) == 0
assert max(k) == len(k) - 1
f.write(' std_logic_vector({} downto 0)'.format(len(k) - 1))
else:
f.write(' std_logic')
if self.ppow:
f.write(';\n')
f.write(' \\{}\\: std_logic'.format(self.ppow))
if self.pgnd:
f.write(';\n')
f.write(' \\{}\\: std_logic'.format(self.pgnd))
f.write(');\n')
f.write(' end component;\n')
def write_magic_net(self, f):
print(' Netlist File', file=f)
for n in self.nets:
print(file=f)
for (inst, port) in n.conn:
if inst is None:
print('{}'.format(port.name), file=f)
else:
print('{}/{}'.format(inst.name, inst.model[port]), file=f) |
#
# PySNMP MIB module ELTEX-MES-SWITCH-RATE-LIMITER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ELTEX-MES-SWITCH-RATE-LIMITER-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:01:58 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection")
eltMesSwitchRateLimiterMIB, = mibBuilder.importSymbols("ELTEX-MES-MNG-MIB", "eltMesSwitchRateLimiterMIB")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, ModuleIdentity, IpAddress, MibIdentifier, Gauge32, Counter32, TimeTicks, Integer32, Unsigned32, ObjectIdentity, Counter64, iso, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "ModuleIdentity", "IpAddress", "MibIdentifier", "Gauge32", "Counter32", "TimeTicks", "Integer32", "Unsigned32", "ObjectIdentity", "Counter64", "iso", "Bits")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
eltMesSwitchRateLimiterObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1))
eltMesSwitchRateLimiterConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 1))
eltMesSwitchRateLimiterStatistics = MibIdentifier((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 2))
class EltCpuRateLimiterTrafficType(TextualConvention, Integer32):
description = 'Traffic types for rate limiting on CPU.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21))
namedValues = NamedValues(("http", 1), ("telnet", 2), ("ssh", 3), ("snmp", 4), ("ip", 5), ("linkLocal", 6), ("arp", 7), ("arpInspec", 8), ("stpBpdu", 9), ("otherBpdu", 10), ("ipRouting", 11), ("ipOptions", 12), ("dhcpSnoop", 13), ("igmpSnoop", 14), ("mldSnoop", 15), ("sflow", 16), ("ace", 17), ("ipErrors", 18), ("other", 19), ("dhcpv6Snoop", 20), ("vrrp", 21))
class EltCpuRateStatisticsTrafficType(TextualConvention, Integer32):
description = 'Traffic types for input rates on CPU.'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28))
namedValues = NamedValues(("stack", 1), ("http", 2), ("telnet", 3), ("ssh", 4), ("snmp", 5), ("ip", 6), ("arp", 7), ("arpInspec", 8), ("stp", 9), ("ieee", 10), ("routeUnknown", 11), ("ipHopByHop", 12), ("mtuExceeded", 13), ("ipv4Multicast", 14), ("ipv6Multicast", 15), ("dhcpSnooping", 16), ("igmpSnooping", 17), ("mldSnooping", 18), ("ttlExceeded", 19), ("ipv4IllegalAddress", 20), ("ipv4HeaderError", 21), ("ipDaMismatch", 22), ("sflow", 23), ("logDenyAces", 24), ("dhcpv6Snooping", 25), ("vrrp", 26), ("logPermitAces", 27), ("ipv6HeaderError", 28))
eltCpuRateLimiterTable = MibTable((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 1, 1), )
if mibBuilder.loadTexts: eltCpuRateLimiterTable.setStatus('current')
if mibBuilder.loadTexts: eltCpuRateLimiterTable.setDescription('A list of CPU rate limiters.')
eltCpuRateLimiterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 1, 1, 1), ).setIndexNames((0, "ELTEX-MES-SWITCH-RATE-LIMITER-MIB", "eltCpuRateLimiterIndex"))
if mibBuilder.loadTexts: eltCpuRateLimiterEntry.setStatus('current')
if mibBuilder.loadTexts: eltCpuRateLimiterEntry.setDescription('An entry containing the custom CPU rate limiter information for specific traffic type.')
eltCpuRateLimiterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 1, 1, 1, 1), EltCpuRateLimiterTrafficType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eltCpuRateLimiterIndex.setStatus('current')
if mibBuilder.loadTexts: eltCpuRateLimiterIndex.setDescription('Traffic type')
eltCpuRateLimiterValue = MibTableColumn((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eltCpuRateLimiterValue.setStatus('current')
if mibBuilder.loadTexts: eltCpuRateLimiterValue.setDescription('Value of rate-limiter')
eltCpuRateStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 2, 1), )
if mibBuilder.loadTexts: eltCpuRateStatisticsTable.setStatus('current')
if mibBuilder.loadTexts: eltCpuRateStatisticsTable.setDescription('A list of CPU input rates per traffic type.')
eltCpuRateStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 2, 1, 1), ).setIndexNames((0, "ELTEX-MES-SWITCH-RATE-LIMITER-MIB", "eltCpuRateStatisticsIndex"))
if mibBuilder.loadTexts: eltCpuRateStatisticsEntry.setStatus('current')
if mibBuilder.loadTexts: eltCpuRateStatisticsEntry.setDescription('An entry containing the CPU input rates for specific traffic type.')
eltCpuRateStatisticsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 2, 1, 1, 1), EltCpuRateStatisticsTrafficType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eltCpuRateStatisticsIndex.setStatus('current')
if mibBuilder.loadTexts: eltCpuRateStatisticsIndex.setDescription('Traffic type')
eltCpuRateStatisticsRate = MibTableColumn((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 2, 1, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eltCpuRateStatisticsRate.setStatus('current')
if mibBuilder.loadTexts: eltCpuRateStatisticsRate.setDescription('Input rate int packets per second.')
eltCpuRateStatisticsCounter = MibTableColumn((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 2, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eltCpuRateStatisticsCounter.setStatus('current')
if mibBuilder.loadTexts: eltCpuRateStatisticsCounter.setDescription('Total counter of packets.')
mibBuilder.exportSymbols("ELTEX-MES-SWITCH-RATE-LIMITER-MIB", eltMesSwitchRateLimiterStatistics=eltMesSwitchRateLimiterStatistics, eltCpuRateStatisticsTable=eltCpuRateStatisticsTable, eltCpuRateStatisticsIndex=eltCpuRateStatisticsIndex, eltMesSwitchRateLimiterObjects=eltMesSwitchRateLimiterObjects, EltCpuRateStatisticsTrafficType=EltCpuRateStatisticsTrafficType, eltCpuRateStatisticsEntry=eltCpuRateStatisticsEntry, eltCpuRateLimiterEntry=eltCpuRateLimiterEntry, eltCpuRateStatisticsRate=eltCpuRateStatisticsRate, eltCpuRateLimiterTable=eltCpuRateLimiterTable, eltCpuRateLimiterIndex=eltCpuRateLimiterIndex, eltMesSwitchRateLimiterConfig=eltMesSwitchRateLimiterConfig, EltCpuRateLimiterTrafficType=EltCpuRateLimiterTrafficType, eltCpuRateLimiterValue=eltCpuRateLimiterValue, eltCpuRateStatisticsCounter=eltCpuRateStatisticsCounter)
| (integer, object_identifier, octet_string) = mibBuilder.importSymbols('ASN1', 'Integer', 'ObjectIdentifier', 'OctetString')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(value_size_constraint, constraints_union, single_value_constraint, value_range_constraint, constraints_intersection) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ValueSizeConstraint', 'ConstraintsUnion', 'SingleValueConstraint', 'ValueRangeConstraint', 'ConstraintsIntersection')
(elt_mes_switch_rate_limiter_mib,) = mibBuilder.importSymbols('ELTEX-MES-MNG-MIB', 'eltMesSwitchRateLimiterMIB')
(module_compliance, notification_group) = mibBuilder.importSymbols('SNMPv2-CONF', 'ModuleCompliance', 'NotificationGroup')
(mib_scalar, mib_table, mib_table_row, mib_table_column, notification_type, module_identity, ip_address, mib_identifier, gauge32, counter32, time_ticks, integer32, unsigned32, object_identity, counter64, iso, bits) = mibBuilder.importSymbols('SNMPv2-SMI', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'NotificationType', 'ModuleIdentity', 'IpAddress', 'MibIdentifier', 'Gauge32', 'Counter32', 'TimeTicks', 'Integer32', 'Unsigned32', 'ObjectIdentity', 'Counter64', 'iso', 'Bits')
(display_string, textual_convention) = mibBuilder.importSymbols('SNMPv2-TC', 'DisplayString', 'TextualConvention')
elt_mes_switch_rate_limiter_objects = mib_identifier((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1))
elt_mes_switch_rate_limiter_config = mib_identifier((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 1))
elt_mes_switch_rate_limiter_statistics = mib_identifier((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 2))
class Eltcpuratelimitertraffictype(TextualConvention, Integer32):
description = 'Traffic types for rate limiting on CPU.'
status = 'current'
subtype_spec = Integer32.subtypeSpec + constraints_union(single_value_constraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21))
named_values = named_values(('http', 1), ('telnet', 2), ('ssh', 3), ('snmp', 4), ('ip', 5), ('linkLocal', 6), ('arp', 7), ('arpInspec', 8), ('stpBpdu', 9), ('otherBpdu', 10), ('ipRouting', 11), ('ipOptions', 12), ('dhcpSnoop', 13), ('igmpSnoop', 14), ('mldSnoop', 15), ('sflow', 16), ('ace', 17), ('ipErrors', 18), ('other', 19), ('dhcpv6Snoop', 20), ('vrrp', 21))
class Eltcpuratestatisticstraffictype(TextualConvention, Integer32):
description = 'Traffic types for input rates on CPU.'
status = 'current'
subtype_spec = Integer32.subtypeSpec + constraints_union(single_value_constraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28))
named_values = named_values(('stack', 1), ('http', 2), ('telnet', 3), ('ssh', 4), ('snmp', 5), ('ip', 6), ('arp', 7), ('arpInspec', 8), ('stp', 9), ('ieee', 10), ('routeUnknown', 11), ('ipHopByHop', 12), ('mtuExceeded', 13), ('ipv4Multicast', 14), ('ipv6Multicast', 15), ('dhcpSnooping', 16), ('igmpSnooping', 17), ('mldSnooping', 18), ('ttlExceeded', 19), ('ipv4IllegalAddress', 20), ('ipv4HeaderError', 21), ('ipDaMismatch', 22), ('sflow', 23), ('logDenyAces', 24), ('dhcpv6Snooping', 25), ('vrrp', 26), ('logPermitAces', 27), ('ipv6HeaderError', 28))
elt_cpu_rate_limiter_table = mib_table((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 1, 1))
if mibBuilder.loadTexts:
eltCpuRateLimiterTable.setStatus('current')
if mibBuilder.loadTexts:
eltCpuRateLimiterTable.setDescription('A list of CPU rate limiters.')
elt_cpu_rate_limiter_entry = mib_table_row((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 1, 1, 1)).setIndexNames((0, 'ELTEX-MES-SWITCH-RATE-LIMITER-MIB', 'eltCpuRateLimiterIndex'))
if mibBuilder.loadTexts:
eltCpuRateLimiterEntry.setStatus('current')
if mibBuilder.loadTexts:
eltCpuRateLimiterEntry.setDescription('An entry containing the custom CPU rate limiter information for specific traffic type.')
elt_cpu_rate_limiter_index = mib_table_column((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 1, 1, 1, 1), elt_cpu_rate_limiter_traffic_type()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
eltCpuRateLimiterIndex.setStatus('current')
if mibBuilder.loadTexts:
eltCpuRateLimiterIndex.setDescription('Traffic type')
elt_cpu_rate_limiter_value = mib_table_column((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 1, 1, 1, 2), integer32().subtype(subtypeSpec=value_range_constraint(0, 65535))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
eltCpuRateLimiterValue.setStatus('current')
if mibBuilder.loadTexts:
eltCpuRateLimiterValue.setDescription('Value of rate-limiter')
elt_cpu_rate_statistics_table = mib_table((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 2, 1))
if mibBuilder.loadTexts:
eltCpuRateStatisticsTable.setStatus('current')
if mibBuilder.loadTexts:
eltCpuRateStatisticsTable.setDescription('A list of CPU input rates per traffic type.')
elt_cpu_rate_statistics_entry = mib_table_row((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 2, 1, 1)).setIndexNames((0, 'ELTEX-MES-SWITCH-RATE-LIMITER-MIB', 'eltCpuRateStatisticsIndex'))
if mibBuilder.loadTexts:
eltCpuRateStatisticsEntry.setStatus('current')
if mibBuilder.loadTexts:
eltCpuRateStatisticsEntry.setDescription('An entry containing the CPU input rates for specific traffic type.')
elt_cpu_rate_statistics_index = mib_table_column((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 2, 1, 1, 1), elt_cpu_rate_statistics_traffic_type()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
eltCpuRateStatisticsIndex.setStatus('current')
if mibBuilder.loadTexts:
eltCpuRateStatisticsIndex.setDescription('Traffic type')
elt_cpu_rate_statistics_rate = mib_table_column((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 2, 1, 1, 2), gauge32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
eltCpuRateStatisticsRate.setStatus('current')
if mibBuilder.loadTexts:
eltCpuRateStatisticsRate.setDescription('Input rate int packets per second.')
elt_cpu_rate_statistics_counter = mib_table_column((1, 3, 6, 1, 4, 1, 35265, 1, 23, 1, 773, 1, 2, 1, 1, 3), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
eltCpuRateStatisticsCounter.setStatus('current')
if mibBuilder.loadTexts:
eltCpuRateStatisticsCounter.setDescription('Total counter of packets.')
mibBuilder.exportSymbols('ELTEX-MES-SWITCH-RATE-LIMITER-MIB', eltMesSwitchRateLimiterStatistics=eltMesSwitchRateLimiterStatistics, eltCpuRateStatisticsTable=eltCpuRateStatisticsTable, eltCpuRateStatisticsIndex=eltCpuRateStatisticsIndex, eltMesSwitchRateLimiterObjects=eltMesSwitchRateLimiterObjects, EltCpuRateStatisticsTrafficType=EltCpuRateStatisticsTrafficType, eltCpuRateStatisticsEntry=eltCpuRateStatisticsEntry, eltCpuRateLimiterEntry=eltCpuRateLimiterEntry, eltCpuRateStatisticsRate=eltCpuRateStatisticsRate, eltCpuRateLimiterTable=eltCpuRateLimiterTable, eltCpuRateLimiterIndex=eltCpuRateLimiterIndex, eltMesSwitchRateLimiterConfig=eltMesSwitchRateLimiterConfig, EltCpuRateLimiterTrafficType=EltCpuRateLimiterTrafficType, eltCpuRateLimiterValue=eltCpuRateLimiterValue, eltCpuRateStatisticsCounter=eltCpuRateStatisticsCounter) |
"""
Parent class for all video processors. Functions effectively as an interface.
All video processor implementations must inherit this class to function.
"""
class video_processor_base:
def __init__(self):
pass
def scale_video(self, original_video, scaled_video, scale):
"""
Scales a video (original) to a new video (scaled) into
given pixel dimensions (scale x scale)
"""
pass
def get_per_second_frames(self, original_video, out_dir):
"""
Extracts a frame for each second of a given video into out_dir.
"""
pass
def get_frame_range_images(self, original_video, out_dir, ranges):
"""
Extracts all the frames between the given
timestamp ranges (tuples of start/end timestamps in second increments)
for a given video (original_video - path to original video).
Extracted items are written to out_dir.
"""
pass
def get_frame_range_clips(self, original_video, out_dir, ranges, max_number_queries=3):
"""
Extracts all the video clips between the given
timestamp ranges (tuples of start/end timestamps in second increments)
for a given video (original_video - path to original video).
Extracted items are written to out_dir.
"""
pass
def get_frame_images(self, original_video, out_dir, frames):
"""
Given a list of frame numbers, extracts those frames from the video (original_video).
Writes said frames to out_dir
"""
pass | """
Parent class for all video processors. Functions effectively as an interface.
All video processor implementations must inherit this class to function.
"""
class Video_Processor_Base:
def __init__(self):
pass
def scale_video(self, original_video, scaled_video, scale):
"""
Scales a video (original) to a new video (scaled) into
given pixel dimensions (scale x scale)
"""
pass
def get_per_second_frames(self, original_video, out_dir):
"""
Extracts a frame for each second of a given video into out_dir.
"""
pass
def get_frame_range_images(self, original_video, out_dir, ranges):
"""
Extracts all the frames between the given
timestamp ranges (tuples of start/end timestamps in second increments)
for a given video (original_video - path to original video).
Extracted items are written to out_dir.
"""
pass
def get_frame_range_clips(self, original_video, out_dir, ranges, max_number_queries=3):
"""
Extracts all the video clips between the given
timestamp ranges (tuples of start/end timestamps in second increments)
for a given video (original_video - path to original video).
Extracted items are written to out_dir.
"""
pass
def get_frame_images(self, original_video, out_dir, frames):
"""
Given a list of frame numbers, extracts those frames from the video (original_video).
Writes said frames to out_dir
"""
pass |
#!/usr/bin/env python
#------------------------------------------------------------------------
# NAME: event.py -
# HISTORY: -
# 2016-02-02 leerw@ornl.gov -
# Copied from
# http://www.valuedlessons.com/2008/04/events-in-python.html
# and reformatted.
# 2008-04-28 leerw@ornl.gov -
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# CLASS: Event -
#------------------------------------------------------------------------
class Event ( object ):
"""
Simple event implementation from
http://www.valuedlessons.com/2008/04/events-in-python.html
"""
# -- Object Methods
# --
#----------------------------------------------------------------------
# METHOD: __init__() -
#----------------------------------------------------------------------
def __init__( self, source ):
self.fListeners = set()
self.fSource = source
#end __init__
#----------------------------------------------------------------------
# METHOD: addListener() -
#----------------------------------------------------------------------
def addListener( self, l ):
"""
@param l listener to add
@return self
"""
self.fListeners.add( l )
return self
#end addListener
#----------------------------------------------------------------------
# METHOD: fire() -
#----------------------------------------------------------------------
def fire( self, *args, **kargs ):
"""
"""
for l in self.fListeners:
l( self.fSource, *args, **kargs )
#end fire
#----------------------------------------------------------------------
# METHOD: getSource() -
#----------------------------------------------------------------------
def getSource( self ):
return self.fSource
#end getSource
#----------------------------------------------------------------------
# METHOD: getListenerCount() -
#----------------------------------------------------------------------
def getListenerCount( self ):
return len( self.fListeners )
#end getListenerCount
#----------------------------------------------------------------------
# METHOD: removeListener() -
#----------------------------------------------------------------------
def removeListener( self, l ):
"""
@param l listener to remove
@return self
"""
if l in self.fListeners:
self.fListeners.remove( l )
return self
#end removeListener
#----------------------------------------------------------------------
# METHOD REFERENCES -
#----------------------------------------------------------------------
__call__ = fire
__iadd__ = addListener
__isub__ = removeListener
__len__ = getListenerCount
#end Event
| class Event(object):
"""
Simple event implementation from
http://www.valuedlessons.com/2008/04/events-in-python.html
"""
def __init__(self, source):
self.fListeners = set()
self.fSource = source
def add_listener(self, l):
"""
@param l listener to add
@return self
"""
self.fListeners.add(l)
return self
def fire(self, *args, **kargs):
"""
"""
for l in self.fListeners:
l(self.fSource, *args, **kargs)
def get_source(self):
return self.fSource
def get_listener_count(self):
return len(self.fListeners)
def remove_listener(self, l):
"""
@param l listener to remove
@return self
"""
if l in self.fListeners:
self.fListeners.remove(l)
return self
__call__ = fire
__iadd__ = addListener
__isub__ = removeListener
__len__ = getListenerCount |
def get_last_apriori_filter(connection):
# select to list
select_cursor = connection.cursor()
select_query = "select from_date,to_date,min_support,min_confidence from apriori_filter order by id desc limit 1"
select_cursor.execute(select_query)
record = select_cursor.fetchone()
select_cursor.close()
return record
def insert_apriori_filter(connection,from_date,to_date,min_support,min_confidence,num_of_transaction):
"""
"""
sql = "insert into apriori_filter(from_date,to_date,min_support,min_confidence,num_of_transaction) values(%s,%s,%s,%s,%s)"
# created_at =
value = (from_date,to_date,min_support,min_confidence,num_of_transaction)
cur = connection.cursor()
cur.execute(sql,value)
return cur.lastrowid
| def get_last_apriori_filter(connection):
select_cursor = connection.cursor()
select_query = 'select from_date,to_date,min_support,min_confidence from apriori_filter order by id desc limit 1'
select_cursor.execute(select_query)
record = select_cursor.fetchone()
select_cursor.close()
return record
def insert_apriori_filter(connection, from_date, to_date, min_support, min_confidence, num_of_transaction):
"""
"""
sql = 'insert into apriori_filter(from_date,to_date,min_support,min_confidence,num_of_transaction) values(%s,%s,%s,%s,%s)'
value = (from_date, to_date, min_support, min_confidence, num_of_transaction)
cur = connection.cursor()
cur.execute(sql, value)
return cur.lastrowid |
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
t = int(input())
for _ in range(t):
h, w = map(int, input().strip().split())
if h % 3 == 0 or w % 3 == 0:
print(0)
else:
size = (h, w)
mx = max(size)
mn = min(size)
h1 = round(mx / 3)
h2 = mx - h1
a1 = h1 * mn
w1 = mn // 2
w2 = mn - w1
a2 = w1 * h2
a3 = w2 * h2
res1 = (max(a1, a2, a3) - min(a1, a2, a3))
h1 = round(mn / 3)
h2 = mn - h1
a1 = h1 * mx
w1 = mx // 2
w2 = mx - w1
a2 = w1 * h2
a3 = w2 * h2
res2 = (max(a1, a2, a3) - min(a1, a2, a3))
print(min(res1, res2, mn))
| """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
t = int(input())
for _ in range(t):
(h, w) = map(int, input().strip().split())
if h % 3 == 0 or w % 3 == 0:
print(0)
else:
size = (h, w)
mx = max(size)
mn = min(size)
h1 = round(mx / 3)
h2 = mx - h1
a1 = h1 * mn
w1 = mn // 2
w2 = mn - w1
a2 = w1 * h2
a3 = w2 * h2
res1 = max(a1, a2, a3) - min(a1, a2, a3)
h1 = round(mn / 3)
h2 = mn - h1
a1 = h1 * mx
w1 = mx // 2
w2 = mx - w1
a2 = w1 * h2
a3 = w2 * h2
res2 = max(a1, a2, a3) - min(a1, a2, a3)
print(min(res1, res2, mn)) |
#prime number
# n=int(input("enter any number"))
# count=0
# i=1
# while (i<=n):
# if (n%i)==0:
# count=count+1
# i=i+1
# if (count==2):
# print("prime number")
# else:
# print("composite number")
i=0
b=0
while i<=100:
j=2
count=0
while j<=i//2:
if i%j==0:
count=count+1
break
j+=1
if count==0 and i!=1:
print(i,"prime")
else:
print(i,"not prime")
i+=1 | i = 0
b = 0
while i <= 100:
j = 2
count = 0
while j <= i // 2:
if i % j == 0:
count = count + 1
break
j += 1
if count == 0 and i != 1:
print(i, 'prime')
else:
print(i, 'not prime')
i += 1 |
# 01234567890123456789012
# Mary had a little lamb.
# |--| |---------| GOLD
# |--| |-| |---| PRED
# || |---| INTERSECT
def merge_and_add(out, one, two):
a = max(one[0], two[0])
b = min(one[1], two[1])
if a <= b:
out.append([a, b])
def calculate_intersect(gold, pred):
out = []
id_gold = 0
id_pred = 0
while id_gold < len(gold) and id_pred < len(pred):
curr_gold = gold[id_gold]
curr_pred = pred[id_pred]
merge_and_add(out, curr_gold, curr_pred)
if curr_pred[1] >= curr_gold[1]:
id_gold += 1
if curr_pred[1] <= curr_gold[1]:
id_pred += 1
return out
calculate_intersect([[0, 5], [10, 16]], [[2, 4], [8, 10], [14, 15]])
| def merge_and_add(out, one, two):
a = max(one[0], two[0])
b = min(one[1], two[1])
if a <= b:
out.append([a, b])
def calculate_intersect(gold, pred):
out = []
id_gold = 0
id_pred = 0
while id_gold < len(gold) and id_pred < len(pred):
curr_gold = gold[id_gold]
curr_pred = pred[id_pred]
merge_and_add(out, curr_gold, curr_pred)
if curr_pred[1] >= curr_gold[1]:
id_gold += 1
if curr_pred[1] <= curr_gold[1]:
id_pred += 1
return out
calculate_intersect([[0, 5], [10, 16]], [[2, 4], [8, 10], [14, 15]]) |
# Dimmer Switch class
class DimmerSwitch():
def __init__(self, label):
self.label = label
self.isOn = False
self.brightness = 0
def turnOn(self):
self.isOn = True
# turn the light on at self.brightness
def turnOff(self):
self.isOn = False
# turn the light off
def raiseLevel(self):
if self.brightness < 10:
self.brightness = self.brightness + 1
def lowerLevel(self):
if self.brightness > 0:
self.brightness = self.brightness - 1
# Extra method for debugging
def show(self):
print('Label:', self.label)
print('Light is on?', self.isOn)
print('Brightness is:', self.brightness)
print()
# Main code (to demo with Python Tutor)
# Create two DimmerSwitch objects
oDimmer1 = DimmerSwitch('Dimmer1')
oDimmer2 = DimmerSwitch('Dimmer2')
# Tell oDimmer1 to raise its level
oDimmer1.raiseLevel()
# Tell oDimmer2 to raise its level
oDimmer2.raiseLevel()
| class Dimmerswitch:
def __init__(self, label):
self.label = label
self.isOn = False
self.brightness = 0
def turn_on(self):
self.isOn = True
def turn_off(self):
self.isOn = False
def raise_level(self):
if self.brightness < 10:
self.brightness = self.brightness + 1
def lower_level(self):
if self.brightness > 0:
self.brightness = self.brightness - 1
def show(self):
print('Label:', self.label)
print('Light is on?', self.isOn)
print('Brightness is:', self.brightness)
print()
o_dimmer1 = dimmer_switch('Dimmer1')
o_dimmer2 = dimmer_switch('Dimmer2')
oDimmer1.raiseLevel()
oDimmer2.raiseLevel() |
#func What we should get after the Module 1
chatbot_name = "Garik"
user_name = input("Hello! What's you name? ")
#1
phrase = input(chatbot_name + ": What do you think? ")
print("Yes, " + user_name + ", " + phrase)
#2
phrase = input(chatbot_name + ": What do you think? ")
print("Yes, " + user_name + ", " + phrase)
#3
phrase = input(chatbot_name + ": What do you think? ")
print("Yes, " + user_name + ", " + phrase)
#4
phrase = input(chatbot_name + ": What do you think? ")
print("Yes, " + user_name + ", " + phrase)
# ... and so on and so forth
| chatbot_name = 'Garik'
user_name = input("Hello! What's you name? ")
phrase = input(chatbot_name + ': What do you think? ')
print('Yes, ' + user_name + ', ' + phrase)
phrase = input(chatbot_name + ': What do you think? ')
print('Yes, ' + user_name + ', ' + phrase)
phrase = input(chatbot_name + ': What do you think? ')
print('Yes, ' + user_name + ', ' + phrase)
phrase = input(chatbot_name + ': What do you think? ')
print('Yes, ' + user_name + ', ' + phrase) |
# For more info check out https://github.com/etianen/django-python3-ldap#available-settings
# TODO Read this info from enviornment variables
# The URL of the LDAP server.
LDAP_AUTH_URL = "ldap://localhost:389"
# Initiate TLS on connection.
LDAP_AUTH_USE_TLS = False
# The LDAP search base for looking up users.
LDAP_AUTH_SEARCH_BASE = "ou=people,dc=example,dc=com"
# The LDAP class that represents a user.
LDAP_AUTH_OBJECT_CLASS = "inetOrgPerson"
# User model fields mapped to the LDAP
# attributes that represent them.
LDAP_AUTH_USER_FIELDS = {
"username": "uid",
"first_name": "givenName",
"last_name": "sn",
"email": "mail",
}
# A tuple of django model fields used to uniquely identify a user.
LDAP_AUTH_USER_LOOKUP_FIELDS = ("username",)
# Path to a callable that takes a dict of {model_field_name: value},
# returning a dict of clean model data.
# Use this to customize how data loaded from LDAP is saved to the User model.
LDAP_AUTH_CLEAN_USER_DATA = "django_python3_ldap.utils.clean_user_data"
# Path to a callable that takes a user model and a dict of {ldap_field_name: [value]},
# and saves any additional user relationships based on the LDAP data.
# Use this to customize how data loaded from LDAP is saved to User model relations.
# For customizing non-related User model fields, use LDAP_AUTH_CLEAN_USER_DATA.
LDAP_AUTH_SYNC_USER_RELATIONS = "django_python3_ldap.utils.sync_user_relations"
# Path to a callable that takes a dict of {ldap_field_name: value},
# returning a list of [ldap_search_filter]. The search filters will then be AND'd
# together when creating the final search filter.
LDAP_AUTH_FORMAT_SEARCH_FILTERS = "django_python3_ldap.utils.format_search_filters"
# Path to a callable that takes a dict of {model_field_name: value}, and returns
# a string of the username to bind to the LDAP server.
# Use this to support different types of LDAP server.
LDAP_AUTH_FORMAT_USERNAME = "django_python3_ldap.utils.format_username_openldap"
# Sets the login domain for Active Directory users.
LDAP_AUTH_ACTIVE_DIRECTORY_DOMAIN = None
# The LDAP username and password of a user for authenticating the `ldap_sync_users`
# management command. Set to None if you allow anonymous queries.
LDAP_AUTH_CONNECTION_USERNAME = None
LDAP_AUTH_CONNECTION_PASSWORD = None | ldap_auth_url = 'ldap://localhost:389'
ldap_auth_use_tls = False
ldap_auth_search_base = 'ou=people,dc=example,dc=com'
ldap_auth_object_class = 'inetOrgPerson'
ldap_auth_user_fields = {'username': 'uid', 'first_name': 'givenName', 'last_name': 'sn', 'email': 'mail'}
ldap_auth_user_lookup_fields = ('username',)
ldap_auth_clean_user_data = 'django_python3_ldap.utils.clean_user_data'
ldap_auth_sync_user_relations = 'django_python3_ldap.utils.sync_user_relations'
ldap_auth_format_search_filters = 'django_python3_ldap.utils.format_search_filters'
ldap_auth_format_username = 'django_python3_ldap.utils.format_username_openldap'
ldap_auth_active_directory_domain = None
ldap_auth_connection_username = None
ldap_auth_connection_password = None |
class Solution:
def canThreePartsEqualSum(self, A: List[int]) -> bool:
s = sum(A)
if s % 3 != 0:
return False
avg = s // 3
cnt = 0
s = 0
for i in A:
s += i
if s == avg:
cnt += 1
s = 0
return cnt == 3
| class Solution:
def can_three_parts_equal_sum(self, A: List[int]) -> bool:
s = sum(A)
if s % 3 != 0:
return False
avg = s // 3
cnt = 0
s = 0
for i in A:
s += i
if s == avg:
cnt += 1
s = 0
return cnt == 3 |
# -*- coding: utf-8 -*-
"""
Solution to Project Euler problem 4
Author: Jaime Liew
https://github.com/jaimeliew1/Project_Euler_Solutions
"""
def isPalindrome(x):
# Returns true if the integer, x is palindromic
x = str(x)
n = len(x)
if n%2 == 0:
left, right = x[:n//2], x[n//2:]
else:
left, right = x[:(n-1)//2], x[(n+1)//2:]
return left == right[::-1]
def run():
palindromes = []
for i in range(100,1000):
for j in range(i, 1000):
if isPalindrome(i*j):
palindromes.append(i*j)
return max(palindromes)
if __name__ == "__main__":
print(run())
| """
Solution to Project Euler problem 4
Author: Jaime Liew
https://github.com/jaimeliew1/Project_Euler_Solutions
"""
def is_palindrome(x):
x = str(x)
n = len(x)
if n % 2 == 0:
(left, right) = (x[:n // 2], x[n // 2:])
else:
(left, right) = (x[:(n - 1) // 2], x[(n + 1) // 2:])
return left == right[::-1]
def run():
palindromes = []
for i in range(100, 1000):
for j in range(i, 1000):
if is_palindrome(i * j):
palindromes.append(i * j)
return max(palindromes)
if __name__ == '__main__':
print(run()) |
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def getIntersectionNode(self, headA, headB):
"""
:type head1, head1: ListNode
:rtype: ListNode
"""
if not headA or not headB: return None
tail = headA
while tail.next:
tail = tail.next
tail.next = headB
slow, fast = headA, headA
while fast and fast.next:
slow = slow.next
fast = fast.next.next
if slow == fast:
break
else:
tail.next = None
return None
fast = headA
while slow != fast:
slow = slow.next
fast = fast.next
tail.next = None
return fast
| class Solution(object):
def get_intersection_node(self, headA, headB):
"""
:type head1, head1: ListNode
:rtype: ListNode
"""
if not headA or not headB:
return None
tail = headA
while tail.next:
tail = tail.next
tail.next = headB
(slow, fast) = (headA, headA)
while fast and fast.next:
slow = slow.next
fast = fast.next.next
if slow == fast:
break
else:
tail.next = None
return None
fast = headA
while slow != fast:
slow = slow.next
fast = fast.next
tail.next = None
return fast |
new = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def to_base_64(string):
res=""
for i in string:
res+=binary(i)
if len(res)%6!=0: res=res+"0"*(6-len(res)%6)
result=""
for i in range(0,len(res),6):
result+=new[int(res[i:i+6], 2)]
return result
def from_base_64(string):
res=""
for i in string:
res+=binary2(i)
result=""
for i in range(0,len(res),8):
result+=chr(int(res[i:i+8], 2))
return result.rstrip('\x00')
def binary(string):
res=bin(ord(string))[2:]
return "0"*(8-len(res))+res
def binary2(string):
res=bin(new.index(string))[2:]
return "0"*(6-len(res))+res | new = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def to_base_64(string):
res = ''
for i in string:
res += binary(i)
if len(res) % 6 != 0:
res = res + '0' * (6 - len(res) % 6)
result = ''
for i in range(0, len(res), 6):
result += new[int(res[i:i + 6], 2)]
return result
def from_base_64(string):
res = ''
for i in string:
res += binary2(i)
result = ''
for i in range(0, len(res), 8):
result += chr(int(res[i:i + 8], 2))
return result.rstrip('\x00')
def binary(string):
res = bin(ord(string))[2:]
return '0' * (8 - len(res)) + res
def binary2(string):
res = bin(new.index(string))[2:]
return '0' * (6 - len(res)) + res |
class HostName(basestring):
"""
Host name
"""
@staticmethod
def get_api_name():
return "host-name"
| class Hostname(basestring):
"""
Host name
"""
@staticmethod
def get_api_name():
return 'host-name' |
inp = open("input/day6.txt", "r")
prvotne_ribe = [int(x) for x in inp.readline().split(",")]
inp.close()
prvotna_populacija = [0 for _ in range(9)]
for riba in prvotne_ribe:
prvotna_populacija[riba] += 1
def zivljenje(N):
populacija = prvotna_populacija
for _ in range(N):
nova_populacija = [0 for _ in range(9)]
for k in range(9):
if k == 0:
nova_populacija[8] += populacija[k]
nova_populacija[6] += populacija[k]
else:
nova_populacija[k-1] += populacija[k]
populacija = nova_populacija
return sum(populacija)
# --------------------------
print("1. del: ")
print(zivljenje(80))
print("2. del: ")
print(zivljenje(256))
| inp = open('input/day6.txt', 'r')
prvotne_ribe = [int(x) for x in inp.readline().split(',')]
inp.close()
prvotna_populacija = [0 for _ in range(9)]
for riba in prvotne_ribe:
prvotna_populacija[riba] += 1
def zivljenje(N):
populacija = prvotna_populacija
for _ in range(N):
nova_populacija = [0 for _ in range(9)]
for k in range(9):
if k == 0:
nova_populacija[8] += populacija[k]
nova_populacija[6] += populacija[k]
else:
nova_populacija[k - 1] += populacija[k]
populacija = nova_populacija
return sum(populacija)
print('1. del: ')
print(zivljenje(80))
print('2. del: ')
print(zivljenje(256)) |
class MockLROPoller(object):
def result(self, timeout: None):
pass
class MockVirtualMachineScaleSetVMsOperations(object):
def begin_power_off(self, resource_group_name, scale_set_name, instance_id):
return MockLROPoller()
def begin_delete(self, resource_group_name, scale_set_name, instance_id):
return MockLROPoller()
def begin_restart(self, resource_group_name, scale_set_name, instance_id):
return MockLROPoller()
def begin_deallocate(self, resource_group_name, scale_set_name, instance_id):
return MockLROPoller()
class MockComputeManagementClient(object):
def __init__(self):
self.operations = MockVirtualMachineScaleSetVMsOperations()
@property
def virtual_machine_scale_set_vms(self):
return self.operations
| class Mocklropoller(object):
def result(self, timeout: None):
pass
class Mockvirtualmachinescalesetvmsoperations(object):
def begin_power_off(self, resource_group_name, scale_set_name, instance_id):
return mock_lro_poller()
def begin_delete(self, resource_group_name, scale_set_name, instance_id):
return mock_lro_poller()
def begin_restart(self, resource_group_name, scale_set_name, instance_id):
return mock_lro_poller()
def begin_deallocate(self, resource_group_name, scale_set_name, instance_id):
return mock_lro_poller()
class Mockcomputemanagementclient(object):
def __init__(self):
self.operations = mock_virtual_machine_scale_set_v_ms_operations()
@property
def virtual_machine_scale_set_vms(self):
return self.operations |
# -*- coding: utf-8 -*-
"""
This file is adopted from Chainer official implementation with small
modifications.
https://github.com/chainer/chainer/blob/v4.2.0/chainer/training/triggers/minmax_value_trigger.py
"""
class BestValueTrigger(object):
"""Trigger invoked when specific value becomes best. This will run every
time key value is observed.
Args:
key (str): Key of value.
compare (callable): Compare function which takes current best value and
new value and returns whether new value is better than current
best.
"""
def __init__(self, key, compare):
self._key = key
self._best_value = None
self._compare = compare
def __call__(self, trainer):
"""Decides whether the extension should be called on this iteration.
Args:
trainer (~chainer.training.Trainer): Trainer object that this
trigger is associated with. The ``observation`` of this trainer
is used to determine if the trigger should fire.
Returns:
bool: ``True`` if the corresponding extension should be invoked in
this iteration.
"""
observation = trainer.observation
key = self._key
if key not in observation.keys():
return False
value = float(observation[key]) # copy to CPU
if self._best_value is None or self._compare(self._best_value, value):
self._best_value = value
return True
return False
class MaxValueTrigger(BestValueTrigger):
"""Trigger invoked when specific value becomes maximum.
This will run every time key value is observed.
Args:
key (str): Key of value. The trigger fires when the value associated
with this key becomes maximum.
"""
def __init__(self, key):
super(MaxValueTrigger, self).__init__(
key, lambda max_value, new_value: new_value > max_value)
class MinValueTrigger(BestValueTrigger):
"""Trigger invoked when specific value becomes minimum.
This will run every time key value is observed.
Args:
key (str): Key of value. The trigger fires when the value associated
with this key becomes minimum.
"""
def __init__(self, key):
super(MinValueTrigger, self).__init__(
key, lambda min_value, new_value: new_value < min_value) | """
This file is adopted from Chainer official implementation with small
modifications.
https://github.com/chainer/chainer/blob/v4.2.0/chainer/training/triggers/minmax_value_trigger.py
"""
class Bestvaluetrigger(object):
"""Trigger invoked when specific value becomes best. This will run every
time key value is observed.
Args:
key (str): Key of value.
compare (callable): Compare function which takes current best value and
new value and returns whether new value is better than current
best.
"""
def __init__(self, key, compare):
self._key = key
self._best_value = None
self._compare = compare
def __call__(self, trainer):
"""Decides whether the extension should be called on this iteration.
Args:
trainer (~chainer.training.Trainer): Trainer object that this
trigger is associated with. The ``observation`` of this trainer
is used to determine if the trigger should fire.
Returns:
bool: ``True`` if the corresponding extension should be invoked in
this iteration.
"""
observation = trainer.observation
key = self._key
if key not in observation.keys():
return False
value = float(observation[key])
if self._best_value is None or self._compare(self._best_value, value):
self._best_value = value
return True
return False
class Maxvaluetrigger(BestValueTrigger):
"""Trigger invoked when specific value becomes maximum.
This will run every time key value is observed.
Args:
key (str): Key of value. The trigger fires when the value associated
with this key becomes maximum.
"""
def __init__(self, key):
super(MaxValueTrigger, self).__init__(key, lambda max_value, new_value: new_value > max_value)
class Minvaluetrigger(BestValueTrigger):
"""Trigger invoked when specific value becomes minimum.
This will run every time key value is observed.
Args:
key (str): Key of value. The trigger fires when the value associated
with this key becomes minimum.
"""
def __init__(self, key):
super(MinValueTrigger, self).__init__(key, lambda min_value, new_value: new_value < min_value) |
class VerificationProfile:
"""This class encapsulates a user profile."""
_PROFILE_ID = 'verificationProfileId'
_LOCALE = 'locale'
_ENROLLMENTS_COUNT = 'enrollmentsCount'
_REMAINING_ENROLLMENTS_COUNT = 'remainingEnrollmentsCount'
_CREATED_DATE_TIME = 'createdDateTime'
_LAST_ACTION_DATE_TIME = 'lastActionDateTime'
_ENROLLMENT_STATUS = 'enrollmentStatus'
def __init__(self, response):
"""Constructor of the VerificationProfile class.
Arguments:
response -- the dictionary of the deserialized python response
"""
self._profile_id = response.get(self._PROFILE_ID, None)
self._locale = response.get(self._LOCALE, None)
self._enrollments_count = response.get(self._ENROLLMENTS_COUNT, None)
self._remaining_enrollments_count = response.get(self._REMAINING_ENROLLMENTS_COUNT, None)
self._created_date_time = response.get(self._CREATED_DATE_TIME, None)
self._last_action_date_time = response.get(self._LAST_ACTION_DATE_TIME, None)
self._enrollment_status = response.get(self._ENROLLMENT_STATUS, None)
def get_profile_id(self):
"""Returns the profile ID of the user"""
return self._profile_id
def get_locale(self):
"""Returns the locale of the user"""
return self._locale
def get_enrollments_count(self):
"""Returns the total number of speech samples submitted for enrollment for this user"""
return self._enrollments_count
def get_remaining_enrollments_count(self):
"""Returns the number of speech samples required remaining to complete enrollment"""
return self._remaining_enrollments_count
def get_created_date_time(self):
"""Returns the creation date time of the user"""
return self._created_date_time
def get_last_action_date_time(self):
"""Returns the last action date time of the user"""
return self._last_action_date_time
def get_enrollment_status(self):
"""Returns the enrollment status of the user"""
return self._enrollment_status
| class Verificationprofile:
"""This class encapsulates a user profile."""
_profile_id = 'verificationProfileId'
_locale = 'locale'
_enrollments_count = 'enrollmentsCount'
_remaining_enrollments_count = 'remainingEnrollmentsCount'
_created_date_time = 'createdDateTime'
_last_action_date_time = 'lastActionDateTime'
_enrollment_status = 'enrollmentStatus'
def __init__(self, response):
"""Constructor of the VerificationProfile class.
Arguments:
response -- the dictionary of the deserialized python response
"""
self._profile_id = response.get(self._PROFILE_ID, None)
self._locale = response.get(self._LOCALE, None)
self._enrollments_count = response.get(self._ENROLLMENTS_COUNT, None)
self._remaining_enrollments_count = response.get(self._REMAINING_ENROLLMENTS_COUNT, None)
self._created_date_time = response.get(self._CREATED_DATE_TIME, None)
self._last_action_date_time = response.get(self._LAST_ACTION_DATE_TIME, None)
self._enrollment_status = response.get(self._ENROLLMENT_STATUS, None)
def get_profile_id(self):
"""Returns the profile ID of the user"""
return self._profile_id
def get_locale(self):
"""Returns the locale of the user"""
return self._locale
def get_enrollments_count(self):
"""Returns the total number of speech samples submitted for enrollment for this user"""
return self._enrollments_count
def get_remaining_enrollments_count(self):
"""Returns the number of speech samples required remaining to complete enrollment"""
return self._remaining_enrollments_count
def get_created_date_time(self):
"""Returns the creation date time of the user"""
return self._created_date_time
def get_last_action_date_time(self):
"""Returns the last action date time of the user"""
return self._last_action_date_time
def get_enrollment_status(self):
"""Returns the enrollment status of the user"""
return self._enrollment_status |
def assert_event_handler(expected_event, mocked_handler):
assert mocked_handler.call_count == 1
actual_event = mocked_handler.call_args[0][0]
assert actual_event == expected_event
| def assert_event_handler(expected_event, mocked_handler):
assert mocked_handler.call_count == 1
actual_event = mocked_handler.call_args[0][0]
assert actual_event == expected_event |
class RoleDisabledException(Exception):
def __init__(self):
self.name = "Your role is disable."
| class Roledisabledexception(Exception):
def __init__(self):
self.name = 'Your role is disable.' |
def run_pg_GB(
n_iter,
min_timesteps_per_batch,
max_path_length,
animate,
logdir,
nn_baseline,
seed,
n_layers,
output_activation,
size,
save_models,
save_best_model,
run_model_only,
script_optimizing_dir,
relative_positions,
death_penalty,
reward_circle,
num_enemies):
start = time.time()
if script_optimizing_dir is not None:
logdir = logdir[:5]+script_optimizing_dir+'/'+logdir[5:]
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
env = GB_game(num_char = num_enemies, reward_circle = reward_circle, death_penalty = death_penalty, relative_positions = relative_positions)
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
# Is this env continuous, or self.discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'output_activation': output_activation,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
'baseline_lr' : baseline_lr,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_return_args = {
'gamma': gamma,
'reward_to_go': reward_to_go,
'nn_baseline': nn_baseline,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
# Now we'll try to load...
if run_model_only is not None:
agent.load_models_action(run_model_only)
agent.running_only = True
#========================================================================================#
# Training Loop
#========================================================================================#
best_avg_return = -(5e10)
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
if run_model_only is not None:
continue
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = [path["reward"] for path in paths]
q_n, adv_n = agent.estimate_return(ob_no, re_n)
agent.update_parameters(ob_no, ac_na, q_n, adv_n)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
mean_return = np.mean(returns)
if mean_return > best_avg_return:
best_avg_return = mean_return
if save_best_model==True:
save_string = logdir[5:-2]
agent.save_models_action(save_string)
logz.log_tabular("AverageReturn", mean_return)
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
# My own
if hasattr(agent,'batch_baseline_loss'):
logz.log_tabular("BaselineLoss", agent.batch_baseline_loss)
logz.log_tabular("UnscaledLoss", agent.batch_unscaled_loss)
logz.log_tabular("Loss", agent.batch_loss)
logz.dump_tabular()
logz.pickle_tf_vars()
# if script_optimizing == True:
# print(np.max(returns))
if save_models == True and save_best_model==False:
save_string = logdir[5:-2]
agent.save_models_action(save_string) | def run_pg_gb(n_iter, min_timesteps_per_batch, max_path_length, animate, logdir, nn_baseline, seed, n_layers, output_activation, size, save_models, save_best_model, run_model_only, script_optimizing_dir, relative_positions, death_penalty, reward_circle, num_enemies):
start = time.time()
if script_optimizing_dir is not None:
logdir = logdir[:5] + script_optimizing_dir + '/' + logdir[5:]
setup_logger(logdir, locals())
env = gb_game(num_char=num_enemies, reward_circle=reward_circle, death_penalty=death_penalty, relative_positions=relative_positions)
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
max_path_length = max_path_length or env.spec.max_episode_steps
discrete = isinstance(env.action_space, gym.spaces.Discrete)
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
computation_graph_args = {'n_layers': n_layers, 'output_activation': output_activation, 'ob_dim': ob_dim, 'ac_dim': ac_dim, 'discrete': discrete, 'size': size, 'learning_rate': learning_rate, 'baseline_lr': baseline_lr}
sample_trajectory_args = {'animate': animate, 'max_path_length': max_path_length, 'min_timesteps_per_batch': min_timesteps_per_batch}
estimate_return_args = {'gamma': gamma, 'reward_to_go': reward_to_go, 'nn_baseline': nn_baseline, 'normalize_advantages': normalize_advantages}
agent = agent(computation_graph_args, sample_trajectory_args, estimate_return_args)
agent.build_computation_graph()
agent.init_tf_sess()
if run_model_only is not None:
agent.load_models_action(run_model_only)
agent.running_only = True
best_avg_return = -50000000000.0
total_timesteps = 0
for itr in range(n_iter):
print('********** Iteration %i ************' % itr)
(paths, timesteps_this_batch) = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
if run_model_only is not None:
continue
ob_no = np.concatenate([path['observation'] for path in paths])
ac_na = np.concatenate([path['action'] for path in paths])
re_n = [path['reward'] for path in paths]
(q_n, adv_n) = agent.estimate_return(ob_no, re_n)
agent.update_parameters(ob_no, ac_na, q_n, adv_n)
returns = [path['reward'].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular('Time', time.time() - start)
logz.log_tabular('Iteration', itr)
mean_return = np.mean(returns)
if mean_return > best_avg_return:
best_avg_return = mean_return
if save_best_model == True:
save_string = logdir[5:-2]
agent.save_models_action(save_string)
logz.log_tabular('AverageReturn', mean_return)
logz.log_tabular('StdReturn', np.std(returns))
logz.log_tabular('MaxReturn', np.max(returns))
logz.log_tabular('MinReturn', np.min(returns))
logz.log_tabular('EpLenMean', np.mean(ep_lengths))
logz.log_tabular('EpLenStd', np.std(ep_lengths))
logz.log_tabular('TimestepsThisBatch', timesteps_this_batch)
logz.log_tabular('TimestepsSoFar', total_timesteps)
if hasattr(agent, 'batch_baseline_loss'):
logz.log_tabular('BaselineLoss', agent.batch_baseline_loss)
logz.log_tabular('UnscaledLoss', agent.batch_unscaled_loss)
logz.log_tabular('Loss', agent.batch_loss)
logz.dump_tabular()
logz.pickle_tf_vars()
if save_models == True and save_best_model == False:
save_string = logdir[5:-2]
agent.save_models_action(save_string) |
class Node:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
class BinarySearchTree:
def __init__(self):
pass
def insert(self):
pass
def level_order_traversal(self):
pass
def delete(self):
pass
| class Node:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
class Binarysearchtree:
def __init__(self):
pass
def insert(self):
pass
def level_order_traversal(self):
pass
def delete(self):
pass |
'''
Assignment 1
'''
#Observing the output of the following commands
emp_number = 1233
print("Employee Number",emp_number)
emp_salary = 16745.50
emp_name = "Jerry Squaris"
print("Employee Salary and Name:",emp_salary, emp_name)
emp_salary = 23450.34
print("Updated Employee Salary:",emp_salary)
| """
Assignment 1
"""
emp_number = 1233
print('Employee Number', emp_number)
emp_salary = 16745.5
emp_name = 'Jerry Squaris'
print('Employee Salary and Name:', emp_salary, emp_name)
emp_salary = 23450.34
print('Updated Employee Salary:', emp_salary) |
MIN_DRIVING_AGE = 18
def allowed_driving(name, age):
"""Print '{name} is allowed to drive' or '{name} is not allowed to drive'
checking the passed in age against the MIN_DRIVING_AGE constant"""
if age >= MIN_DRIVING_AGE:
print(f'{name} is allowed to drive')
else:
print(f'{name} is not allowed to drive')
pass | min_driving_age = 18
def allowed_driving(name, age):
"""Print '{name} is allowed to drive' or '{name} is not allowed to drive'
checking the passed in age against the MIN_DRIVING_AGE constant"""
if age >= MIN_DRIVING_AGE:
print(f'{name} is allowed to drive')
else:
print(f'{name} is not allowed to drive')
pass |
"""A Task is a unit of organization in Busy"""
def create_task():
pass
class Task:
"""
A Task keeps track of a thing that has to get done. "Things" are very
complex, Busy calls them tasks. Some Tasks cannot be started until other
Tasks are completed, that task is "waiting on" those other tasks, some
would call it "blocked". Some Tasks cannot be completed until other tasks
are completed, those are children tasks. They come up when you put
something on your todo list and realize there are several steps to getting
that "one" task done, and you need/want to track that.
"""
def __init__(self, id):
self._id = id
def created(self):
pass
def started(self):
pass
def completed(self):
pass
def name(self):
pass
def description(self):
pass
def tags(self):
pass
def expected_time(self):
pass
def remaining_time(self):
pass
def story_points(self):
pass
def is_complete(self):
pass
# Tasks related to this Task
def waiting_on(self):
pass
def blocking(self):
pass
def parents(self):
pass
def children(self):
pass
# Work done for this task
def timers(self):
"""Timers directly related to this task"""
pass
def all_timers(self):
"""Include timers on children"""
pass
| """A Task is a unit of organization in Busy"""
def create_task():
pass
class Task:
"""
A Task keeps track of a thing that has to get done. "Things" are very
complex, Busy calls them tasks. Some Tasks cannot be started until other
Tasks are completed, that task is "waiting on" those other tasks, some
would call it "blocked". Some Tasks cannot be completed until other tasks
are completed, those are children tasks. They come up when you put
something on your todo list and realize there are several steps to getting
that "one" task done, and you need/want to track that.
"""
def __init__(self, id):
self._id = id
def created(self):
pass
def started(self):
pass
def completed(self):
pass
def name(self):
pass
def description(self):
pass
def tags(self):
pass
def expected_time(self):
pass
def remaining_time(self):
pass
def story_points(self):
pass
def is_complete(self):
pass
def waiting_on(self):
pass
def blocking(self):
pass
def parents(self):
pass
def children(self):
pass
def timers(self):
"""Timers directly related to this task"""
pass
def all_timers(self):
"""Include timers on children"""
pass |
"""4-9. Cube Comprehension: Use a list comprehension to generate a list of the
first 10 cubes."""
cubes = [num**3 for num in range(1,11)]
for cube in cubes:
print(cube) | """4-9. Cube Comprehension: Use a list comprehension to generate a list of the
first 10 cubes."""
cubes = [num ** 3 for num in range(1, 11)]
for cube in cubes:
print(cube) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Imports, Definitionen von Klassen, Funktionen und Variablen ab hier.
"""
text = "Hallo Welt!"
# Hauptschleife, wenn direkt aufgerufen, ab hier
if __name__ == '__main__':
print(text)
| """
Imports, Definitionen von Klassen, Funktionen und Variablen ab hier.
"""
text = 'Hallo Welt!'
if __name__ == '__main__':
print(text) |
"""
http://community.topcoder.com/stat?c=problem_statement&pm=1675
Single Round Match 145 Round 1 - Division II, Level Two
"""
class ExerciseMachine:
def getPercentages(self, times):
h, m, s = map(int, times.split(':'))
seconds = h * 3600 + m * 60 + s
for i in [100, 50, 25, 20, 10, 5, 4, 2, 1]:
if seconds % i == 0:
return i - 1
| """
http://community.topcoder.com/stat?c=problem_statement&pm=1675
Single Round Match 145 Round 1 - Division II, Level Two
"""
class Exercisemachine:
def get_percentages(self, times):
(h, m, s) = map(int, times.split(':'))
seconds = h * 3600 + m * 60 + s
for i in [100, 50, 25, 20, 10, 5, 4, 2, 1]:
if seconds % i == 0:
return i - 1 |
with open("output.txt") as output:
data = output.read()
produced = {}
is_producing = set()
consumed = {}
readies = 0
for line in data.splitlines():
if ("JMSBasedValueInput" in line and "produced" in line) or "to low" in line:
for robot in (part.split("]")[0][6:] for part in line.split("[")[2:]):
if robot in produced:
produced[robot] += 1
else:
produced[robot] = 1
if "JMSBasedRobot" in line and "Received value" in line:
robot = line.split(":")[3].strip()
if robot in consumed:
consumed[robot] += 1
else:
consumed[robot] = 1
if "JMSBasedRobot" in line and "to low" in line:
robot = line.split(":")[3].strip()
is_producing.add(robot)
if "JMSBasedRobot" in line and "Done" in line:
robot = line.split(":")[3].strip()
is_producing.remove(robot)
if "Ready to" in line:
readies += 1
print(produced)
print(consumed)
print([(k,v) for k,v in produced.items() if v < 2])
print([(k,v) for k,v in consumed.items() if v < 2])
print(is_producing)
print(readies) | with open('output.txt') as output:
data = output.read()
produced = {}
is_producing = set()
consumed = {}
readies = 0
for line in data.splitlines():
if 'JMSBasedValueInput' in line and 'produced' in line or 'to low' in line:
for robot in (part.split(']')[0][6:] for part in line.split('[')[2:]):
if robot in produced:
produced[robot] += 1
else:
produced[robot] = 1
if 'JMSBasedRobot' in line and 'Received value' in line:
robot = line.split(':')[3].strip()
if robot in consumed:
consumed[robot] += 1
else:
consumed[robot] = 1
if 'JMSBasedRobot' in line and 'to low' in line:
robot = line.split(':')[3].strip()
is_producing.add(robot)
if 'JMSBasedRobot' in line and 'Done' in line:
robot = line.split(':')[3].strip()
is_producing.remove(robot)
if 'Ready to' in line:
readies += 1
print(produced)
print(consumed)
print([(k, v) for (k, v) in produced.items() if v < 2])
print([(k, v) for (k, v) in consumed.items() if v < 2])
print(is_producing)
print(readies) |
H, W, Y, X = map(int, input().split())
X -= 1
Y -= 1
field = []
for i in range(H):
field.append(input())
dx = [1, -1, 0, 0]
dy = [0, 0, 1, -1]
ans = 1
for i in range(4):
nx = X + dx[i]
ny = Y + dy[i]
while 0 <= nx < W and 0 <= ny < H and field[ny][nx] == '.':
nx += dx[i]
ny += dy[i]
ans += 1
print(ans) | (h, w, y, x) = map(int, input().split())
x -= 1
y -= 1
field = []
for i in range(H):
field.append(input())
dx = [1, -1, 0, 0]
dy = [0, 0, 1, -1]
ans = 1
for i in range(4):
nx = X + dx[i]
ny = Y + dy[i]
while 0 <= nx < W and 0 <= ny < H and (field[ny][nx] == '.'):
nx += dx[i]
ny += dy[i]
ans += 1
print(ans) |
#!/usr/bin/env python
def func1():
print("Hello World")
class Bogus:
my_var1 = ""
my_var2 = ""
my_var3 = ""
def hello(self):
print("Hello " + self.my_var1 + ", " + self.my_var2 + " and " + self.my_var3)
def not_hello(self):
print("Bye " + self.my_var1 + ", " + self.my_var2 + " and " + self.my_var3)
def __init__(self, var1, var2, var3):
self.my_var1 = var1
self.my_var2 = var2
self.my_var3 = var3
class BogusNew(Bogus):
def hello(self):
print("Welcome " + self.my_var1 + ", " + self.my_var2 + " and " + self.my_var3)
def __init__(self, var1, var2, var3):
print("Doing something more here...")
Bogus.__init__(self, var1, var2, var3)
if __name__ == "__main__":
print("I'm the module 'world'")
| def func1():
print('Hello World')
class Bogus:
my_var1 = ''
my_var2 = ''
my_var3 = ''
def hello(self):
print('Hello ' + self.my_var1 + ', ' + self.my_var2 + ' and ' + self.my_var3)
def not_hello(self):
print('Bye ' + self.my_var1 + ', ' + self.my_var2 + ' and ' + self.my_var3)
def __init__(self, var1, var2, var3):
self.my_var1 = var1
self.my_var2 = var2
self.my_var3 = var3
class Bogusnew(Bogus):
def hello(self):
print('Welcome ' + self.my_var1 + ', ' + self.my_var2 + ' and ' + self.my_var3)
def __init__(self, var1, var2, var3):
print('Doing something more here...')
Bogus.__init__(self, var1, var2, var3)
if __name__ == '__main__':
print("I'm the module 'world'") |
def foo():
return bbb
aaa = foo() and ccc | def foo():
return bbb
aaa = foo() and ccc |
#Print 1 to 100 ussing a loop
num = 0
while (num < 100):
num += 1
print(num)
| num = 0
while num < 100:
num += 1
print(num) |
def generate_table():
alphabet = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
tabel = [[0] * 5 for row in range(5)]
pos = 0
for x in range(5):
for y in range(5):
tabel[x][y] = alphabet[pos]
pos += 1
return tabel
def getStr(x, format='%02s'):
return ''.join(format % i for i in x)
def print_table(table):
print(' ' + getStr(range(1, 6)))
for row in range(0, len(table)):
print(str(row + 1) + getStr(table[row]))
def encrypt(table, words):
string = table
cipher = ''
for ch in words.upper():
if ch == "J": ch = "I"
for row in range(len(table)):
if ch in table[row]:
x = str((table[row].index(ch) + 1))
y = str(row + 1)
cipher += y + x
return cipher
def decrypt(table, numbers):
text = ''
for index in range(0, len(numbers), 2):
y = int(numbers[index]) - 1
x = int(numbers[index + 1]) - 1
if table[y][x] == "I":
table[y][x] = "(I/J)"
text += table[y][x]
return text
if __name__ == '__main__':
table = generate_table()
print_table(table)
cyp = input("Masukkan Plain Text: ")
ciphertext = encrypt(table, cyp)
print(ciphertext)
print(decrypt(table, ciphertext))
| def generate_table():
alphabet = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
tabel = [[0] * 5 for row in range(5)]
pos = 0
for x in range(5):
for y in range(5):
tabel[x][y] = alphabet[pos]
pos += 1
return tabel
def get_str(x, format='%02s'):
return ''.join((format % i for i in x))
def print_table(table):
print(' ' + get_str(range(1, 6)))
for row in range(0, len(table)):
print(str(row + 1) + get_str(table[row]))
def encrypt(table, words):
string = table
cipher = ''
for ch in words.upper():
if ch == 'J':
ch = 'I'
for row in range(len(table)):
if ch in table[row]:
x = str(table[row].index(ch) + 1)
y = str(row + 1)
cipher += y + x
return cipher
def decrypt(table, numbers):
text = ''
for index in range(0, len(numbers), 2):
y = int(numbers[index]) - 1
x = int(numbers[index + 1]) - 1
if table[y][x] == 'I':
table[y][x] = '(I/J)'
text += table[y][x]
return text
if __name__ == '__main__':
table = generate_table()
print_table(table)
cyp = input('Masukkan Plain Text: ')
ciphertext = encrypt(table, cyp)
print(ciphertext)
print(decrypt(table, ciphertext)) |
async def calc_cmd(bot, discord, message, botconfig, os, platform, datetime, one_result, localization, numexpr, prefix, embed_color):
args = message.content.split();
err = ""
no_args = discord.Embed(title=localization[1][9][0], description=str(localization[1][9][4]).format(prefix), color=botconfig['accent1'])
no_args.add_field(name=localization[1][9][6], value=localization[1][9][7], inline=False)
if " ".join(args[1:]) == "" or " ".join(args[1:]) == " " or " ".join(args[1:]) == None:
return await message.channel.send(embed=no_args)
calc_content = discord.Embed(title=localization[1][9][0], color=embed_color)
calc_content.add_field(name=localization[1][9][1], value="```py\n" + " ".join(args[1:]) + "```", inline=False)
try:
result = str(numexpr.evaluate(" ".join(args[1:])))
except Exception as e:
if str(e) == 'division by zero':
result = localization[1][9][8]
elif str(e) == "Python int too large to convert to C long":
result = localization[1][9][9]
elif str(e).startswith("'VariableNode' object has no attribute"):
result = localization[1][9][10]
else:
result = localization[1][9][3] + str(e)
finally:
calc_content.add_field(name=localization[1][9][2], value="```" + result + "```", inline=False)
calc_content.add_field(name=localization[1][9][6], value=localization[1][9][7], inline=False)
await message.channel.send(embed=calc_content)
| async def calc_cmd(bot, discord, message, botconfig, os, platform, datetime, one_result, localization, numexpr, prefix, embed_color):
args = message.content.split()
err = ''
no_args = discord.Embed(title=localization[1][9][0], description=str(localization[1][9][4]).format(prefix), color=botconfig['accent1'])
no_args.add_field(name=localization[1][9][6], value=localization[1][9][7], inline=False)
if ' '.join(args[1:]) == '' or ' '.join(args[1:]) == ' ' or ' '.join(args[1:]) == None:
return await message.channel.send(embed=no_args)
calc_content = discord.Embed(title=localization[1][9][0], color=embed_color)
calc_content.add_field(name=localization[1][9][1], value='```py\n' + ' '.join(args[1:]) + '```', inline=False)
try:
result = str(numexpr.evaluate(' '.join(args[1:])))
except Exception as e:
if str(e) == 'division by zero':
result = localization[1][9][8]
elif str(e) == 'Python int too large to convert to C long':
result = localization[1][9][9]
elif str(e).startswith("'VariableNode' object has no attribute"):
result = localization[1][9][10]
else:
result = localization[1][9][3] + str(e)
finally:
calc_content.add_field(name=localization[1][9][2], value='```' + result + '```', inline=False)
calc_content.add_field(name=localization[1][9][6], value=localization[1][9][7], inline=False)
await message.channel.send(embed=calc_content) |
class CancellationException(Exception):
"""Raised when command was cancelled from the CloudShell"""
def __init__(self, message, data):
"""
:param str message:
:param dict data:
:return:
"""
# Call the base class constructor with the parameters it needs
super(CancellationException, self).__init__(message)
self.data = data if data else {}
class CommandCancellationService(object):
def check_if_cancelled(self, cancellation_context, data=None):
"""Check if command was cancelled from the CloudShell
:param cancellation_context cloudshell.shell.core.driver_context.CancellationContext instance
:param dict data: Dictionary that will be added to the cancellation exception if raised.
Use this container to add context data to the cancellation exception to be used
by the exception handler
:raises cloudshell.cp.azure.common.exceptions.cancellation_exception.CancellationException
:return:
"""
if cancellation_context and cancellation_context.is_cancelled:
return True
| class Cancellationexception(Exception):
"""Raised when command was cancelled from the CloudShell"""
def __init__(self, message, data):
"""
:param str message:
:param dict data:
:return:
"""
super(CancellationException, self).__init__(message)
self.data = data if data else {}
class Commandcancellationservice(object):
def check_if_cancelled(self, cancellation_context, data=None):
"""Check if command was cancelled from the CloudShell
:param cancellation_context cloudshell.shell.core.driver_context.CancellationContext instance
:param dict data: Dictionary that will be added to the cancellation exception if raised.
Use this container to add context data to the cancellation exception to be used
by the exception handler
:raises cloudshell.cp.azure.common.exceptions.cancellation_exception.CancellationException
:return:
"""
if cancellation_context and cancellation_context.is_cancelled:
return True |
def f():
(some_global): int
print(some_global)
| def f():
(some_global): int
print(some_global) |
#!/usr/bin/env python3
n = int(input())
power = 7
i = 0
while i < n:
print(power)
power = power + 7
i = i + 1
| n = int(input())
power = 7
i = 0
while i < n:
print(power)
power = power + 7
i = i + 1 |
class Solution:
def racecar(self, target):
"""
:type target: int
:rtype: int
"""
q, cnt, used = [(0, 1)], 0, {(0, 1)}
while q:
new = []
for pos, speed in q:
if pos == target:
return cnt
elif pos > 20000 or -20000 > pos:
continue
if (pos + speed, speed * 2) not in used:
new.append((pos + speed, speed * 2))
used.add((pos + speed, speed * 2))
if speed > 0 and (pos, -1) not in used:
new.append((pos, -1))
used.add((pos, -1))
elif speed < 0 and (pos, 1) not in used:
new.append((pos, 1))
used.add((pos, 1))
q = new
cnt += 1
return cnt
| class Solution:
def racecar(self, target):
"""
:type target: int
:rtype: int
"""
(q, cnt, used) = ([(0, 1)], 0, {(0, 1)})
while q:
new = []
for (pos, speed) in q:
if pos == target:
return cnt
elif pos > 20000 or -20000 > pos:
continue
if (pos + speed, speed * 2) not in used:
new.append((pos + speed, speed * 2))
used.add((pos + speed, speed * 2))
if speed > 0 and (pos, -1) not in used:
new.append((pos, -1))
used.add((pos, -1))
elif speed < 0 and (pos, 1) not in used:
new.append((pos, 1))
used.add((pos, 1))
q = new
cnt += 1
return cnt |
"""
Numerically Solve the model to estimate parameters
"""
class CoronaVIRES_2(object):
"""
SERV model # 2 (_Known_Vaccination ):
Discrete approximations to Differential Equations which donot use past states of variables
Vaccination taken from data
"""
def __init__(self,N, new_vaccinations_first_dose, new_vaccinations_second_dose):
raise NotImplementedError()
self.N = N
self.new_vaccinations_first_dose = new_vaccinations_first_dose # Array
self.new_vaccinations_second_dose = new_vaccinations_second_dose
self.S = []
self.V1, self.V2 = [],[]
self.Es, self.E1, self.E2 = [],[],[]
self.I1, self.I2, self.Is = [],[],[]
self.R1, self.Rs = [],[]
self.I, self.E = [], []
self.D = []
def run_predict(self, T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0):
"""
Predict till t time steps
new_vaccinations
"""
N = self.N
# Initial conditions
self.S = [S0]
self.V1, self.V2 = [0],[0]
self.Es, self.E1, self.E2 = [Es0],[0],[0]
self.I1, self.I2, self.Is = [0],[0],[Is0]
self.R1, self.Rs = [0],[0]
self.I, self.E = [Is0+0+0], [Es0+0+0]
self.D = [0] # Translate to 0
#loop using the DEs
for t in range(T+1):
S = self.S[-1]
V1, V2 = self.V1[-1], self.V2[-1]
Es, E1, E2 = self.Es[-1], self.E1[-1], self.E2[-1]
I1, I2, Is = self.I1[-1], self.I2[-1], self.Is[-1]
R1, Rs = self.R1[-1], self.Rs[-1]
I, E = self.I[-1], self.E[-1]
D = self.D[-1]
dS = alpha*Rs - S*I*beta/N - S*chi*E/N - rho*S
dV1 = rho*S + rho*Rs - V1*beta*I/N - V1*chi*E/N - phi*V1
dV2 = phi*V1 + phi2*R1 + (1-del2)*I2 - V2*beta*I/N - V2*chi*E/N
dEs = S*I*beta/N + S*chi*E/N - theta*Es
dE1 = V1*beta*I/N + V1*chi*E/N - theta*E1
dE2 = V2*beta*I/N + V2*chi*E/N - theta*E2
dI1 = theta*E1 - I1*del1 - (1-del1)*I1
dI2 = theta*E2 - I2*del2 - (1-del2)*I2
dIs = theta * Es - (1-dels)*Is - Is*dels
dD = del1*I1+del2*I2+dels*Is
dR1 = (1-del1)*I1 - phi2*R1
dRs = (1-dels)*Is - rho*Rs - alpha*Rs
dE = dE1 + dE2 + dEs
dI = dI1 + dI2 + dIs
self.S.append(S+dS)
self.V1.append(V1+dV1)
self.V2.append(V2+dV2)
self.Es.append(Es+dEs)
self.E1.append(E1+dE1)
self.E2.append(E2+dE2)
self.I1.append(I1+dI1)
self.I2.append(I2+dI2)
self.Is.append(Is+dIs)
self.D.append(D+dD)
self.R1.append(R1+dR1)
self.Rs.append(Rs+dRs)
self.E.append(E+dE)
self.I.append(I+dI)
def predict_Deaths(self, T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0):
self.run_predict(T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0)
return self.D[T]
def predict_Deaths_for_T_days(self, T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0):
self.run_predict(T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0)
return self.D
def predict_Positive(self, T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0):
self.run_predict(T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0)
return self.I[T]
#TODO: I+E or I
def predict_new_deaths(self, T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0):
self.predict_Deaths(T+1, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0)
new_deaths = [0 for _ in range(len(self.D)-1)]
for i in range(1,len(self.D)):
new_deaths[i-1] = self.D[i]-self.D[i-1]
self.new_deaths = new_deaths
return new_deaths[T]
def fit_model(self, Deaths_observed, Infected_Observed, plot=False, plot_title="CoronaVIRES1", weights=None):
pass | """
Numerically Solve the model to estimate parameters
"""
class Coronavires_2(object):
"""
SERV model # 2 (_Known_Vaccination ):
Discrete approximations to Differential Equations which donot use past states of variables
Vaccination taken from data
"""
def __init__(self, N, new_vaccinations_first_dose, new_vaccinations_second_dose):
raise not_implemented_error()
self.N = N
self.new_vaccinations_first_dose = new_vaccinations_first_dose
self.new_vaccinations_second_dose = new_vaccinations_second_dose
self.S = []
(self.V1, self.V2) = ([], [])
(self.Es, self.E1, self.E2) = ([], [], [])
(self.I1, self.I2, self.Is) = ([], [], [])
(self.R1, self.Rs) = ([], [])
(self.I, self.E) = ([], [])
self.D = []
def run_predict(self, T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0):
"""
Predict till t time steps
new_vaccinations
"""
n = self.N
self.S = [S0]
(self.V1, self.V2) = ([0], [0])
(self.Es, self.E1, self.E2) = ([Es0], [0], [0])
(self.I1, self.I2, self.Is) = ([0], [0], [Is0])
(self.R1, self.Rs) = ([0], [0])
(self.I, self.E) = ([Is0 + 0 + 0], [Es0 + 0 + 0])
self.D = [0]
for t in range(T + 1):
s = self.S[-1]
(v1, v2) = (self.V1[-1], self.V2[-1])
(es, e1, e2) = (self.Es[-1], self.E1[-1], self.E2[-1])
(i1, i2, is) = (self.I1[-1], self.I2[-1], self.Is[-1])
(r1, rs) = (self.R1[-1], self.Rs[-1])
(i, e) = (self.I[-1], self.E[-1])
d = self.D[-1]
d_s = alpha * Rs - S * I * beta / N - S * chi * E / N - rho * S
d_v1 = rho * S + rho * Rs - V1 * beta * I / N - V1 * chi * E / N - phi * V1
d_v2 = phi * V1 + phi2 * R1 + (1 - del2) * I2 - V2 * beta * I / N - V2 * chi * E / N
d_es = S * I * beta / N + S * chi * E / N - theta * Es
d_e1 = V1 * beta * I / N + V1 * chi * E / N - theta * E1
d_e2 = V2 * beta * I / N + V2 * chi * E / N - theta * E2
d_i1 = theta * E1 - I1 * del1 - (1 - del1) * I1
d_i2 = theta * E2 - I2 * del2 - (1 - del2) * I2
d_is = theta * Es - (1 - dels) * Is - Is * dels
d_d = del1 * I1 + del2 * I2 + dels * Is
d_r1 = (1 - del1) * I1 - phi2 * R1
d_rs = (1 - dels) * Is - rho * Rs - alpha * Rs
d_e = dE1 + dE2 + dEs
d_i = dI1 + dI2 + dIs
self.S.append(S + dS)
self.V1.append(V1 + dV1)
self.V2.append(V2 + dV2)
self.Es.append(Es + dEs)
self.E1.append(E1 + dE1)
self.E2.append(E2 + dE2)
self.I1.append(I1 + dI1)
self.I2.append(I2 + dI2)
self.Is.append(Is + dIs)
self.D.append(D + dD)
self.R1.append(R1 + dR1)
self.Rs.append(Rs + dRs)
self.E.append(E + dE)
self.I.append(I + dI)
def predict__deaths(self, T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0):
self.run_predict(T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0)
return self.D[T]
def predict__deaths_for_t_days(self, T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0):
self.run_predict(T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0)
return self.D
def predict__positive(self, T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0):
self.run_predict(T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0)
return self.I[T]
def predict_new_deaths(self, T, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0):
self.predict_Deaths(T + 1, alpha, beta, del1, del2, chi, dels, rho, phi, phi2, theta, S0, Es0, Is0)
new_deaths = [0 for _ in range(len(self.D) - 1)]
for i in range(1, len(self.D)):
new_deaths[i - 1] = self.D[i] - self.D[i - 1]
self.new_deaths = new_deaths
return new_deaths[T]
def fit_model(self, Deaths_observed, Infected_Observed, plot=False, plot_title='CoronaVIRES1', weights=None):
pass |
def Wizard(thoughts, eyes, eye, tongue):
return f"""
{thoughts}
{thoughts}
_____
.\'* *.\'
___/_*_(_
/ _______ \\
_\\_)/___\\(_/_
/ _((\\- -/))_ \\
\\ \\())(-)(()/ /
' \\(((()))/ \'
/ \' \\)).))\\ \' \\
/ _ \\ - | - /_ \\
( ( .;\'\'\';. .\' )
_\\\"__ / )\\ __\"/_
\\/ \\ \' / \\/
.\' \'...\' \' )
/ / | \\ \\
/ . . . \\
/ . . \\
/ / | \\ \\
.\' / b \'. \'.
_.-\' / Bb \'-. \'-_
_.-\' | BBb \'-. \'-.
(________mrf\____.dBBBb._________)____)
""" | def wizard(thoughts, eyes, eye, tongue):
return f"""\n {thoughts}\n {thoughts}\n _____\n .'* *.'\n ___/_*_(_\n / _______ \\\n _\\_)/___\\(_/_\n / _((\\- -/))_ \\\n \\ \\())(-)(()/ /\n ' \\(((()))/ '\n / ' \\)).))\\ ' \\\n / _ \\ - | - /_ \\\n ( ( .;''';. .' )\n _\\"__ / )\\ __"/_\n \\/ \\ ' / \\/\n .' '...' ' )\n / / | \\ \\\n / . . . \\\n / . . \\\n / / | \\ \\\n .' / b '. '.\n _.-' / Bb '-. '-_\n _.-' | BBb '-. '-.\n(________mrf\\____.dBBBb._________)____)\n""" |
def flatten(items):
""" Flatten nested list of any recursion. """
for i in items:
if isinstance(i, list):
for ii in flatten(i):
yield ii
else:
yield i
| def flatten(items):
""" Flatten nested list of any recursion. """
for i in items:
if isinstance(i, list):
for ii in flatten(i):
yield ii
else:
yield i |
ANNOTATION_NAME = "service-meta-Name"
ANNOTATION_DESCRIPTION = "service-meta-Description"
ANNOTATION_DOCS_LINK = "service-meta-DocsLink"
ANNOTATION_ENVIRONMENT = "service-meta-Environment"
ANNOTATION_FRIENDLY_NAME = "service-meta-FriendlyName"
ANNOTATION_ICON_URL = "service-meta-IconURL"
ANNOTATION_MAJOR_VERSION = "service-meta-MajorVersion"
ANNOTATION_MINOR_VERSION = "service-meta-MinorVersion"
ANNOTATION_PATCH_VERSION = "service-meta-PatchVersion"
ANNOTATION_PROJECTS = "service-meta-Projects"
ANNOTATION_SERVICE_TYPE = "service-meta-ServiceType"
ANNOTATION_SOURCE_LINK = "service-meta-SourceLink"
| annotation_name = 'service-meta-Name'
annotation_description = 'service-meta-Description'
annotation_docs_link = 'service-meta-DocsLink'
annotation_environment = 'service-meta-Environment'
annotation_friendly_name = 'service-meta-FriendlyName'
annotation_icon_url = 'service-meta-IconURL'
annotation_major_version = 'service-meta-MajorVersion'
annotation_minor_version = 'service-meta-MinorVersion'
annotation_patch_version = 'service-meta-PatchVersion'
annotation_projects = 'service-meta-Projects'
annotation_service_type = 'service-meta-ServiceType'
annotation_source_link = 'service-meta-SourceLink' |
try:
fname = open("a.txt","r")
fname.write("hello world")
except:
print("Cannot write the contents to the file")
finally:
f.close()
print("File closed")
| try:
fname = open('a.txt', 'r')
fname.write('hello world')
except:
print('Cannot write the contents to the file')
finally:
f.close()
print('File closed') |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 30 18:36:15 2020
@author: daniel
"""
def uromano(n):
return {
1:'I',
2:'II',
3:'III',
4:'IV',
5:'V',
6:'VI',
7:'VII',
8:'VIII',
9:'IX'}.get(n,'no hay valor')
def udromano(n):
return {
1:'X',
2:'XX',
3:'XXX',
4:'XL',
5:'L',
6:'LX',
7:'LXX',
8:'LXXX',
9:'XC',
10:'C'}.get(n,'no hay valor')
n = int (input ('numero a convertir: '))
u = n % 10
d = n // 10
print(udromano(d) ,uromano(u))
| """
Created on Fri Oct 30 18:36:15 2020
@author: daniel
"""
def uromano(n):
return {1: 'I', 2: 'II', 3: 'III', 4: 'IV', 5: 'V', 6: 'VI', 7: 'VII', 8: 'VIII', 9: 'IX'}.get(n, 'no hay valor')
def udromano(n):
return {1: 'X', 2: 'XX', 3: 'XXX', 4: 'XL', 5: 'L', 6: 'LX', 7: 'LXX', 8: 'LXXX', 9: 'XC', 10: 'C'}.get(n, 'no hay valor')
n = int(input('numero a convertir: '))
u = n % 10
d = n // 10
print(udromano(d), uromano(u)) |
#
# PySNMP MIB module CISCO-SWITCH-RATE-LIMITER-CAPABILITY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-SWITCH-RATE-LIMITER-CAPABILITY
# Produced by pysmi-0.3.4 at Wed May 1 12:13:38 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint")
ciscoAgentCapability, = mibBuilder.importSymbols("CISCO-SMI", "ciscoAgentCapability")
NotificationGroup, AgentCapabilities, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "AgentCapabilities", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, Integer32, iso, IpAddress, TimeTicks, MibIdentifier, Counter64, ModuleIdentity, NotificationType, ObjectIdentity, Unsigned32, Bits, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "Integer32", "iso", "IpAddress", "TimeTicks", "MibIdentifier", "Counter64", "ModuleIdentity", "NotificationType", "ObjectIdentity", "Unsigned32", "Bits", "Counter32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ciscoSwitchRateLimiterCapability = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 7, 606))
ciscoSwitchRateLimiterCapability.setRevisions(('2011-07-27 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoSwitchRateLimiterCapability.setRevisionsDescriptions(('Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoSwitchRateLimiterCapability.setLastUpdated('201107270000Z')
if mibBuilder.loadTexts: ciscoSwitchRateLimiterCapability.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoSwitchRateLimiterCapability.setContactInfo('Cisco Systems Customer Service Postal: 170 West Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-lan-switch-snmp@cisco.com')
if mibBuilder.loadTexts: ciscoSwitchRateLimiterCapability.setDescription('The capabilities description of CISCO-SWITCH-RATE-LIMITER-MIB.')
ciscoRateLimiterCapNxOSV05R0201PN7k = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 606, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoRateLimiterCapNxOSV05R0201PN7k = ciscoRateLimiterCapNxOSV05R0201PN7k.setProductRelease('Cisco NX-OS 5.2(1) on Nexus 7000\n series devices.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoRateLimiterCapNxOSV05R0201PN7k = ciscoRateLimiterCapNxOSV05R0201PN7k.setStatus('current')
if mibBuilder.loadTexts: ciscoRateLimiterCapNxOSV05R0201PN7k.setDescription('CISCO-SWITCH-RATE-LIMITER-MIB capabilities.')
mibBuilder.exportSymbols("CISCO-SWITCH-RATE-LIMITER-CAPABILITY", PYSNMP_MODULE_ID=ciscoSwitchRateLimiterCapability, ciscoSwitchRateLimiterCapability=ciscoSwitchRateLimiterCapability, ciscoRateLimiterCapNxOSV05R0201PN7k=ciscoRateLimiterCapNxOSV05R0201PN7k)
| (octet_string, object_identifier, integer) = mibBuilder.importSymbols('ASN1', 'OctetString', 'ObjectIdentifier', 'Integer')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(constraints_intersection, single_value_constraint, value_range_constraint, constraints_union, value_size_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ConstraintsIntersection', 'SingleValueConstraint', 'ValueRangeConstraint', 'ConstraintsUnion', 'ValueSizeConstraint')
(cisco_agent_capability,) = mibBuilder.importSymbols('CISCO-SMI', 'ciscoAgentCapability')
(notification_group, agent_capabilities, module_compliance) = mibBuilder.importSymbols('SNMPv2-CONF', 'NotificationGroup', 'AgentCapabilities', 'ModuleCompliance')
(mib_scalar, mib_table, mib_table_row, mib_table_column, gauge32, integer32, iso, ip_address, time_ticks, mib_identifier, counter64, module_identity, notification_type, object_identity, unsigned32, bits, counter32) = mibBuilder.importSymbols('SNMPv2-SMI', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'Gauge32', 'Integer32', 'iso', 'IpAddress', 'TimeTicks', 'MibIdentifier', 'Counter64', 'ModuleIdentity', 'NotificationType', 'ObjectIdentity', 'Unsigned32', 'Bits', 'Counter32')
(textual_convention, display_string) = mibBuilder.importSymbols('SNMPv2-TC', 'TextualConvention', 'DisplayString')
cisco_switch_rate_limiter_capability = module_identity((1, 3, 6, 1, 4, 1, 9, 7, 606))
ciscoSwitchRateLimiterCapability.setRevisions(('2011-07-27 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts:
ciscoSwitchRateLimiterCapability.setRevisionsDescriptions(('Initial version of this MIB module.',))
if mibBuilder.loadTexts:
ciscoSwitchRateLimiterCapability.setLastUpdated('201107270000Z')
if mibBuilder.loadTexts:
ciscoSwitchRateLimiterCapability.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts:
ciscoSwitchRateLimiterCapability.setContactInfo('Cisco Systems Customer Service Postal: 170 West Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-lan-switch-snmp@cisco.com')
if mibBuilder.loadTexts:
ciscoSwitchRateLimiterCapability.setDescription('The capabilities description of CISCO-SWITCH-RATE-LIMITER-MIB.')
cisco_rate_limiter_cap_nx_osv05_r0201_pn7k = agent_capabilities((1, 3, 6, 1, 4, 1, 9, 7, 606, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cisco_rate_limiter_cap_nx_osv05_r0201_pn7k = ciscoRateLimiterCapNxOSV05R0201PN7k.setProductRelease('Cisco NX-OS 5.2(1) on Nexus 7000\n series devices.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cisco_rate_limiter_cap_nx_osv05_r0201_pn7k = ciscoRateLimiterCapNxOSV05R0201PN7k.setStatus('current')
if mibBuilder.loadTexts:
ciscoRateLimiterCapNxOSV05R0201PN7k.setDescription('CISCO-SWITCH-RATE-LIMITER-MIB capabilities.')
mibBuilder.exportSymbols('CISCO-SWITCH-RATE-LIMITER-CAPABILITY', PYSNMP_MODULE_ID=ciscoSwitchRateLimiterCapability, ciscoSwitchRateLimiterCapability=ciscoSwitchRateLimiterCapability, ciscoRateLimiterCapNxOSV05R0201PN7k=ciscoRateLimiterCapNxOSV05R0201PN7k) |
'''
520. Detect Capital
Given a word, you need to judge whether the usage of capitals in it is right or not.
We define the usage of capitals in a word to be right when one of the following cases holds:
All letters in this word are capitals, like "USA".
All letters in this word are not capitals, like "leetcode".
Only the first letter in this word is capital, like "Google".
Otherwise, we define that this word doesn't use capitals in a right way.
Example 1:
Input: "USA"
Output: True
Example 2:
Input: "FlaG"
Output: False
Note: The input will be a non-empty word consisting of uppercase and lowercase latin letters.
'''
def detectCapitalUse(word):
# check if all caps
if word.isupper():
return True
# check if all lower
if word.islower():
return True
# if at least one character in the word
if len(word)>=1:
# if first character is upper
if word[0].isupper():
# check if the rest is lower
for i in range(1, len(word)):
if word[i].isupper():
return False
# if all is lower return true
else:
return True
# if first is not upper return false because all upper and all lower cheked above
else:
return False
print(detectCapitalUse("USA"))
print("USA*"*8)
print(detectCapitalUse("some words"))
print("some words*"*8)
print(detectCapitalUse("Right"))
print("Right*"*8)
print(detectCapitalUse("wrOng"))
print("wrOng*"*8)
print(detectCapitalUse("A"))
print("A*"*8)
print(detectCapitalUse("b"))
print("b*"*8)
print(detectCapitalUse(""))
| """
520. Detect Capital
Given a word, you need to judge whether the usage of capitals in it is right or not.
We define the usage of capitals in a word to be right when one of the following cases holds:
All letters in this word are capitals, like "USA".
All letters in this word are not capitals, like "leetcode".
Only the first letter in this word is capital, like "Google".
Otherwise, we define that this word doesn't use capitals in a right way.
Example 1:
Input: "USA"
Output: True
Example 2:
Input: "FlaG"
Output: False
Note: The input will be a non-empty word consisting of uppercase and lowercase latin letters.
"""
def detect_capital_use(word):
if word.isupper():
return True
if word.islower():
return True
if len(word) >= 1:
if word[0].isupper():
for i in range(1, len(word)):
if word[i].isupper():
return False
else:
return True
else:
return False
print(detect_capital_use('USA'))
print('USA*' * 8)
print(detect_capital_use('some words'))
print('some words*' * 8)
print(detect_capital_use('Right'))
print('Right*' * 8)
print(detect_capital_use('wrOng'))
print('wrOng*' * 8)
print(detect_capital_use('A'))
print('A*' * 8)
print(detect_capital_use('b'))
print('b*' * 8)
print(detect_capital_use('')) |
#!/usr/bin/python3
# 2019-1-30
# Daniel Nicolas Gisolfi
lexemes = {
'TYPE': {
'priority': 0,
'pattern': r'^(int|string|boolean)$',
},
'BOOLEAN': {
'priority': 0,
'pattern':r'^(true|false)$',
},
'BOOL_OP':{
'priority': 2,
'pattern':r'^(!=|==)$',
},
'ADDITION_OP': {
'priority': 2,
'pattern': r'^\+$'
},
'WHILE': {
'priority': 0,
'pattern': r'^while$'
},
'PRINT': {
'priority': 0,
'pattern': r'^print$'
},
'ASSIGN_OP': {
'priority': 2,
'pattern': r'^=$'
},
'LEFT_PAREN': {
'priority': 2,
'pattern': r'^\($'
},
'RIGHT_PAREN': {
'priority': 2,
'pattern': r'^\)$'
},
'LEFT_BRACE': {
'priority': 2,
'pattern': r'^{$'
},
'RIGHT_BRACE': {
'priority': 2,
'pattern': r'^}$'
},
'DIGIT': {
'priority': 3,
'pattern': r'^\d$'
},
'CHAR': {
'priority': 4,
'pattern': r'^[a-z]{1}$'
},
'QUOTE': {
'priority': 2,
'pattern': r'^"$'
},
'ID': {
'priority': 1,
'pattern': r'^[a-z]$'
},
'EOP': {
'priority': 2,
'pattern': r'^\$$'
},
'IF': {
'priority': 0,
'pattern': r'^if$'
}
}
# Lexemes that will occur in the buffer rather than as a single char.
# They are sorted by length in descending order and seperate from
# the default lexeme list for effiecincy
buffer_lexemes = {
'ID': {
'pattern': r'^[a-z]',
'token': 'ID'
},
'DIGIT': {
'pattern': r'^\d',
'token': 'DIGIT'
},
'IF': {
'pattern': r'^if',
'token': 'IF',
'value': 'if'
},
'INT': {
'pattern': r'^int',
'token': 'TYPE',
'value': 'int'
},
'TRUE': {
'pattern': r'^true',
'token': 'BOOLEAN',
'value': 'true'
},
'FALSE': {
'pattern': r'^false',
'token': 'BOOLEAN',
'value': 'false'
},
'STRING': {
'pattern': r'^string',
'token': 'TYPE',
'value': 'string'
},
'WHILE': {
'pattern': r'^while',
'token': 'WHILE',
'value': 'while'
},
'PRINT': {
'pattern': r'^print',
'token': 'PRINT',
'value': 'print'
},
'BOOLEAN': {
'pattern': r'^boolean',
'token': 'TYPE',
'value': 'boolean'
}
} | lexemes = {'TYPE': {'priority': 0, 'pattern': '^(int|string|boolean)$'}, 'BOOLEAN': {'priority': 0, 'pattern': '^(true|false)$'}, 'BOOL_OP': {'priority': 2, 'pattern': '^(!=|==)$'}, 'ADDITION_OP': {'priority': 2, 'pattern': '^\\+$'}, 'WHILE': {'priority': 0, 'pattern': '^while$'}, 'PRINT': {'priority': 0, 'pattern': '^print$'}, 'ASSIGN_OP': {'priority': 2, 'pattern': '^=$'}, 'LEFT_PAREN': {'priority': 2, 'pattern': '^\\($'}, 'RIGHT_PAREN': {'priority': 2, 'pattern': '^\\)$'}, 'LEFT_BRACE': {'priority': 2, 'pattern': '^{$'}, 'RIGHT_BRACE': {'priority': 2, 'pattern': '^}$'}, 'DIGIT': {'priority': 3, 'pattern': '^\\d$'}, 'CHAR': {'priority': 4, 'pattern': '^[a-z]{1}$'}, 'QUOTE': {'priority': 2, 'pattern': '^"$'}, 'ID': {'priority': 1, 'pattern': '^[a-z]$'}, 'EOP': {'priority': 2, 'pattern': '^\\$$'}, 'IF': {'priority': 0, 'pattern': '^if$'}}
buffer_lexemes = {'ID': {'pattern': '^[a-z]', 'token': 'ID'}, 'DIGIT': {'pattern': '^\\d', 'token': 'DIGIT'}, 'IF': {'pattern': '^if', 'token': 'IF', 'value': 'if'}, 'INT': {'pattern': '^int', 'token': 'TYPE', 'value': 'int'}, 'TRUE': {'pattern': '^true', 'token': 'BOOLEAN', 'value': 'true'}, 'FALSE': {'pattern': '^false', 'token': 'BOOLEAN', 'value': 'false'}, 'STRING': {'pattern': '^string', 'token': 'TYPE', 'value': 'string'}, 'WHILE': {'pattern': '^while', 'token': 'WHILE', 'value': 'while'}, 'PRINT': {'pattern': '^print', 'token': 'PRINT', 'value': 'print'}, 'BOOLEAN': {'pattern': '^boolean', 'token': 'TYPE', 'value': 'boolean'}} |
# -*- coding: utf-8 -*-
def main():
low, high = list(map(int, input().split()))
n = int(input())
a = [int(input()) for _ in range(n)]
for ai in a:
if ai > high:
print(-1)
else:
print(max(0, low - ai))
if __name__ == '__main__':
main()
| def main():
(low, high) = list(map(int, input().split()))
n = int(input())
a = [int(input()) for _ in range(n)]
for ai in a:
if ai > high:
print(-1)
else:
print(max(0, low - ai))
if __name__ == '__main__':
main() |
# -*- coding: utf-8 -*-
def comp_mass_magnets(self):
"""Compute the mass of the hole magnets
Parameters
----------
self : HoleM57
A HoleM57 object
Returns
-------
Mmag: float
mass of the 2 Magnets [kg]
"""
M = 0
# magnet_0 and magnet_1 can have different materials
if self.magnet_0:
M += (
self.H2
* self.W4
* self.magnet_0.Lmag
* self.magnet_0.mat_type.mechanics.rho
)
if self.magnet_1:
M += (
self.H2
* self.W4
* self.magnet_1.Lmag
* self.magnet_1.mat_type.mechanics.rho
)
return M
| def comp_mass_magnets(self):
"""Compute the mass of the hole magnets
Parameters
----------
self : HoleM57
A HoleM57 object
Returns
-------
Mmag: float
mass of the 2 Magnets [kg]
"""
m = 0
if self.magnet_0:
m += self.H2 * self.W4 * self.magnet_0.Lmag * self.magnet_0.mat_type.mechanics.rho
if self.magnet_1:
m += self.H2 * self.W4 * self.magnet_1.Lmag * self.magnet_1.mat_type.mechanics.rho
return M |
# Test file for api version checker
class SomeClient():
def __init__(self, endpoint, credential, **kwargs):
"""
:param str endpoint: Something.
:param credential: Something.
:type credential: TokenCredential.
"""
pass
| class Someclient:
def __init__(self, endpoint, credential, **kwargs):
"""
:param str endpoint: Something.
:param credential: Something.
:type credential: TokenCredential.
"""
pass |
'''A program to find the sum of all values in a dictionary!!'''
print("Program to find sum of all items in dictionary!!")
def toFindSum(myD):
s = 0
for i in myD.values():
s= s + i
print('Sum:{}'.format(s))
d = dict()
length = int(input('Enter the number of {key:value} pairs\n'))
for i in range(length):
Input = input('\nEnter the {key:value} pair\nThe input should be of the format key:value\n')
t = Input.split(':')
d[t[0]] = int(t[1])
toFindSum(d)
| """A program to find the sum of all values in a dictionary!!"""
print('Program to find sum of all items in dictionary!!')
def to_find_sum(myD):
s = 0
for i in myD.values():
s = s + i
print('Sum:{}'.format(s))
d = dict()
length = int(input('Enter the number of {key:value} pairs\n'))
for i in range(length):
input = input('\nEnter the {key:value} pair\nThe input should be of the format key:value\n')
t = Input.split(':')
d[t[0]] = int(t[1])
to_find_sum(d) |
def peakFinding(arr):
"""
The motive of this algorithm is find the first peak in the array
and show its index.
"""
if len(arr) == 1:
return [0]
elif len(arr) == 2:
return [0] if arr[0] > arr[1] else [1]
else:
if arr[0] > arr[1]:
return [0]
elif arr[0] < arr[1]:
for i in range(3, len(arr) - 1):
if arr[i] > arr[i - 1] and arr[i] > arr[i + 1]:
return [i] # returns the first index when finds a peak.
else:
return [len(arr) - 1]
if __name__ == "__main__":
print(peakFinding([1])) # when array has only one elements
print(peakFinding([1, 2])) # when array has two elements
print(peakFinding([1, 0, 3]))
print(peakFinding([1, 0, 2, 1, 3, 2, 5, 2, 6]))
print(peakFinding([0, 1, 2, 3, 4, 5]))
| def peak_finding(arr):
"""
The motive of this algorithm is find the first peak in the array
and show its index.
"""
if len(arr) == 1:
return [0]
elif len(arr) == 2:
return [0] if arr[0] > arr[1] else [1]
elif arr[0] > arr[1]:
return [0]
elif arr[0] < arr[1]:
for i in range(3, len(arr) - 1):
if arr[i] > arr[i - 1] and arr[i] > arr[i + 1]:
return [i]
else:
return [len(arr) - 1]
if __name__ == '__main__':
print(peak_finding([1]))
print(peak_finding([1, 2]))
print(peak_finding([1, 0, 3]))
print(peak_finding([1, 0, 2, 1, 3, 2, 5, 2, 6]))
print(peak_finding([0, 1, 2, 3, 4, 5])) |
class Node:
def __init__(self, id, neighbours):
self.id = id
self.neighbours = neighbours
self.visited = False
class Path:
def __init__(self, neighbours):
self.neighbours = neighbours
def dfs_recursive(node):
print('Node ', node.id)
node.visited = True
for next in node.neighbours:
dfs_recursive(next)
def dfs_open_list(start):
open_list = [start]
while open_list != []:
first, rest = open_list[0], open_list[1:]
if first.visited == True:
open_list = rest
else:
print('Node ', first.id)
first.visited = True
open_list = first.neighbours + rest
def bfs_open_list(start):
open_list = [start]
while open_list != []:
first, rest = open_list[0], open_list[1:]
if first.visited:
open_list = rest
else:
print('Node ', first.id)
first.visited = True
open_list = rest + first.neighbours
def dfs_stack(start):
stack = [None] * 10
stack[0] = start
stack_pointer = 0
while stack_pointer >= 0:
current = stack[stack_pointer]
stack_pointer -= 1
if not current.visited:
print('Node ', current.id)
current.visited = True
if current.neighbours != []:
for n in reversed(current.neighbours):
stack_pointer += 1
stack[stack_pointer] = n
def reset_tree():
global tree
tree = Node(1,
[Node(2,
[Node(3, []),
Node(4, [])]),
Node(5,
[Node(6, [])])])
print("Recursive Depth First Search")
reset_tree()
dfs_recursive(tree)
print("Iterative Depth First Search")
reset_tree()
dfs_open_list(tree)
print("Breadth First Search")
reset_tree()
bfs_open_list(tree)
print("Depth First Search with Stack")
reset_tree()
dfs_stack(tree)
| class Node:
def __init__(self, id, neighbours):
self.id = id
self.neighbours = neighbours
self.visited = False
class Path:
def __init__(self, neighbours):
self.neighbours = neighbours
def dfs_recursive(node):
print('Node ', node.id)
node.visited = True
for next in node.neighbours:
dfs_recursive(next)
def dfs_open_list(start):
open_list = [start]
while open_list != []:
(first, rest) = (open_list[0], open_list[1:])
if first.visited == True:
open_list = rest
else:
print('Node ', first.id)
first.visited = True
open_list = first.neighbours + rest
def bfs_open_list(start):
open_list = [start]
while open_list != []:
(first, rest) = (open_list[0], open_list[1:])
if first.visited:
open_list = rest
else:
print('Node ', first.id)
first.visited = True
open_list = rest + first.neighbours
def dfs_stack(start):
stack = [None] * 10
stack[0] = start
stack_pointer = 0
while stack_pointer >= 0:
current = stack[stack_pointer]
stack_pointer -= 1
if not current.visited:
print('Node ', current.id)
current.visited = True
if current.neighbours != []:
for n in reversed(current.neighbours):
stack_pointer += 1
stack[stack_pointer] = n
def reset_tree():
global tree
tree = node(1, [node(2, [node(3, []), node(4, [])]), node(5, [node(6, [])])])
print('Recursive Depth First Search')
reset_tree()
dfs_recursive(tree)
print('Iterative Depth First Search')
reset_tree()
dfs_open_list(tree)
print('Breadth First Search')
reset_tree()
bfs_open_list(tree)
print('Depth First Search with Stack')
reset_tree()
dfs_stack(tree) |
def get_data(fieldname, entry):
field = fieldname.split(".")
rslt = entry
for data in field:
if rslt[data]:
rslt = rslt[data]
return rslt
def cnvt_grpby_to_nested_dict(grpby, entries):
"""
Create a dict of dict of grpby arguments with sorted
entries at the end of nested dict
"""
rslt = {}
print("rslt = %8x" % id(rslt))
for key, entry in entries.items():
target = rslt
for grp_info in grpby[:-1]:
fieldname = grp_info["field"]
# dflt_val = grp_info.get("default", grp_info["values"][0])
field = get_data(fieldname, entry)
if field not in target:
target[field] = {}
print("create {} %x for field %8s in %x" % (
id(target[field]), field, id(target)))
target = target[field]
# Handle last group differenty
grp_info = grpby[-1]
fieldname = grp_info["field"]
# dflt_val = grp_info.get("default", grp_info["values"][0])
field = get_data(fieldname, entry)
if field not in target:
target[field] = []
target = target[field]
target.append(key)
target.sort()
return rslt
def cnvt_nested_grpby_to_lst_dict(dicdic, grpby, lvl=0):
"""
Recursive func that transform nested dict created by
cnvt_grpby_to_nested_dict to list of dicts with
good order define in grpby
"""
grp = grpby[lvl]
grpvals = grp["values"]
def keyfunc(key):
if key in grpvals:
return grpvals.index(key)
else:
return len(grpvals)+1
# if isinstance(dicdic, dict):
# keys = sorted(dicdic.keys(), key=keyfunc)
# else:
# keys = sorted(dicdic)
if lvl < len(grpby):
rslt = []
# for key in keys:
for key in grpvals:
if lvl < len(grpby) - 1:
subentries = cnvt_nested_grpby_to_lst_dict(
dicdic.get(key, {}), grpby, lvl+1)
else:
subentries = dicdic.get(key, [])
entry = dict(
name=key,
separator_style=grp["separator_style"],
entries=subentries
)
rslt.append(entry)
return rslt
return dict(last=dicdic)
| def get_data(fieldname, entry):
field = fieldname.split('.')
rslt = entry
for data in field:
if rslt[data]:
rslt = rslt[data]
return rslt
def cnvt_grpby_to_nested_dict(grpby, entries):
"""
Create a dict of dict of grpby arguments with sorted
entries at the end of nested dict
"""
rslt = {}
print('rslt = %8x' % id(rslt))
for (key, entry) in entries.items():
target = rslt
for grp_info in grpby[:-1]:
fieldname = grp_info['field']
field = get_data(fieldname, entry)
if field not in target:
target[field] = {}
print('create {} %x for field %8s in %x' % (id(target[field]), field, id(target)))
target = target[field]
grp_info = grpby[-1]
fieldname = grp_info['field']
field = get_data(fieldname, entry)
if field not in target:
target[field] = []
target = target[field]
target.append(key)
target.sort()
return rslt
def cnvt_nested_grpby_to_lst_dict(dicdic, grpby, lvl=0):
"""
Recursive func that transform nested dict created by
cnvt_grpby_to_nested_dict to list of dicts with
good order define in grpby
"""
grp = grpby[lvl]
grpvals = grp['values']
def keyfunc(key):
if key in grpvals:
return grpvals.index(key)
else:
return len(grpvals) + 1
if lvl < len(grpby):
rslt = []
for key in grpvals:
if lvl < len(grpby) - 1:
subentries = cnvt_nested_grpby_to_lst_dict(dicdic.get(key, {}), grpby, lvl + 1)
else:
subentries = dicdic.get(key, [])
entry = dict(name=key, separator_style=grp['separator_style'], entries=subentries)
rslt.append(entry)
return rslt
return dict(last=dicdic) |
# 7. Lists
freinds = ["Pythobit","Boy"]
print(freinds[0]) # Output - Pythobit
print(len(freinds)) # Output - 2
freinds = [["Pythobit",20],["Boy",21]]
print(freinds[0][0]) # Output - Pythobit
print(freinds[1][1]) # Output - 21
freinds = ["Pythobit","Boy"]
freinds.append("Pythobit boy")
print(freinds) # Output - ["Pythobit", "Boy", "Pythobit boy"]
freinds = ["Pythobit","Boy","Pythobit boy"]
freinds.remove("Pythobit")
print(freinds) # Output - ['Boy', 'Pythobit boy']
| freinds = ['Pythobit', 'Boy']
print(freinds[0])
print(len(freinds))
freinds = [['Pythobit', 20], ['Boy', 21]]
print(freinds[0][0])
print(freinds[1][1])
freinds = ['Pythobit', 'Boy']
freinds.append('Pythobit boy')
print(freinds)
freinds = ['Pythobit', 'Boy', 'Pythobit boy']
freinds.remove('Pythobit')
print(freinds) |
filename = '/Users/andrew.meyers/Documents/andy/AdventOfCode2021/Day10/input.txt'
def parseInput(filename):
lines = []
with open(filename) as f:
for line in f:
lines.append(line)
return lines
def getPoints(line):
map = {')': 3, ']': 57, '}': 1197, '>': 25137}
stack = []
for c in line:
if c == '{' or c == '(' or c == '<' or c == '[':
stack.append(c)
else:
if len(stack) == 0:
return 0
l = stack.pop()
if (c == ')' and l != '(') or \
(c == '}' and l != '{') or \
(c == ']' and l != '[') or \
(c == '>' and l != '<'):
return map[c]
return 0
def getPointsForCorruptedLines(lines):
pts = 0
count = 0
for line in lines:
pt = getPoints(line)
if pt > 0:
count += 1
pts += pt
print(count)
return pts
def getAutoCompleteScore(line):
stack = []
for c in line:
if c == '\n':
continue
if c == '{' or c == '(' or c == '<' or c == '[':
stack.append(c)
else:
if len(stack) == 0:
return 0
l = stack.pop()
if (c == ')' and l != '(') or \
(c == '}' and l != '{') or \
(c == ']' and l != '[') or \
(c == '>' and l != '<'):
return 0
missingVal = {
'<': 4,
'{': 3,
'[': 2,
'(': 1
}
currentScore = 0
# we have leftovers
while len(stack) > 0:
l = stack.pop()
currentScore *= 5
currentScore += missingVal[l]
return currentScore
def getMiddleScoreForAutocomplete(lines):
scores = []
for line in lines:
score = getAutoCompleteScore(line)
if score > 0:
scores.append(score)
scores = sorted(scores)
print(len(scores))
idx = (len(scores) // 2)
print(idx)
return scores[idx]
test_lines = ['[({(<(())[]>[[{[]{<()<>>',
'[(()[<>])]({[<{<<[]>>(',
'{([(<{}[<>[]}>{[]{[(<()>',
'(((({<>}<{<{<>}{[]{[]{}',
'[[<[([]))<([[{}[[()]]]',
'[{[{({}]{}}([{[{{{}}([]',
'{<[[]]>}<{[{[{[]{()[[[]',
'[<(<(<(<{}))><([]([]()',
'<{([([[(<>()){}]>(<<{{',
'<{([{{}}[<[[[<>{}]]]>[]]']
if __name__ == '__main__':
isPart1 = False
lines = parseInput(filename)
if isPart1:
total = getPointsForCorruptedLines(lines)
print('The answer is:', total)
else:
total = getMiddleScoreForAutocomplete(lines)
print('The answer is:', total)
| filename = '/Users/andrew.meyers/Documents/andy/AdventOfCode2021/Day10/input.txt'
def parse_input(filename):
lines = []
with open(filename) as f:
for line in f:
lines.append(line)
return lines
def get_points(line):
map = {')': 3, ']': 57, '}': 1197, '>': 25137}
stack = []
for c in line:
if c == '{' or c == '(' or c == '<' or (c == '['):
stack.append(c)
else:
if len(stack) == 0:
return 0
l = stack.pop()
if c == ')' and l != '(' or (c == '}' and l != '{') or (c == ']' and l != '[') or (c == '>' and l != '<'):
return map[c]
return 0
def get_points_for_corrupted_lines(lines):
pts = 0
count = 0
for line in lines:
pt = get_points(line)
if pt > 0:
count += 1
pts += pt
print(count)
return pts
def get_auto_complete_score(line):
stack = []
for c in line:
if c == '\n':
continue
if c == '{' or c == '(' or c == '<' or (c == '['):
stack.append(c)
else:
if len(stack) == 0:
return 0
l = stack.pop()
if c == ')' and l != '(' or (c == '}' and l != '{') or (c == ']' and l != '[') or (c == '>' and l != '<'):
return 0
missing_val = {'<': 4, '{': 3, '[': 2, '(': 1}
current_score = 0
while len(stack) > 0:
l = stack.pop()
current_score *= 5
current_score += missingVal[l]
return currentScore
def get_middle_score_for_autocomplete(lines):
scores = []
for line in lines:
score = get_auto_complete_score(line)
if score > 0:
scores.append(score)
scores = sorted(scores)
print(len(scores))
idx = len(scores) // 2
print(idx)
return scores[idx]
test_lines = ['[({(<(())[]>[[{[]{<()<>>', '[(()[<>])]({[<{<<[]>>(', '{([(<{}[<>[]}>{[]{[(<()>', '(((({<>}<{<{<>}{[]{[]{}', '[[<[([]))<([[{}[[()]]]', '[{[{({}]{}}([{[{{{}}([]', '{<[[]]>}<{[{[{[]{()[[[]', '[<(<(<(<{}))><([]([]()', '<{([([[(<>()){}]>(<<{{', '<{([{{}}[<[[[<>{}]]]>[]]']
if __name__ == '__main__':
is_part1 = False
lines = parse_input(filename)
if isPart1:
total = get_points_for_corrupted_lines(lines)
print('The answer is:', total)
else:
total = get_middle_score_for_autocomplete(lines)
print('The answer is:', total) |
def k_wood(gb, so, x):
"""
Thermal conductivity of wood based on moisture content, volumetric
shrinkage, and basic specific gravity
.. math:: k = G_x (B + C x) + A
where :math:`k` is thermal conductivity [W/(mK)] of wood, :math:`G_x` is
specific gravity [-] based on volume at moisture content :math:`x` [%] and
:math:`A, B, C` are constants.
The :math:`G_x` term is determined from
.. math:: G_x = \\frac{G_b}{1 - S_x / 100}
where :math:`G_b` is basic specific gravity [-] and :math:`S_x` is
volumetric shrinkage [%] from green condition to moisture content :math:`x`.
The :math:`S_x` term is calculated from
.. math:: S_x = S_o \\left(1 - \\frac{x}{MC_{fs}} \\right)
where :math:`S_o` is volumetric shrinkage [%] from Table 4-3 [1]_ and :math:`MC_{fs}`
is the fiber saturation point assumed to be 30% moisture content.
Parameters
----------
gb : float
Basic specific gravity [-]
so : float
Volumetric shrinkage [%]
x : float
Moisture content [%]
Returns
-------
k : float
Thermal conductivity [W/(mK)]
Example
-------
>>> k_wood(0.54, 12.3, 10)
0.1567
References
----------
.. [1] Samuel V. Glass and Samuel L. Zelinka. Moisture Relations and
Physical Properties of Wood. Ch. 4 in Wood Handbook, pp. 1-19, 2010.
"""
mcfs = 30 # fiber staturation point estimate [%]
# shrinkage from green to final moisture content, Eq. 4-7 [%]
sx = so * (1 - x / mcfs)
# specific gravity based on volume at given moisture content, Eq. 4-9
gx = gb / (1 - sx / 100)
# thermal conductivity, Eq. 4-15 [W/(mK)]
a = 0.01864
b = 0.1941
c = 0.004064
k = gx * (b + c * x) + a
return k
| def k_wood(gb, so, x):
"""
Thermal conductivity of wood based on moisture content, volumetric
shrinkage, and basic specific gravity
.. math:: k = G_x (B + C x) + A
where :math:`k` is thermal conductivity [W/(mK)] of wood, :math:`G_x` is
specific gravity [-] based on volume at moisture content :math:`x` [%] and
:math:`A, B, C` are constants.
The :math:`G_x` term is determined from
.. math:: G_x = \\frac{G_b}{1 - S_x / 100}
where :math:`G_b` is basic specific gravity [-] and :math:`S_x` is
volumetric shrinkage [%] from green condition to moisture content :math:`x`.
The :math:`S_x` term is calculated from
.. math:: S_x = S_o \\left(1 - \\frac{x}{MC_{fs}} \\right)
where :math:`S_o` is volumetric shrinkage [%] from Table 4-3 [1]_ and :math:`MC_{fs}`
is the fiber saturation point assumed to be 30% moisture content.
Parameters
----------
gb : float
Basic specific gravity [-]
so : float
Volumetric shrinkage [%]
x : float
Moisture content [%]
Returns
-------
k : float
Thermal conductivity [W/(mK)]
Example
-------
>>> k_wood(0.54, 12.3, 10)
0.1567
References
----------
.. [1] Samuel V. Glass and Samuel L. Zelinka. Moisture Relations and
Physical Properties of Wood. Ch. 4 in Wood Handbook, pp. 1-19, 2010.
"""
mcfs = 30
sx = so * (1 - x / mcfs)
gx = gb / (1 - sx / 100)
a = 0.01864
b = 0.1941
c = 0.004064
k = gx * (b + c * x) + a
return k |
# =============================================================================
# Author: Teerapat Jenrungrot - https://github.com/mjenrungrot/
# FileName: 11052.py
# Description: UVa Online Judge - 11052
# =============================================================================
while True:
N = int(input())
if N == 0:
break
A = []
year = []
dp = []
for i in range(N):
tt, num, keep = input().split()
time_tuple = list(map(int, tt.split(":")))
A.append((time_tuple, keep))
year.append(-1)
dp.append(-1)
year[-1] = 0
for i in range(N - 2, -1, -1):
next_t = A[i + 1][0]
curr_t = A[i][0]
if curr_t < next_t:
year[i] = year[i + 1]
else:
year[i] = year[i + 1] - 1
# initialization
last = -1
earliest = -1
for i in range(N - 1, -1, -1):
if last == -1 and year[i] == 0:
dp[i] = 1
else:
dp[i] = N - i
if last == -1 and (A[i][1] == "+" or year[i] != 0):
last = i
if A[i][1] == "+":
earliest = i
for i in range(last, earliest - 1, -1):
for j in range(i + 1, N):
if year[i] == year[j]:
dp[i] = min(dp[i], dp[j] + 1)
elif A[i][0] >= A[j][0] and year[i] + 1 == year[j]:
dp[i] = min(dp[i], dp[j] + 1)
else:
break
if A[j][1] == "+":
break
print(dp[earliest])
| while True:
n = int(input())
if N == 0:
break
a = []
year = []
dp = []
for i in range(N):
(tt, num, keep) = input().split()
time_tuple = list(map(int, tt.split(':')))
A.append((time_tuple, keep))
year.append(-1)
dp.append(-1)
year[-1] = 0
for i in range(N - 2, -1, -1):
next_t = A[i + 1][0]
curr_t = A[i][0]
if curr_t < next_t:
year[i] = year[i + 1]
else:
year[i] = year[i + 1] - 1
last = -1
earliest = -1
for i in range(N - 1, -1, -1):
if last == -1 and year[i] == 0:
dp[i] = 1
else:
dp[i] = N - i
if last == -1 and (A[i][1] == '+' or year[i] != 0):
last = i
if A[i][1] == '+':
earliest = i
for i in range(last, earliest - 1, -1):
for j in range(i + 1, N):
if year[i] == year[j]:
dp[i] = min(dp[i], dp[j] + 1)
elif A[i][0] >= A[j][0] and year[i] + 1 == year[j]:
dp[i] = min(dp[i], dp[j] + 1)
else:
break
if A[j][1] == '+':
break
print(dp[earliest]) |
def largest_exponential(file):
text_file = open(file, "r")
lines = text_file.read().splitlines()
greatest = 0
count = 0
split_list = []
for elem in lines:
seperated = elem.split(',')
split_list.append(seperated)
for i in range(0, len(split_list)):
count += 1
base = int(split_list[i][0])
exp = float(split_list[i][1][:-len(split_list[i][1]) + 1] + "." + split_list[i][1][-len(split_list[i][1]) + 1:])
result = base ** exp
if result > greatest:
greatest = result
new_list = [base, exp, count]
return new_list
print(largest_exponential("Additional Files/p099_base_exp.txt"))
| def largest_exponential(file):
text_file = open(file, 'r')
lines = text_file.read().splitlines()
greatest = 0
count = 0
split_list = []
for elem in lines:
seperated = elem.split(',')
split_list.append(seperated)
for i in range(0, len(split_list)):
count += 1
base = int(split_list[i][0])
exp = float(split_list[i][1][:-len(split_list[i][1]) + 1] + '.' + split_list[i][1][-len(split_list[i][1]) + 1:])
result = base ** exp
if result > greatest:
greatest = result
new_list = [base, exp, count]
return new_list
print(largest_exponential('Additional Files/p099_base_exp.txt')) |
items = ["Clothes", "phones", "laptops", "Chocolates"]
if __name__ == "__main__":
while True:
try:
for index in range(0,len(items)):
print(f"{index} ")
option = int(input("Enter the number of your choice to get gift: "))
print(f"You have choosen {items[option]}")
except ValueError as ve:
print("Enter the choice of getting gift in numbers")
print(ve)
except IndexError as ie:
print(f"Enter valid number choice ranging from 0 to {len(items)-1}")
except Exception as e:
print(f"Unknown Error occured {e}")
else:
print("Thank god no errors")
finally:
choice = input('Do you want to Continue Enter y for yes and n for no: ')
if choice == 'n':
break
| items = ['Clothes', 'phones', 'laptops', 'Chocolates']
if __name__ == '__main__':
while True:
try:
for index in range(0, len(items)):
print(f'{index} ')
option = int(input('Enter the number of your choice to get gift: '))
print(f'You have choosen {items[option]}')
except ValueError as ve:
print('Enter the choice of getting gift in numbers')
print(ve)
except IndexError as ie:
print(f'Enter valid number choice ranging from 0 to {len(items) - 1}')
except Exception as e:
print(f'Unknown Error occured {e}')
else:
print('Thank god no errors')
finally:
choice = input('Do you want to Continue Enter y for yes and n for no: ')
if choice == 'n':
break |
"""json format (bim file)
"name": "",
"description": "",
"columns": [...],
"partitions": [...],
"measures": [...],
"annotations": [...]
"""
def get_measures(data: dict):
print()
print(79 * '*')
print(f"Getting measures at {(data['model']['name'])} cube")
print(79 * '*')
list_name_measures_by_bim = []
list_measures_by_bim = []
for table in range(0, len(data['model']['tables'])):
#print(f"\n{40 * '*'} table: {data['model']['tables'][table]['name']} {40 * '*'} ")
if 'measures' in data['model']['tables'][table]:
for measure_number in range(0, len(data['model']['tables'][table]['measures'])):
name = (data['model']['tables'][table]['measures'][measure_number]['name'])
expression = data['model']['tables'][table]['measures'][measure_number]['expression']
list_name_measures_by_bim.append(name)
list_measures_by_bim.append(expression)
#print(name)
return list_measures_by_bim, list_name_measures_by_bim
def get_calculated_col(data: dict):
print()
print(79 * '*')
print(f"Getting calculated_col in {(data['model']['name'])} cube")
print(79 * '*')
list_name_calculated_col_by_bim = []
list_calculated_col_by_bim = []
for table in range(0, len(data['model']['tables'])):
#print(f"\n{40 * '*'} table: {data['model']['tables'][table]['name']} {40 * '*'} ")
for col in range(0, len(data['model']['tables'][table]['columns'])):
if 'type' in data['model']['tables'][table]['columns'][col]:
if data['model']['tables'][table]['columns'][col]['type'] \
.startswith('calculated'):
name = (data['model']['tables'][table]['columns'][col]['name'])
expression = data['model']['tables'][table]['columns'][col]['expression']
list_name_calculated_col_by_bim.append(name)
list_calculated_col_by_bim.append(expression)
print(name)
return list_calculated_col_by_bim, list_name_calculated_col_by_bim
def get_queries(data: dict):
print()
print(79 * '*')
print(f"Getting queries in {(data['model']['name'])} cube")
print(79 * '*')
list_queries = []
list_name_queries = []
for table in range(0, len(data['model']['tables'])):
#print(f"\n{40 * '*'} table: {data['model']['tables'][table]['name']} {40 * '*'} ")
for partitions in range(0, len(data['model']['tables'][table]['partitions'])):
name = data['model']['tables'][table]['partitions'][partitions]['name']
query = data['model']['tables'][table]['partitions'][partitions]['source']['query']
list_name_queries.append(name)
list_queries.append(query)
print(name)
return list_queries, list_name_queries
| """json format (bim file)
"name": "",
"description": "",
"columns": [...],
"partitions": [...],
"measures": [...],
"annotations": [...]
"""
def get_measures(data: dict):
print()
print(79 * '*')
print(f"Getting measures at {data['model']['name']} cube")
print(79 * '*')
list_name_measures_by_bim = []
list_measures_by_bim = []
for table in range(0, len(data['model']['tables'])):
if 'measures' in data['model']['tables'][table]:
for measure_number in range(0, len(data['model']['tables'][table]['measures'])):
name = data['model']['tables'][table]['measures'][measure_number]['name']
expression = data['model']['tables'][table]['measures'][measure_number]['expression']
list_name_measures_by_bim.append(name)
list_measures_by_bim.append(expression)
return (list_measures_by_bim, list_name_measures_by_bim)
def get_calculated_col(data: dict):
print()
print(79 * '*')
print(f"Getting calculated_col in {data['model']['name']} cube")
print(79 * '*')
list_name_calculated_col_by_bim = []
list_calculated_col_by_bim = []
for table in range(0, len(data['model']['tables'])):
for col in range(0, len(data['model']['tables'][table]['columns'])):
if 'type' in data['model']['tables'][table]['columns'][col]:
if data['model']['tables'][table]['columns'][col]['type'].startswith('calculated'):
name = data['model']['tables'][table]['columns'][col]['name']
expression = data['model']['tables'][table]['columns'][col]['expression']
list_name_calculated_col_by_bim.append(name)
list_calculated_col_by_bim.append(expression)
print(name)
return (list_calculated_col_by_bim, list_name_calculated_col_by_bim)
def get_queries(data: dict):
print()
print(79 * '*')
print(f"Getting queries in {data['model']['name']} cube")
print(79 * '*')
list_queries = []
list_name_queries = []
for table in range(0, len(data['model']['tables'])):
for partitions in range(0, len(data['model']['tables'][table]['partitions'])):
name = data['model']['tables'][table]['partitions'][partitions]['name']
query = data['model']['tables'][table]['partitions'][partitions]['source']['query']
list_name_queries.append(name)
list_queries.append(query)
print(name)
return (list_queries, list_name_queries) |
"""
Convenience functions, etc for tests
"""
def show_and_wait(qtbot, *widgets, timeout=60000, raising=False):
"""
Helper that shows widgets and waits until they are closed (or timeout ms)
"""
for w in widgets:
w.show()
def are_closed():
for w in widgets:
if w.isVisible():
return False
return True
try:
qtbot.wait_until(are_closed, timeout=timeout)
except AssertionError:
if raising:
raise
def get_sub_config(cfg, item):
assert item in cfg["__itemConfigurations__"]
assert item in cfg["__orderedConfigNames__"]
return cfg["__itemConfigurations__"][item]
| """
Convenience functions, etc for tests
"""
def show_and_wait(qtbot, *widgets, timeout=60000, raising=False):
"""
Helper that shows widgets and waits until they are closed (or timeout ms)
"""
for w in widgets:
w.show()
def are_closed():
for w in widgets:
if w.isVisible():
return False
return True
try:
qtbot.wait_until(are_closed, timeout=timeout)
except AssertionError:
if raising:
raise
def get_sub_config(cfg, item):
assert item in cfg['__itemConfigurations__']
assert item in cfg['__orderedConfigNames__']
return cfg['__itemConfigurations__'][item] |
class Solution(object):
def simplifyPath(self, path):
"""
:type path: str
:rtype: str
"""
path = path.split("/")
stack = []
for p in path:
if p in ["", "."]:
continue
if p == "..":
if stack:
stack.pop()
else:
stack.append(p)
return "/" + "/".join(stack) | class Solution(object):
def simplify_path(self, path):
"""
:type path: str
:rtype: str
"""
path = path.split('/')
stack = []
for p in path:
if p in ['', '.']:
continue
if p == '..':
if stack:
stack.pop()
else:
stack.append(p)
return '/' + '/'.join(stack) |
# accept an integer and print the digits in reverse
n = int(input("Enter a positive integer: "))
print()
while (n!=0):
digit = n % 10 # extract the last digit
print(digit) # print the last digit
n = n // 10 # remove the last digit
'''
123 / 10
q = 12
r = 3
456 / 10
q = 45
r = 6
q = dividend // divisor
r = dividend % divisor
In case of quotient calculation, where the divisor is 10, the quotient is the number formed by removing the last digit.
In case of remainder calculation, where the divisor is 10, the remainder is the last digit.
'''
'''
In our above example, the loop is executed for every digit in the number. That means that the loop is not fixed, and depends on the input from the user. In such cases, where the loop is not fixed, we can use a while loop.
''' | n = int(input('Enter a positive integer: '))
print()
while n != 0:
digit = n % 10
print(digit)
n = n // 10
'\n\n123 / 10\n\nq = 12\nr = 3\n\n456 / 10\n\nq = 45\nr = 6\n\nq = dividend // divisor\nr = dividend % divisor\n\nIn case of quotient calculation, where the divisor is 10, the quotient is the number formed by removing the last digit.\nIn case of remainder calculation, where the divisor is 10, the remainder is the last digit.\n'
'\nIn our above example, the loop is executed for every digit in the number. That means that the loop is not fixed, and depends on the input from the user. In such cases, where the loop is not fixed, we can use a while loop.\n' |
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016, 2017
class DataAlreadyExistsError(RuntimeError):
def __init__(self, label):
self.message = str("Data with label '%s' already exists and cannot be added" % (label))
def get_patient_id(d):
return d['patient']['identifier']
def get_index_by_label(d, label):
for idx in range(len(d['data'])):
if d['data'][idx]['label'] == label:
return idx
return None
def get_sampled_data_values(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['values']
def get_coordinate_data_values(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueCoordinateData']['values']
def get_period_value(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['period']['value']
def get_sampled_data_unit(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['unit']
def get_period_unit(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['period']['unit']
def get_gain(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['gain']
def get_initValue(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['initVal']
def get_patient_ID(d):
return d['patient']['identifier']
def add_sampled_data(d, label, sampled_data, period_value, period_unit, update_if_exists=False):
# check if label already exists
data_idx = get_index_by_label(d, label)
if data_idx is not None:
if update_if_exists == True:
v = {'valuesSampledData' : { 'values' : sampled_data, 'period' : { 'value' : period_value, 'unit' : period_unit }}}
d['data'][data_idx] = v
else:
raise DataAlreadyExistsError(label=label)
else:
v = {'label' : label, 'valuesSampledData' : { 'values' : sampled_data, 'period' : { 'value' : period_value, 'unit' : period_unit }}}
d['data'].append(v)
def add_coordinate_data(d, label, coords, replace_if_exists=False):
data_idx = get_index_by_label(d, label)
if data_idx is not None:
if replace_if_exists == True:
v = {'valueCoordinateData' : {'values' : coords}}
d['data'][data_idx] = v
else:
raise DataAlreadyExistsError(label=label)
else:
v = {'label' : label, 'valueCoordinateData' : {'values' : coords}}
d['data'].append(v)
| class Dataalreadyexistserror(RuntimeError):
def __init__(self, label):
self.message = str("Data with label '%s' already exists and cannot be added" % label)
def get_patient_id(d):
return d['patient']['identifier']
def get_index_by_label(d, label):
for idx in range(len(d['data'])):
if d['data'][idx]['label'] == label:
return idx
return None
def get_sampled_data_values(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['values']
def get_coordinate_data_values(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueCoordinateData']['values']
def get_period_value(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['period']['value']
def get_sampled_data_unit(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['unit']
def get_period_unit(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['period']['unit']
def get_gain(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['gain']
def get_init_value(d, label):
idx = get_index_by_label(d, label)
return d['data'][idx]['valueSampledData']['initVal']
def get_patient_id(d):
return d['patient']['identifier']
def add_sampled_data(d, label, sampled_data, period_value, period_unit, update_if_exists=False):
data_idx = get_index_by_label(d, label)
if data_idx is not None:
if update_if_exists == True:
v = {'valuesSampledData': {'values': sampled_data, 'period': {'value': period_value, 'unit': period_unit}}}
d['data'][data_idx] = v
else:
raise data_already_exists_error(label=label)
else:
v = {'label': label, 'valuesSampledData': {'values': sampled_data, 'period': {'value': period_value, 'unit': period_unit}}}
d['data'].append(v)
def add_coordinate_data(d, label, coords, replace_if_exists=False):
data_idx = get_index_by_label(d, label)
if data_idx is not None:
if replace_if_exists == True:
v = {'valueCoordinateData': {'values': coords}}
d['data'][data_idx] = v
else:
raise data_already_exists_error(label=label)
else:
v = {'label': label, 'valueCoordinateData': {'values': coords}}
d['data'].append(v) |
#
# @lc app=leetcode id=202 lang=python3
#
# [202] Happy Number
#
# @lc code=start
# class Solution:
# def isHappy(self, n: int):
# appeared = {}
# while True:
# s = 0
# while n > 0:
# s += (n % 10) * (n % 10)
# n = n//10
# if s == 1:
# return True
# else:
# if s not in appeared:
# appeared[s] = True
# n = s
# else:
# return False
class Solution:
def isHappy(self, n):
visited = set()
re = self.helper(n, visited)
return re
def helper(self, n, visited):
s = 0
while n > 0:
s = s + (n%10) ** 2
n = n//10
if s == 1:
return True
elif s in visited:
return False
else:
visited.add(s)
return self.helper(s, visited)
if __name__ == '__main__':
a = Solution()
b = a.isHappy(68)
print(b)
# @lc code=end
| class Solution:
def is_happy(self, n):
visited = set()
re = self.helper(n, visited)
return re
def helper(self, n, visited):
s = 0
while n > 0:
s = s + (n % 10) ** 2
n = n // 10
if s == 1:
return True
elif s in visited:
return False
else:
visited.add(s)
return self.helper(s, visited)
if __name__ == '__main__':
a = solution()
b = a.isHappy(68)
print(b) |
# capture discord_id to validate
@bot.command(pass_context=True)
async def example(ctx):
get_discord_id = ctx.message.author.id
user = await bot.get_user_info(get_discord_id)
print(get_discord_id)
# get username and unique ID from discord user that uses the command
@bot.command(pass_context=True)
async def getinfo(ctx, vote):
getMemberID = ctx.message.author.id
getMemberName = ctx.message.author.name
print (getMemberID)
print (getMemberName)
| @bot.command(pass_context=True)
async def example(ctx):
get_discord_id = ctx.message.author.id
user = await bot.get_user_info(get_discord_id)
print(get_discord_id)
@bot.command(pass_context=True)
async def getinfo(ctx, vote):
get_member_id = ctx.message.author.id
get_member_name = ctx.message.author.name
print(getMemberID)
print(getMemberName) |
# -*- coding: utf-8 -*-
"""src module init.
import src.poke_env as poke_env
import cross_evaluate_random_players as cross_evaluate_random_players
import max_damage_player as max_damage_player
#import rl_with_open_ai_gym_wrapper as rl_with_open_ai_gym_wrapper
import snivy_agent as snivy_agent
import sweeper as sweeper
__all__ = [
"poke_env",
"cross_evaluate_random_players",
"max_damage_player",
#"rl_with_open_ai_gym_wrapper",
"snivy_agent",
"sweeper",
]
"""
| """src module init.
import src.poke_env as poke_env
import cross_evaluate_random_players as cross_evaluate_random_players
import max_damage_player as max_damage_player
#import rl_with_open_ai_gym_wrapper as rl_with_open_ai_gym_wrapper
import snivy_agent as snivy_agent
import sweeper as sweeper
__all__ = [
"poke_env",
"cross_evaluate_random_players",
"max_damage_player",
#"rl_with_open_ai_gym_wrapper",
"snivy_agent",
"sweeper",
]
""" |
values = [23,52,59,37,48]
sum = 0
length = 10
for value in values:
sum+=value
length+=1
print("Total sum:"+str(sum)+"-Average: " + str(sum/length))
| values = [23, 52, 59, 37, 48]
sum = 0
length = 10
for value in values:
sum += value
length += 1
print('Total sum:' + str(sum) + '-Average: ' + str(sum / length)) |
class Solution:
# kind of dynamic programming?
def fib(self, N):
prepared_numbers = [0, 1, 1, 2, 3, 5, 8, 13]
if N <= len(prepared_numbers) - 1:
return prepared_numbers[N]
else:
for i in range(N - len(prepared_numbers) + 1):
prepared_numbers.append(prepared_numbers[-2] + prepared_numbers[-1])
return prepared_numbers[-1]
| class Solution:
def fib(self, N):
prepared_numbers = [0, 1, 1, 2, 3, 5, 8, 13]
if N <= len(prepared_numbers) - 1:
return prepared_numbers[N]
else:
for i in range(N - len(prepared_numbers) + 1):
prepared_numbers.append(prepared_numbers[-2] + prepared_numbers[-1])
return prepared_numbers[-1] |
"""
Do Not Edit the code here unless you know
what you are doing, This is a simple tokenizer
that can form tokens of whole sentences
thereby removing any need for NLTK
Scipy or any other 3rd party Library for
tokenization purposes.
- Moses
"""
abbreviations = {'dr.': 'doctor', 'mr.': 'mister', 'bro.': 'brother', 'bro': 'brother', 'mrs.': 'mistress', 'ms.': 'miss', 'jr.': 'junior', 'sr.': 'senior',
'i.e.': 'for example', 'e.g.': 'for example', 'vs.': 'versus'}
terminators = ['.', '!', '?']
wrappers = ['"', "'", ')', ']', '}']
def find_sentences(paragraph):
end = True
sentences = []
while end > -1:
end = find_sentence_end(paragraph)
if end > -1:
sentences.append(paragraph[end:].strip())
paragraph = paragraph[:end]
sentences.append(paragraph)
sentences.reverse()
return sentences
def find_sentence_end(paragraph):
[possible_endings, contraction_locations] = [[], []]
contractions = abbreviations.keys()
sentence_terminators = terminators + [terminator + wrapper for wrapper in wrappers for terminator in terminators]
for sentence_terminator in sentence_terminators:
t_indices = list(find_all(paragraph, sentence_terminator))
possible_endings.extend(([] if not len(t_indices) else [[i, len(sentence_terminator)] for i in t_indices]))
for contraction in contractions:
c_indices = list(find_all(paragraph, contraction))
contraction_locations.extend(([] if not len(c_indices) else [i + len(contraction) for i in c_indices]))
possible_endings = [pe for pe in possible_endings if pe[0] + pe[1] not in contraction_locations]
if len(paragraph) in [pe[0] + pe[1] for pe in possible_endings]:
max_end_start = max([pe[0] for pe in possible_endings])
possible_endings = [pe for pe in possible_endings if pe[0] != max_end_start]
possible_endings = [pe[0] + pe[1] for pe in possible_endings if sum(pe) > len(paragraph) or (sum(pe) < len(paragraph) and paragraph[sum(pe)] == ' ')]
end = (-1 if not len(possible_endings) else max(possible_endings))
return end
def find_all(a_str, sub):
start = 0
while True:
start = a_str.find(sub, start)
if start == -1:
return
yield start
start += len(sub) | """
Do Not Edit the code here unless you know
what you are doing, This is a simple tokenizer
that can form tokens of whole sentences
thereby removing any need for NLTK
Scipy or any other 3rd party Library for
tokenization purposes.
- Moses
"""
abbreviations = {'dr.': 'doctor', 'mr.': 'mister', 'bro.': 'brother', 'bro': 'brother', 'mrs.': 'mistress', 'ms.': 'miss', 'jr.': 'junior', 'sr.': 'senior', 'i.e.': 'for example', 'e.g.': 'for example', 'vs.': 'versus'}
terminators = ['.', '!', '?']
wrappers = ['"', "'", ')', ']', '}']
def find_sentences(paragraph):
end = True
sentences = []
while end > -1:
end = find_sentence_end(paragraph)
if end > -1:
sentences.append(paragraph[end:].strip())
paragraph = paragraph[:end]
sentences.append(paragraph)
sentences.reverse()
return sentences
def find_sentence_end(paragraph):
[possible_endings, contraction_locations] = [[], []]
contractions = abbreviations.keys()
sentence_terminators = terminators + [terminator + wrapper for wrapper in wrappers for terminator in terminators]
for sentence_terminator in sentence_terminators:
t_indices = list(find_all(paragraph, sentence_terminator))
possible_endings.extend([] if not len(t_indices) else [[i, len(sentence_terminator)] for i in t_indices])
for contraction in contractions:
c_indices = list(find_all(paragraph, contraction))
contraction_locations.extend([] if not len(c_indices) else [i + len(contraction) for i in c_indices])
possible_endings = [pe for pe in possible_endings if pe[0] + pe[1] not in contraction_locations]
if len(paragraph) in [pe[0] + pe[1] for pe in possible_endings]:
max_end_start = max([pe[0] for pe in possible_endings])
possible_endings = [pe for pe in possible_endings if pe[0] != max_end_start]
possible_endings = [pe[0] + pe[1] for pe in possible_endings if sum(pe) > len(paragraph) or (sum(pe) < len(paragraph) and paragraph[sum(pe)] == ' ')]
end = -1 if not len(possible_endings) else max(possible_endings)
return end
def find_all(a_str, sub):
start = 0
while True:
start = a_str.find(sub, start)
if start == -1:
return
yield start
start += len(sub) |
try:
num = int(input('Enter a number: '))
except Exception:
print('Some input error')
def convert_to_binary(num):
if num > 1:
convert_to_binary(num // 2)
print(num % 2, end = '')
print('Binary: ', end = '')
convert_to_binary(num) | try:
num = int(input('Enter a number: '))
except Exception:
print('Some input error')
def convert_to_binary(num):
if num > 1:
convert_to_binary(num // 2)
print(num % 2, end='')
print('Binary: ', end='')
convert_to_binary(num) |
# from .disalexi import Image
# from .landsat import Landsat
__version__ = "0.0.3"
| __version__ = '0.0.3' |
# Definir excepciones en Python
class Err(Exception):
def __init__(self,valor):
print("Fue el error por",valor)
try:
raise Err(4)
except Err:
print("Error escrito:")
| class Err(Exception):
def __init__(self, valor):
print('Fue el error por', valor)
try:
raise err(4)
except Err:
print('Error escrito:') |
# Define the class as author
class Author:
# The function is in it and the __ is a special function in python. properties in the brackets are what
# is being passed in the function
def __init__(self, name, firstName, nationality):
# Define the attributes in the class
self.name = name
self.firstName = firstName
self.nationality = nationality
| class Author:
def __init__(self, name, firstName, nationality):
self.name = name
self.firstName = firstName
self.nationality = nationality |
db = {
"users": [
{
"id": 2,
"username": "marceline",
"name": "Marceline Abadeer",
"bio": "1000 year old vampire queen, musician"
}
],
"threads": [
{
"id": 2,
"title": "What's up with the Lich?",
"createdBy": 2
}
],
"posts": [
{
"thread": 2,
"text": "Has anyone checked on the lich recently?",
"user": 2
}
]
}
db_more = {
"users": [
{
"id": 1,
"username": "marceline",
"name": "Marceline Abadeer",
"bio": "1000 year old vampire queen, musician"
},
{
"id": 2,
"username": "finn",
"name": "Finn 'the Human' Mertens",
"bio": "Adventurer and hero, last human, defender of good"
},
{
"id": 3,
"username": "pb",
"name": "Bonnibel Bubblegum",
"bio": "Scientist, bearer of candy power, ruler of the candy kingdom"
}
],
"threads": [
{
"id": 1,
"title": "What's up with the Lich?",
"createdBy": 4
},
{
"id": 2,
"title": "Party at the candy kingdom tomorrow",
"createdBy": 3
},
{
"id": 3,
"title": "In search of a new guitar",
"createdBy": 1
}
],
"posts": [
{
"thread": 1,
"text": "Has anyone checked on the lich recently?",
"user": 4
},
{
"thread": 1,
"text": "I'll stop by and see how he's doing tomorrow!",
"user": 2
},
{
"thread": 2,
"text": "Come party with the candy people tomorrow!",
"user": 3
}
]
}
| db = {'users': [{'id': 2, 'username': 'marceline', 'name': 'Marceline Abadeer', 'bio': '1000 year old vampire queen, musician'}], 'threads': [{'id': 2, 'title': "What's up with the Lich?", 'createdBy': 2}], 'posts': [{'thread': 2, 'text': 'Has anyone checked on the lich recently?', 'user': 2}]}
db_more = {'users': [{'id': 1, 'username': 'marceline', 'name': 'Marceline Abadeer', 'bio': '1000 year old vampire queen, musician'}, {'id': 2, 'username': 'finn', 'name': "Finn 'the Human' Mertens", 'bio': 'Adventurer and hero, last human, defender of good'}, {'id': 3, 'username': 'pb', 'name': 'Bonnibel Bubblegum', 'bio': 'Scientist, bearer of candy power, ruler of the candy kingdom'}], 'threads': [{'id': 1, 'title': "What's up with the Lich?", 'createdBy': 4}, {'id': 2, 'title': 'Party at the candy kingdom tomorrow', 'createdBy': 3}, {'id': 3, 'title': 'In search of a new guitar', 'createdBy': 1}], 'posts': [{'thread': 1, 'text': 'Has anyone checked on the lich recently?', 'user': 4}, {'thread': 1, 'text': "I'll stop by and see how he's doing tomorrow!", 'user': 2}, {'thread': 2, 'text': 'Come party with the candy people tomorrow!', 'user': 3}]} |
class LogEntry(dict):
"""
Log message and info for jobs and services
Fields:
- ``id``: Unique ID for the log, string, REQUIRED
- ``code``: Error code, string, optional
- ``level``: Severity level, string (error, warning, info or debug), REQUIRED
- ``message``: Error message, string, REQUIRED
- ``time``: Date and time of the error event as RFC3339 date-time, string, available since API 1.1.0
- ``path``: A "stack trace" for the process, array of dicts
- ``links``: Related links, array of dicts
- ``usage``: Usage metrics available as property 'usage', dict, available since API 1.1.0
May contain the following metrics: cpu, memory, duration, network, disk, storage and other custom ones
Each of the metrics is also a dict with the following parts: value (numeric) and unit (string)
- ``data``: Arbitrary data the user wants to "log" for debugging purposes.
Please note that this property may not exist as there's a difference
between None and non-existing. None for example refers to no-data in
many cases while the absence of the property means that the user did
not provide any data for debugging.
"""
_required = {"id", "level", "message"}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Check required fields
missing = self._required.difference(self.keys())
if missing:
raise ValueError("Missing required fields: {m}".format(m=sorted(missing)))
@property
def id(self):
return self["id"]
# Legacy alias
log_id = id
@property
def message(self):
return self["message"]
@property
def level(self):
return self["level"]
# TODO: add properties for "code", "time", "path", "links" and "data" with sensible defaults?
| class Logentry(dict):
"""
Log message and info for jobs and services
Fields:
- ``id``: Unique ID for the log, string, REQUIRED
- ``code``: Error code, string, optional
- ``level``: Severity level, string (error, warning, info or debug), REQUIRED
- ``message``: Error message, string, REQUIRED
- ``time``: Date and time of the error event as RFC3339 date-time, string, available since API 1.1.0
- ``path``: A "stack trace" for the process, array of dicts
- ``links``: Related links, array of dicts
- ``usage``: Usage metrics available as property 'usage', dict, available since API 1.1.0
May contain the following metrics: cpu, memory, duration, network, disk, storage and other custom ones
Each of the metrics is also a dict with the following parts: value (numeric) and unit (string)
- ``data``: Arbitrary data the user wants to "log" for debugging purposes.
Please note that this property may not exist as there's a difference
between None and non-existing. None for example refers to no-data in
many cases while the absence of the property means that the user did
not provide any data for debugging.
"""
_required = {'id', 'level', 'message'}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
missing = self._required.difference(self.keys())
if missing:
raise value_error('Missing required fields: {m}'.format(m=sorted(missing)))
@property
def id(self):
return self['id']
log_id = id
@property
def message(self):
return self['message']
@property
def level(self):
return self['level'] |
"""
from django import forms
from .models import Item
class AddForm(forms.ModelForm):
class Meta:
model = Item
fields = ('created_by',
'title', 'image', 'description', 'price', 'pieces', 'instructions', 'labels', 'label_colour', 'slug')
"""
| """
from django import forms
from .models import Item
class AddForm(forms.ModelForm):
class Meta:
model = Item
fields = ('created_by',
'title', 'image', 'description', 'price', 'pieces', 'instructions', 'labels', 'label_colour', 'slug')
""" |
# "Strange Number"
# Alec Dewulf
# April Long 2020
# Difficulty: Easy
# Prerequisities: Number Theory
"""
OVERVIEW
This solution is based on the idea that the number of prime factors (k) must be less
than the total prime factors (max_k). The number of factors can be calculated by
adding one to each exponent in a number's prime factorization and then multiplying
those exponents.
HELPER FUNCTIONS
Is_prime returns True if none of the numbers less than the square root of n
divide it and False otherwise.
get_prime factors takes in n and k and returns the length of the list of n's prime
factorization.
Example:
get_prime_factors(100, 2)
returns: 4 --> len([2, 2, 5, 5])
k is used as a limit. If at any time the length of prime factors reaches k, then that k is
valid and there is no need to collect more of the prime factors.
MAIN SOLUTION
After getting inputs, max_k is assigned to the value of the function call get_prime_factors(x, k).
This is the max value k can take on because there need to be at least k things that multiply to be x.
The most amount of things you can multiply to get x is all the numbers of its prime factorization.
Example:
x = 100
k = 4
100 = 5x5x2x2
Therefore you can have a k up to four and have 100 work as a value of x.
For example the number: 2^4 x 3^4 x 5^1 x 7^1 will have 100 factors
((4+1)*(4+1)*(1+1)*(1+1)) and 4 prime factors (2, 3, 5, 7).
No greater amount of prime factors are possible because there are no five
things that multiply to be 100 (You can't use 1 or break down the prime
factorization any more)
"""
def is_prime(n):
if n < 2:
return False
return all(n % i for i in range(2, int(math.sqrt(n)) + 1))
def get_prime_factors(n, k):
prime_factors = []
d = 2
while d * d <= n:
while n % d == 0:
n //= d
prime_factors.append(d)
if len(prime_factors) == k:
return k
d += 1
# avoid 1 as a factor
if n > 1:
assert d <= n
prime_factors.append(n)
if len(prime_factors) == k:
return k
return len(prime_factors)
test_cases = int(input())
answers = []
for n in range(test_cases):
x, k = map(int, input().split())
max_k = get_prime_factors(x, k)
# no number with 1 factor
if x == 1:
answers.append(0)
elif k <= max_k:
answers.append(1)
else:
answers.append(0)
# return results
for a in answers:
print(a)
| """
OVERVIEW
This solution is based on the idea that the number of prime factors (k) must be less
than the total prime factors (max_k). The number of factors can be calculated by
adding one to each exponent in a number's prime factorization and then multiplying
those exponents.
HELPER FUNCTIONS
Is_prime returns True if none of the numbers less than the square root of n
divide it and False otherwise.
get_prime factors takes in n and k and returns the length of the list of n's prime
factorization.
Example:
get_prime_factors(100, 2)
returns: 4 --> len([2, 2, 5, 5])
k is used as a limit. If at any time the length of prime factors reaches k, then that k is
valid and there is no need to collect more of the prime factors.
MAIN SOLUTION
After getting inputs, max_k is assigned to the value of the function call get_prime_factors(x, k).
This is the max value k can take on because there need to be at least k things that multiply to be x.
The most amount of things you can multiply to get x is all the numbers of its prime factorization.
Example:
x = 100
k = 4
100 = 5x5x2x2
Therefore you can have a k up to four and have 100 work as a value of x.
For example the number: 2^4 x 3^4 x 5^1 x 7^1 will have 100 factors
((4+1)*(4+1)*(1+1)*(1+1)) and 4 prime factors (2, 3, 5, 7).
No greater amount of prime factors are possible because there are no five
things that multiply to be 100 (You can't use 1 or break down the prime
factorization any more)
"""
def is_prime(n):
if n < 2:
return False
return all((n % i for i in range(2, int(math.sqrt(n)) + 1)))
def get_prime_factors(n, k):
prime_factors = []
d = 2
while d * d <= n:
while n % d == 0:
n //= d
prime_factors.append(d)
if len(prime_factors) == k:
return k
d += 1
if n > 1:
assert d <= n
prime_factors.append(n)
if len(prime_factors) == k:
return k
return len(prime_factors)
test_cases = int(input())
answers = []
for n in range(test_cases):
(x, k) = map(int, input().split())
max_k = get_prime_factors(x, k)
if x == 1:
answers.append(0)
elif k <= max_k:
answers.append(1)
else:
answers.append(0)
for a in answers:
print(a) |
class Node:
def __init__(self, data=None):
self.val = data
self.next = None
class LinkedList:
def __init__(self):
self.head=None
def push(self,val):
new_node=Node(val)
#case 1
if self.head is None:
self.head=new_node
self.head.next=None
return
temp=self.head
while temp.next is not None:
temp=temp.next
temp.next=new_node
new_node.next=None
LinkedList.push=push
def __str__(self):
re_str="["
temp=self.head
while temp is not None:
re_str+=" "+str(temp.val) + " ,"
temp=temp.next
re_str=re_str.rstrip(",")
re_str+="]"
return re_str
LinkedList.__str__=__str__
def pop(self):
#case 1
if self.head is None:
raise IndexError("list cannot be pop, : because list is empty")
#case 2
if self.head.next is None:
val=self.head.val
self.head=None
return val
temp=self.head
while temp.next is not None:
pre=temp
temp=temp.next
val=temp.val
pre.next=None
return val
LinkedList.pop=pop
def insert(self,index,val):
new_node=Node(val)
if index==0:
new_node.next=self.head
self.head=new_node
return
count=0
temp=self.head
while temp is not None and count<index:
pre=temp
temp=temp.next
count+=1
pre.next=new_node
new_node.next=temp
LinkedList.insert=insert
def remove_at(self,index):
if index>self.len():
raise IndexError("list index out of Range ")
if index==0:
self.head=self.head.next
return
if self.head is None:
raise IndexError("Cannot be remove because list is empty")
count=0
temp=self.head
# remove funtion must be temp not the temp.next remember!!!!
while temp is not None and count<index:
pre=temp
temp=temp.next
count+=1
pre.next=temp.next
LinkedList.remove_at=remove_at
def len(self):
if self.head is None:
return 0
temp=self.head
count=0
while temp is not None:
temp=temp.next
count+=1
return count
LinkedList.len=len
def remove(self,val):
if self.head is None:
raise IndexError(" Cannot be removed becaus list is empty ")
if self.head.val ==val:
self.head=self.head.next
return
if self.head.next is None:
if self.head.val==val:
self.head=None
return
temp=self.head
while temp.next is not None:
pre=temp
temp=temp.next
if temp.val==val:
break
else:
return
pre.next=temp.next
return
LinkedList.remove=remove
def reverse_list(self):
pre = None
current = self.head
while current is not None:
next = current.next
current.next = pre
pre = current
current = next
self.head = pre
LinkedList.reverse_list=reverse_list
if __name__ == '__main__':
l = LinkedList()
l.push(1)
l.push(2)
l.push(3)
print(l)
l.reverse_list()
print(l)
| class Node:
def __init__(self, data=None):
self.val = data
self.next = None
class Linkedlist:
def __init__(self):
self.head = None
def push(self, val):
new_node = node(val)
if self.head is None:
self.head = new_node
self.head.next = None
return
temp = self.head
while temp.next is not None:
temp = temp.next
temp.next = new_node
new_node.next = None
LinkedList.push = push
def __str__(self):
re_str = '['
temp = self.head
while temp is not None:
re_str += ' ' + str(temp.val) + ' ,'
temp = temp.next
re_str = re_str.rstrip(',')
re_str += ']'
return re_str
LinkedList.__str__ = __str__
def pop(self):
if self.head is None:
raise index_error('list cannot be pop, : because list is empty')
if self.head.next is None:
val = self.head.val
self.head = None
return val
temp = self.head
while temp.next is not None:
pre = temp
temp = temp.next
val = temp.val
pre.next = None
return val
LinkedList.pop = pop
def insert(self, index, val):
new_node = node(val)
if index == 0:
new_node.next = self.head
self.head = new_node
return
count = 0
temp = self.head
while temp is not None and count < index:
pre = temp
temp = temp.next
count += 1
pre.next = new_node
new_node.next = temp
LinkedList.insert = insert
def remove_at(self, index):
if index > self.len():
raise index_error('list index out of Range ')
if index == 0:
self.head = self.head.next
return
if self.head is None:
raise index_error('Cannot be remove because list is empty')
count = 0
temp = self.head
while temp is not None and count < index:
pre = temp
temp = temp.next
count += 1
pre.next = temp.next
LinkedList.remove_at = remove_at
def len(self):
if self.head is None:
return 0
temp = self.head
count = 0
while temp is not None:
temp = temp.next
count += 1
return count
LinkedList.len = len
def remove(self, val):
if self.head is None:
raise index_error(' Cannot be removed becaus list is empty ')
if self.head.val == val:
self.head = self.head.next
return
if self.head.next is None:
if self.head.val == val:
self.head = None
return
temp = self.head
while temp.next is not None:
pre = temp
temp = temp.next
if temp.val == val:
break
else:
return
pre.next = temp.next
return
LinkedList.remove = remove
def reverse_list(self):
pre = None
current = self.head
while current is not None:
next = current.next
current.next = pre
pre = current
current = next
self.head = pre
LinkedList.reverse_list = reverse_list
if __name__ == '__main__':
l = linked_list()
l.push(1)
l.push(2)
l.push(3)
print(l)
l.reverse_list()
print(l) |
def crossingSum(matrix, a, b):
return sum(matrix[a]) + sum([x[b] for i, x in enumerate(matrix) if i != a])
if __name__ == '__main__':
input0 = [[[1,1,1,1], [2,2,2,2], [3,3,3,3]], [[1,1], [1,1]], [[1,1], [3,3], [1,1], [2,2]], [[100]], [[1,2], [3,4]], [[1,2,3,4]], [[1,2,3,4,5], [1,2,2,2,2], [1,2,2,2,2], [1,2,2,2,2], [1,2,2,2,2], [1,2,2,2,2], [1,2,2,2,2]]]
input1 = [1, 0, 3, 0, 1, 0, 1]
input2 = [3, 0, 0, 0, 1, 3, 1]
expectedOutput = [12, 3, 9, 100, 9, 10, 21]
assert len(input0) == len(expectedOutput), '# input0 = {}, # expectedOutput = {}'.format(len(input0), len(expectedOutput))
assert len(input1) == len(expectedOutput), '# input1 = {}, # expectedOutput = {}'.format(len(input1), len(expectedOutput))
assert len(input2) == len(expectedOutput), '# input2 = {}, # expectedOutput = {}'.format(len(input2), len(expectedOutput))
for i, expected in enumerate(expectedOutput):
actual = crossingSum(input0[i], input1[i], input2[i])
assert actual == expected, 'crossingSum({}, {}, {}) returned {}, but expected {}'.format(input0[i], input1[i], input2[i], actual, expected)
print('PASSES {} out of {} tests'.format(len(expectedOutput), len(expectedOutput))) | def crossing_sum(matrix, a, b):
return sum(matrix[a]) + sum([x[b] for (i, x) in enumerate(matrix) if i != a])
if __name__ == '__main__':
input0 = [[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]], [[1, 1], [1, 1]], [[1, 1], [3, 3], [1, 1], [2, 2]], [[100]], [[1, 2], [3, 4]], [[1, 2, 3, 4]], [[1, 2, 3, 4, 5], [1, 2, 2, 2, 2], [1, 2, 2, 2, 2], [1, 2, 2, 2, 2], [1, 2, 2, 2, 2], [1, 2, 2, 2, 2], [1, 2, 2, 2, 2]]]
input1 = [1, 0, 3, 0, 1, 0, 1]
input2 = [3, 0, 0, 0, 1, 3, 1]
expected_output = [12, 3, 9, 100, 9, 10, 21]
assert len(input0) == len(expectedOutput), '# input0 = {}, # expectedOutput = {}'.format(len(input0), len(expectedOutput))
assert len(input1) == len(expectedOutput), '# input1 = {}, # expectedOutput = {}'.format(len(input1), len(expectedOutput))
assert len(input2) == len(expectedOutput), '# input2 = {}, # expectedOutput = {}'.format(len(input2), len(expectedOutput))
for (i, expected) in enumerate(expectedOutput):
actual = crossing_sum(input0[i], input1[i], input2[i])
assert actual == expected, 'crossingSum({}, {}, {}) returned {}, but expected {}'.format(input0[i], input1[i], input2[i], actual, expected)
print('PASSES {} out of {} tests'.format(len(expectedOutput), len(expectedOutput))) |
r,x,y,z=open("ads\\1Plumber\\Ad.txt").read().split("\n", 3)
print(y)
| (r, x, y, z) = open('ads\\1Plumber\\Ad.txt').read().split('\n', 3)
print(y) |
ENTRY_POINT = 'circular_shift'
#[PROMPT]
def circular_shift(x, shift):
"""Circular shift the digits of the integer x, shift the digits right by shift
and return the result as a string.
If shift > number of digits, return digits reversed.
>>> circular_shift(12, 1)
"21"
>>> circular_shift(12, 2)
"12"
"""
#[SOLUTION]
s = str(x)
if shift > len(s):
return s[::-1]
else:
return s[len(s) - shift:] + s[:len(s) - shift]
#[CHECK]
def check(candidate):
# Check some simple cases
assert candidate(100, 2) == "001"
assert candidate(12, 2) == "12"
assert candidate(97, 8) == "79"
assert candidate(12, 1) == "21", "This prints if this assert fails 1 (good for debugging!)"
# Check some edge cases that are easy to work out by hand.
assert candidate(11, 101) == "11", "This prints if this assert fails 2 (also good for debugging!)"
| entry_point = 'circular_shift'
def circular_shift(x, shift):
"""Circular shift the digits of the integer x, shift the digits right by shift
and return the result as a string.
If shift > number of digits, return digits reversed.
>>> circular_shift(12, 1)
"21"
>>> circular_shift(12, 2)
"12"
"""
s = str(x)
if shift > len(s):
return s[::-1]
else:
return s[len(s) - shift:] + s[:len(s) - shift]
def check(candidate):
assert candidate(100, 2) == '001'
assert candidate(12, 2) == '12'
assert candidate(97, 8) == '79'
assert candidate(12, 1) == '21', 'This prints if this assert fails 1 (good for debugging!)'
assert candidate(11, 101) == '11', 'This prints if this assert fails 2 (also good for debugging!)' |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.