hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b9673ca75497919d0aa53e58bccaac8afdb626a7 | 3,233 | py | Python | acquire-glonass-l3ocd.py | wumouyan/GNSS-SDR-Python | 61292c2ba151724538808663e2a6d0b048635401 | [
"MIT"
] | 68 | 2015-06-23T17:30:06.000Z | 2022-03-29T22:06:54.000Z | acquire-glonass-l3ocd.py | wumouyan/GNSS-SDR-Python | 61292c2ba151724538808663e2a6d0b048635401 | [
"MIT"
] | 4 | 2018-03-01T05:14:36.000Z | 2021-12-05T11:07:39.000Z | acquire-glonass-l3ocd.py | wumouyan/GNSS-SDR-Python | 61292c2ba151724538808663e2a6d0b048635401 | [
"MIT"
] | 43 | 2015-06-26T10:27:05.000Z | 2022-03-30T02:47:09.000Z | #!/usr/bin/env python
import optparse
import numpy as np
import scipy.signal
import scipy.fftpack as fft
import gnsstools.glonass.l3ocd as l3ocd
import gnsstools.nco as nco
import gnsstools.io as io
import gnsstools.util as util
#
# Acquisition search
#
def search(x,prn,doppler_search,ms):
    """
    Acquisition search for one GLONASS L3OCd PRN.

    Correlates the input samples against the local L3-I code replica over
    a grid of Doppler bins, summing ``ms`` one-millisecond correlation
    magnitudes non-coherently, and returns the best cell found.

    x              -- complex baseband samples at 3 x 10.23 MHz
    prn            -- satellite PRN to search
    doppler_search -- (min, max, increment) of the Doppler grid in Hz
    ms             -- number of 1 ms blocks to sum non-coherently

    Returns (metric, code_offset_in_chips, doppler_hz).
    """
    fs = 3*10230000.0               # working sample rate
    n = 3*10230                     # samples per 1 ms coherent integration
    dmin, dmax, dstep = doppler_search
    # FFT of the zero-padded local code replica (computed once per PRN)
    chip_incr = float(l3ocd.code_length)/n
    replica = l3ocd.code(prn, 0, 0, chip_incr, n)
    code_fft = fft.fft(np.concatenate((replica, np.zeros(n))))
    best_metric = 0
    best_code = 0
    best_doppler = 0
    for doppler in np.arange(dmin, dmax, dstep):    # doppler bins
        carrier = nco.nco(-doppler/fs, 0, 2*n)      # wipe-off tone
        acc = np.zeros(2*n)
        for blk in range(ms):                       # incoherent sums
            seg = x[blk*n:(blk + 2)*n] * carrier
            corr = fft.ifft(code_fft*np.conj(fft.fft(seg)))
            acc = acc + np.absolute(corr)
        peak = np.argmax(acc)
        if acc[peak] > best_metric:
            best_metric = acc[peak]
            best_code = l3ocd.code_length*(float(peak)/n)
            best_doppler = doppler
    return best_metric, best_code % l3ocd.code_length, best_doppler
#
# main program
#
# Fix: the usage line previously named the wrong program
# ("acquire-gps-l3ocd.py"); this is the GLONASS acquisition tool.
parser = optparse.OptionParser(usage="""acquire-glonass-l3ocd.py [options] input_filename sample_rate carrier_offset
Acquire GLONASS L3OCd signals
Examples:
Acquire all GLONASS channels using standard input with sample rate 69.984 MHz and carrier offset 10.383375 MHz:
acquire-glonass-l3ocd.py /dev/stdin 69984000 10383375
Arguments:
input_filename input data file, i/q interleaved, 8 bit signed
sample_rate sampling rate in Hz
carrier_offset offset to L3 carrier in Hz (positive or negative)""")
parser.disable_interspersed_args()
parser.add_option("--prn", default="0-63", help="PRNs to search, e.g. 1,3-8,30 (default %default)")
parser.add_option("--doppler-search", metavar="MIN,MAX,INCR", default="-7000,7000,200", help="Doppler search grid: min,max,increment (default %default)")
parser.add_option("--time", type="int", default=80, help="integration time in milliseconds (default %default)")
(options, args) = parser.parse_args()

filename = args[0]
fs = float(args[1])                 # input sample rate in Hz
coffset = float(args[2])            # L3 carrier offset within the capture
prns = util.parse_list_ranges(options.prn)
doppler_search = util.parse_list_floats(options.doppler_search)
ms = options.time

# read first portion of file; pad by 5 ms so the last incoherent block can
# still extend 2 ms beyond the nominal integration interval
ms_pad = ms + 5
n = int(fs*0.001*ms_pad)
# close the input file as soon as the samples are read
with open(filename, "rb") as fp:
    x = io.get_samples_complex(fp, n)

# mix the carrier to baseband, low-pass filter, then resample to
# 3*10.230 MHz (three samples per chip)
fsr = 3*10230000.0/fs
nco.mix(x, -coffset/fs, 0)
# 'hann' is the canonical SciPy window name; the old alias 'hanning'
# has been removed from recent SciPy releases
h = scipy.signal.firwin(161, 12e6/(fs/2), window='hann')
x = scipy.signal.filtfilt(h, [1], x)
xr = np.interp((1/fsr)*np.arange(ms_pad*3*10230), np.arange(len(x)), np.real(x))
xi = np.interp((1/fsr)*np.arange(ms_pad*3*10230), np.arange(len(x)), np.imag(x))
x = xr + (1j)*xi

# iterate (in parallel) over PRNs of interest
def worker(p):
    """Search one PRN; return a formatted result line."""
    x, prn = p
    metric, code, doppler = search(x, prn, doppler_search, ms)
    return 'prn %2d doppler % 7.1f metric % 7.1f code_offset %6.1f' % (prn, doppler, metric, code)

import multiprocessing as mp
cpus = mp.cpu_count()
results = mp.Pool(cpus).map(worker, map(lambda prn: (x, prn), prns))

for r in results:
    print(r)
| 30.790476 | 153 | 0.689143 |
c71520861da923e2ff8122c4645232633bcf8e6a | 97 | py | Python | training_codes/biophys2lifmodel_lr/run_lr2_g8_8_test500ms_inh_lif_syn_z109.py | zqwei/LIF_Vis_model | 16f651ac827ba5f0feb40a0e619e600f1251d009 | [
"MIT"
] | null | null | null | training_codes/biophys2lifmodel_lr/run_lr2_g8_8_test500ms_inh_lif_syn_z109.py | zqwei/LIF_Vis_model | 16f651ac827ba5f0feb40a0e619e600f1251d009 | [
"MIT"
] | null | null | null | training_codes/biophys2lifmodel_lr/run_lr2_g8_8_test500ms_inh_lif_syn_z109.py | zqwei/LIF_Vis_model | 16f651ac827ba5f0feb40a0e619e600f1251d009 | [
"MIT"
] | null | null | null | import start0 as start
start.run_simulation('config_lr2_g8_8_test500ms_inh_lif_syn_z109.json')
| 19.4 | 71 | 0.865979 |
27e2caa1b8ee0dd5b60546d0a397dc6811d015dc | 41,519 | py | Python | controllers/building.py | nursix/DRKCM | 09328289ff721c416494398aa751ff99906327cb | [
"MIT"
] | 3 | 2022-01-26T08:07:54.000Z | 2022-03-21T21:53:52.000Z | controllers/building.py | nursix/eden-asp | e49f46cb6488918f8d5a163dcd5a900cd686978c | [
"MIT"
] | null | null | null | controllers/building.py | nursix/eden-asp | e49f46cb6488918f8d5a163dcd5a900cd686978c | [
"MIT"
] | null | null | null | """
Buildings Assessments module
Data model from:
http://www.atcouncil.org/products/downloadable-products/placards
Postearthquake Safety Evaluation of Buildings: ATC-20
http://www.atcouncil.org/pdfs/rapid.pdf
This is actually based on the New Zealand variant:
http://eden.sahanafoundation.org/wiki/BluePrintBuildingAssessments
@ToDo: Port forms to Survey module & deprecate as much as possible of this
module (which might be all)
@ToDo: Hide fields for triage form server side
- once print comes from controller then it will also skip these fields
- less to download to browser (more scalable)
@ToDo: add other forms (ATC-38, ATC-45)
"""
module = request.controller
if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)

# -----------------------------------------------------------------------------
# Define the Model
# @ToDo: Move to modules/s3db/building.py
#        - here it isn't visible to s3db.load_all_models() or Sync
# -----------------------------------------------------------------------------
from gluon.sql import SQLCustomType

# Shortcuts to commonly-used foreign-key field constructors
person_id = s3db.pr_person_id
location_id = s3db.gis_location_id
organisation_id = s3db.org_organisation_id

s3_datetime_format = settings.get_L10n_datetime_format()

# Options
# Extent of the inspection (shared by both assessment forms)
building_area_inspected = {
    1: T("Exterior and Interior"),
    2: T("Exterior Only")
}

# Construction types per the NZSEE/ATC-20 rapid evaluation forms
building_construction_types = {
    1: T("Timber frame"), # Wood frame
    2: T("Steel frame"),
    3: T("Tilt-up concrete"),
    4: T("Concrete frame"),
    5: T("Concrete shear wall"),
    6: T("Unreinforced masonry"),
    7: T("Reinforced masonry"),
    8: T("RC frame with masonry infill"),
    99: T("Other")
}

building_primary_occupancy_opts = {
    1: T("Dwelling"),
    2: T("Other residential"),
    3: T("Public assembly"),
    4: T("School"),
    5: T("Religious"),
    6: T("Commercial/Offices"),
    7: T("Industrial"),
    8: T("Government"),
    9: T("Heritage Listed"), # Historic
    99: T("Other")
}

# Severity scale shared by all hazard/damage condition fields
building_evaluation_condition = {
    1: T("Minor/None"),
    2: T("Moderate"),
    3: T("Severe")
}

# Overall damage estimate bands, and the placard images used to render them
building_estimated_damage = {
    1: T("None"),
    2: "0-1%",
    3: "1-10%",
    4: "10-30%",
    5: "30-60%",
    6: "60-100%",
    7: "100%"
}

building_estimated_damage_image = {
    1: "tic.png",
    2: "1percent.png",
    3: "10percent.png",
    4: "10-30percent.png",
    5: "30-60percent.png",
    6: "60-100percent.png",
    7: "cross.png",
}

# Level 1 posting (placard) options: one colour per building
building_posting_l1_opts = {
    1: "%s (%s)" % (T("Inspected"), T("Green")),
    2: "%s (%s)" % (T("Restricted Use"), T("Yellow")),
    3: "%s (%s)" % (T("Unsafe"), T("Red")),
}

# Level 2 posting options: colour plus sub-grade (G1-G2, Y1-Y2, R1-R3)
building_posting_l2_opts = {
    1: "%s (%s): G1" % (T("Inspected"), T("Green")),
    2: "%s (%s): G2" % (T("Inspected"), T("Green")),
    3: "%s (%s): Y1" % (T("Restricted Use"), T("Yellow")),
    4: "%s (%s): Y2" % (T("Restricted Use"), T("Yellow")),
    5: "%s (%s): R1" % (T("Unsafe"), T("Red")),
    6: "%s (%s): R2" % (T("Unsafe"), T("Red")),
    7: "%s (%s): R3" % (T("Unsafe"), T("Red")),
}
def uuid8anum():
    """
    Produce a short pseudo-random ticket ID of the form XXXX-XXXX,
    where each half is four lowercase hex digits drawn from a UUID4.
    """
    import uuid
    left = str(uuid.uuid4())[0:4]
    right = str(uuid.uuid4())[4:8]
    return "-".join((left, right))
# Custom DB column type for 8-character ticket IDs (XXXX-XXXX): the encoder
# fills in a fresh random ID when the value is empty, and SQL-escapes
# (doubles single quotes in) any non-empty value
s3uuid_8char = SQLCustomType(type = "string",
                             native = "VARCHAR(64)",
                             encoder = (lambda x: "'%s'" % (uuid8anum() if x == "" else str(x).replace("'", "''"))),
                             decoder = (lambda x: x))
# NZSEE Level 1 (~ATC-20 Rapid Evaluation) Safety Assessment Form ---------
# Table mirrors the NZSEE Level 1 paper form: building identification,
# hazard/damage conditions, posting (placard) decision and recommendations
resourcename = "nzseel1"
tablename = "%s_%s" % (module, resourcename)
db.define_table(tablename,
                Field("ticket_id",
                      type=s3uuid_8char,
                      length=64,
                      notnull=True,
                      unique=True,
                      writable=False,
                      default=uuid8anum(),
                      label = T("Ticket ID"),
                      represent = lambda id: id and id.upper() or T("None")
                      ),
                person_id(label=T("Inspector ID"), empty=False), # pre-populated in Controller
                organisation_id(label=T("Territorial Authority")), # Affiliation in ATC20 terminology
                Field("date", "datetime", default=request.now,
                      requires=IS_DATETIME(format=s3_datetime_format),
                      label=T("Inspection date and time")),
                #Field("daytime", "time", label=T("Inspection time")),
                Field("area", "integer", label=T("Areas inspected"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_area_inspected)),
                      represent=lambda opt: \
                        building_area_inspected.get(opt, UNKNOWN_OPT)),
                #Field("name", label=T("Building Name"), requires=IS_NOT_EMPTY()), # Included in location_id
                location_id(empty=False),
                Field("name_short",
                      label=T("Building Short Name/Business Name")),
                Field("contact_name",
                      label=T("Contact Name"),
                      requires=IS_NOT_EMPTY()),
                Field("contact_phone", label=T("Contact Phone"),
                      requires=IS_NOT_EMPTY()),
                Field("stories_above", "integer",
                      label=T("Storeys at and above ground level")), # Number of stories above ground
                Field("stories_below", "integer",
                      label=T("Below ground level")), # Number of stories below ground
                Field("footprint", "integer",
                      label=T("Total gross floor area (square meters)")),
                Field("year_built", "integer",
                      label=T("Year built")),
                Field("residential_units", "integer",
                      label=T("Number of residential units")),
                #Field("residential_units_not_habitable", "integer",
                #      label=T("Number of residential units not habitable")),
                Field("photo", "boolean",
                      label=T("Photo Taken?"),
                      represent = s3_yes_no_represent),
                Field("construction_type", "integer",
                      label=T("Type of Construction"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_construction_types)),
                      represent=lambda opt: \
                        building_construction_types.get(opt, UNKNOWN_OPT)),
                Field("construction_type_other",
                      label="(%s)" % T("specify")),
                Field("primary_occupancy", "integer",
                      label=T("Primary Occupancy"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_primary_occupancy_opts)),
                      represent=lambda opt: building_primary_occupancy_opts.get(opt, UNKNOWN_OPT)),
                Field("primary_occupancy_other",
                      label="(%s)" % T("specify")),
                # Overall hazard/damage conditions, all on the shared
                # Minor/Moderate/Severe scale
                Field("collapse", "integer",
                      label=T("Collapse, partial collapse, off foundation"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("leaning", "integer",
                      label=T("Building or storey leaning"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("structural", "integer",
                      label=T("Wall or other structural damage"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("falling", "integer",
                      label=T("Overhead falling hazard"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("slips", "integer",
                      label=T("Ground movement, settlement, slips"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("neighbour", "integer",
                      label=T("Neighbouring building hazard"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("other", "integer", label=T("Other"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("other_details",
                      label="(%s)" % T("specify")),
                Field("action_comments", "text",
                      label=T("Comments")),
                # Posting decision (Green/Yellow/Red placard)
                Field("posting", "integer",
                      requires=IS_IN_SET(building_posting_l1_opts),
                      represent=lambda opt: \
                        building_posting_l1_opts.get(opt, UNKNOWN_OPT)),
                Field("restrictions", "text",
                      label=T("Record any restriction on use or entry")),
                #Field("posting_comments", "text", label=T("Comments")),
                Field("barricades", "boolean",
                      label=T("Barricades are needed"),
                      represent = s3_yes_no_represent),
                Field("barricades_details", "text",
                      label="(%s)" % T("state location")),
                Field("detailed_evaluation", "boolean",
                      label=T("Level 2 or detailed engineering evaluation recommended"),
                      represent = s3_yes_no_represent),
                Field("detailed_structural", "boolean",
                      label=T("Structural"),
                      represent = s3_yes_no_represent),
                Field("detailed_geotechnical", "boolean",
                      label=T("Geotechnical"),
                      represent = s3_yes_no_represent),
                Field("detailed_other", "boolean",
                      label=T("Other"),
                      represent = s3_yes_no_represent),
                Field("detailed_other_details",
                      label="(%s)" % T("specify")),
                Field("other_recommendations", "text",
                      label=T("Other recommendations")),
                Field("estimated_damage", "integer",
                      label=T("Estimated Overall Building Damage"),
                      comment="(%s)" % T("Exclude contents"),
                      requires=IS_IN_SET(building_estimated_damage),
                      represent=lambda opt: \
                        building_estimated_damage.get(opt, UNKNOWN_OPT)),
                *s3_meta_fields())
# CRUD strings
ADD_ASSESSMENT = T("Add Level 1 Assessment")
s3.crud_strings[tablename] = Storage(
    label_create = ADD_ASSESSMENT,
    title_display = T("Level 1 Assessment Details"),
    title_list = T("Level 1 Assessments"),
    title_update = T("Edit Level 1 Assessment"),
    label_list_button = T("List Level 1 Assessments"),
    label_delete_button = T("Delete Level 1 Assessment"),
    msg_record_created = T("Level 1 Assessment added"),
    msg_record_modified = T("Level 1 Assessment updated"),
    msg_record_deleted = T("Level 1 Assessment deleted"),
    msg_list_empty = T("No Level 1 Assessments currently registered"))

# Simple search on the ticket ID
building_nzseel1_search = s3base.S3Search(
        name="nzseel1_search_simple",
        label=T("Ticket ID"),
        comment=T("To search for an assessment, enter any portion of the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments."),
        field=["ticket_id"])

# Set as default search method
s3db.configure(tablename,
               search_method = building_nzseel1_search,
               )
# -------------------------------------------------------------------------
# NZSEE Level 2 (~ATC-20 Rapid Evaluation) Safety Assessment Form
# Extends the Level 1 form with per-element structural, non-structural and
# geotechnical hazard breakdowns, a sketch upload and repair recommendations
resourcename = "nzseel2"
tablename = "%s_%s" % (module, resourcename)
db.define_table(tablename,
                Field("ticket_id",
                      type=s3uuid_8char,
                      length=64,
                      notnull=True,
                      unique=True,
                      # NOTE(review): unlike Level 1, no default ticket ID
                      # and no writable=False here - confirm intended
                      label = T("Ticket ID"),
                      represent = lambda id: id and id.upper() or T("None")),
                person_id(label=T("Inspector ID"), empty=False), # pre-populated in Controller
                organisation_id(label=T("Territorial Authority")), # Affiliation in ATC20 terminology
                Field("date", "datetime", default=request.now,
                      requires=IS_DATETIME(format=s3_datetime_format),
                      label=T("Inspection date and time")),
                #Field("daytime", "time", label=T("Inspection time")),
                Field("area", "integer", label=T("Areas inspected"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_area_inspected)),
                      represent=lambda opt:
                        building_area_inspected.get(opt, UNKNOWN_OPT)),
                #Field("name", label=T("Building Name"), requires=IS_NOT_EMPTY()), # Included in location_id
                location_id(empty=False),
                Field("name_short",
                      label=T("Building Short Name/Business Name")),
                Field("contact_name",
                      label=T("Contact Name"),
                      requires=IS_NOT_EMPTY()),
                Field("contact_phone",
                      label=T("Contact Phone"),
                      requires=IS_NOT_EMPTY()),
                Field("stories_above", "integer",
                      label=T("Storeys at and above ground level")), # Number of stories above ground
                Field("stories_below", "integer",
                      label=T("Below ground level")), # Number of stories below ground
                Field("footprint", "integer",
                      label=T("Total gross floor area (square meters)")),
                Field("year_built", "integer",
                      label=T("Year built")),
                Field("residential_units", "integer",
                      label=T("Number of residential units")),
                #Field("residential_units_not_habitable", "integer",
                #      label=T("Number of residential units not habitable")),
                Field("photo", "boolean",
                      label=T("Photo Taken?"),
                      represent = s3_yes_no_represent),
                Field("construction_type", "integer",
                      label=T("Type of Construction"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_construction_types)),
                      represent=lambda opt: \
                        building_construction_types.get(opt, UNKNOWN_OPT)),
                Field("construction_type_other",
                      label="(%s)" % T("specify")),
                Field("primary_occupancy", "integer",
                      label=T("Primary Occupancy"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_primary_occupancy_opts)),
                      represent=lambda opt: \
                        building_primary_occupancy_opts.get(opt, UNKNOWN_OPT)),
                Field("primary_occupancy_other",
                      label="(%s)" % T("specify")),
                # Overall hazard/damage conditions (Minor/Moderate/Severe)
                Field("collapse", "integer",
                      label=T("Collapse, partial collapse, off foundation"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("leaning", "integer",
                      label=T("Building or storey leaning"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("structural", "integer",
                      label=T("Wall or other structural damage"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("falling", "integer",
                      label=T("Overhead falling hazard"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("slips", "integer",
                      label=T("Ground movement, settlement, slips"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("neighbour", "integer",
                      label=T("Neighbouring building hazard"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("other", "integer",
                      label=T("Electrical, gas, sewerage, water, hazmats"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                #Field("other_details", label="(%s)" % T("specify")),
                Field("action_comments", "text",
                      label=T("Comments")),
                # Posting: existing Level 1 placard plus the new Level 2 grade
                Field("posting_existing", "integer",
                      label=T("Existing Placard Type"),
                      requires=IS_IN_SET(building_posting_l1_opts),
                      represent=lambda opt: \
                        building_posting_l1_opts.get(opt, UNKNOWN_OPT)),
                Field("posting", "integer",
                      label=T("Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance."),
                      requires=IS_IN_SET(building_posting_l2_opts),
                      #@ToDo: comment= Guidance on meaning of options
                      represent=lambda opt: \
                        building_posting_l2_opts.get(opt, UNKNOWN_OPT)),
                Field("restrictions", "text",
                      label=T("Record any restriction on use or entry")),
                #Field("posting_comments", "text", label=T("Comments")),
                Field("barricades", "boolean",
                      label=T("Barricades are needed"),
                      represent = s3_yes_no_represent),
                Field("barricades_details", "text",
                      label="(%s)" % T("state location")),
                Field("detailed_evaluation", "boolean",
                      label=T("Level 2 or detailed engineering evaluation recommended"),
                      represent = s3_yes_no_represent),
                Field("detailed_structural", "boolean",
                      label=T("Structural"),
                      represent = s3_yes_no_represent),
                Field("detailed_geotechnical", "boolean",
                      label=T("Geotechnical"),
                      represent = s3_yes_no_represent),
                Field("detailed_other", "boolean",
                      label=T("Other"),
                      represent = s3_yes_no_represent),
                Field("detailed_other_details",
                      label="(%s)" % T("specify")),
                Field("other_recommendations", "text",
                      label=T("Other recommendations")),
                Field("estimated_damage", "integer",
                      label=T("Estimated Overall Building Damage"),
                      comment="(%s)" % T("Exclude contents"),
                      requires=IS_IN_SET(building_estimated_damage),
                      represent=lambda opt: \
                        building_estimated_damage.get(opt, UNKNOWN_OPT)),
                # Structural hazards/damage breakdown
                Field("structural_foundations", "integer",
                      label=T("Foundations"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("structural_roofs", "integer",
                      label=T("Roofs, floors (vertical load)"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("structural_columns", "integer",
                      label=T("Columns, pilasters, corbels"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("structural_diaphragms", "integer",
                      label=T("Diaphragms, horizontal bracing"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("structural_precast", "integer",
                      label=T("Pre-cast connections"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("structural_beam", "integer",
                      label=T("Beam"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                # Non-structural hazards/damage breakdown
                Field("non_parapets", "integer",
                      label=T("Parapets, ornamentation"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("non_cladding", "integer",
                      label=T("Cladding, glazing"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("non_ceilings", "integer",
                      label=T("Ceilings, light fixtures"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("non_interior", "integer",
                      label=T("Interior walls, partitions"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("non_elevators", "integer",
                      label=T("Elevators"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("non_stairs", "integer",
                      label="%s/ %s" % (T("Stairs"), T("Exits")),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("non_utilities", "integer",
                      label=T("Utilities"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      comment= "(%s)" % T("eg. gas, electricity, water"),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("non_other", "integer",
                      label=T("Other"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                # Geotechnical hazards/damage breakdown
                Field("geotechnical_slope", "integer",
                      label=T("Slope failure, debris"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("geotechnical_ground", "integer",
                      label=T("Ground movement, fissures"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("geotechnical_soil", "integer",
                      label=T("Soil bulging, liquefaction"),
                      requires=IS_EMPTY_OR(IS_IN_SET(building_evaluation_condition)),
                      represent=lambda opt: \
                        building_evaluation_condition.get(opt, UNKNOWN_OPT)),
                Field("general_comments", "text",
                      label=T("General Comment")),
                Field("sketch", "upload",
                      autodelete=True,
                      # NOTE(review): maxsize allows 800x800 but the error
                      # message says 300x300 - confirm the intended limit
                      requires = IS_EMPTY_OR(IS_IMAGE(maxsize=(800, 800),
                                                      error_message=T("Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!"))),
                      label=T("Sketch"),
                      comment=S3PopupLink(c="doc",
                                          f="image",
                                          label=T("Add Photo"),
                                          title=T("Sketch"),
                                          tooltip=T("Provide an optional sketch of the entire building or damage points. Indicate damage points.")
                                          )),
                Field("recommendations", "text",
                      label=T("Recommendations for Repair and Reconstruction or Demolition"),
                      comment="(%s)" % T("Optional")),
                *s3_meta_fields())
# CRUD strings
ADD_ASSESSMENT = T("Add Level 2 Assessment")
s3.crud_strings[tablename] = Storage(
    label_create = ADD_ASSESSMENT,
    title_display = T("Level 2 Assessment Details"),
    title_list = T("Level 2 Assessments"),
    title_update = T("Edit Level 2 Assessment"),
    label_list_button = T("List Level 2 Assessments"),
    label_delete_button = T("Delete Level 2 Assessment"),
    msg_record_created = T("Level 2 Assessment added"),
    msg_record_modified = T("Level 2 Assessment updated"),
    msg_record_deleted = T("Level 2 Assessment deleted"),
    msg_list_empty = T("No Level 2 Assessments currently registered"))

# Simple search on the ticket ID
building_nzseel2_search = s3base.S3Search(
        name="nzseel2_search_simple",
        label=T("Ticket ID"),
        comment=T("To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments."),
        field=["ticket_id"])

# Set as default search method
s3db.configure(tablename,
               search_method = building_nzseel2_search,
               )
# -----------------------------------------------------------------------------
# Controllers
# -----------------------------------------------------------------------------
def index():
    """Module home page: expose the module's nice name to the view."""
    name_nice = settings.modules[module].get("name_nice")
    response.title = name_nice
    return {"module_name": name_nice}
# -----------------------------------------------------------------------------
def nzseel1():
    """
    RESTful CRUD controller for the NZSEE Level 1 (~ATC-20 Rapid
    Evaluation) Safety Assessment Form.

    @ToDo: Action Button to create a new L2 Assessment from an L1
    """
    resource = "nzseel1"
    table_name = "%s_%s" % (module, resource)
    # The inspector defaults to the logged-in user
    db[table_name].person_id.default = auth.s3_logged_in_person()

    # Section subheadings shown in the form
    subheadings = {"name": ".", # Description in ATC-20
                   "collapse": "%s / %s" % (T("Overall Hazards"), T("Damage")),
                   "posting": ".",
                   "barricades": "%s:" % T("Further Action Recommended"),
                   "estimated_damage": ".",
                   }
    s3db.configure(table_name,
                   deletable = False,
                   create_next = URL(module, resource, args="[id]"),
                   subheadings = subheadings,
                   )

    return crud_controller(rheader=nzseel1_rheader)
# -----------------------------------------------------------------------------
def nzseel1_rheader(r, tabs=None):
    """
    Resource header for Level 1 assessments.

    Args:
        r: the CRUDRequest
        tabs: list of (title, component) tuples for the rheader tabs
              (defaults to no tabs; was a mutable default argument)

    Returns:
        a DIV with inspector/location/ticket summary for HTML requests
        with a record, otherwise None
    """
    if r.representation == "html":
        if r.name == "nzseel1":
            assess = r.record
            if assess:
                table = r.table
                rheader_tabs = s3_rheader_tabs(r, tabs if tabs is not None else [])
                location = assess.location_id
                if location:
                    location = table.location_id.represent(location)
                # Look up the inspector's name and SMS contact;
                # initialise mobile so it is defined even when the
                # inspector is missing (was a NameError)
                mobile = None
                person = assess.person_id
                if person:
                    query = (db.pr_person.id == person)
                    pe = db(query).select(db.pr_person.pe_id,
                                          limitby=(0, 1)).first()
                    if pe:
                        query = (db.pr_contact.pe_id == pe.pe_id) & \
                                (db.pr_contact.contact_method == "SMS")
                        contact = db(query).select(db.pr_contact.value,
                                                   limitby=(0, 1)).first()
                        if contact:
                            mobile = contact.value
                    person = s3_fullname(person)

                rheader = DIV(TABLE(
                                TR(
                                    TH("%s: " % T("Person")), person,
                                    TH("%s: " % T("Mobile")), mobile,
                                  ),
                                TR(
                                    TH("%s: " % T("Location")), location,
                                    TH("%s: " % T("Date")), table.date.represent(assess.date)
                                  ),
                                TR(
                                    TH(""), "",
                                    TH("%s: " % T("Ticket ID")),
                                    r.table.ticket_id.represent(assess.ticket_id),
                                  ),
                                ),
                              rheader_tabs)

                return rheader
    return None
# -----------------------------------------------------------------------------
# NZSEE Level 2 (~ATC-20 Rapid Evaluation) Safety Assessment Form
def nzseel2():
    """
    RESTful CRUD controller for the NZSEE Level 2 safety assessment form.
    """
    resource = "nzseel2"
    table_name = "%s_%s" % (module, resource)
    # The inspector defaults to the logged-in user
    db[table_name].person_id.default = auth.s3_logged_in_person()

    # Section subheadings shown in the form
    subheadings = {"name": ".", # Description in ATC-20
                   "collapse": "%s / %s" % (T("Overall Hazards"), T("Damage")),
                   "posting_existing": ".",
                   "barricades": "%s:" % T("Further Action Recommended"),
                   "estimated_damage": ".",
                   "structural_foundations": "%s / %s" % (T("Structural Hazards"), T("Damage")),
                   "non_parapets": "%s / %s" % (T("Non-structural Hazards"), T("Damage")),
                   "geotechnical_slope": "%s / %s" % (T("Geotechnical Hazards"), T("Damage")),
                   }
    s3db.configure(table_name,
                   deletable=False,
                   create_next = URL(module, resource, args="[id]"),
                   subheadings = subheadings)

    return crud_controller(rheader=nzseel2_rheader)
# -----------------------------------------------------------------------------
def nzseel2_rheader(r, tabs=None):
    """
    Resource header for Level 2 assessments.

    Args:
        r: the CRUDRequest
        tabs: list of (title, component) tuples for the rheader tabs
              (defaults to no tabs; was a mutable default argument)

    Returns:
        a DIV with inspector/location/ticket summary for HTML requests
        with a record, otherwise None
    """
    if r.representation == "html":
        if r.name == "nzseel2":
            assess = r.record
            if assess:
                table = r.table
                rheader_tabs = s3_rheader_tabs(r, tabs if tabs is not None else [])
                location = assess.location_id
                if location:
                    location = table.location_id.represent(location)
                # Look up the inspector's name and SMS contact;
                # initialise mobile so it is defined even when the
                # inspector is missing (was a NameError)
                mobile = None
                person = assess.person_id
                if person:
                    query = (db.pr_person.id == person)
                    pe = db(query).select(db.pr_person.pe_id,
                                          limitby=(0, 1)).first()
                    if pe:
                        query = (db.pr_contact.pe_id == pe.pe_id) & \
                                (db.pr_contact.contact_method == "SMS")
                        contact = db(query).select(db.pr_contact.value,
                                                   limitby=(0, 1)).first()
                        if contact:
                            mobile = contact.value
                    person = s3_fullname(person)

                rheader = DIV(TABLE(
                                TR(
                                    TH("%s: " % T("Person")), person,
                                    TH("%s: " % T("Mobile")), mobile,
                                  ),
                                TR(
                                    TH("%s: " % T("Location")), location,
                                    TH("%s: " % T("Date")), table.date.represent(assess.date)
                                  ),
                                TR(
                                    TH(""), "",
                                    TH("%s: " % T("Ticket ID")),
                                    r.table.ticket_id.represent(assess.ticket_id),
                                  ),
                                ),
                              rheader_tabs)

                return rheader
    return None
# -----------------------------------------------------------------------------
def report():
    """
    A report providing assessment totals, and breakdown by assessment
    type and status, e.g. Level 1 (red, yellow, green),
    Level 2 (R1-R3, Y1-Y2, G1-G2).

    @ToDo: Make into a Custom Method to be able to support Table ACLs
           (currently protected by Controller ACL)
    """
    def _posting_counts(table, green, yellow, red):
        """Count undeleted records: total and per placard colour."""
        counts = Storage()
        base = (table.deleted == False)
        counts.total = db(base).count()
        # fix: the local was previously named "filter", shadowing the builtin
        counts.green = db(base & table.posting.belongs(green)).count()
        counts.yellow = db(base & table.posting.belongs(yellow)).count()
        counts.red = db(base & table.posting.belongs(red)).count()
        return counts

    # Level 1 postings are single colours; Level 2 postings carry sub-grades
    level1 = _posting_counts(db.building_nzseel1, (1,), (2,), (3,))
    level2 = _posting_counts(db.building_nzseel2, (1, 2), (3, 4), (5, 6, 7))

    return {"level1": level1,
            "level2": level2,
            }
# -----------------------------------------------------------------------------
#def getformatedData(dbresult):
# result = []
# cnt = -1;
# # Format the results
# for row in dbresult:
# damage = row.estimated_damage
# try:
# trueDate = row.date #datetime.datetime.strptime(row.date, "%Y-%m-%d %H:%M:%S")
# except:
# trueDate = row.created_on
# date = trueDate.strftime("%d %b %Y")
# hour = trueDate.strftime("%H")
# key = (date, hour)
# if (cnt == -1) or (result[cnt][0] != key):
# result.append([key , 0, 0, 0, 0, 0, 0, 0, 1])
# cnt += 1
# else:
# result[cnt][8] += 1
# result[cnt][damage] += 1
#
# return result
def getformatedData(dbresult):
    """
    Group assessment rows into per-day totals and per-hour buckets.

    Each output entry is [key, d1..d7 damage-category counts, count]
    where key is (date-string, "Total") for the daily roll-up row or
    (date-string, hour-string) for an hourly row.  Rows are assumed
    to be ordered by date (grouping relies on adjacency).
    """
    result = []
    cntT = cntH = -1  # indices of the current "Total" row and hour row
    for row in dbresult:
        damage = row.estimated_damage
        try:
            trueDate = row.date
        except (AttributeError, KeyError):
            # Selections keyed on creation time have no "date" column;
            # was a bare except which hid unrelated errors.
            trueDate = row.created_on
        date = trueDate.strftime("%d %b %Y")
        hour = trueDate.strftime("%H")
        keyT = (date, "Total")
        keyH = (date, hour)
        if (cntT == -1) or (result[cntT][0] != keyT):
            # New day: start a fresh daily "Total" row
            result.append([keyT, 0, 0, 0, 0, 0, 0, 0, 0])
            cntT = cntH + 1
            cntH = cntT
        if (result[cntH][0] != keyH):
            # New hour within the current day
            result.append([keyH, 0, 0, 0, 0, 0, 0, 0, 0])
            cntH += 1
        result[cntT][8] += 1
        result[cntH][8] += 1
        result[cntT][damage] += 1
        result[cntH][damage] += 1
    return result
def timeline():
    """
    A report providing assessments received broken down by time:
    one series bucketed by inspection date, one by record-creation date.
    """
    table = db.building_nzseel1
    # Equivalent raw SQL:
    # select `date`, estimated_damage FROM building_nzseel1
    #   WHERE deleted = "F" ORDER BY `date` DESC
    rows = db(table.deleted == False).select(table.date,
                                             table.estimated_damage,
                                             orderby=~table.date,
                                             )
    inspection = getformatedData(rows)

    # select created_on, estimated_damage FROM building_nzseel1
    #   WHERE deleted = "F" ORDER BY created_on DESC
    rows = db(table.deleted == False).select(table.created_on,
                                             table.estimated_damage,
                                             orderby=~table.created_on,
                                             )
    creation = getformatedData(rows)

    # Grand totals across all per-day "Total" rows of the inspection series
    totals = [0, 0, 0, 0, 0, 0, 0, 0]
    for entry in inspection:
        if entry[0][1] == "Total":
            for idx in range(8):
                totals[idx] += entry[idx + 1]

    return {"inspection": inspection,
            "creation": creation,
            "totals": totals,
            }
# -----------------------------------------------------------------------------
def adminLevel():
"""
A report providing assessments received broken down by administration level
"""
# raw SQL command
# select parent, `path`, estimated_damage FROM building_nzseel1, gis_location WHERE building_nzseel1.deleted = "F" and (gis_location.id = building_nzseel1.location_id)
tableNZ1 = db.building_nzseel1
ltable = s3db.gis_location
query = (tableNZ1.location_id == ltable.id) & (tableNZ1.deleted == False)
dbresult = db(query).select(ltable.path,
ltable.parent,
tableNZ1.estimated_damage
)
result = []
temp = {}
# Format the results
for row in dbresult:
parent = row.gis_location.parent ##report[0]
path = row.gis_location.path #report[1]
damage = row.building_nzseel1.estimated_damage #report[2]
if parent in temp:
temp[parent][7] += 1
else:
temp[parent] = [0, 0, 0, 0, 0, 0, 0, 1]
temp[parent][damage - 1] += 1
gis = {}
for (key) in temp.keys():
# raw SQL command
# "select name, parent FROM gis_location WHERE gis_location.id = '%s'" % key
row = ltable(key)
if row == None:
gis[key] = T("Unknown")
else:
gis[key] = row.name
for (key, item) in temp.items():
if gis.has_key(key):
name = gis[key]
else:
name = T("Unknown")
result.append((name, item))
return {"report": result,
}
# -----------------------------------------------------------------------------
| 46.183537 | 362 | 0.50666 |
5de792e1aad705a526ef21275ab1410061e75aa0 | 394 | py | Python | 3day/Quiz01_2.py | jsjang93/joony | 62f7a325094c887212b894932263bf84500e0f03 | [
"MIT"
] | null | null | null | 3day/Quiz01_2.py | jsjang93/joony | 62f7a325094c887212b894932263bf84500e0f03 | [
"MIT"
] | null | null | null | 3day/Quiz01_2.py | jsjang93/joony | 62f7a325094c887212b894932263bf84500e0f03 | [
"MIT"
] | null | null | null | # Quiz01_2.py
# Simple drink vending-machine quiz: the user types one or more drink
# names (comma-separated) and the script prints the summed price.
items = {"콜라":1000,"사이다":900,"씨그램":500,"우유":700,"활명수":800}
print("=== 음료 자판기 입니다 ====")
print("[콜라][사이다][씨그램][우유][활명수] 중 선택")
print("복수 선택 시 --> 예) 사이다,우유 ")
# Selected drink names go into `item`; total cost into `price`
item = input() # e.g. "사이다,유유" -> comma-separated selection
items2 = item.strip().split(',')
# Collect the prices of every selected drink
prices = [p for i,p in items.items() if i in items2]
price = 0
for p in prices: price += p
print("가격 : {0} 원".format(price) )
fbd18191d9ab6d542b5db5fc91abfc17ab30a46e | 10,595 | py | Python | fa/Automato.py | wesbdss/AutomatosFinitos | efbd140e511b409139311fa7010388d114fdf096 | [
"MIT"
] | null | null | null | fa/Automato.py | wesbdss/AutomatosFinitos | efbd140e511b409139311fa7010388d114fdf096 | [
"MIT"
] | null | null | null | fa/Automato.py | wesbdss/AutomatosFinitos | efbd140e511b409139311fa7010388d114fdf096 | [
"MIT"
] | null | null | null | import os
#Nome: Wesley Benício
#Trabalho de LFA
#------funções------
def listArq(nome):
    """Return `nome` if an entry with that name exists in the current
    working directory, else None (matches the original implicit None)."""
    # Membership test instead of a manual scan over os.listdir()
    if nome in os.listdir():
        return nome
    return None
def traduzFunTransiNFA():
    """
    Convert an NFA transition table (nfaTabela.txt) into a DFA transition
    function file (nfaFuncao.txt) via subset construction:
    epsilon-closure of the start state, then move()/e_closure() over each
    input symbol until no new state sets appear.  Writes Qi=, Qf= and one
    "state,symbol=dest" line per DFA transition.
    """
    ini = None
    fin = []
    transi = []
    dstates = []   # DFA states: each is a comma-joined set of NFA states
    try:
        arq = open(listArq('nfaTabela.txt'), 'r')
        arq1 = open('nfaFuncao.txt', 'w')
    except Exception:
        print("Aquivo Inexistente")
        return "Vazio"
    transi = arq.read()  # read the whole table
    transi = transi.split('\n')
    alpha = transi[0].split('\t')
    alpha.pop(0)   # drop the empty corner cell
    transi.pop(0)  # drop the alphabet header row
    for x in transi:  # find the initial (>) and final (*) states
        if x[0] == '>' and ini == None:
            y = x.split('\t')    # e.g. ">*q0\t..."
            y = y[0].split('>')  # ['', '*q0']
            z = y[1]
            if z[0] == '*':
                z = z.split('*')
                fin.append(z[1])
                ini = z[1]
            else:
                ini = y[1]
        if x[0] == '*':  # e.g. "*q1\t..."
            z = x.split('*')
            z = z[1]
            z = z.split('\t')
            fin.append(z[0])
    ini = 'Qi=' + str(ini) + '\n'
    Sfin = 'Qf='
    for p in fin:
        Sfin = Sfin + p + ','
    Sfin = Sfin[:-1]
    Sfin = Sfin + '\n'
    # NOTE(review): Qi/Qf of the *NFA* are intentionally not written --
    # the DFA versions are computed and written further below.
    #arq1.write(ini)
    #arq1.write(Sfin)
    arq.close()
    # Re-read the table to build the normalised "estados" rows:
    # tabs become '-' and the >/*/{} markers are stripped.
    try:
        arq = open(listArq('nfaTabela.txt'), 'r')
    except Exception:
        print("Aquivo Inexistente")
        return "Vazio"
    estados = arq.read()
    estados = estados.split("\n")
    alpha = estados[0]
    estados.pop(0)
    alpha = alpha.split('\t')
    alpha.pop(0)
    for x in range(0, len(estados)):
        estados[x] = estados[x].replace('\t', '-')
        estados[x] = estados[x].replace('{', '')
        estados[x] = estados[x].replace('}', '')
        estados[x] = estados[x].replace('>', '')
        estados[x] = estados[x].replace('*', '')
    y = estados[0].split('-')
    resul = y[0] + ',&' + '=' + y[0] + ',' + y[3]
    # Extract the bare initial-state name back out of the "Qi=...\n" string
    estini = ini.split('=')
    estini = estini[1]
    estini = estini.split('\n')
    estini = estini[0]
    # Seed the DFA with the epsilon-closure of the NFA start state
    dstates.append(e_closure(estini, estados))
    ini = dstates[0]
    # Subset construction: keep expanding until no new state sets appear
    contd = 0
    while contd < len(dstates):
        start = dstates[contd]
        for x in alpha[:-1]:  # last alphabet column is the epsilon column
            aux1 = move(start, x, estados, alpha[:-1])
            if not aux1:
                resul = e_closure(aux1, estados)
            else:
                resul = e_closure(aux1, estados)
            if resul:
                dstates.append(resul)
            dstates = sorted(set(dstates))
        contd = contd + 1
    # A DFA state is final if it contains any NFA final state
    final = fin
    fin = None
    fin = []
    dstatesletras = []
    for x in range(0, len(dstates)):
        dstatesletras.append('q' + str(x))
    for aux2 in range(0, len(dstates)):
        for aux3 in final:
            if dstates[aux2].find(aux3) != -1:
                fin.append(dstatesletras[aux2])
    fin = sorted(set(fin))
    # Rename each DFA state set to a fresh letter q0..qN
    dstatesletras = []
    for x in range(0, len(dstates)):
        dstatesletras.append('q' + str(x))
    print(dstatesletras)
    for z in range(0, len(dstates)):
        if dstates[z].find(ini) != -1:
            ini = dstatesletras[z]
    arq1.write('Qi=' + ini + '\n')
    fin = str(fin).replace('[', '')
    fin = fin.replace(']', '')
    fin = fin.replace(' ', '')
    fin = fin.replace('\'', '')
    arq1.write('Qf=' + fin + '\n')
    # Emit one transition line per (DFA state, symbol) pair
    for y in range(0, len(dstates)):
        for x in range(0, len(alpha[:-1])):
            aux1 = move(dstates[y], alpha[x], estados, alpha[:-1])
            aux1 = e_closure(aux1, estados)
            for z in range(0, len(dstates)):
                if dstates[z] == aux1:
                    aux = dstatesletras[y] + ',' + str(alpha[x]) + '=' + dstatesletras[z] + '\n'
                    arq1.write(str(aux))
    arq.close()
    arq1.close()
    return
def e_closure(est, estados):
    """
    Normalise `est` (a state name or a comma/space/bracket-decorated set of
    state names) and return the epsilon-closure of the whole set as a
    sorted, comma-joined, de-duplicated string.  Delegates the actual
    epsilon walk to e_closure_recursivo().
    """
    final = ''
    # Strip quotes/brackets/spaces that leak in from str(list) formatting
    z = str(est)
    z = z.replace('\'', '')
    z = z.replace('[', '')
    z = z.replace(']', '')
    z = z.replace(' ', '')
    if str(est).find(',') > 0:
        # A set of states: close over each member individually
        k = est.split(',')
        for l in k:
            l = l.replace('\'', '')
            l = l.replace('[', '')
            l = l.replace(']', '')
            l = l.replace(' ', '')
            final = final + ',' + e_closure_recursivo(l, estados)
    else:
        final = e_closure_recursivo(z, estados)
    # De-duplicate and sort the accumulated state names
    final = final.replace(',', ' ')
    final = final.split(' ')
    final = sorted(set(final))
    if final[0] == '':
        final.pop(0)  # drop the empty entry left by the leading comma
    final = str(final).replace('[', '')
    final = final.replace(']', '')
    final = final.replace(' ', '')
    final = final.replace('\'', '')
    return final
def e_closure_recursivo(est, estados):
    """Follow epsilon moves from state `est`, returning a comma-joined
    chain of reached states ('' when `est` has no row).  Each row of
    `estados` is a '-'-separated line whose last column holds up to two
    comma-separated epsilon targets."""
    for linha in estados:
        cols = linha.split('-')
        if cols[0] != est:
            continue
        eps = cols[len(cols) - 1]
        if eps.find(',') > 0:
            # Two epsilon targets: follow both branches
            alvo = eps.split(',')
            return (str(cols[0]) + ',' +
                    str(e_closure_recursivo(alvo[0], estados)) + ',' +
                    str(e_closure_recursivo(alvo[1], estados)))
        if eps:
            # Single epsilon target (column 3 by table layout)
            return str(cols[0]) + ',' + str(e_closure_recursivo(cols[3], estados))
        # No epsilon move: closure is the state itself
        return str(cols[0])
    return ''
def move(est, entrada, estados, alpha):
    """Return the states reachable from the state set `est` (a single name
    or a comma-separated set) on input symbol `entrada`, formatted as a
    comma-joined string ('' when nothing is reachable)."""
    destinos = []
    try:
        est = str(est).split(',')
    except Exception:
        print("nada")
    for linha in estados:
        cols = linha.split('-')
        for origem in est:
            if cols[0] != origem.replace(' ', ''):
                continue
            # Column y+1 of the row holds the target for alphabet symbol y
            for pos in range(0, len(alpha)):
                if entrada == alpha[pos] and cols[pos + 1]:
                    destinos.append(cols[pos + 1])
    destinos = sorted(set(destinos))
    # str(list) formatting, then strip quotes and brackets
    texto = str(destinos).replace('\'', '')
    texto = texto.replace('[', '')
    return texto.replace(']', '')
def traduzFunTransiDFA():
    """
    Translate a DFA transition table (dfaTabela.txt) into a transition
    function file (dfaFuncao.txt): writes the initial state (Qi=...),
    the final states (Qf=...) and one "state,symbol=dest" line per cell.
    """
    ini = None
    fin = []
    try:
        arq = open(listArq('dfaTabela.txt'), 'r')
        arq1 = open('dfaFuncao.txt', 'w')
    except Exception:
        print("Aquivo Inexistente")
        return "Vazio"
    transi = arq.read()  # read the whole table
    transi = transi.split('\n')
    alpha = transi[0].split('\t')
    alpha.pop(0)   # drop the empty corner cell
    transi.pop(0)  # drop the alphabet header row
    for x in transi:  # find the initial (>) and final (*) states
        if x[0] == '>' and ini == None:
            y = x.split('\t')    # e.g. ">*q0\t..."
            y = y[0].split('>')  # ['', '*q0']
            z = y[1]
            if z[0] == '*':
                z = z.split('*')
                fin.append(z[1])
                ini = z[1]
            else:
                ini = y[1]
        if x[0] == '*':  # e.g. "*q1\t..."
            z = x.split('*')
            z = z[1]
            z = z.split('\t')
            # BUGFIX: append only the state name (first cell), not the whole
            # tab-separated row -- mirrors the corrected NFA routine; the old
            # code leaked the row's transitions into the Qf= line.
            fin.append(z[0])
    ini = 'Qi=' + str(ini) + '\n'
    Sfin = 'Qf='
    for p in fin:
        Sfin = Sfin + p + ','
    Sfin = Sfin[:-1]
    Sfin = Sfin + '\n'
    arq1.write(ini)
    arq1.write(Sfin)
    # NOTE(review): only rows marked with '*' (or '>*') get transition
    # lines written -- looks suspicious, but preserved as-is; confirm
    # the intended table format before changing.
    for x in transi:
        if x[0] == '*' or x[1] == '*':
            x = x.split('*')
            x = x[1]
            x = x.split('\t')
            for y in range(0, len(alpha)):
                string = str(x[0]) + ',' + alpha[y] + '=' + str(x[y + 1]) + '\n'
                arq1.write(string)
    arq1.close()
    arq.close()
def leituraArq(one, two):
    """Parse a transition-function file (`one`) and an input-strings file
    (`two`).  Returns (initial_state, final_states, transitions, inputs),
    "Vazio" when a file is missing, or -1 when Qi/Qf are absent."""
    ini = None
    fin = []
    try:
        arq = open(listArq(one), 'r')
        arqe = open(listArq(two), 'r')
    except Exception:
        print("Aquivo Inexistente")
        return "Vazio"
    transi = arqe.read() and None  # placeholder removed below
    transi = arq.read().split('\n')
    entrada = arqe.read().split('\n')
    for linha in transi:
        partes = linha.split('=')
        if partes[0] == 'Qi':  # initial state line
            ini = partes[1]
        if partes[0] == 'Qf':  # final states line (comma-separated)
            fin = partes[1].split(',')
    if (ini or fin) == None:
        return -1
    # Drop the Qi= / Qf= header lines from the transition list
    transi.pop(0)
    transi.pop(0)
    arq.close
    return (ini, fin, transi, entrada)
def execute(ini, fin, transi, entrada):
    """Run the automaton: consume `entrada` symbol by symbol starting from
    state `ini`, using transition strings of the form "state,symbol=dest".
    Returns "Aceito" when the run ends in a state listed in `fin`,
    otherwise "Rejeitado"."""
    atual = ini  # current state
    destino = None
    for simbolo in list(entrada):
        for regra in transi:
            lados = regra.split(',')
            try:
                destino = lados[1].split('=')
            except Exception:
                # malformed line (no comma) -- skip it
                continue
            if lados[0] == atual and destino[0] == simbolo:
                break
        # Re-check the last parsed rule: matched -> advance, else reject
        if lados[0] == atual and destino[0] == simbolo:
            atual = destino[1]
            continue
        atual = None
        break
    if any(atual == estado for estado in fin):
        return "Aceito"
    return "Rejeitado"
def main():
    """Rebuild the DFA transition function from its table, run every input
    string and write one "Aceito"/"Rejeitado" line to ResultadoDFA.txt."""
    traduzFunTransiDFA()  # dfaTabela.txt -> dfaFuncao.txt
    (ini, fin, transi, entrada) = leituraArq('dfaFuncao.txt', 'entrada.txt')
    saida = open("ResultadoDFA.txt", 'w')
    for cadeia in entrada:
        saida.write(execute(ini, fin, transi, cadeia) + '\n')
    saida.close
def main2():
    """Convert the NFA to a DFA transition function, run every input string
    and write one "Aceito"/"Rejeitado" line to ResultadoNFA.txt."""
    traduzFunTransiNFA()  # nfaTabela.txt -> nfaFuncao.txt
    (ini, fin, transi, entrada) = leituraArq('nfaFuncao.txt', 'entrada.txt')
    saida = open("ResultadoNFA.txt", 'w')
    for cadeia in entrada:
        saida.write(execute(ini, fin, transi, cadeia) + '\n')
    saida.close
def menu():
    """Interactive menu: 1 runs the DFA pipeline, 2 the NFA pipeline,
    3 exits."""
    while True:
        print("1 - Ler e Calcular DFA")
        print("2 - Ler e Calcular NFA")
        print("3 - Sair")
        escolha = int(input("Insira a opção: "))
        if escolha == 1:
            main()
            break
        if escolha == 2:
            main2()
            break
        if escolha == 3:
            break
#main() -- direct DFA run (ready, kept for manual testing)
#traduzFunTransiNFA() -- direct NFA translation (ready, kept for manual testing)
menu()
| 29.929379 | 118 | 0.488438 |
eaaef3b00d6a5c565cf71b665f6b9fe427725adf | 99,866 | py | Python | zappa/cli.py | DomainGroupOSS/Zappa | 5a1f8e98141aef84e64e72e938d86f03454e0f70 | [
"MIT"
] | null | null | null | zappa/cli.py | DomainGroupOSS/Zappa | 5a1f8e98141aef84e64e72e938d86f03454e0f70 | [
"MIT"
] | 1 | 2021-03-25T23:39:25.000Z | 2021-03-25T23:39:25.000Z | zappa/cli.py | DomainGroupOSS/Zappa | 5a1f8e98141aef84e64e72e938d86f03454e0f70 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Zappa CLI
Deploy arbitrary Python programs as serverless Zappa applications.
"""
from __future__ import unicode_literals
from __future__ import division
from past.builtins import basestring
from builtins import input, bytes
import argcomplete
import argparse
import base64
import pkgutil
import botocore
import click
import collections
import hjson as json
import inspect
import importlib
import logging
import os
import pkg_resources
import random
import re
import requests
import slugify
import string
import sys
import tempfile
import time
import toml
import yaml
import zipfile
from click.exceptions import ClickException
from dateutil import parser
from datetime import datetime, timedelta
from .core import Zappa, logger, API_GATEWAY_REGIONS
from .utilities import (check_new_version_available, detect_django_settings,
detect_flask_apps, parse_s3_url, human_size,
validate_name, InvalidAwsLambdaName,
get_runtime_from_python_version, string_to_timestamp)
# NOTE(review): appears to enumerate the zappa_settings keys that Zappa
# itself consumes (as opposed to values passed through to AWS) -- confirm
# against the rest of this module before relying on it.
CUSTOM_SETTINGS = [
    'assume_policy',
    'attach_policy',
    'aws_region',
    'delete_local_zip',
    'delete_s3_zip',
    'exclude',
    'extra_permissions',
    'include',
    'role_name',
    'touch',
]

# Referenced when boto3 credentials are missing or misconfigured.
BOTO3_CONFIG_DOCS_URL = 'https://boto3.readthedocs.io/en/latest/guide/quickstart.html#configuration'
##
# Main Input Processing
##
class ZappaCLI(object):
    """
    ZappaCLI object is responsible for loading the settings,
    handling the input arguments and executing the calls to the core library.
    """

    # CLI state
    vargs = None        # dict of parsed argparse arguments
    command = None      # subcommand currently being executed
    stage_env = None    # stage name given on the command line

    # Zappa settings
    zappa = None            # core Zappa instance
    zappa_settings = None   # parsed zappa_settings file contents
    load_credentials = True

    # Specific settings (populated per-stage by load_settings())
    api_stage = None
    app_function = None
    aws_region = None
    debug = None
    prebuild_script = None
    project_name = None
    profile_name = None
    lambda_arn = None
    lambda_name = None
    lambda_description = None
    s3_bucket_name = None
    settings_file = None
    zip_path = None
    handler_path = None
    vpc_config = None
    memory_size = None
    use_apigateway = None
    lambda_handler = None
    django_settings = None
    manage_roles = True
    exception_handler = None
    environment_variables = None
    authorizer = None
    aws_kms_key_arn = ''
    # Valid stage names: alphanumerics and underscores only
    stage_name_env_pattern = re.compile('^[a-zA-Z0-9_]+$')
def __init__(self):
    # Per-stage forced overrides; populate via
    # self.override_stage_config_setting(key, val).
    self._stage_config_overrides = dict()
@property
def stage_config(self):
    """
    A shortcut property for settings of a stage.

    Resolves the stage's settings including any 'extends' chains
    (child settings win over the extended stage's), applies the
    delete_zip -> delete_local_zip backwards-compatibility alias,
    then layers on any forced overrides for this stage.
    """

    def get_stage_setting(stage, extended_stages=None):
        # Recursively merge settings along the 'extends' chain;
        # extended_stages tracks visited stages to detect cycles.
        if extended_stages is None:
            extended_stages = []

        if stage in extended_stages:
            raise RuntimeError(stage + " has already been extended to these settings. "
                                       "There is a circular extends within the settings file.")
        extended_stages.append(stage)

        try:
            stage_settings = dict(self.zappa_settings[stage].copy())
        except KeyError:
            raise ClickException("Cannot extend settings for undefined stage '" + stage + "'.")

        extends_stage = self.zappa_settings[stage].get('extends', None)
        if not extends_stage:
            return stage_settings
        extended_settings = get_stage_setting(stage=extends_stage, extended_stages=extended_stages)
        # Child stage settings take precedence over the extended stage's
        extended_settings.update(stage_settings)
        return extended_settings

    settings = get_stage_setting(stage=self.api_stage)

    # Backwards compatible for delete_zip setting that was more explicitly named delete_local_zip
    if u'delete_zip' in settings:
        settings[u'delete_local_zip'] = settings.get(u'delete_zip')

    settings.update(self.stage_config_overrides)

    return settings
@property
def stage_config_overrides(self):
    """
    Returns the zappa_settings we forcefully override for the current
    stage, as set by `self.override_stage_config_setting(key, value)`.
    """
    overrides_by_stage = getattr(self, '_stage_config_overrides', {})
    return overrides_by_stage.get(self.api_stage, {})
def override_stage_config_setting(self, key, val):
    """
    Forcefully override a setting set by zappa_settings (for the current
    stage only).

    :param key: settings key
    :param val: value
    """
    # Tolerate being called before __init__ has created the store
    if not hasattr(self, '_stage_config_overrides'):
        self._stage_config_overrides = {}
    self._stage_config_overrides.setdefault(self.api_stage, {})[key] = val
def handle(self, argv=None):
    """
    Main function.

    Parses command, load settings and dispatches accordingly.

    Builds the argparse tree (one sub-parser per zappa subcommand),
    parses `argv`, loads the settings file, resolves which stage(s)
    to act on and dispatches each via dispatch_command().
    """
    desc = ('Zappa - Deploy Python applications to AWS Lambda'
            ' and API Gateway.\n')
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument(
        '-v', '--version', action='version',
        version=pkg_resources.get_distribution("zappa").version,
        help='Print the zappa version'
    )

    # Shared parent parser: stage selection plus common flags,
    # inherited by most subcommands via parents=[env_parser].
    env_parser = argparse.ArgumentParser(add_help=False)
    me_group = env_parser.add_mutually_exclusive_group()
    all_help = ('Execute this command for all of our defined '
                'Zappa stages.')
    me_group.add_argument('--all', action='store_true', help=all_help)
    me_group.add_argument('stage_env', nargs='?')

    group = env_parser.add_argument_group()
    group.add_argument(
        '-a', '--app_function', help='The WSGI application function.'
    )
    group.add_argument(
        '-s', '--settings_file', help='The path to a Zappa settings file.'
    )
    group.add_argument(
        '-q', '--quiet', action='store_true', help='Silence all output.'
    )
    # https://github.com/Miserlou/Zappa/issues/407
    # Moved here when the 'template' command was added.
    group.add_argument(
        '-j', '--json', action='store_true', help='Make the output of this command be machine readable.'
    )

    ##
    # Certify
    ##
    subparsers = parser.add_subparsers(title='subcommands', dest='command')
    cert_parser = subparsers.add_parser(
        'certify', parents=[env_parser],
        help='Create and install SSL certificate'
    )
    cert_parser.add_argument(
        '--no-cleanup', action='store_true',
        help=("Don't remove certificate files from /tmp during certify."
              " Dangerous.")
    )
    cert_parser.add_argument(
        '--manual', action='store_true',
        help=("Gets new Let's Encrypt certificates, but prints them to console."
              "Does not update API Gateway domains.")
    )
    cert_parser.add_argument(
        '-y', '--yes', action='store_true', help='Auto confirm yes.'
    )

    ##
    # Deploy
    ##
    deploy_parser = subparsers.add_parser(
        'deploy', parents=[env_parser], help='Deploy application.'
    )

    ##
    # Init
    ##
    init_parser = subparsers.add_parser('init', help='Initialize Zappa app.')

    ##
    # Package
    ##
    package_parser = subparsers.add_parser(
        'package', parents=[env_parser], help='Build the application zip package locally.'
    )
    package_parser.add_argument(
        '-o', '--output', help='Name of file to output the package to.'
    )

    ##
    # Template
    ##
    template_parser = subparsers.add_parser(
        'template', parents=[env_parser], help='Create a CloudFormation template for this API Gateway.'
    )
    template_parser.add_argument(
        '-l', '--lambda-arn', required=True, help='ARN of the Lambda function to template to.'
    )
    template_parser.add_argument(
        '-r', '--role-arn', required=True, help='ARN of the Role to template with.'
    )
    template_parser.add_argument(
        '-o', '--output', help='Name of file to output the template to.'
    )

    ##
    # Invocation
    ##
    invoke_parser = subparsers.add_parser(
        'invoke', parents=[env_parser],
        help='Invoke remote function.'
    )
    invoke_parser.add_argument(
        '--raw', action='store_true',
        help=('When invoking remotely, invoke this python as a string,'
              ' not as a modular path.')
    )
    invoke_parser.add_argument('command_rest')

    ##
    # Manage
    ##
    manage_parser = subparsers.add_parser(
        'manage',
        help='Invoke remote Django manage.py commands.'
    )
    rest_help = ("Command in the form of <env> <command>. <env> is not "
                 "required if --all is specified")
    manage_parser.add_argument('--all', action='store_true', help=all_help)
    manage_parser.add_argument('command_rest', nargs='+', help=rest_help)

    ##
    # Rollback
    ##
    def positive_int(s):
        """ Ensure an arg is positive """
        i = int(s)
        if i < 0:
            msg = "This argument must be positive (got {})".format(s)
            raise argparse.ArgumentTypeError(msg)
        return i

    rollback_parser = subparsers.add_parser(
        'rollback', parents=[env_parser],
        help='Rollback deployed code to a previous version.'
    )
    rollback_parser.add_argument(
        '-n', '--num-rollback', type=positive_int, default=1,
        help='The number of versions to rollback.'
    )

    ##
    # Scheduling
    ##
    subparsers.add_parser(
        'schedule', parents=[env_parser],
        help='Schedule functions to occur at regular intervals.'
    )

    ##
    # Status
    ##
    status_parser = subparsers.add_parser(
        'status', parents=[env_parser],
        help='Show deployment status and event schedules.'
    )

    ##
    # Log Tailing
    ##
    tail_parser = subparsers.add_parser(
        'tail', parents=[env_parser], help='Tail deployment logs.'
    )
    tail_parser.add_argument(
        '--no-color', action='store_true',
        help="Don't color log tail output."
    )
    tail_parser.add_argument(
        '--http', action='store_true',
        help='Only show HTTP requests in tail output.'
    )
    tail_parser.add_argument(
        '--non-http', action='store_true',
        help='Only show non-HTTP requests in tail output.'
    )
    tail_parser.add_argument(
        '--since', type=str, default="100000s",
        help="Only show lines since a certain timeframe."
    )
    tail_parser.add_argument(
        '--filter', type=str, default="",
        help="Apply a filter pattern to the logs."
    )

    ##
    # Undeploy
    ##
    undeploy_parser = subparsers.add_parser(
        'undeploy', parents=[env_parser], help='Undeploy application.'
    )
    undeploy_parser.add_argument(
        '--remove-logs', action='store_true',
        help=('Removes log groups of api gateway and lambda task'
              ' during the undeployment.'),
    )
    undeploy_parser.add_argument(
        '-y', '--yes', action='store_true', help='Auto confirm yes.'
    )

    ##
    # Unschedule
    ##
    subparsers.add_parser('unschedule', parents=[env_parser],
                          help='Unschedule functions.')

    ##
    # Updating
    ##
    subparsers.add_parser(
        'update', parents=[env_parser], help='Update deployed application.'
    )

    ##
    # Debug
    ##
    subparsers.add_parser(
        'shell', parents=[env_parser], help='A debug shell with a loaded Zappa object.'
    )

    argcomplete.autocomplete(parser)
    args = parser.parse_args(argv)
    self.vargs = vars(args)

    # Parse the input
    # NOTE(rmoe): Special case for manage command
    # The manage command can't have both stage_env and command_rest
    # arguments. Since they are both positional arguments argparse can't
    # differentiate the two. This causes problems when used with --all.
    # (e.g. "manage --all showmigrations admin" argparse thinks --all has
    # been specified AND that stage_env='showmigrations')
    # By having command_rest collect everything but --all we can split it
    # apart here instead of relying on argparse.
    if args.command == 'manage' and not self.vargs.get('all'):
        self.stage_env = self.vargs['command_rest'].pop(0)
    else:
        self.stage_env = self.vargs.get('stage_env')

    self.command = args.command
    if self.vargs.get('quiet'):
        self.silence()

    # We don't have any settings yet, so make those first!
    # (Settings-based interactions will fail
    # before a project has been initialized.)
    if self.command == 'init':
        self.init()
        return

    # Make sure there isn't a new version available
    if not self.vargs.get('json'):
        self.check_for_update()

    # Load and Validate Settings File
    self.load_settings_file(self.vargs.get('settings_file'))

    # Should we execute this for all stages, or just one?
    all_stages = self.vargs.get('all')
    stages = []

    if all_stages: # All stages!
        stages = self.zappa_settings.keys()
    else: # Just one env.
        if not self.stage_env:
            # If there's only one stage defined in the settings,
            # use that as the default.
            if len(self.zappa_settings.keys()) == 1:
                stages.append(list(self.zappa_settings.keys())[0])
            else:
                parser.error("Please supply an stage to interact with.")
        else:
            stages.append(self.stage_env)

    for stage in stages:
        try:
            self.dispatch_command(self.command, stage)
        except ClickException as e:
            # Discussion on exit codes: https://github.com/Miserlou/Zappa/issues/407
            e.show()
            sys.exit(e.exit_code)
def dispatch_command(self, command, stage):
    """
    Given a command to execute and stage,
    execute that command.

    Loads the per-stage settings, fires the 'settings' callback, then
    routes to the matching handler method.
    """

    self.api_stage = stage

    if command not in ['status', 'manage']:
        if not self.vargs['json']:
            click.echo("Calling " + click.style(command, fg="green", bold=True) + " for stage " +
                       click.style(self.api_stage, bold=True) + ".." )

    # Explicity define the app function.
    # Related: https://github.com/Miserlou/Zappa/issues/832
    if self.vargs.get('app_function', None):
        self.app_function = self.vargs['app_function']

    # Load our settings, based on api_stage.
    try:
        self.load_settings(self.vargs.get('settings_file'))
    except ValueError as e:
        print("Error: {}".format(e.message))
        sys.exit(-1)
    self.callback('settings')

    # Hand it off
    # NOTE(review): the first three branches are plain `if`s (not `elif`) --
    # harmless since `command` only matches one literal, but intentional?
    if command == 'deploy': # pragma: no cover
        self.deploy()
    if command == 'package': # pragma: no cover
        self.package(self.vargs['output'])
    if command == 'template': # pragma: no cover
        self.template( self.vargs['lambda_arn'],
                       self.vargs['role_arn'],
                       output=self.vargs['output'],
                       json=self.vargs['json']
                    )
    elif command == 'update': # pragma: no cover
        self.update()
    elif command == 'rollback': # pragma: no cover
        self.rollback(self.vargs['num_rollback'])
    elif command == 'invoke': # pragma: no cover
        if not self.vargs.get('command_rest'):
            print("Please enter the function to invoke.")
            return
        self.invoke(self.vargs['command_rest'], raw_python=self.vargs['raw'])
    elif command == 'manage': # pragma: no cover
        if not self.vargs.get('command_rest'):
            print("Please enter the management command to invoke.")
            return
        if not self.django_settings:
            print("This command is for Django projects only!")
            print("If this is a Django project, please define django_settings in your zappa_settings.")
            return
        command_tail = self.vargs.get('command_rest')
        if len(command_tail) > 1:
            command = " ".join(command_tail) # ex: zappa manage dev "shell --version"
        else:
            command = command_tail[0] # ex: zappa manage dev showmigrations admin
        self.invoke(command, command="manage")
    elif command == 'tail': # pragma: no cover
        self.tail(
            colorize=(not self.vargs['no_color']),
            http=self.vargs['http'],
            non_http=self.vargs['non_http'],
            since=self.vargs['since'],
            filter_pattern=self.vargs['filter'],
        )
    elif command == 'undeploy': # pragma: no cover
        self.undeploy(
            no_confirm=self.vargs['yes'],
            remove_logs=self.vargs['remove_logs']
        )
    elif command == 'schedule': # pragma: no cover
        self.schedule()
    elif command == 'unschedule': # pragma: no cover
        self.unschedule()
    elif command == 'status': # pragma: no cover
        self.status(return_json=self.vargs['json'])
    elif command == 'certify': # pragma: no cover
        self.certify(
            no_cleanup=self.vargs['no_cleanup'],
            no_confirm=self.vargs['yes'],
            manual=self.vargs['manual']
        )
    elif command == 'shell': # pragma: no cover
        self.shell()
##
# The Commands
##
def package(self, output=None):
    """
    Only build the package.

    Runs the prebuild script (if any), builds the Lambda zip locally
    without deploying, fires the 'zip' callback and reports the path
    and size of the resulting archive.

    :param output: optional file name for the package
    """
    # Make sure we're in a venv.
    self.check_venv()

    # force not to delete the local zip
    self.override_stage_config_setting('delete_local_zip', False)
    # Execute the prebuild script
    if self.prebuild_script:
        self.execute_prebuild_script()
    # Create the Lambda Zip
    self.create_package(output)
    self.callback('zip')
    size = human_size(os.path.getsize(self.zip_path))
    click.echo(click.style("Package created", fg="green", bold=True) + ": " + click.style(self.zip_path, bold=True) + " (" + size + ")")
def template(self, lambda_arn, role_arn, output=None, json=False):
    """
    Only build the template file.

    Generates the CloudFormation stack template for the API Gateway and
    writes it to disk (or prints it when `json` is True).

    :param lambda_arn: ARN of the Lambda function to template against
    :param role_arn: ARN of the IAM role to use
    :param output: optional output file name
    :param json: when True, print the template instead of announcing the file
                 (note: shadows the module-level `json` import, unused here)
    """

    if not lambda_arn:
        raise ClickException("Lambda ARN is required to template.")

    if not role_arn:
        raise ClickException("Role ARN is required to template.")

    self.zappa.credentials_arn = role_arn

    # Create the template!
    template = self.zappa.create_stack_template(
        lambda_arn=lambda_arn,
        lambda_name=self.lambda_name,
        api_key_required=self.api_key_required,
        iam_authorization=self.iam_authorization,
        authorizer=self.authorizer,
        cors_options=self.cors,
        description=self.apigateway_description
    )

    if not output:
        template_file = self.lambda_name + '-template-' + str(int(time.time())) + '.json'
    else:
        template_file = output
    with open(template_file, 'wb') as out:
        out.write(bytes(template.to_json(indent=None, separators=(',',':')), "utf-8"))

    if not json:
        click.echo(click.style("Template created", fg="green", bold=True) + ": " + click.style(template_file, bold=True))
    else:
        with open(template_file, 'r') as out:
            print(out.read())
def deploy(self):
    """
    Package your project, upload it to S3, register the Lambda function
    and create the API Gateway routes.

    Raises ClickException if the app is already deployed (use `update`),
    if IAM role management fails, or if any S3 upload/copy fails.
    """

    # Make sure we're in a venv.
    self.check_venv()

    # Execute the prebuild script
    if self.prebuild_script:
        self.execute_prebuild_script()

    # Make sure this isn't already deployed.
    deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
    if len(deployed_versions) > 0:
        raise ClickException("This application is " + click.style("already deployed", fg="red") +
                             " - did you mean to call " + click.style("update", bold=True) + "?")

    # Make sure the necessary IAM execution roles are available
    if self.manage_roles:
        try:
            self.zappa.create_iam_roles()
        except botocore.client.ClientError:
            raise ClickException(
                click.style("Failed", fg="red") + " to " + click.style("manage IAM roles", bold=True) + "!\n" +
                "You may " + click.style("lack the necessary AWS permissions", bold=True) +
                " to automatically manage a Zappa execution role.\n" +
                "To fix this, see here: " +
                click.style("https://github.com/Miserlou/Zappa#using-custom-aws-iam-roles-and-policies", bold=True)
                + '\n')

    # Create the Lambda Zip
    self.create_package()
    self.callback('zip')

    # Upload it to S3
    success = self.zappa.upload_to_s3(
            self.zip_path, self.s3_bucket_name)
    if not success: # pragma: no cover
        raise ClickException("Unable to upload to S3. Quitting.")

    # If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip
    if self.stage_config.get('slim_handler', False):
        # https://github.com/Miserlou/Zappa/issues/510
        success = self.zappa.upload_to_s3(self.handler_path, self.s3_bucket_name)
        if not success: # pragma: no cover
            raise ClickException("Unable to upload handler to S3. Quitting.")

        # Copy the project zip to the current project zip
        current_project_name = '{0!s}_current_project.zip'.format(self.project_name)
        success = self.zappa.copy_on_s3(src_file_name=self.zip_path, dst_file_name=current_project_name,
                                        bucket_name=self.s3_bucket_name)
        if not success: # pragma: no cover
            raise ClickException("Unable to copy the zip to be the current project. Quitting.")

        handler_file = self.handler_path
    else:
        handler_file = self.zip_path

    # Fixes https://github.com/Miserlou/Zappa/issues/613
    # (only create the function when it does not already exist)
    try:
        self.lambda_arn = self.zappa.get_lambda_function(
            function_name=self.lambda_name)
    except botocore.client.ClientError:
        # Register the Lambda function with that zip as the source
        # You'll also need to define the path to your lambda_handler code.
        self.lambda_arn = self.zappa.create_lambda_function(
            bucket=self.s3_bucket_name,
            s3_key=handler_file,
            function_name=self.lambda_name,
            handler=self.lambda_handler,
            description=self.lambda_description,
            vpc_config=self.vpc_config,
            dead_letter_config=self.dead_letter_config,
            timeout=self.timeout_seconds,
            memory_size=self.memory_size,
            runtime=self.runtime,
            environment_variables=self.environment_variables,
            aws_kms_key_arn=self.aws_kms_key_arn
        )

    # Schedule events for this deployment
    self.schedule()

    endpoint_url = ''
    deployment_string = click.style("Deployment complete", fg="green", bold=True) + "!"

    if self.use_apigateway:

        # Create and configure the API Gateway
        template = self.zappa.create_stack_template(
            lambda_arn=self.lambda_arn,
            lambda_name=self.lambda_name,
            api_key_required=self.api_key_required,
            iam_authorization=self.iam_authorization,
            authorizer=self.authorizer,
            cors_options=self.cors,
            description=self.apigateway_description
        )
        self.zappa.update_stack(self.lambda_name, self.s3_bucket_name, wait=True)

        api_id = self.zappa.get_api_id(self.lambda_name)

        # Add binary support
        if self.binary_support:
            self.zappa.add_binary_support(api_id=api_id)

        # Deploy the API!
        endpoint_url = self.deploy_api_gateway(api_id)
        deployment_string = deployment_string + ": {}".format(endpoint_url)

        # Create/link API key
        if self.api_key_required:
            if self.api_key is None:
                self.zappa.create_api_key(api_id=api_id, stage_name=self.api_stage)
            else:
                self.zappa.add_api_stage_to_api_key(api_key=self.api_key, api_id=api_id, stage_name=self.api_stage)

        # Warm the endpoint with a first request, unless disabled
        if self.stage_config.get('touch', True):
            requests.get(endpoint_url)

    # Finally, delete the local copy our zip package
    if self.stage_config.get('delete_local_zip', True):
        self.remove_local_zip()

    # Remove the project zip from S3.
    self.remove_uploaded_zip()

    self.callback('post')

    click.echo(deployment_string)
    def update(self):
        """
        Repackage and update the function code.

        Unlike a full deploy, this assumes the Lambda function (and, when
        enabled, the API Gateway stack) already exists: it rebuilds the
        package, uploads it to S3, points the existing function at the new
        code, refreshes the function configuration and the CloudFormation
        stack, then re-schedules events.  Exits via sys.exit(-1) if the
        function cannot be found (i.e. was never deployed).
        """
        # Make sure we're in a venv.
        self.check_venv()
        # Execute the prebuild script
        if self.prebuild_script:
            self.execute_prebuild_script()
        # Temporary version check
        try:
            # Deployments last modified before this unix timestamp predate a
            # breaking Zappa change and must be redeployed rather than updated.
            updated_time = 1472581018
            function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
            conf = function_response['Configuration']
            last_updated = parser.parse(conf['LastModified'])
            last_updated_unix = time.mktime(last_updated.timetuple())
        except Exception as e:
            click.echo(click.style("Warning!", fg="red") + " Couldn't get function " + self.lambda_name +
                       " in " + self.zappa.aws_region + " - have you deployed yet?")
            sys.exit(-1)
        if last_updated_unix <= updated_time:
            click.echo(click.style("Warning!", fg="red") +
                       " You may have upgraded Zappa since deploying this application. You will need to " +
                       click.style("redeploy", bold=True) + " for this deployment to work properly!")
        # Make sure the necessary IAM execution roles are available
        if self.manage_roles:
            try:
                self.zappa.create_iam_roles()
            except botocore.client.ClientError:
                click.echo(click.style("Failed", fg="red") + " to " + click.style("manage IAM roles", bold=True) + "!")
                click.echo("You may " + click.style("lack the necessary AWS permissions", bold=True) +
                           " to automatically manage a Zappa execution role.")
                click.echo("To fix this, see here: " +
                           click.style("https://github.com/Miserlou/Zappa#using-custom-aws-iam-roles-and-policies",
                                       bold=True))
                sys.exit(-1)
        # Create the Lambda Zip,
        self.create_package()
        self.callback('zip')
        # Upload it to S3
        success = self.zappa.upload_to_s3(self.zip_path, self.s3_bucket_name)
        if not success:  # pragma: no cover
            raise ClickException("Unable to upload project to S3. Quitting.")
        # If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip
        if self.stage_config.get('slim_handler', False):
            # https://github.com/Miserlou/Zappa/issues/510
            success = self.zappa.upload_to_s3(self.handler_path, self.s3_bucket_name)
            if not success:  # pragma: no cover
                raise ClickException("Unable to upload handler to S3. Quitting.")
            # Copy the project zip to the current project zip
            current_project_name = '{0!s}_current_project.zip'.format(self.project_name)
            success = self.zappa.copy_on_s3(src_file_name=self.zip_path, dst_file_name=current_project_name,
                                            bucket_name=self.s3_bucket_name)
            if not success:  # pragma: no cover
                raise ClickException("Unable to copy the zip to be the current project. Quitting.")
            handler_file = self.handler_path
        else:
            handler_file = self.zip_path
        # Register the Lambda function with that zip as the source
        # You'll also need to define the path to your lambda_handler code.
        self.lambda_arn = self.zappa.update_lambda_function(
            self.s3_bucket_name,
            handler_file,
            self.lambda_name
        )
        # Remove the uploaded zip from S3, because it is now registered..
        self.remove_uploaded_zip()
        # Update the configuration, in case there are changes.
        self.lambda_arn = self.zappa.update_lambda_configuration(
            lambda_arn=self.lambda_arn,
            function_name=self.lambda_name,
            handler=self.lambda_handler,
            description=self.lambda_description,
            vpc_config=self.vpc_config,
            timeout=self.timeout_seconds,
            memory_size=self.memory_size,
            runtime=self.runtime,
            environment_variables=self.environment_variables,
            aws_kms_key_arn=self.aws_kms_key_arn
        )
        # Finally, delete the local copy our zip package
        if self.stage_config.get('delete_local_zip', True):
            self.remove_local_zip()
        if self.use_apigateway:
            self.zappa.create_stack_template(
                lambda_arn=self.lambda_arn,
                lambda_name=self.lambda_name,
                api_key_required=self.api_key_required,
                iam_authorization=self.iam_authorization,
                authorizer=self.authorizer,
                cors_options=self.cors,
                description=self.apigateway_description
            )
            self.zappa.update_stack(self.lambda_name, self.s3_bucket_name, wait=True, update_only=True)
            api_id = self.zappa.get_api_id(self.lambda_name)
            # update binary support
            if self.binary_support:
                self.zappa.add_binary_support(api_id=api_id)
            else:
                self.zappa.remove_binary_support(api_id=api_id)
            endpoint_url = self.deploy_api_gateway(api_id)
            # A custom domain, when configured, replaces the raw API Gateway URL.
            if self.stage_config.get('domain', None):
                endpoint_url = self.stage_config.get('domain')
        else:
            endpoint_url = None
        self.schedule()
        self.callback('post')
        if endpoint_url and 'https://' not in endpoint_url:
            endpoint_url = 'https://' + endpoint_url
        deployed_string = "Your updated Zappa deployment is " + click.style("live", fg='green', bold=True) + "!"
        if self.use_apigateway:
            deployed_string = deployed_string + ": " + click.style("{}".format(endpoint_url), bold=True)
            api_url = None
            if endpoint_url and 'amazonaws.com' not in endpoint_url:
                api_url = self.zappa.get_api_url(
                    self.lambda_name,
                    self.api_stage)
                if endpoint_url != api_url:
                    deployed_string = deployed_string + " (" + api_url + ")"
            # "Touch" the endpoint so the first real request isn't a cold start.
            if self.stage_config.get('touch', True):
                if api_url:
                    requests.get(api_url)
                elif endpoint_url:
                    requests.get(endpoint_url)
        click.echo(deployed_string)
def rollback(self, revision):
"""
Rollsback the currently deploy lambda code to a previous revision.
"""
print("Rolling back..")
self.zappa.rollback_lambda_function_version(
self.lambda_name, versions_back=revision)
print("Done!")
    def tail(self, since, filter_pattern, limit=10000, keep_open=True, colorize=True, http=False, non_http=False):
        """
        Tail this function's logs.

        if keep_open, do so repeatedly, printing any new logs

        :param since: start time string, converted via string_to_timestamp.
        :param filter_pattern: CloudWatch Logs filter pattern passed through
            to fetch_logs.
        :param limit: max number of log entries fetched per poll.
        :param keep_open: poll forever (once per second) instead of printing
            a single batch and returning.
        :param colorize: passed to print_logs for colored output.
        :param http: / :param non_http: passed to print_logs to filter
            HTTP-request log lines in or out.
        """
        try:
            since_stamp = string_to_timestamp(since)
            last_since = since_stamp
            while True:
                # Always fetch from the original start time; duplicates are
                # filtered below by comparing against the last printed entry.
                new_logs = self.zappa.fetch_logs(
                    self.lambda_name,
                    start_time=since_stamp,
                    limit=limit,
                    filter_pattern=filter_pattern,
                )
                new_logs = [ e for e in new_logs if e['timestamp'] > last_since ]
                self.print_logs(new_logs, colorize, http, non_http)
                if not keep_open:
                    break
                if new_logs:
                    last_since = new_logs[-1]['timestamp']
                time.sleep(1)
        except KeyboardInterrupt:  # pragma: no cover
            # Die gracefully
            try:
                sys.exit(0)
            except SystemExit:
                # 130 = conventional exit status for "terminated by Ctrl-C".
                os._exit(130)
    def undeploy(self, no_confirm=False, remove_logs=False):
        """
        Tear down an existing deployment.

        Removes the API Gateway (and optionally its logs), unschedules all
        event triggers, deletes the Lambda function, and optionally removes
        the function's CloudWatch log groups.

        :param no_confirm: skip the interactive y/n prompt.
        :param remove_logs: also delete API Gateway and Lambda logs.
        """
        if not no_confirm:  # pragma: no cover
            confirm = input("Are you sure you want to undeploy? [y/n] ")
            if confirm != 'y':
                return
        if self.use_apigateway:
            if remove_logs:
                self.zappa.remove_api_gateway_logs(self.lambda_name)
            domain_name = self.stage_config.get('domain', None)
            # Only remove the api key when not specified
            if self.api_key_required and self.api_key is None:
                api_id = self.zappa.get_api_id(self.lambda_name)
                self.zappa.remove_api_key(api_id, self.api_stage)
            gateway_id = self.zappa.undeploy_api_gateway(
                self.lambda_name,
                domain_name=domain_name
            )
        self.unschedule()  # removes event triggers, including warm up event.
        self.zappa.delete_lambda_function(self.lambda_name)
        if remove_logs:
            self.zappa.remove_lambda_function_logs(self.lambda_name)
        click.echo(click.style("Done", fg="green", bold=True) + "!")
def schedule(self):
"""
Given a a list of functions and a schedule to execute them,
setup up regular execution.
"""
events = self.stage_config.get('events', [])
if events:
if not isinstance(events, list): # pragma: no cover
print("Events must be supplied as a list.")
return
for event in events:
self.collision_warning(event.get('function'))
if self.stage_config.get('keep_warm', True):
if not events:
events = []
keep_warm_rate = self.stage_config.get('keep_warm_expression', "rate(4 minutes)")
events.append({'name': 'zappa-keep-warm',
'function': 'handler.keep_warm_callback',
'expression': keep_warm_rate,
'description': 'Zappa Keep Warm - {}'.format(self.lambda_name)})
if events:
try:
function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
except botocore.exceptions.ClientError as e: # pragma: no cover
click.echo(click.style("Function does not exist", fg="yellow") + ", please " +
click.style("deploy", bold=True) + "first. Ex:" +
click.style("zappa deploy {}.".format(self.api_stage), bold=True))
sys.exit(-1)
print("Scheduling..")
self.zappa.schedule_events(
lambda_arn=function_response['Configuration']['FunctionArn'],
lambda_name=self.lambda_name,
events=events
)
# Add async tasks SNS
if self.stage_config.get('async_source', None) == 'sns' \
and self.stage_config.get('async_resources', True):
self.lambda_arn = self.zappa.get_lambda_function(
function_name=self.lambda_name)
topic_arn = self.zappa.create_async_sns_topic(
lambda_name=self.lambda_name,
lambda_arn=self.lambda_arn
)
click.echo('SNS Topic created: %s' % topic_arn)
    def unschedule(self):
        """
        Given a list of scheduled functions,
        tear down their regular execution.

        Runs even when no 'events' are configured, so that previously
        registered rules are removed.  Also removes the async-task SNS
        topic when async_source is 'sns'.
        """
        # Run even if events are not defined to remove previously existing ones (thus default to []).
        events = self.stage_config.get('events', [])
        if not isinstance(events, list):  # pragma: no cover
            print("Events must be supplied as a list.")
            return
        function_arn = None
        try:
            function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
            function_arn = function_response['Configuration']['FunctionArn']
        except botocore.exceptions.ClientError as e:  # pragma: no cover
            raise ClickException("Function does not exist, you should deploy first. Ex: zappa deploy {}. "
                                 "Proceeding to unschedule CloudWatch based events.".format(self.api_stage))
        print("Unscheduling..")
        self.zappa.unschedule_events(
            lambda_name=self.lambda_name,
            lambda_arn=function_arn,
            events=events,
        )
        # Remove async task SNS
        if self.stage_config.get('async_source', None) == 'sns' \
           and self.stage_config.get('async_resources', True):
            removed_arns = self.zappa.remove_async_sns_topic(self.lambda_name)
            click.echo('SNS Topic removed: %s' % ', '.join(removed_arns))
def invoke(self, function_name, raw_python=False, command=None):
"""
Invoke a remote function.
"""
# There are three likely scenarios for 'command' here:
# command, which is a modular function path
# raw_command, which is a string of python to execute directly
# manage, which is a Django-specific management command invocation
key = command if command is not None else 'command'
if raw_python:
command = {'raw_command': function_name}
else:
command = {key: function_name}
# Can't use hjson
import json as json
response = self.zappa.invoke_lambda_function(
self.lambda_name,
json.dumps(command),
invocation_type='RequestResponse',
)
if 'LogResult' in response:
print(base64.b64decode(response['LogResult']))
else:
print(response)
    def status(self, return_json=False):
        """
        Describe the status of the current deployment.

        Collects Lambda configuration, 24h CloudWatch invocation/error
        statistics, API Gateway URLs/keys, and scheduled event rules into
        an ordered dict, then prints it either as JSON or as a table.

        :param return_json: print machine-readable JSON instead of a table.
        :return: True (always; raises ClickException if never deployed).
        """
        def tabular_print(title, value):
            """
            Convenience function for printing formatted table items.
            """
            click.echo('%-*s%s' % (32, click.style("\t" + title, fg='green') + ':', str(value)))
            return
        # Lambda Env Details
        lambda_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
        if not lambda_versions:
            raise ClickException(click.style("No Lambda %s detected in %s - have you deployed yet?" %
                                             (self.lambda_name, self.zappa.aws_region), fg='red'))
        status_dict = collections.OrderedDict()
        status_dict["Lambda Versions"] = len(lambda_versions)
        function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
        conf = function_response['Configuration']
        self.lambda_arn = conf['FunctionArn']
        status_dict["Lambda Name"] = self.lambda_name
        status_dict["Lambda ARN"] = self.lambda_arn
        status_dict["Lambda Role ARN"] = conf['Role']
        status_dict["Lambda Handler"] = conf['Handler']
        status_dict["Lambda Code Size"] = conf['CodeSize']
        status_dict["Lambda Version"] = conf['Version']
        status_dict["Lambda Last Modified"] = conf['LastModified']
        status_dict["Lambda Memory Size"] = conf['MemorySize']
        status_dict["Lambda Timeout"] = conf['Timeout']
        status_dict["Lambda Runtime"] = conf['Runtime']
        if 'VpcConfig' in conf.keys():
            status_dict["Lambda VPC ID"] = conf.get('VpcConfig', {}).get('VpcId', 'Not assigned')
        else:
            status_dict["Lambda VPC ID"] = None
        # Calculated statistics
        try:
            function_invocations = self.zappa.cloudwatch.get_metric_statistics(
                Namespace='AWS/Lambda',
                MetricName='Invocations',
                StartTime=datetime.utcnow()-timedelta(days=1),
                EndTime=datetime.utcnow(),
                Period=1440,
                Statistics=['Sum'],
                Dimensions=[{'Name': 'FunctionName',
                             'Value': '{}'.format(self.lambda_name)}]
            )['Datapoints'][0]['Sum']
        except Exception as e:
            # No datapoints (or no permission) -> treat as zero invocations.
            function_invocations = 0
        try:
            function_errors = self.zappa.cloudwatch.get_metric_statistics(
                Namespace='AWS/Lambda',
                MetricName='Errors',
                StartTime=datetime.utcnow()-timedelta(days=1),
                EndTime=datetime.utcnow(),
                Period=1440,
                Statistics=['Sum'],
                Dimensions=[{'Name': 'FunctionName',
                             'Value': '{}'.format(self.lambda_name)}]
            )['Datapoints'][0]['Sum']
        except Exception as e:
            function_errors = 0
        try:
            # ZeroDivisionError when there were no invocations.
            error_rate = "{0:.2f}%".format(function_errors / function_invocations * 100)
        except:
            error_rate = "Error calculating"
        status_dict["Invocations (24h)"] = int(function_invocations)
        status_dict["Errors (24h)"] = int(function_errors)
        status_dict["Error Rate (24h)"] = error_rate
        # URLs
        if self.use_apigateway:
            api_url = self.zappa.get_api_url(
                self.lambda_name,
                self.api_stage)
            status_dict["API Gateway URL"] = api_url
            # Api Keys
            api_id = self.zappa.get_api_id(self.lambda_name)
            for api_key in self.zappa.get_api_keys(api_id, self.api_stage):
                status_dict["API Gateway x-api-key"] = api_key
            # There literally isn't a better way to do this.
            # AWS provides no way to tie a APIGW domain name to its Lambda funciton.
            domain_url = self.stage_config.get('domain', None)
            if domain_url:
                status_dict["Domain URL"] = 'https://' + domain_url
            else:
                status_dict["Domain URL"] = "None Supplied"
        # Scheduled Events
        event_rules = self.zappa.get_event_rules_for_lambda(lambda_arn=self.lambda_arn)
        status_dict["Num. Event Rules"] = len(event_rules)
        if len(event_rules) > 0:
            status_dict['Events'] = []
            for rule in event_rules:
                event_dict = {}
                rule_name = rule['Name']
                event_dict["Event Rule Name"] = rule_name
                event_dict["Event Rule Schedule"] = rule.get(u'ScheduleExpression', None)
                event_dict["Event Rule State"] = rule.get(u'State', None).title()
                event_dict["Event Rule ARN"] = rule.get(u'Arn', None)
                status_dict['Events'].append(event_dict)
        if return_json:
            # Putting the status in machine readable format
            # https://github.com/Miserlou/Zappa/issues/407
            # NOTE(review): module-level `json` appears to be hjson (see the
            # local re-import in invoke()); dumpsJSON emits plain JSON — confirm.
            print(json.dumpsJSON(status_dict))
        else:
            click.echo("Status for " + click.style(self.lambda_name, bold=True) + ": ")
            for k, v in status_dict.items():
                if k == 'Events':
                    # Events are a list of dicts
                    for event in v:
                        for item_k, item_v in event.items():
                            tabular_print(item_k, item_v)
                else:
                    tabular_print(k, v)
        # TODO: S3/SQS/etc. type events?
        return True
def check_stage_name(self, stage_name):
"""
Make sure the stage name matches the AWS-allowed pattern
(calls to apigateway_client.create_deployment, will fail with error
message "ClientError: An error occurred (BadRequestException) when
calling the CreateDeployment operation: Stage name only allows
a-zA-Z0-9_" if the pattern does not match)
"""
if self.stage_name_env_pattern.match(stage_name):
return True
raise ValueError("AWS requires stage name to match a-zA-Z0-9_")
def check_environment(self, environment):
"""
Make sure the environment contains only strings
(since putenv needs a string)
"""
non_strings = []
for (k,v) in environment.items():
if not isinstance(v, basestring):
non_strings.append(k)
if non_strings:
raise ValueError("The following environment variables are not strings: {}".format(", ".join(non_strings)))
else:
return True
    def init(self, settings_file="zappa_settings.json"):
        """
        Initialize a new Zappa project by creating a new zappa_settings.json in a guided process.

        Interactively prompts for: stage name, AWS profile, S3 bucket,
        app entry point (auto-detecting Django/Flask), and optional global
        (multi-region) deployment, then writes the resulting settings file.

        This should probably be broken up into few separate components once it's stable.
        Testing these inputs requires monkeypatching with mock, which isn't pretty.

        :param settings_file: path to write; aborts if it already exists.
        """
        # Make sure we're in a venv.
        self.check_venv()
        # Ensure that we don't already have a zappa_settings file.
        if os.path.isfile(settings_file):
            raise ClickException("This project already has a " + click.style("{0!s} file".format(settings_file), fg="red", bold=True) + "!")
        # Explain system.
        click.echo(click.style(u"""\n███████╗ █████╗ ██████╗ ██████╗ █████╗
╚══███╔╝██╔══██╗██╔══██╗██╔══██╗██╔══██╗
███╔╝ ███████║██████╔╝██████╔╝███████║
███╔╝ ██╔══██║██╔═══╝ ██╔═══╝ ██╔══██║
███████╗██║ ██║██║ ██║ ██║ ██║
╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ ╚═╝\n""", fg='green', bold=True))
        click.echo(click.style("Welcome to ", bold=True) + click.style("Zappa", fg='green', bold=True) + click.style("!\n", bold=True))
        click.echo(click.style("Zappa", bold=True) + " is a system for running server-less Python web applications"
                   " on AWS Lambda and AWS API Gateway.")
        click.echo("This `init` command will help you create and configure your new Zappa deployment.")
        click.echo("Let's get started!\n")
        # Create Env
        while True:
            click.echo("Your Zappa configuration can support multiple production stages, like '" +
                       click.style("dev", bold=True) + "', '" + click.style("staging", bold=True) + "', and '" +
                       click.style("production", bold=True) + "'.")
            env = input("What do you want to call this environment (default 'dev'): ") or "dev"
            try:
                self.check_stage_name(env)
                break
            except ValueError:
                click.echo(click.style("Stage names must match a-zA-Z0-9_", fg="red"))
        # Detect AWS profiles and regions
        # If anyone knows a more straightforward way to easily detect and parse AWS profiles I'm happy to change this, feels like a hack
        session = botocore.session.Session()
        config = session.full_config
        profiles = config.get("profiles", {})
        profile_names = list(profiles.keys())
        click.echo("\nAWS Lambda and API Gateway are only available in certain regions. "\
                   "Let's check to make sure you have a profile set up in one that will work.")
        if not profile_names:
            profile_name, profile = None, None
            click.echo("We couldn't find an AWS profile to use. Before using Zappa, you'll need to set one up. See here for more info: {}"
                       .format(click.style(BOTO3_CONFIG_DOCS_URL, fg="blue", underline=True)))
        elif len(profile_names) == 1:
            profile_name = profile_names[0]
            profile = profiles[profile_name]
            click.echo("Okay, using profile {}!".format(click.style(profile_name, bold=True)))
        else:
            # Multiple profiles found: prefer "default", otherwise the first.
            if "default" in profile_names:
                default_profile = [p for p in profile_names if p == "default"][0]
            else:
                default_profile = profile_names[0]
            while True:
                profile_name = input("We found the following profiles: {}, and {}. "\
                                     "Which would you like us to use? (default '{}'): "
                                     .format(
                                         ', '.join(profile_names[:-1]),
                                         profile_names[-1],
                                         default_profile
                                     )) or default_profile
                if profile_name in profiles:
                    profile = profiles[profile_name]
                    break
                else:
                    click.echo("Please enter a valid name for your AWS profile.")
        profile_region = profile.get("region") if profile else None
        # Create Bucket
        click.echo("\nYour Zappa deployments will need to be uploaded to a " + click.style("private S3 bucket", bold=True) + ".")
        click.echo("If you don't have a bucket yet, we'll create one for you too.")
        default_bucket = "zappa-" + ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(9))
        bucket = input("What do you want call your bucket? (default '%s'): " % default_bucket) or default_bucket
        # Detect Django/Flask
        try:  # pragma: no cover
            import django
            has_django = True
        except ImportError as e:
            has_django = False
        try:  # pragma: no cover
            import flask
            has_flask = True
        except ImportError as e:
            has_flask = False
        print('')
        # App-specific
        if has_django:  # pragma: no cover
            click.echo("It looks like this is a " + click.style("Django", bold=True) + " application!")
            click.echo("What is the " + click.style("module path", bold=True) + " to your projects's Django settings?")
            django_settings = None
            matches = detect_django_settings()
            while django_settings in [None, '']:
                if matches:
                    click.echo("We discovered: " + click.style(', '.join('{}'.format(i) for v, i in enumerate(matches)), bold=True))
                    django_settings = input("Where are your project's settings? (default '%s'): " % matches[0]) or matches[0]
                else:
                    click.echo("(This will likely be something like 'your_project.settings')")
                    django_settings = input("Where are your project's settings?: ")
            # Strip any quotes the user may have pasted in.
            django_settings = django_settings.replace("'", "")
            django_settings = django_settings.replace('"', "")
        else:
            matches = None
            if has_flask:
                click.echo("It looks like this is a " + click.style("Flask", bold=True) + " application.")
                matches = detect_flask_apps()
            click.echo("What's the " + click.style("modular path", bold=True) + " to your app's function?")
            click.echo("This will likely be something like 'your_module.app'.")
            app_function = None
            while app_function in [None, '']:
                if matches:
                    click.echo("We discovered: " + click.style(', '.join('{}'.format(i) for v, i in enumerate(matches)), bold=True))
                    app_function = input("Where is your app's function? (default '%s'): " % matches[0]) or matches[0]
                else:
                    app_function = input("Where is your app's function?: ")
            app_function = app_function.replace("'", "")
            app_function = app_function.replace('"', "")
        # TODO: Create VPC?
        # Memory size? Time limit?
        # Domain? LE keys? Region?
        # 'Advanced Settings' mode?
        # Globalize
        click.echo("\nYou can optionally deploy to " + click.style("all available regions", bold=True) + " in order to provide fast global service.")
        click.echo("If you are using Zappa for the first time, you probably don't want to do this!")
        global_deployment = False
        while True:
            global_type = input("Would you like to deploy this application " + click.style("globally", bold=True) + "? (default 'n') [y/n/(p)rimary]: ")
            if not global_type:
                break
            if global_type.lower() in ["y", "yes", "p", "primary"]:
                global_deployment = True
                break
            if global_type.lower() in ["n", "no"]:
                global_deployment = False
                break
        if global_deployment:
            regions = API_GATEWAY_REGIONS
            if global_type.lower() in ["p", "primary"]:
                # "Primary" means only the '-1' region of each geography.
                envs = [{env + '_' + region.replace('-', '_'): { 'aws_region': region}} for region in regions if '-1' in region]
            else:
                envs = [{env + '_' + region.replace('-', '_'): { 'aws_region': region}} for region in regions]
        else:
            envs = [{env: {}}]
        zappa_settings = {}
        for each_env in envs:
            # Honestly, this could be cleaner.
            env_name = list(each_env.keys())[0]
            env_dict = each_env[env_name]
            env_bucket = bucket
            if global_deployment:
                # `zappa init` doesn't generate compatible s3_bucket names #828
                env_bucket = (bucket + '-' + env_name).replace('_', '-')
            env_zappa_settings = {
                env_name: {
                    's3_bucket': env_bucket,
                }
            }
            if profile_name:
                env_zappa_settings[env_name]['profile_name'] = profile_name
            if 'aws_region' in env_dict:
                env_zappa_settings[env_name]['aws_region'] = env_dict.get('aws_region')
            elif profile_region:
                env_zappa_settings[env_name]['aws_region'] = profile_region
            zappa_settings.update(env_zappa_settings)
            if has_django:
                zappa_settings[env_name]['django_settings'] = django_settings
            else:
                zappa_settings[env_name]['app_function'] = app_function
        import json as json  # hjson is fine for loading, not fine for writing.
        zappa_settings_json = json.dumps(zappa_settings, sort_keys=True, indent=4)
        click.echo("\nOkay, here's your " + click.style("zappa_settings.js", bold=True) + ":\n")
        click.echo(click.style(zappa_settings_json, fg="yellow", bold=False))
        confirm = input("\nDoes this look " + click.style("okay", bold=True, fg="green") + "? (default 'y') [y/n]: ") or 'yes'
        if confirm[0] not in ['y', 'Y', 'yes', 'YES']:
            click.echo("" + click.style("Sorry", bold=True, fg='red') + " to hear that! Please init again.")
            return
        # Write
        with open("zappa_settings.json", "w") as zappa_settings_file:
            zappa_settings_file.write(zappa_settings_json)
        if global_deployment:
            click.echo("\n" + click.style("Done", bold=True) + "! You can also " + click.style("deploy all", bold=True) + " by executing:\n")
            click.echo(click.style("\t$ zappa deploy --all", bold=True))
            click.echo("\nAfter that, you can " + click.style("update", bold=True) + " your application code with:\n")
            click.echo(click.style("\t$ zappa update --all", bold=True))
        else:
            click.echo("\n" + click.style("Done", bold=True) + "! Now you can " + click.style("deploy", bold=True) + " your Zappa application by executing:\n")
            click.echo(click.style("\t$ zappa deploy %s" % env, bold=True))
            click.echo("\nAfter that, you can " + click.style("update", bold=True) + " your application code with:\n")
            click.echo(click.style("\t$ zappa update %s" % env, bold=True))
        click.echo("\nTo learn more, check out our project page on " + click.style("GitHub", bold=True) +
                   " here: " + click.style("https://github.com/Miserlou/Zappa", fg="cyan", bold=True))
        click.echo("and stop by our " + click.style("Slack", bold=True) + " channel here: " +
                   click.style("https://slack.zappa.io", fg="cyan", bold=True))
        click.echo("\nEnjoy!,")
        click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!")
        return
    def certify(self, no_cleanup=False, no_confirm=True, manual=False):
        """
        Register or update a domain certificate for this env.

        Supports three certificate sources, chosen by stage config:
        Let's Encrypt ('lets_encrypt_key'), user-supplied certificate files
        ('certificate' / 'certificate_key' / 'certificate_chain'), or an
        existing ACM certificate ('certificate_arn').

        :param no_cleanup: keep temporary certificate files on disk
            (Let's Encrypt path only).
        :param no_confirm: skip the interactive y/n prompt.
        :param manual: passed through to the Let's Encrypt flow for
            manual DNS validation.
        """
        if not self.domain:
            raise ClickException("Can't certify a domain without " + click.style("domain", fg="red", bold=True) + " configured!")
        if not no_confirm:  # pragma: no cover
            confirm = input("Are you sure you want to certify? [y/n] ")
            if confirm != 'y':
                return
        # Give warning on --no-cleanup
        if no_cleanup:
            clean_up = False
            click.echo(click.style("Warning!", fg="red", bold=True) + " You are calling certify with " +
                       click.style("--no-cleanup", bold=True) +
                       ". Your certificate files will remain in the system temporary directory after this command executes!")
        else:
            clean_up = True
        # Make sure this isn't already deployed.
        deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
        if len(deployed_versions) == 0:
            raise ClickException("This application " + click.style("isn't deployed yet", fg="red") +
                                 " - did you mean to call " + click.style("deploy", bold=True) + "?")
        account_key_location = self.stage_config.get('lets_encrypt_key', None)
        cert_location = self.stage_config.get('certificate', None)
        cert_key_location = self.stage_config.get('certificate_key', None)
        cert_chain_location = self.stage_config.get('certificate_chain', None)
        cert_arn = self.stage_config.get('certificate_arn', None)
        # These are sensitive
        certificate_body = None
        certificate_private_key = None
        certificate_chain = None
        # Prepare for custom Let's Encrypt
        if not cert_location and not cert_arn:
            if not account_key_location:
                raise ClickException("Can't certify a domain without " + click.style("lets_encrypt_key", fg="red", bold=True) +
                                     " or " + click.style("certificate", fg="red", bold=True)+
                                     " or " + click.style("certificate_arn", fg="red", bold=True) + " configured!")
            # Get install account_key to /tmp/account_key.pem
            if account_key_location.startswith('s3://'):
                bucket, key_name = parse_s3_url(account_key_location)
                self.zappa.s3_client.download_file(bucket, key_name, '/tmp/account.key')
            else:
                from shutil import copyfile
                copyfile(account_key_location, '/tmp/account.key')
        # Prepare for Custom SSL
        elif not account_key_location and not cert_arn:
            if not cert_location or not cert_key_location or not cert_chain_location:
                raise ClickException("Can't certify a domain without " +
                                     click.style("certificate, certificate_key and certificate_chain", fg="red", bold=True) + " configured!")
            # Read the supplied certificates.
            with open(cert_location) as f:
                certificate_body = f.read()
            with open(cert_key_location) as f:
                certificate_private_key = f.read()
            with open(cert_chain_location) as f:
                certificate_chain = f.read()
        click.echo("Certifying domain " + click.style(self.domain, fg="green", bold=True) + "..")
        # Get cert and update domain.
        # Let's Encrypt
        if not cert_location and not cert_arn:
            from .letsencrypt import get_cert_and_update_domain, cleanup
            cert_success = get_cert_and_update_domain(
                self.zappa,
                self.lambda_name,
                self.api_stage,
                self.domain,
                clean_up,
                manual
            )
            # Deliberately undocumented feature (for now, at least.)
            # We are giving the user the ability to shoot themselves in the foot.
            # _This is probably not a good idea._
            # However, I am sick and tired of hitting the Let's Encrypt cert
            # limit while testing.
            if clean_up:
                cleanup()
        # Custom SSL / ACM
        else:
            if not self.zappa.get_domain_name(self.domain):
                dns_name = self.zappa.create_domain_name(
                    domain_name=self.domain,
                    certificate_name=self.domain + "-Zappa-Cert",
                    certificate_body=certificate_body,
                    certificate_private_key=certificate_private_key,
                    certificate_chain=certificate_chain,
                    certificate_arn=cert_arn,
                    lambda_name=self.lambda_name,
                    stage=self.api_stage,
                )
                if self.stage_config.get('route53_enabled', True):
                    self.zappa.update_route53_records(self.domain, dns_name)
                print("Created a new domain name with supplied certificate. Please note that it can take up to 40 minutes for this domain to be "
                      "created and propagated through AWS, but it requires no further work on your part.")
            else:
                self.zappa.update_domain_name(
                    domain_name=self.domain,
                    certificate_name=self.domain + "-Zappa-Cert",
                    certificate_body=certificate_body,
                    certificate_private_key=certificate_private_key,
                    certificate_chain=certificate_chain,
                    certificate_arn=cert_arn,
                    lambda_name=self.lambda_name,
                    stage=self.api_stage,
                    route53=self.stage_config.get('route53_enabled', True)
                )
            cert_success = True
        if cert_success:
            click.echo("Certificate " + click.style("updated", fg="green", bold=True) + "!")
        else:
            click.echo(click.style("Failed", fg="red", bold=True) + " to generate or install certificate! :(")
            click.echo("\n==============\n")
            shamelessly_promote()
##
# Shell
##
def shell(self):
"""
Spawn a debug shell.
"""
click.echo(click.style("NOTICE!", fg="yellow", bold=True) + " This is a " + click.style("local", fg="green", bold=True) + " shell, inside a " + click.style("Zappa", bold=True) + " object!")
self.zappa.shell()
return
##
# Utility
##
    def callback(self, position):
        """
        Allows the execution of custom code between creation of the zip file and deployment to AWS.

        Looks up the 'callbacks' mapping in the stage config for *position*
        (e.g. 'zip', 'post'); the value is a 'module.path.function' string.
        The module is first searched for in the working directory, then in
        the active environment, and the resolved function is called with
        this object as its only argument.

        :return: None
        """
        callbacks = self.stage_config.get('callbacks', {})
        callback = callbacks.get(position)
        if callback:
            (mod_path, cb_func_name) = callback.rsplit('.', 1)
            try:  # Prefer callback in working directory
                if mod_path.count('.') >= 1:  # Callback function is nested in a folder
                    (mod_folder_path, mod_name) = mod_path.rsplit('.', 1)
                    mod_folder_path_fragments = mod_folder_path.split('.')
                    working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments)
                else:
                    mod_name = mod_path
                    working_dir = os.getcwd()
                working_dir_importer = pkgutil.get_importer(working_dir)
                module_ = working_dir_importer.find_module(mod_name).load_module(mod_name)
            except (ImportError, AttributeError):
                # AttributeError covers importers that return None for
                # find_module when the module isn't in the working dir.
                try:  # Callback func might be in virtualenv
                    module_ = importlib.import_module(mod_path)
                except ImportError:  # pragma: no cover
                    raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style(
                        "import {position} callback ".format(position=position),
                        bold=True) + 'module: "{mod_path}"'.format(mod_path=click.style(mod_path, bold=True)))
            if not hasattr(module_, cb_func_name):  # pragma: no cover
                raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style(
                    "find {position} callback ".format(position=position), bold=True) + 'function: "{cb_func_name}" '.format(
                    cb_func_name=click.style(cb_func_name, bold=True)) + 'in module "{mod_path}"'.format(mod_path=mod_path))
            cb_func = getattr(module_, cb_func_name)
            cb_func(self)  # Call the function passing self
def check_for_update(self):
"""
Print a warning if there's a new Zappa version available.
"""
try:
version = pkg_resources.require("zappa")[0].version
updateable = check_new_version_available(version)
if updateable:
click.echo(click.style("Important!", fg="yellow", bold=True) +
" A new version of " + click.style("Zappa", bold=True) + " is available!")
click.echo("Upgrade with: " + click.style("pip install zappa --upgrade", bold=True))
click.echo("Visit the project page on GitHub to see the latest changes: " +
click.style("https://github.com/Miserlou/Zappa", bold=True))
except Exception as e: # pragma: no cover
print(e)
return
    def load_settings(self, settings_file=None, session=None):
        """
        Load the local zappa_settings file.
        An existing boto session can be supplied, though this is likely for testing purposes.
        Returns the loaded Zappa object.
        :param settings_file: optional explicit path to the settings file; when
            omitted, the JSON/TOML/YAML file in the working directory is used.
        :param session: optional pre-built boto session (mostly for tests).
        """
        # Ensure we're passed a valid settings file.
        if not settings_file:
            settings_file = self.get_json_or_yaml_settings()
        if not os.path.isfile(settings_file):
            raise ClickException("Please configure your zappa_settings file.")
        # Load up file
        self.load_settings_file(settings_file)
        # Make sure that the stages are valid names:
        for stage_name in self.zappa_settings.keys():
            try:
                self.check_stage_name(stage_name)
            except ValueError:
                raise ValueError("API stage names must match a-zA-Z0-9_ ; '{0!s}' does not.".format(stage_name))
        # Make sure that this stage is our settings
        if self.api_stage not in self.zappa_settings.keys():
            raise ClickException("Please define stage '{0!s}' in your Zappa settings.".format(self.api_stage))
        # We need a working title for this project. Use one if supplied, else cwd dirname.
        if 'project_name' in self.stage_config: # pragma: no cover
            # If the name is invalid, this will throw an exception with message up stack
            self.project_name = validate_name(self.stage_config['project_name'])
        else:
            # Slugified working-directory name, capped at 15 chars to leave room
            # for the stage suffix in the Lambda function name.
            self.project_name = slugify.slugify(os.getcwd().split(os.sep)[-1])[:15]
        # The name of the actual AWS Lambda function, ex, 'helloworld-dev'
        # Assume that we already have have validated the name beforehand.
        # Related: https://github.com/Miserlou/Zappa/pull/664
        # https://github.com/Miserlou/Zappa/issues/678
        # And various others from Slack.
        self.lambda_name = slugify.slugify(self.project_name + '-' + self.api_stage)
        # Load stage-specific settings
        # Default bucket name gets a random 9-char suffix to avoid collisions.
        self.s3_bucket_name = self.stage_config.get('s3_bucket', "zappa-" + ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(9)))
        self.vpc_config = self.stage_config.get('vpc_config', {})
        self.memory_size = self.stage_config.get('memory_size', 512)
        self.app_function = self.stage_config.get('app_function', None)
        self.exception_handler = self.stage_config.get('exception_handler', None)
        self.aws_region = self.stage_config.get('aws_region', None)
        self.debug = self.stage_config.get('debug', True)
        self.prebuild_script = self.stage_config.get('prebuild_script', None)
        self.profile_name = self.stage_config.get('profile_name', None)
        self.log_level = self.stage_config.get('log_level', "DEBUG")
        self.domain = self.stage_config.get('domain', None)
        self.timeout_seconds = self.stage_config.get('timeout_seconds', 30)
        dead_letter_arn = self.stage_config.get('dead_letter_arn', '')
        self.dead_letter_config = {'TargetArn': dead_letter_arn} if dead_letter_arn else {}
        # Provide legacy support for `use_apigateway`, now `apigateway_enabled`.
        # https://github.com/Miserlou/Zappa/issues/490
        # https://github.com/Miserlou/Zappa/issues/493
        self.use_apigateway = self.stage_config.get('use_apigateway', True)
        if self.use_apigateway:
            self.use_apigateway = self.stage_config.get('apigateway_enabled', True)
        self.apigateway_description = self.stage_config.get('apigateway_description', None)
        self.lambda_handler = self.stage_config.get('lambda_handler', 'handler.lambda_handler')
        # DEPRECATED. https://github.com/Miserlou/Zappa/issues/456
        self.remote_env_bucket = self.stage_config.get('remote_env_bucket', None)
        self.remote_env_file = self.stage_config.get('remote_env_file', None)
        self.remote_env = self.stage_config.get('remote_env', None)
        self.settings_file = self.stage_config.get('settings_file', None)
        self.django_settings = self.stage_config.get('django_settings', None)
        self.manage_roles = self.stage_config.get('manage_roles', True)
        self.binary_support = self.stage_config.get('binary_support', True)
        self.api_key_required = self.stage_config.get('api_key_required', False)
        self.api_key = self.stage_config.get('api_key')
        self.iam_authorization = self.stage_config.get('iam_authorization', False)
        self.cors = self.stage_config.get("cors", None)
        self.lambda_description = self.stage_config.get('lambda_description', "Zappa Deployment")
        self.environment_variables = self.stage_config.get('environment_variables', {})
        self.check_environment(self.environment_variables)
        self.authorizer = self.stage_config.get('authorizer', {})
        self.runtime = self.stage_config.get('runtime', get_runtime_from_python_version())
        self.aws_kms_key_arn = self.stage_config.get('aws_kms_key_arn', '')
        desired_role_name = self.lambda_name + "-ZappaLambdaExecutionRole"
        self.zappa = Zappa( boto_session=session,
                            profile_name=self.profile_name,
                            aws_region=self.aws_region,
                            load_credentials=self.load_credentials,
                            desired_role_name=desired_role_name,
                            runtime=self.runtime
                            )
        # Copy any recognized custom settings straight onto the Zappa object.
        for setting in CUSTOM_SETTINGS:
            if setting in self.stage_config:
                setting_val = self.stage_config[setting]
                # Read the policy file contents.
                if setting.endswith('policy'):
                    with open(setting_val, 'r') as f:
                        setting_val = f.read()
                setattr(self.zappa, setting, setting_val)
        if self.app_function:
            self.collision_warning(self.app_function)
            if self.app_function[-3:] == '.py':
                click.echo(click.style("Warning!", fg="red", bold=True) +
                           " Your app_function is pointing to a " + click.style("file and not a function", bold=True) +
                           "! It should probably be something like 'my_file.app', not 'my_file.py'!")
        return self.zappa
def get_json_or_yaml_settings(self, settings_name="zappa_settings"):
"""
Return zappa_settings path as JSON or YAML (or TOML), as appropriate.
"""
zs_json = settings_name + ".json"
zs_yaml = settings_name + ".yml"
zs_toml = settings_name + ".toml"
# Must have at least one
if not os.path.isfile(zs_json) \
and not os.path.isfile(zs_yaml) \
and not os.path.isfile(zs_toml):
raise ClickException("Please configure a zappa_settings file or call `zappa init`.")
# Prefer JSON
if os.path.isfile(zs_json):
settings_file = zs_json
elif os.path.isfile(zs_toml):
settings_file = zs_toml
else:
settings_file = zs_yaml
return settings_file
def load_settings_file(self, settings_file=None):
"""
Load our settings file.
"""
if not settings_file:
settings_file = self.get_json_or_yaml_settings()
if not os.path.isfile(settings_file):
raise ClickException("Please configure your zappa_settings file or call `zappa init`.")
if '.yml' in settings_file:
with open(settings_file) as yaml_file:
try:
self.zappa_settings = yaml.load(yaml_file)
except ValueError: # pragma: no cover
raise ValueError("Unable to load the Zappa settings YAML. It may be malformed.")
elif '.toml' in settings_file:
with open(settings_file) as toml_file:
try:
self.zappa_settings = toml.load(toml_file)
except ValueError: # pragma: no cover
raise ValueError("Unable to load the Zappa settings TOML. It may be malformed.")
else:
with open(settings_file) as json_file:
try:
self.zappa_settings = json.load(json_file)
except ValueError: # pragma: no cover
raise ValueError("Unable to load the Zappa settings JSON. It may be malformed.")
    def create_package(self, output=None):
        """
        Ensure that the package can be properly configured,
        and then create it.
        Builds one zip (normal mode) or two zips (slim_handler mode: a project
        zip plus a small handler zip), then embeds a generated
        ``zappa_settings.py`` into the zip that handles requests.
        """
        # Create the Lambda zip package (includes project and virtualenvironment)
        # Also define the path the handler file so it can be copied to the zip
        # root for Lambda.
        current_file = os.path.dirname(os.path.abspath(
            inspect.getfile(inspect.currentframe())))
        handler_file = os.sep.join(current_file.split(os.sep)[0:]) + os.sep + 'handler.py'
        # Create the zip file(s)
        if self.stage_config.get('slim_handler', False):
            # Create two zips. One with the application and the other with just the handler.
            # https://github.com/Miserlou/Zappa/issues/510
            self.zip_path = self.zappa.create_lambda_zip(
                prefix=self.lambda_name,
                use_precompiled_packages=self.stage_config.get('use_precompiled_packages', True),
                exclude=self.stage_config.get('exclude', [])
            )
            # Make sure the normal venv is not included in the handler's zip
            # (note: this appends to the very list object stored in stage_config)
            exclude = self.stage_config.get('exclude', [])
            cur_venv = self.zappa.get_current_venv()
            exclude.append(cur_venv.split('/')[-1])
            self.handler_path = self.zappa.create_lambda_zip(
                prefix='handler_{0!s}'.format(self.lambda_name),
                venv=self.zappa.create_handler_venv(),
                handler_file=handler_file,
                slim_handler=True,
                exclude=exclude,
                output=output
            )
        else:
            # Custom excludes for different versions.
            # Related: https://github.com/kennethreitz/requests/issues/3985
            if sys.version_info[0] < 3:
                # Exclude packages already builtin to the python lambda environment
                # Related: https://github.com/Miserlou/Zappa/issues/556
                exclude = self.stage_config.get(
                    'exclude', [
                        "boto3",
                        "dateutil",
                        "botocore",
                        "s3transfer",
                        "six.py",
                        "jmespath",
                        "concurrent"
                    ])
            else:
                # This could be python3.6 optimized.
                exclude = self.stage_config.get(
                    'exclude', [
                        "boto3",
                        "dateutil",
                        "botocore",
                        "s3transfer",
                        "concurrent"
                    ])
            # Create a single zip that has the handler and application
            self.zip_path = self.zappa.create_lambda_zip(
                prefix=self.lambda_name,
                handler_file=handler_file,
                use_precompiled_packages=self.stage_config.get('use_precompiled_packages', True),
                exclude=exclude,
                output=output
            )
        # Warn if this is too large for Lambda.
        # 52428800 bytes == 50 MiB, the Lambda direct-upload limit.
        file_stats = os.stat(self.zip_path)
        if file_stats.st_size > 52428800: # pragma: no cover
            print('\n\nWarning: Application zip package is likely to be too large for AWS Lambda. '
                  'Try setting "slim_handler" to true in your Zappa settings file.\n\n')
        # Throw custom settings into the zip that handles requests
        if self.stage_config.get('slim_handler', False):
            handler_zip = self.handler_path
        else:
            handler_zip = self.zip_path
        with zipfile.ZipFile(handler_zip, 'a') as lambda_zip:
            # Build the generated zappa_settings.py module line by line.
            settings_s = "# Generated by Zappa\n"
            if self.app_function:
                if '.' not in self.app_function: # pragma: no cover
                    raise ClickException("Your " + click.style("app_function", fg='red', bold=True) + " value is not a modular path." +
                                         " It needs to be in the format `" + click.style("your_module.your_app_object", bold=True) + "`.")
                app_module, app_function = self.app_function.rsplit('.', 1)
                settings_s = settings_s + "APP_MODULE='{0!s}'\nAPP_FUNCTION='{1!s}'\n".format(app_module, app_function)
            if self.exception_handler:
                settings_s += "EXCEPTION_HANDLER='{0!s}'\n".format(self.exception_handler)
            else:
                settings_s += "EXCEPTION_HANDLER=None\n"
            if self.debug:
                settings_s = settings_s + "DEBUG=True\n"
            else:
                settings_s = settings_s + "DEBUG=False\n"
            settings_s = settings_s + "LOG_LEVEL='{0!s}'\n".format((self.log_level))
            if self.binary_support:
                settings_s = settings_s + "BINARY_SUPPORT=True\n"
            else:
                settings_s = settings_s + "BINARY_SUPPORT=False\n"
            # If we're on a domain, we don't need to define the /<<env>> in
            # the WSGI PATH
            if self.domain:
                settings_s = settings_s + "DOMAIN='{0!s}'\n".format((self.domain))
            else:
                settings_s = settings_s + "DOMAIN=None\n"
            # Pass through remote config bucket and path
            if self.remote_env:
                settings_s = settings_s + "REMOTE_ENV='{0!s}'\n".format(
                    self.remote_env
                )
            # DEPRECATED. use remove_env instead
            elif self.remote_env_bucket and self.remote_env_file:
                settings_s = settings_s + "REMOTE_ENV='s3://{0!s}/{1!s}'\n".format(
                    self.remote_env_bucket, self.remote_env_file
                )
            # Local envs
            env_dict = {}
            if self.aws_region:
                env_dict['AWS_REGION'] = self.aws_region
            env_dict.update(dict(self.environment_variables))
            # Environment variable keys can't be Unicode
            # https://github.com/Miserlou/Zappa/issues/604
            try:
                env_dict = dict((k.encode('ascii'), v) for (k, v) in env_dict.items())
            except Exception: # pragma: no cover
                raise ValueError("Environment variable keys must not be unicode.")
            settings_s = settings_s + "ENVIRONMENT_VARIABLES={0}\n".format(
                env_dict
            )
            # We can be environment-aware
            settings_s = settings_s + "API_STAGE='{0!s}'\n".format((self.api_stage))
            settings_s = settings_s + "PROJECT_NAME='{0!s}'\n".format((self.project_name))
            if self.settings_file:
                settings_s = settings_s + "SETTINGS_FILE='{0!s}'\n".format((self.settings_file))
            else:
                settings_s = settings_s + "SETTINGS_FILE=None\n"
            if self.django_settings:
                settings_s = settings_s + "DJANGO_SETTINGS='{0!s}'\n".format((self.django_settings))
            else:
                settings_s = settings_s + "DJANGO_SETTINGS=None\n"
            # If slim handler, path to project zip
            if self.stage_config.get('slim_handler', False):
                settings_s += "ZIP_PATH='s3://{0!s}/{1!s}_current_project.zip'\n".format(self.s3_bucket_name, self.project_name)
                # since includes are for slim handler add the setting here by joining arbitrary list from zappa_settings file
                # and tell the handler we are the slim_handler
                # https://github.com/Miserlou/Zappa/issues/776
                settings_s += "SLIM_HANDLER=True\n"
                include = self.stage_config.get('include', [])
                if len(include) >= 1:
                    settings_s += "INCLUDE=" + str(include) + '\n'
            # AWS Events function mapping
            event_mapping = {}
            events = self.stage_config.get('events', [])
            for event in events:
                arn = event.get('event_source', {}).get('arn')
                function = event.get('function')
                if arn and function:
                    event_mapping[arn] = function
            settings_s = settings_s + "AWS_EVENT_MAPPING={0!s}\n".format(event_mapping)
            # Authorizer config
            authorizer_function = self.authorizer.get('function', None)
            if authorizer_function:
                settings_s += "AUTHORIZER_FUNCTION='{0!s}'\n".format(authorizer_function)
            # Copy our Django app into root of our package.
            # It doesn't work otherwise.
            if self.django_settings:
                base = __file__.rsplit(os.sep, 1)[0]
                django_py = ''.join(os.path.join(base, 'ext', 'django_zappa.py'))
                lambda_zip.write(django_py, 'django_zappa_app.py')
            # Lambda requires a specific chmod
            temp_settings = tempfile.NamedTemporaryFile(delete=False)
            os.chmod(temp_settings.name, 0o644)
            temp_settings.write(bytes(settings_s, "utf-8"))
            temp_settings.close()
            lambda_zip.write(temp_settings.name, 'zappa_settings.py')
            os.remove(temp_settings.name)
def remove_local_zip(self):
"""
Remove our local zip file.
"""
if self.stage_config.get('delete_local_zip', True):
try:
if os.path.isfile(self.zip_path):
os.remove(self.zip_path)
if self.handler_path and os.path.isfile(self.handler_path):
os.remove(self.handler_path)
except Exception as e: # pragma: no cover
sys.exit(-1)
    def remove_uploaded_zip(self):
        """
        Remove the local and S3 zip file after uploading and updating.
        """
        # Remove the uploaded zip from S3, because it is now registered..
        if self.stage_config.get('delete_s3_zip', True):
            self.zappa.remove_from_s3(self.zip_path, self.s3_bucket_name)
            if self.stage_config.get('slim_handler', False):
                # Need to keep the project zip as the slim handler uses it.
                # NOTE(review): self.zip_path is removed above even in slim mode,
                # while this branch removes the handler zip — confirm whether the
                # project zip really is meant to be deleted here too.
                self.zappa.remove_from_s3(self.handler_path, self.s3_bucket_name)
def on_exit(self):
"""
Cleanup after the command finishes.
Always called: SystemExit, KeyboardInterrupt and any other Exception that occurs.
"""
if self.zip_path:
self.remove_uploaded_zip()
self.remove_local_zip()
def print_logs(self, logs, colorize=True, http=False, non_http=False):
"""
Parse, filter and print logs to the console.
"""
for log in logs:
timestamp = log['timestamp']
message = log['message']
if "START RequestId" in message:
continue
if "REPORT RequestId" in message:
continue
if "END RequestId" in message:
continue
if not colorize:
if http:
if self.is_http_log_entry(message.strip()):
print("[" + str(timestamp) + "] " + message.strip())
elif non_http:
if not self.is_http_log_entry(message.strip()):
print("[" + str(timestamp) + "] " + message.strip())
else:
print("[" + str(timestamp) + "] " + message.strip())
else:
if http:
if self.is_http_log_entry(message.strip()):
click.echo(click.style("[", fg='cyan') + click.style(str(timestamp), bold=True) + click.style("]", fg='cyan') + self.colorize_log_entry(message.strip()))
elif non_http:
if not self.is_http_log_entry(message.strip()):
click.echo(click.style("[", fg='cyan') + click.style(str(timestamp), bold=True) + click.style("]", fg='cyan') + self.colorize_log_entry(message.strip()))
else:
click.echo(click.style("[", fg='cyan') + click.style(str(timestamp), bold=True) + click.style("]", fg='cyan') + self.colorize_log_entry(message.strip()))
def is_http_log_entry(self, string):
"""
Determines if a log entry is an HTTP-formatted log string or not.
"""
# Debug event filter
if 'Zappa Event' in string:
return False
# IP address filter
for token in string.replace('\t', ' ').split(' '):
try:
if (token.count('.') is 3 and token.replace('.', '').isnumeric()):
return True
except Exception: # pragma: no cover
pass
return False
def colorize_log_entry(self, string):
"""
Apply various heuristics to return a colorized version of a string.
If these fail, simply return the string in plaintext.
"""
final_string = string
try:
# First, do stuff in square brackets
inside_squares = re.findall(r'\[([^]]*)\]', string)
for token in inside_squares:
if token in ['CRITICAL', 'ERROR', 'WARNING', 'DEBUG', 'INFO', 'NOTSET']:
final_string = final_string.replace('[' + token + ']', click.style("[", fg='cyan') + click.style(token, fg='cyan', bold=True) + click.style("]", fg='cyan'))
else:
final_string = final_string.replace('[' + token + ']', click.style("[", fg='cyan') + click.style(token, bold=True) + click.style("]", fg='cyan'))
# Then do quoted strings
quotes = re.findall(r'"[^"]*"', string)
for token in quotes:
final_string = final_string.replace(token, click.style(token, fg="yellow"))
# And UUIDs
for token in final_string.replace('\t', ' ').split(' '):
try:
if token.count('-') is 4 and token.replace('-', '').isalnum():
final_string = final_string.replace(token, click.style(token, fg="magenta"))
except Exception: # pragma: no cover
pass
# And IP addresses
try:
if token.count('.') is 3 and token.replace('.', '').isnumeric():
final_string = final_string.replace(token, click.style(token, fg="red"))
except Exception: # pragma: no cover
pass
# And status codes
try:
if token in ['200']:
final_string = final_string.replace(token, click.style(token, fg="green"))
if token in ['400', '401', '403', '404', '405', '500']:
final_string = final_string.replace(token, click.style(token, fg="red"))
except Exception: # pragma: no cover
pass
# And Zappa Events
try:
if "Zappa Event:" in final_string:
final_string = final_string.replace("Zappa Event:", click.style("Zappa Event:", bold=True, fg="green"))
except Exception: # pragma: no cover
pass
# And dates
for token in final_string.split('\t'):
try:
is_date = parser.parse(token)
final_string = final_string.replace(token, click.style(token, fg="green"))
except Exception: # pragma: no cover
pass
final_string = final_string.replace('\t', ' ').replace(' ', ' ')
if final_string[0] != ' ':
final_string = ' ' + final_string
return final_string
except Exception as e: # pragma: no cover
return string
    def execute_prebuild_script(self):
        """
        Parse and execute the prebuild_script from the zappa_settings.
        The setting is a dotted path ``module.function``. The module is
        resolved first relative to the working directory (including nested
        folders), then from the active virtualenv; the resolved function is
        called with no arguments.
        """
        (pb_mod_path, pb_func) = self.prebuild_script.rsplit('.', 1)
        try: # Prefer prebuild script in working directory
            if pb_mod_path.count('.') >= 1: # Prebuild script func is nested in a folder
                (mod_folder_path, mod_name) = pb_mod_path.rsplit('.', 1)
                mod_folder_path_fragments = mod_folder_path.split('.')
                working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments)
            else:
                mod_name = pb_mod_path
                working_dir = os.getcwd()
            # Import the module from the resolved directory on disk.
            working_dir_importer = pkgutil.get_importer(working_dir)
            module_ = working_dir_importer.find_module(mod_name).load_module(mod_name)
        except (ImportError, AttributeError):
            try: # Prebuild func might be in virtualenv
                module_ = importlib.import_module(pb_mod_path)
            except ImportError: # pragma: no cover
                raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style(
                    "import prebuild script ", bold=True) + 'module: "{pb_mod_path}"'.format(
                    pb_mod_path=click.style(pb_mod_path, bold=True)))
        if not hasattr(module_, pb_func): # pragma: no cover
            raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style(
                "find prebuild script ", bold=True) + 'function: "{pb_func}" '.format(
                pb_func=click.style(pb_func, bold=True)) + 'in module "{pb_mod_path}"'.format(
                pb_mod_path=pb_mod_path))
        prebuild_function = getattr(module_, pb_func)
        prebuild_function() # Call the function
def collision_warning(self, item):
"""
Given a string, print a warning if this could
collide with a Zappa core package module.
Use for app functions and events.
"""
namespace_collisions = [
"zappa.", "wsgi.", "middleware.", "handler.", "util.", "letsencrypt.", "cli."
]
for namespace_collision in namespace_collisions:
if namespace_collision in item:
click.echo(click.style("Warning!", fg="red", bold=True) +
" You may have a namespace collision with " + click.style(item, bold=True) +
"! You may want to rename that file.")
def deploy_api_gateway(self, api_id):
cache_cluster_enabled = self.stage_config.get('cache_cluster_enabled', False)
cache_cluster_size = str(self.stage_config.get('cache_cluster_size', .5))
endpoint_url = self.zappa.deploy_api_gateway(
api_id=api_id,
stage_name=self.api_stage,
cache_cluster_enabled=cache_cluster_enabled,
cache_cluster_size=cache_cluster_size,
cloudwatch_log_level=self.stage_config.get('cloudwatch_log_level', 'OFF'),
cloudwatch_data_trace=self.stage_config.get('cloudwatch_data_trace', False),
cloudwatch_metrics_enabled=self.stage_config.get('cloudwatch_metrics_enabled', False),
)
return endpoint_url
def check_venv(self):
""" Ensure we're inside a virtualenv. """
if self.zappa:
venv = self.zappa.get_current_venv()
else:
# Just for `init`, when we don't have settings yet.
venv = Zappa.get_current_venv()
if not venv:
raise ClickException(
click.style("Zappa", bold=True) + " requires an " + click.style("active virtual environment", bold=True, fg="red") + "!\n" +
"Learn more about virtual environments here: " + click.style("http://docs.python-guide.org/en/latest/dev/virtualenvs/", bold=False, fg="cyan"))
    def silence(self):
        """
        Route all stdout to null.
        Replaces both sys.stdout and sys.stderr with handles on os.devnull.
        The previous stream objects are not saved, so this is not reversible
        from here, and the devnull handles are left open for process lifetime.
        """
        sys.stdout = open(os.devnull, 'w')
        sys.stderr = open(os.devnull, 'w')
####################################################################
# Main
####################################################################
def shamelessly_promote():
    """
    Shamelessly promote our little community.
    """
    help_line = (
        "Need " + click.style("help", fg='green', bold=True)
        + "? Found a " + click.style("bug", fg='green', bold=True)
        + "? Let us " + click.style("know", fg='green', bold=True) + "! :D"
    )
    github_line = (
        "File bug reports on " + click.style("GitHub", bold=True) + " here: "
        + click.style("https://github.com/Miserlou/Zappa", fg='cyan', bold=True)
    )
    slack_line = (
        "And join our " + click.style("Slack", bold=True) + " channel here: "
        + click.style("https://slack.zappa.io", fg='cyan', bold=True)
    )
    signature = " ~ Team " + click.style("Zappa", bold=True) + "!"
    for line in (help_line, github_line, slack_line, "Love!,", signature):
        click.echo(line)
def handle(): # pragma: no cover
    """
    Main program execution handler.
    """
    # Construct the CLI object *before* entering the try block: previously a
    # failure inside ZappaCLI() reached the except clauses, whose calls to
    # cli.on_exit() raised NameError on the unbound `cli`, masking the real
    # error.
    cli = ZappaCLI()
    try:
        sys.exit(cli.handle())
    except SystemExit as e: # pragma: no cover
        cli.on_exit()
        sys.exit(e.code)
    except KeyboardInterrupt: # pragma: no cover
        cli.on_exit()
        sys.exit(130)
    except Exception as e:
        cli.on_exit()
        click.echo("Oh no! An " + click.style("error occurred", fg='red', bold=True) + "! :(")
        click.echo("\n==============\n")
        import traceback
        traceback.print_exc()
        click.echo("\n==============\n")
        shamelessly_promote()
        sys.exit(-1)
| 42.084282 | 197 | 0.560942 |
8210f27732be381e884c7df84b93448d917d4e49 | 3,706 | py | Python | python/tvm/relay/backend/graph_runtime_codegen.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 9 | 2019-12-17T08:03:54.000Z | 2022-01-19T02:34:23.000Z | python/tvm/relay/backend/graph_runtime_codegen.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 2 | 2020-09-14T09:18:25.000Z | 2020-09-24T03:28:18.000Z | python/tvm/relay/backend/graph_runtime_codegen.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 3 | 2020-10-04T20:30:18.000Z | 2022-01-24T18:03:52.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
A compiler from a Relay expression to TVM's graph runtime.
The compiler is built from a few pieces.
First we define a compiler from a single Relay expression to the
graph language. We require the expression to be a function.
The function's parameters correspond to the placeholder/inputs
and model parameters found in the computation graph representation.
The body of the function represents the computation graph.
The compiler's output is a program in the graph language, which is composed of
Node, NodeRef, InputNode, and OpNode.
This "little language" represents programs in TVM's graph format.
To connect to the graph runtime, we use a printer that converts our graph format
into TVM's JSON format. The resulting string can be loaded by
contrib.graph_runtime or any other TVM runtime compatible systems.
"""
from tvm.runtime.ndarray import empty
from tvm.relay import _build_module
from tvm import target as _target
from tvm.tir import expr as _expr
class GraphRuntimeCodegen(object):
    """The compiler from Relay to the TVM runtime system.

    Thin Python wrapper around the C++ ``_GraphRuntimeCodegen`` module:
    the constructor fetches the packed functions it exposes and initializes
    the backend with the given IRModule and build target(s).
    """
    def __init__(self, mod, target):
        self._mod = _build_module._GraphRuntimeCodegen()
        # Bind the packed functions exposed by the C++ codegen object.
        self._init = self._mod["init"]
        self._codegen = self._mod["codegen"]
        self._get_graph_json = self._mod["get_graph_json"]
        self._list_params_name = self._mod["list_params_name"]
        self._get_param_by_name = self._mod["get_param_by_name"]
        self._get_irmodule = self._mod["get_irmodule"]
        self._setup(mod, target)
    def _setup(self, mod, target):
        # Normalize `target` into a {device: Target} mapping for the backend.
        tgts = {}
        if isinstance(target, dict):
            for dev, tgt in target.items():
                if not isinstance(tgt, (str, _target.Target)):
                    raise Exception("Unknown target type")
                tgts[dev] = _target.create(tgt)
        elif isinstance(target, (str, _target.Target)):
            # A bare target string/object applies to device 0.
            tgts[_expr.IntImm("int32", 0)] = _target.create(target)
        self._init(mod, tgts)
    def codegen(self, func):
        """Compile a single function into a graph.
        Parameters
        ----------
        func: tvm.relay.Expr
            The function to compile.
        Returns
        -------
        graph_json : str
            The graph json that can be consumed by runtime.
        mod : IRModule or Dict[str, IRModule]
            The lowered functions.
        params : Dict[str, tvm.nd.NDArray]
            Additional constant parameters.
        """
        self._codegen(func)
        graph_json = self._get_graph_json()
        lowered_func = self._get_irmodule()
        param_names = self._list_params_name()
        params = {}
        for key in param_names:
            arr = self._get_param_by_name(key)
            # Copy each parameter into a freshly allocated NDArray.
            param = empty(arr.shape, dtype=arr.dtype, ctx=arr.ctx)
            arr.copyto(param)
            params[key] = param
        return graph_json, lowered_func, params
| 39.849462 | 80 | 0.685915 |
db3275888711dbd012dfce80b7fe25f976a308ae | 2,089 | py | Python | samples/test/various_io_types_test.py | TheDutchDevil/pipelines | a5ba3f0fcd98ffd60f98bce964927ab63382d5d7 | [
"Apache-2.0"
] | null | null | null | samples/test/various_io_types_test.py | TheDutchDevil/pipelines | a5ba3f0fcd98ffd60f98bce964927ab63382d5d7 | [
"Apache-2.0"
] | null | null | null | samples/test/various_io_types_test.py | TheDutchDevil/pipelines | a5ba3f0fcd98ffd60f98bce964927ab63382d5d7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Two step v2-compatible pipeline."""
import kfp
from .various_io_types import my_pipeline
from .util import run_pipeline_func, TestCase
def verify(run, run_id: str, **kwargs):
    """Assert that the given pipeline run finished successfully."""
    status = run.status
    assert status == 'Succeeded'
    # TODO(Bobgy): verify MLMD status
run_pipeline_func([
    # NOTE(review): every test case below is commented out, so this module
    # currently submits an empty list of test cases; re-enable once the
    # compiler issues referenced below are fixed.
    # Currently fails with:
    # sing_rewriter.py", line 520, in _refactor_inputs_if_uri_placeholder
    # container_template['container']['args'])
    # File "/Users/gongyuan/kfp/pipelines/sdk/python/kfp/compiler/_data_passing_rewriter.py", line 510, in reconcile_filename
    # 'supported.' % artifact_input['name'])
    # RuntimeError: Cannot find input3 in output to file name mapping.Please note currently connecting URI placeholder with path placeholder is not supported.
    # TestCase(
    #     pipeline_func=my_pipeline,
    #     verify_func=verify,
    #     mode=kfp.dsl.PipelineExecutionMode.V1_LEGACY
    # ),
    # Currently fails with:
    # RuntimeError: Internal compiler error: Compiler has produced Argo-incompatible workflow.
    # Please create a new issue at https://github.com/kubeflow/pipelines/issues attaching the pipeline code and the pipeline package.
    # Error: time="2021-04-06T16:50:06.048Z" level=error msg="Error in file /dev/stdin: templates.pipeline-with-various-types inputs.parameters.input_3-producer-pod-id- was not supplied"
    # time="2021-04-06T16:50:06.048Z" level=fatal msg="Errors encountered in validation"
    # TestCase(pipeline_func=my_pipeline, verify_func=verify),
])
| 45.413043 | 186 | 0.742939 |
ca0f4faa5c59f8c8f6608d0be5399c8f3c4559d2 | 3,550 | py | Python | django_database/storefront/settings.py | SyedArsalanAmin/webdev | 28fd7fc6c865588604c9e965a4416c7e0eb4a1c8 | [
"MIT"
] | null | null | null | django_database/storefront/settings.py | SyedArsalanAmin/webdev | 28fd7fc6c865588604c9e965a4416c7e0eb4a1c8 | [
"MIT"
] | null | null | null | django_database/storefront/settings.py | SyedArsalanAmin/webdev | 28fd7fc6c865588604c9e965a4416c7e0eb4a1c8 | [
"MIT"
] | null | null | null | """
Django settings for storefront project.
Generated by 'django-admin startproject' using Django 3.2.12.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-afl=f6upfj77vb7w9k5f_aeucv%4do0v63#$rn0#z+ejr_+fde'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
"migration_fixer",
'django.contrib.messages',
'django.contrib.staticfiles',
'playground',
'debug_toolbar',
'store',
'store_custom',
'tags',
'likes'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
"debug_toolbar.middleware.DebugToolbarMiddleware"
]
INTERNAL_IPS = [
# ...
"127.0.0.1",
# ...
]
ROOT_URLCONF = 'storefront.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'storefront.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'storefront',
'HOST':'localhost',
'USER':'root',
'PASSWORD':'12345678'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 24.825175 | 91 | 0.688732 |
b3ad77b99f28196ebfa2d4c3fc38944afa801460 | 5,576 | py | Python | consumers/python/tweet_analytics.py | cuong24/docker-kafka-cassandra | 14c3c25e45cf1ed2da40c3fd7b2f7012e811a77d | [
"MIT"
] | null | null | null | consumers/python/tweet_analytics.py | cuong24/docker-kafka-cassandra | 14c3c25e45cf1ed2da40c3fd7b2f7012e811a77d | [
"MIT"
] | null | null | null | consumers/python/tweet_analytics.py | cuong24/docker-kafka-cassandra | 14c3c25e45cf1ed2da40c3fd7b2f7012e811a77d | [
"MIT"
] | 2 | 2021-07-21T13:42:16.000Z | 2021-07-31T04:31:04.000Z | import nltk
import os
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import twitter_samples, stopwords
from nltk.tag import pos_tag
from nltk.tokenize import word_tokenize
from nltk import FreqDist, classify, NaiveBayesClassifier
from sklearn.model_selection import train_test_split
import pickle
from sklearn.preprocessing import FunctionTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import re, string, random, sys
def tokenmerger(tokens):
return " ".join(tokens)
def tokenizeIt(data):
res = [word_tokenize(text) for text in data]
return res
def removeIt(data):
res = [tokenmerger(remove_noise(tokens)) for tokens in data]
return res
def remove_noise(tweet_tokens):
cleaned_tokens = []
lemmatizer = WordNetLemmatizer()
stop_words = stopwords.words('english')
for token, tag in pos_tag(tweet_tokens):
token = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*\(\),]|'\
'(?:%[0-9a-fA-F][0-9a-fA-F]))+','', token)
token = re.sub("(@[A-Za-z0-9_]+)","", token)
if tag.startswith("NN"):
pos = 'n'
elif tag.startswith('VB'):
pos = 'v'
else:
pos = 'a'
token = lemmatizer.lemmatize(token, pos)
if len(token) > 0 and token not in string.punctuation and token.lower() not in stop_words:
cleaned_tokens.append(token.lower())
return cleaned_tokens
def get_all_words(cleaned_tokens_list):
for tokens in cleaned_tokens_list:
for token in tokens:
yield token
def get_tweets_for_model(cleaned_tokens_list):
for tweet_tokens in cleaned_tokens_list:
yield dict([token, True] for token in tweet_tokens)
def trainModel():
positive_tweets = twitter_samples.strings('positive_tweets.json')
negative_tweets = twitter_samples.strings('negative_tweets.json')
text = twitter_samples.strings('tweets.20150430-223406.json')
# tweet_tokens = twitter_samples.tokenized('positive_tweets.json')[0]
stop_words = stopwords.words('english')
positive_tweet_tokens = twitter_samples.tokenized('positive_tweets.json')
negative_tweet_tokens = twitter_samples.tokenized('negative_tweets.json')
positive_cleaned_tokens_list = []
negative_cleaned_tokens_list = []
for tokens in positive_tweet_tokens:
positive_cleaned_tokens_list.append(remove_noise(tokens))
for tokens in negative_tweet_tokens:
negative_cleaned_tokens_list.append(remove_noise(tokens))
# all_pos_words = get_all_words(positive_cleaned_tokens_list)
# freq_dist_pos = FreqDist(all_pos_words)
# print(freq_dist_pos.most_common(10))
positive_tokens_for_model = get_tweets_for_model(positive_cleaned_tokens_list)
negative_tokens_for_model = get_tweets_for_model(negative_cleaned_tokens_list)
positive_dataset = [(tweet_dict, "Positive")
for tweet_dict in positive_tokens_for_model]
negative_dataset = [(tweet_dict, "Negative")
for tweet_dict in negative_tokens_for_model]
dataset = positive_dataset + negative_dataset
random.shuffle(dataset)
train_data = dataset[:7000]
test_data = dataset[7000:]
classifier = NaiveBayesClassifier.train(train_data)
print("Accuracy is:", classify.accuracy(classifier, test_data))
with open('trainedmodel.pkl', 'wb') as f:
pickle.dump(classifier, f)
# print(classifier.show_most_informative_features(10))
def trainRandomForest():
stop_words = stopwords.words('english')
positive_tweets = twitter_samples.strings('positive_tweets.json')
negative_tweets = twitter_samples.strings('negative_tweets.json')
X = positive_tweets + negative_tweets
positives = np.ones([len(positive_tweets),1])
negatives = np.zeros([len(negative_tweets),1])
y = np.concatenate([positives, negatives])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1, shuffle=True)
pipe = Pipeline([
('tokenize', FunctionTransformer(tokenizeIt)),
('noise', FunctionTransformer(removeIt)),
('tfidf', TfidfVectorizer(max_features=1500, min_df=5, max_df=0.7)),
('classifier', RandomForestClassifier(n_estimators=100, random_state=1))
])
pipe.fit(X_train, y_train)
y_pred = pipe.predict(X_test)
print(confusion_matrix(y_test,y_pred))
print(classification_report(y_test,y_pred))
print(accuracy_score(y_test, y_pred))
with open('trainedpipe.pkl', 'wb') as f:
pickle.dump(pipe, f)
if __name__ == "__main__":
path = os.path.dirname(os.path.realpath(__file__))
parent = os.path.dirname(path)
cwd = parent + "/nltk_data"
print("Set NLTK path to {}".format(cwd))
nltk.data.path = [cwd]
if len(sys.argv) > 1 and sys.argv[1] == "train" :
trainRandomForest()
# trainRandomForest()
# trainModel()
custom_tweet = "I ordered just once from TerribleCo, they screwed up, never used the app again."
# custom_tokens = remove_noise(word_tokenize(custom_tweet))
# doc = ' '.join(custom_tokens)
with open('trainedpipe.pkl', 'rb') as f:
classifier = pickle.load(f)
res = classifier.predict([custom_tweet])
print(custom_tweet, "Positive" if res == 1 else "Negative")
| 34.419753 | 106 | 0.697991 |
205f23bcdc33d660c64d782a3a97e97a9c032ec1 | 36,835 | py | Python | tests/unit/faucet/test_valve_config.py | cglewis/faucet | 9e4cfc79d580c8a4a70a21a1dc6e2ec7ee0fc0aa | [
"Apache-2.0"
] | 393 | 2017-09-21T11:00:03.000Z | 2022-03-31T09:46:28.000Z | tests/unit/faucet/test_valve_config.py | proteanblank/faucet | 3acbc1cb788c1854b7d290a1a050c7ee082a0b3e | [
"Apache-2.0"
] | 1,363 | 2017-09-17T21:54:43.000Z | 2022-03-29T20:49:42.000Z | tests/unit/faucet/test_valve_config.py | cglewis/faucet | 9e4cfc79d580c8a4a70a21a1dc6e2ec7ee0fc0aa | [
"Apache-2.0"
] | 146 | 2017-09-18T02:33:35.000Z | 2022-01-13T07:21:12.000Z | #!/usr/bin/env python3
"""Unit tests run as PYTHONPATH=../../.. python3 ./test_valve.py."""
# pylint: disable=too-many-lines
# Copyright (C) 2015 Research and Innovation Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import copy
import hashlib
import unittest
import time
from os_ken.ofproto import ofproto_v1_3 as ofp
from faucet import config_parser_util
from faucet import valve_of
from clib.fakeoftable import CONTROLLER_PORT
from clib.valve_test_lib import BASE_DP1_CONFIG, CONFIG, DP1_CONFIG, FAUCET_MAC, ValveTestBases
class ValveIncludeTestCase(ValveTestBases.ValveTestNetwork):
"""Test include optional files."""
CONFIG = """
include-optional: ['/does/not/exist/']
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
""" % DP1_CONFIG
def setUp(self):
"""Setup config with non-existent optional include file"""
self.setup_valves(self.CONFIG)
def test_include_optional(self):
"""Test include optional files."""
self.assertEqual(1, int(self.get_prom('dp_status')))
class ValveBadConfTestCase(ValveTestBases.ValveTestNetwork):
"""Test recovery from a bad config file."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
""" % DP1_CONFIG
MORE_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
p2:
number: 2
native_vlan: 0x100
""" % DP1_CONFIG
BAD_CONFIG = """
dps: {}
"""
def setUp(self):
"""Setup invalid config"""
self.setup_valves(self.CONFIG)
def test_bad_conf(self):
"""Test various config types & config reloading"""
for config, load_error in (
(self.CONFIG, 0),
(self.BAD_CONFIG, 1),
(self.CONFIG, 0),
(self.MORE_CONFIG, 0),
(self.BAD_CONFIG, 1),
(self.CONFIG, 0)):
with open(self.config_file, 'w', encoding='utf-8') as config_file:
config_file.write(config)
self.valves_manager.request_reload_configs(self.mock_time(), self.config_file)
self.assertEqual(
load_error,
self.get_prom('faucet_config_load_error', bare=True),
msg='%u: %s' % (load_error, config))
class ValveChangePortTestCase(ValveTestBases.ValveTestNetwork):
"""Test changes to config on ports."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
p2:
number: 2
native_vlan: 0x200
permanent_learn: True
""" % DP1_CONFIG
LESS_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
p2:
number: 2
native_vlan: 0x200
permanent_learn: False
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(self.CONFIG)
def test_delete_permanent_learn(self):
"""Test port permanent learn can deconfigured."""
table = self.network.tables[self.DP_ID]
before_table_state = table.table_state()
self.rcv_packet(2, 0x200, {
'eth_src': self.P2_V200_MAC,
'eth_dst': self.P3_V200_MAC,
'ipv4_src': '10.0.0.2',
'ipv4_dst': '10.0.0.3',
'vid': 0x200})
self.update_and_revert_config(
self.CONFIG, self.LESS_CONFIG,
'warm', before_table_states={self.DP_ID: before_table_state})
class ValveDeletePortTestCase(ValveTestBases.ValveTestNetwork):
"""Test deletion of a port."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100]
p2:
number: 2
tagged_vlans: [0x100]
p3:
number: 3
tagged_vlans: [0x100]
""" % DP1_CONFIG
LESS_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100]
p2:
number: 2
tagged_vlans: [0x100]
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(self.CONFIG)
def test_port_delete(self):
"""Test port can be deleted."""
self.update_and_revert_config(self.CONFIG, self.LESS_CONFIG, 'cold')
class ValveAddPortMirrorNoDelVLANTestCase(ValveTestBases.ValveTestNetwork):
"""Test addition of port mirroring does not cause a del VLAN."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100]
p2:
number: 2
tagged_vlans: [0x100]
p3:
number: 3
output_only: true
""" % DP1_CONFIG
MORE_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100]
p2:
number: 2
tagged_vlans: [0x100]
p3:
number: 3
output_only: true
mirror: [1]
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
_ = self.setup_valves(self.CONFIG)[self.DP_ID]
def test_port_mirror(self):
"""Test addition of port mirroring is a warm start."""
_ = self.update_config(self.MORE_CONFIG, reload_type='warm')[self.DP_ID]
class ValveAddPortTestCase(ValveTestBases.ValveTestNetwork):
"""Test addition of a port."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100]
p2:
number: 2
tagged_vlans: [0x100]
""" % DP1_CONFIG
MORE_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100]
p2:
number: 2
tagged_vlans: [0x100]
p3:
number: 3
tagged_vlans: [0x100]
""" % DP1_CONFIG
@staticmethod
def _inport_flows(in_port, ofmsgs):
return [
ofmsg for ofmsg in ValveTestBases.flowmods_from_flows(ofmsgs)
if ofmsg.match.get('in_port') == in_port]
def setUp(self):
"""Setup basic port and vlan config"""
initial_ofmsgs = self.setup_valves(self.CONFIG)[self.DP_ID]
self.assertFalse(self._inport_flows(3, initial_ofmsgs))
def test_port_add(self):
"""Test port can be added."""
reload_ofmsgs = self.update_config(self.MORE_CONFIG, reload_type='cold')[self.DP_ID]
self.assertTrue(self._inport_flows(3, reload_ofmsgs))
class ValveAddPortTrafficTestCase(ValveTestBases.ValveTestNetwork):
"""Test addition of a port with traffic."""
# NOTE: This needs to use 'Generic' hardware,
# as GenericTFM does not support 'warm' start
REQUIRE_TFM = False
CONFIG = """
dps:
s1:
dp_id: 1
hardware: Generic
interfaces:
p1:
number: 1
tagged_vlans: [0x100]
p2:
number: 2
tagged_vlans: [0x100]
"""
MORE_CONFIG = """
dps:
s1:
dp_id: 1
hardware: Generic
interfaces:
p1:
number: 1
tagged_vlans: [0x100]
p2:
number: 2
tagged_vlans: [0x100]
p3:
number: 3
tagged_vlans: [0x100]
"""
@staticmethod
def _inport_flows(in_port, ofmsgs):
return [
ofmsg for ofmsg in ValveTestBases.flowmods_from_flows(ofmsgs)
if ofmsg.match.get('in_port') == in_port]
def _learn(self, in_port):
ucast_pkt = self.pkt_match(in_port, 1)
ucast_pkt['in_port'] = in_port
ucast_pkt['vlan_vid'] = self.V100
table = self.network.tables[self.DP_ID]
self.assertTrue(table.is_output(ucast_pkt, port=CONTROLLER_PORT))
self.rcv_packet(in_port, self.V100, ucast_pkt)
def _unicast_between(self, in_port, out_port, not_out=1):
ucast_match = self.pkt_match(in_port, out_port)
ucast_match['in_port'] = in_port
ucast_match['vlan_vid'] = self.V100
table = self.network.tables[self.DP_ID]
self.assertTrue(table.is_output(ucast_match, port=out_port))
self.assertFalse(table.is_output(ucast_match, port=not_out))
def setUp(self):
initial_ofmsgs = self.setup_valves(self.CONFIG)[self.DP_ID]
self.assertFalse(self._inport_flows(3, initial_ofmsgs))
def test_port_add_no_ofmsgs(self):
"""New config does not generate new flows."""
update_ofmsgs = self.update_config(self.MORE_CONFIG,
reload_type='warm')[self.DP_ID]
self.assertFalse(self._inport_flows(3, update_ofmsgs))
def test_port_add_link_state(self):
"""New port can be added in link-down state."""
self.update_config(self.MORE_CONFIG, reload_type='warm')
self.add_port(3, link_up=False)
self.port_expected_status(3, 0)
self.set_port_link_up(3)
self.port_expected_status(3, 1)
def test_port_add_traffic(self):
"""New port can be added, and pass traffic."""
self.update_config(self.MORE_CONFIG, reload_type='warm')
self.add_port(3)
self._learn(2)
self._learn(3)
self._unicast_between(2, 3)
self._unicast_between(3, 2)
class ValveWarmStartVLANTestCase(ValveTestBases.ValveTestNetwork):
"""Test change of port VLAN only is a warm start."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 9
tagged_vlans: [0x100]
p2:
number: 11
tagged_vlans: [0x100]
p3:
number: 13
tagged_vlans: [0x100]
p4:
number: 14
native_vlan: 0x200
""" % DP1_CONFIG
WARM_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 9
tagged_vlans: [0x100]
p2:
number: 11
tagged_vlans: [0x100]
p3:
number: 13
tagged_vlans: [0x100]
p4:
number: 14
native_vlan: 0x300
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(self.CONFIG)
def test_warm_start(self):
"""Test VLAN change is warm startable and metrics maintained."""
self.update_and_revert_config(self.CONFIG, self.WARM_CONFIG, 'warm')
self.rcv_packet(9, 0x100, {
'eth_src': self.P1_V100_MAC,
'eth_dst': self.UNKNOWN_MAC,
'ipv4_src': '10.0.0.1',
'ipv4_dst': '10.0.0.2'})
vlan_labels = {'vlan': str(int(0x100))}
port_labels = {'port': 'p1', 'port_description': 'p1'}
port_labels.update(vlan_labels)
def verify_func():
self.assertEqual(
1, self.get_prom('vlan_hosts_learned', labels=vlan_labels))
self.assertEqual(
1, self.get_prom('port_vlan_hosts_learned', labels=port_labels))
verify_func()
self.update_config(self.WARM_CONFIG, reload_type='warm')
verify_func()
class ValveDeleteVLANTestCase(ValveTestBases.ValveTestNetwork):
"""Test deleting VLAN."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100, 0x200]
p2:
number: 2
native_vlan: 0x200
""" % DP1_CONFIG
LESS_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x200]
p2:
number: 2
native_vlan: 0x200
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(self.CONFIG)
def test_delete_vlan(self):
"""Test VLAN can be deleted."""
self.update_and_revert_config(self.CONFIG, self.LESS_CONFIG, 'cold')
class ValveChangeDPTestCase(ValveTestBases.ValveTestNetwork):
"""Test changing DP."""
CONFIG = """
dps:
s1:
%s
priority_offset: 4321
interfaces:
p1:
number: 1
native_vlan: 0x100
p2:
number: 2
native_vlan: 0x100
""" % DP1_CONFIG
NEW_CONFIG = """
dps:
s1:
%s
priority_offset: 1234
interfaces:
p1:
number: 1
native_vlan: 0x100
p2:
number: 2
native_vlan: 0x100
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config with priority offset"""
self.setup_valves(self.CONFIG)
def test_change_dp(self):
"""Test DP changed."""
self.update_and_revert_config(self.CONFIG, self.NEW_CONFIG, 'cold')
class ValveAddVLANTestCase(ValveTestBases.ValveTestNetwork):
"""Test adding VLAN."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100, 0x200]
p2:
number: 2
tagged_vlans: [0x100]
""" % DP1_CONFIG
MORE_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100, 0x200]
p2:
number: 2
tagged_vlans: [0x100, 0x300]
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(self.CONFIG)
def test_add_vlan(self):
"""Test VLAN can added."""
self.update_and_revert_config(self.CONFIG, self.MORE_CONFIG, 'cold')
class ValveChangeACLTestCase(ValveTestBases.ValveTestNetwork):
"""Test changes to ACL on a port."""
CONFIG = """
acls:
acl_same_a:
- rule:
actions:
allow: 1
acl_same_b:
- rule:
actions:
allow: 1
acl_diff_c:
- rule:
actions:
allow: 0
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
acl_in: acl_same_a
p2:
number: 2
native_vlan: 0x200
""" % DP1_CONFIG
SAME_CONTENT_CONFIG = """
acls:
acl_same_a:
- rule:
actions:
allow: 1
acl_same_b:
- rule:
actions:
allow: 1
acl_diff_c:
- rule:
actions:
allow: 0
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
acl_in: acl_same_b
p2:
number: 2
native_vlan: 0x200
""" % DP1_CONFIG
DIFF_CONTENT_CONFIG = """
acls:
acl_same_a:
- rule:
actions:
allow: 1
acl_same_b:
- rule:
actions:
allow: 1
acl_diff_c:
- rule:
actions:
allow: 0
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
acl_in: acl_diff_c
p2:
number: 2
native_vlan: 0x200
""" % DP1_CONFIG
def setUp(self):
"""Setup basic ACL config"""
self.setup_valves(self.CONFIG)
def test_change_port_acl(self):
"""Test port ACL can be changed."""
self.update_and_revert_config(self.CONFIG, self.SAME_CONTENT_CONFIG, 'warm')
self.update_config(self.SAME_CONTENT_CONFIG, reload_type='warm')
self.rcv_packet(1, 0x100, {
'eth_src': self.P1_V100_MAC,
'eth_dst': self.UNKNOWN_MAC,
'ipv4_src': '10.0.0.1',
'ipv4_dst': '10.0.0.2'})
vlan_labels = {'vlan': str(int(0x100))}
port_labels = {'port': 'p1', 'port_description': 'p1'}
port_labels.update(vlan_labels)
def verify_func():
self.assertEqual(
1, self.get_prom('vlan_hosts_learned', labels=vlan_labels))
self.assertEqual(
1, self.get_prom('port_vlan_hosts_learned', labels=port_labels))
verify_func()
# ACL changed but we kept the learn cache.
self.update_config(self.DIFF_CONTENT_CONFIG, reload_type='warm')
verify_func()
class ValveChangeMirrorTestCase(ValveTestBases.ValveTestNetwork):
"""Test changes mirroring port."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
p2:
number: 2
output_only: True
p3:
number: 3
native_vlan: 0x200
""" % DP1_CONFIG
MIRROR_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
p2:
number: 2
mirror: p1
p3:
number: 3
native_vlan: 0x200
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(self.CONFIG)
def test_change_port_acl(self):
"""Test port ACL can be changed."""
self.update_and_revert_config(self.CONFIG, self.MIRROR_CONFIG, reload_type='warm')
vlan_labels = {'vlan': str(int(0x100))}
port_labels = {'port': 'p1', 'port_description': 'p1'}
port_labels.update(vlan_labels)
def verify_prom():
self.assertEqual(
1, self.get_prom('vlan_hosts_learned', labels=vlan_labels))
self.assertEqual(
1, self.get_prom('port_vlan_hosts_learned', labels=port_labels))
self.rcv_packet(1, 0x100, {
'eth_src': self.P1_V100_MAC,
'eth_dst': self.UNKNOWN_MAC,
'ipv4_src': '10.0.0.1',
'ipv4_dst': '10.0.0.2'})
verify_prom()
# Now mirroring port 1 but we kept the cache.
self.update_config(self.MIRROR_CONFIG, reload_type='warm')
verify_prom()
# Now unmirror again.
self.update_config(self.CONFIG, reload_type='warm')
verify_prom()
class ValveACLTestCase(ValveTestBases.ValveTestNetwork):
"""Test ACL drop/allow and reloading."""
def setUp(self):
self.setup_valves(CONFIG)
def test_vlan_acl_deny(self):
"""Test VLAN ACL denies a packet."""
acl_config = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: v100
p2:
number: 2
native_vlan: v200
tagged_vlans: [v100]
p3:
number: 3
tagged_vlans: [v100, v200]
p4:
number: 4
tagged_vlans: [v200]
p5:
number: 5
native_vlan: v300
vlans:
v100:
vid: 0x100
v200:
vid: 0x200
acl_in: drop_non_ospf_ipv4
v300:
vid: 0x300
acls:
drop_non_ospf_ipv4:
- rule:
nw_dst: '224.0.0.5'
dl_type: 0x800
actions:
allow: 1
- rule:
dl_type: 0x800
actions:
allow: 0
""" % DP1_CONFIG
drop_match = {
'in_port': 2,
'vlan_vid': 0,
'eth_type': 0x800,
'ipv4_dst': '192.0.2.1'}
accept_match = {
'in_port': 2,
'vlan_vid': 0,
'eth_type': 0x800,
'ipv4_dst': '224.0.0.5'}
table = self.network.tables[self.DP_ID]
# base case
for match in (drop_match, accept_match):
self.assertTrue(
table.is_output(match, port=3, vid=self.V200),
msg='Packet not output before adding ACL')
def verify_func():
self.flap_port(2)
self.assertFalse(
table.is_output(drop_match), msg='Packet not blocked by ACL')
self.assertTrue(
table.is_output(accept_match, port=3, vid=self.V200),
msg='Packet not allowed by ACL')
self.update_and_revert_config(
CONFIG, acl_config, reload_type='cold', verify_func=verify_func)
class ValveEgressACLTestCase(ValveTestBases.ValveTestNetwork):
"""Test ACL drop/allow and reloading."""
def setUp(self):
self.setup_valves(CONFIG)
def test_vlan_acl_deny(self):
"""Test VLAN ACL denies a packet."""
allow_host_v6 = 'fc00:200::1:1'
deny_host_v6 = 'fc00:200::1:2'
faucet_v100_vip = 'fc00:100::1'
faucet_v200_vip = 'fc00:200::1'
acl_config = """
dps:
s1:
{dp1_config}
interfaces:
p1:
number: 1
native_vlan: v100
p2:
number: 2
native_vlan: v200
tagged_vlans: [v100]
p3:
number: 3
tagged_vlans: [v100, v200]
p4:
number: 4
tagged_vlans: [v200]
vlans:
v100:
vid: 0x100
faucet_mac: '{mac}'
faucet_vips: ['{v100_vip}/64']
v200:
vid: 0x200
faucet_mac: '{mac}'
faucet_vips: ['{v200_vip}/64']
acl_out: drop_non_allow_host_v6
minimum_ip_size_check: false
routers:
r_v100_v200:
vlans: [v100, v200]
acls:
drop_non_allow_host_v6:
- rule:
ipv6_dst: '{allow_host}'
eth_type: 0x86DD
actions:
allow: 1
- rule:
eth_type: 0x86DD
actions:
allow: 0
""".format(dp1_config=DP1_CONFIG, mac=FAUCET_MAC, v100_vip=faucet_v100_vip,
v200_vip=faucet_v200_vip, allow_host=allow_host_v6)
l2_drop_match = {
'in_port': 2,
'eth_dst': self.P3_V200_MAC,
'vlan_vid': 0,
'eth_type': 0x86DD,
'ipv6_dst': deny_host_v6}
l2_accept_match = {
'in_port': 3,
'eth_dst': self.P2_V200_MAC,
'vlan_vid': 0x200 | ofp.OFPVID_PRESENT,
'eth_type': 0x86DD,
'ipv6_dst': allow_host_v6}
v100_accept_match = {'in_port': 1, 'vlan_vid': 0}
table = self.network.tables[self.DP_ID]
# base case
for match in (l2_drop_match, l2_accept_match):
self.assertTrue(
table.is_output(match, port=4),
msg='Packet not output before adding ACL')
def verify_func():
self.assertTrue(
table.is_output(v100_accept_match, port=3),
msg='Packet not output when on vlan with no ACL')
self.assertFalse(
table.is_output(l2_drop_match, port=3),
msg='Packet not blocked by ACL')
self.assertTrue(
table.is_output(l2_accept_match, port=2),
msg='Packet not allowed by ACL')
# unicast
self.rcv_packet(2, 0x200, {
'eth_src': self.P2_V200_MAC,
'eth_dst': self.P3_V200_MAC,
'vid': 0x200,
'ipv6_src': allow_host_v6,
'ipv6_dst': deny_host_v6,
'neighbor_advert_ip': allow_host_v6})
self.rcv_packet(3, 0x200, {
'eth_src': self.P3_V200_MAC,
'eth_dst': self.P2_V200_MAC,
'vid': 0x200,
'ipv6_src': deny_host_v6,
'ipv6_dst': allow_host_v6,
'neighbor_advert_ip': deny_host_v6})
self.assertTrue(
table.is_output(l2_accept_match, port=2),
msg='Packet not allowed by ACL')
self.assertFalse(
table.is_output(l2_drop_match, port=3),
msg='Packet not blocked by ACL')
# l3
l3_drop_match = {
'in_port': 1,
'eth_dst': FAUCET_MAC,
'vlan_vid': 0,
'eth_type': 0x86DD,
'ipv6_dst': deny_host_v6}
l3_accept_match = {
'in_port': 1,
'eth_dst': FAUCET_MAC,
'vlan_vid': 0,
'eth_type': 0x86DD,
'ipv6_dst': allow_host_v6}
self.assertTrue(
table.is_output(l3_accept_match, port=2),
msg='Routed packet not allowed by ACL')
self.assertFalse(
table.is_output(l3_drop_match, port=3),
msg='Routed packet not blocked by ACL')
# multicast
self.update_and_revert_config(CONFIG, acl_config, 'cold', verify_func=verify_func)
class ValveReloadConfigProfile(ValveTestBases.ValveTestNetwork):
"""Test reload processing time."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
""" % BASE_DP1_CONFIG
NUM_PORTS = 100
baseline_total_tt = None
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(CONFIG)
def test_profile_reload(self):
"""Test reload processing time."""
orig_config = copy.copy(self.CONFIG)
def load_orig_config():
pstats_out, _ = self.profile(
partial(self.update_config, orig_config))
self.baseline_total_tt = pstats_out.total_tt # pytype: disable=attribute-error
for i in range(2, 100):
self.CONFIG += """
p%u:
number: %u
native_vlan: 0x100
""" % (i, i)
for i in range(5):
load_orig_config()
pstats_out, pstats_text = self.profile(
partial(self.update_config, self.CONFIG, reload_type='cold'))
cache_info = valve_of.output_non_output_actions.cache_info()
self.assertGreater(cache_info.hits, cache_info.misses, msg=cache_info)
total_tt_prop = (
pstats_out.total_tt / self.baseline_total_tt) # pytype: disable=attribute-error
# must not be 20x slower, to ingest config for 100 interfaces than 1.
# TODO: This test might have to be run separately,
# since it is marginal on GitHub actions due to parallel test runs.
if total_tt_prop < 20:
for valve in self.valves_manager.valves.values():
for table in valve.dp.tables.values():
cache_info = table._trim_inst.cache_info() # pylint: disable=protected-access
self.assertGreater(cache_info.hits, cache_info.misses, msg=cache_info)
return
time.sleep(i)
self.fail('%f: %s' % (total_tt_prop, pstats_text))
class ValveTestVLANRef(ValveTestBases.ValveTestNetwork):
"""Test reference to same VLAN by name or VID."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 333
p2:
number: 2
native_vlan: threes
vlans:
threes:
vid: 333
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(self.CONFIG)
def test_vlan_refs(self):
"""Test same VLAN is referred to."""
vlans = self.valves_manager.valves[self.DP_ID].dp.vlans
self.assertEqual(1, len(vlans))
self.assertEqual('threes', vlans[333].name, vlans[333])
self.assertEqual(2, len(vlans[333].untagged))
class ValveTestConfigHash(ValveTestBases.ValveTestNetwork):
"""Verify faucet_config_hash_info update after config change"""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(self.CONFIG)
def _get_info(self, metric, name):
""""Return (single) info dict for metric"""
# There doesn't seem to be a nice API for this,
# so we use the prometheus client internal API
metrics = list(metric.collect())
self.assertEqual(len(metrics), 1)
samples = metrics[0].samples
self.assertEqual(len(samples), 1)
sample = samples[0]
self.assertEqual(sample.name, name)
return sample.labels
def _check_hashes(self):
"""Verify and return faucet_config_hash_info labels"""
labels = self._get_info(metric=self.metrics.faucet_config_hash,
name='faucet_config_hash_info')
files = labels['config_files'].split(',')
hashes = labels['hashes'].split(',')
self.assertTrue(len(files) == len(hashes) == 1)
self.assertEqual(files[0], self.config_file, 'wrong config file')
hash_value = config_parser_util.config_file_hash(self.config_file)
self.assertEqual(hashes[0], hash_value, 'hash validation failed')
return labels
def _change_config(self):
"""Change self.CONFIG"""
if '0x100' in self.CONFIG:
self.CONFIG = self.CONFIG.replace('0x100', '0x200')
else:
self.CONFIG = self.CONFIG.replace('0x200', '0x100')
self.update_config(self.CONFIG, reload_expected=True)
return self.CONFIG
def test_config_hash_func(self):
"""Verify that faucet_config_hash_func is set correctly"""
labels = self._get_info(metric=self.metrics.faucet_config_hash_func,
name='faucet_config_hash_func')
hash_funcs = list(labels.values())
self.assertEqual(len(hash_funcs), 1, "found multiple hash functions")
hash_func = hash_funcs[0]
# Make sure that it matches and is supported in hashlib
self.assertEqual(hash_func, config_parser_util.CONFIG_HASH_FUNC)
self.assertTrue(hash_func in hashlib.algorithms_guaranteed)
def test_config_hash_update(self):
"""Verify faucet_config_hash_info is properly updated after config"""
# Verify that hashes change after config is changed
old_config = self.CONFIG
old_hashes = self._check_hashes()
starting_hashes = old_hashes
self._change_config()
new_config = self.CONFIG
self.assertNotEqual(old_config, new_config, 'config not changed')
new_hashes = self._check_hashes()
self.assertNotEqual(old_hashes, new_hashes,
'hashes not changed after config change')
# Verify that hashes don't change after config isn't changed
old_hashes = new_hashes
self.update_config(self.CONFIG, reload_expected=False)
new_hashes = self._check_hashes()
self.assertEqual(old_hashes, new_hashes,
"hashes changed when config didn't")
# Verify that hash is restored when config is restored
self._change_config()
new_hashes = self._check_hashes()
self.assertEqual(new_hashes, starting_hashes,
'hashes should be restored to starting values')
class ValveTestConfigRevert(ValveTestBases.ValveTestNetwork):
    """Test configuration revert"""
    # Minimal single-DP, single-port config used as the known-good baseline.
    CONFIG = """
dps:
    s1:
        dp_id: 0x1
        hardware: 'GenericTFM'
        interfaces:
            p1:
                number: 1
                native_vlan: 0x100
"""
    # Enable automatic revert to the last good config when a load fails.
    CONFIG_AUTO_REVERT = True
    def setUp(self):
        """Setup basic port and vlan config with hardware type set"""
        self.setup_valves(self.CONFIG)
    def test_config_revert(self):
        """Verify config is automatically reverted if bad."""
        # No load errors to begin with.
        self.assertEqual(self.get_prom('faucet_config_load_error', bare=True), 0)
        # Pushing unparseable config should raise the error gauge to 1.
        self.update_config('***broken***', reload_expected=True, error_expected=1)
        self.assertEqual(self.get_prom('faucet_config_load_error', bare=True), 1)
        # The on-disk file must have been reverted to the good config.
        with open(self.config_file, 'r', encoding='utf-8') as config_file:
            config_content = config_file.read()
        self.assertEqual(self.CONFIG, config_content)
        # A trivial whitespace change loads cleanly without a reload.
        self.update_config(self.CONFIG + '\n', reload_expected=False, error_expected=0)
        # Adding a port triggers a warm reload and no error.
        more_config = self.CONFIG + """
            p2:
                number: 2
                native_vlan: 0x100
"""
        self.update_config(more_config, reload_expected=True, reload_type='warm', error_expected=0)
class ValveTestConfigRevertBootstrap(ValveTestBases.ValveTestNetwork):
    """Test configuration auto reverted if bad"""
    # Deliberately unparseable config used to bootstrap in an error state.
    BAD_CONFIG = """
    *** busted ***
"""
    GOOD_CONFIG = """
dps:
    s1:
        dp_id: 0x1
        hardware: 'GenericTFM'
        interfaces:
            p1:
                number: 1
                native_vlan: 0x100
"""
    CONFIG_AUTO_REVERT = True
    def setUp(self):
        """Setup invalid config"""
        # Starting from bad config: one load error is expected immediately.
        self.setup_valves(self.BAD_CONFIG, error_expected=1)
    def test_config_revert(self):
        """Verify config is automatically reverted if bad."""
        # Error gauge is set after the bad bootstrap config...
        self.assertEqual(self.get_prom('faucet_config_load_error', bare=True), 1)
        # ...and clears once a good config is applied.
        self.update_config(self.GOOD_CONFIG + '\n', reload_expected=False, error_expected=0)
        self.assertEqual(self.get_prom('faucet_config_load_error', bare=True), 0)
class ValveTestConfigApplied(ValveTestBases.ValveTestNetwork):
    """Test cases for faucet_config_applied."""
    CONFIG = """
dps:
    s1:
        dp_id: 0x1
        hardware: 'GenericTFM'
        interfaces:
            p1:
                description: "one thing"
                number: 1
                native_vlan: 0x100
"""
    # Identical to CONFIG except for the port description text.
    NEW_DESCR_CONFIG = """
dps:
    s1:
        dp_id: 0x1
        hardware: 'GenericTFM'
        interfaces:
            p1:
                description: "another thing"
                number: 1
                native_vlan: 0x100
"""
    def setUp(self):
        """Setup basic port and vlan config with hardware type set"""
        self.setup_valves(self.CONFIG)
    def test_config_applied_update(self):
        """Verify that config_applied increments after DP connect"""
        # 100% for a single datapath
        self.assertEqual(self.get_prom('faucet_config_applied', bare=True), 1.0)
        # Add a second datapath, which currently isn't programmed
        self.CONFIG += """
    s2:
        dp_id: 0x2
        hardware: 'GenericTFM'
        interfaces:
            p1:
                number: 1
                native_vlan: 0x100
"""
        self.update_config(self.CONFIG, reload_expected=False)
        # Should be 50%
        self.assertEqual(self.get_prom('faucet_config_applied', bare=True), .5)
        # We don't have a way to simulate the second datapath connecting,
        # we update the statistic manually
        self.valves_manager.update_config_applied({0x2: True})
        # Should be 100% now
        self.assertEqual(self.get_prom('faucet_config_applied', bare=True), 1.0)
    def test_description_only(self):
        """Test updating config description"""
        # A description-only change must not trigger a reload.
        self.update_config(self.NEW_DESCR_CONFIG, reload_expected=False)
class ValveReloadConfigTestCase(ValveTestBases.ValveTestBig):  # pylint: disable=too-few-public-methods
    """Repeats the tests after a config reload."""
    def setUp(self):
        super().setUp()
        self.flap_port(1)
        # NOTE(review): CONFIG here is the bare module-level constant, not
        # self.CONFIG — presumably the full "big" config; confirm intended.
        self.update_config(CONFIG, reload_type='warm', reload_expected=False)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main() # pytype: disable=module-attr
| 28.620824 | 103 | 0.554337 |
ae6aacbaf3401e19dc03a1ec3921cb85dbf2d3a9 | 3,614 | py | Python | rfft/experiment.py | msamogh/rrr | ff0ed176a54e488a4498662335d9d3085296c16b | [
"MIT"
] | 1 | 2018-05-09T04:42:14.000Z | 2018-05-09T04:42:14.000Z | rfft/experiment.py | msamogh/rrr | ff0ed176a54e488a4498662335d9d3085296c16b | [
"MIT"
] | 11 | 2019-12-16T20:57:00.000Z | 2022-03-11T23:20:25.000Z | rfft/experiment.py | msamogh/rrr | ff0ed176a54e488a4498662335d9d3085296c16b | [
"MIT"
] | 1 | 2018-10-08T16:46:40.000Z | 2018-10-08T16:46:40.000Z | import os
import pickle
from abc import ABCMeta
from abc import abstractmethod
from enum import Enum
class ExperimentType(Enum):
    """Data domain an experiment operates on."""
    IMAGE = 1
    TEXT = 2
    TABULAR = 3
class Dataset(Enum):
    """Which data split a sample is drawn from."""
    TRAIN = 1
    TEST = 2
class ExperimentStatus(object):
    """Value object tracking whether an experiment is initialized/trained."""

    def __init__(self, initialized=False, trained=False):
        # Both flags default to False for a freshly created experiment.
        self.initialized, self.trained = initialized, trained
class Experiment(metaclass=ABCMeta):
    """Represents an experiment.

    Abstract base: subclasses must implement all @abstractmethod members.

    NOTE: this class previously declared ``__metaclass__ = ABCMeta`` — the
    Python 2 spelling, which is inert under Python 3 and left every
    @abstractmethod unenforced. The ``metaclass=`` syntax restores
    enforcement. Concrete subclasses are also expected to define a
    ``MODELS_DIR`` class attribute (referenced below but not declared here).
    """
    def __init__(self):
        # A fresh experiment starts untrained/uninitialized and unnamed.
        self.status = ExperimentStatus()
        self.name = None
    @abstractmethod
    def domain(self):
        """Returns the data domain of the experiment - text, image, or tabular.
        The values can take on any of the values from ExperimentType.
        """
        pass
    @abstractmethod
    def pretty_name(self):
        """Returns human readable name of the experiment."""
        pass
    @abstractmethod
    def description(self):
        """Returns description of the experiment."""
        pass
    @abstractmethod
    def get_status(self):
        """Returns the current state of the experiment.
        The values can take on any of the values from ExperimentStatus.
        """
        pass
    @abstractmethod
    def generate_dataset(self):
        """Loads and preprocesses the dataset."""
        pass
    @abstractmethod
    def get_sample(self, dataset, sample_idx):
        """Returns the input sample from the train dataset."""
        pass
    @abstractmethod
    def load_annotations(self, **hypothesis_params):
        """Loads and processes annotations."""
        pass
    @abstractmethod
    def unload_annotations(self):
        """Removes any loaded annotations from the state."""
        pass
    @abstractmethod
    def set_annotation(self, sample_idx, annotation):
        """Specifies the annotation for the given input sample."""
        pass
    @abstractmethod
    def get_annotation(self, sample_idx):
        """Returns the annotation of the given input sample."""
        pass
    @abstractmethod
    def delete_annotation(self, sample_idx):
        """Deletes annotation corresponding to the given input sample."""
        pass
    @abstractmethod
    def train(self, num_epochs):
        """Initializes and trains a model on the generated train data."""
        pass
    @classmethod
    def get_saved_experiments(cls):
        """Returns list of paths for saved trained experiments.

        Returns [] when cls.MODELS_DIR is undefined (AttributeError) or the
        directory cannot be listed (OSError). Previously a blanket
        ``except Exception`` also swallowed unrelated failures.
        """
        try:
            saved_models = os.listdir(cls.MODELS_DIR)
        except (AttributeError, OSError):
            return []
        return [os.path.join(cls.MODELS_DIR, model) for model in saved_models]
    @classmethod
    def load_experiment(cls, filepath, prepend_path=False):
        """Loads experiment from saved file and returns it.

        filepath is taken relative to cls.MODELS_DIR when prepend_path is
        True. NOTE(review): unpickles the file contents — only load files
        from trusted sources.
        """
        experiment = cls()
        if prepend_path:
            filepath = os.path.join(cls.MODELS_DIR, filepath)
        # Use a context manager so the file handle is closed (the previous
        # pickle.load(open(...)) leaked it).
        with open(filepath, 'rb') as pickle_file:
            exp_dict = pickle.load(pickle_file)
        experiment.__dict__.update(exp_dict)
        return experiment
    @abstractmethod
    def save_experiment(self):
        """Save experiment state to file."""
        pass
    @abstractmethod
    def score_model(self):
        """Runs prediction of the model on train and test sets and returns the performance
        metrics."""
        pass
    @abstractmethod
    def explain(self, sample, **experiment_params):
        """Explains the reasons for the prediction of the given input sample."""
        pass
| 26.77037 | 91 | 0.614001 |
491c7a02069324304d4ca643ddeb12306ccd3619 | 2,807 | py | Python | crowd_integration/utils.py | Siikakala/kompassi | 14cdcd966ab689d762cc885e28b6d15465c216f0 | [
"CC-BY-3.0"
] | null | null | null | crowd_integration/utils.py | Siikakala/kompassi | 14cdcd966ab689d762cc885e28b6d15465c216f0 | [
"CC-BY-3.0"
] | null | null | null | crowd_integration/utils.py | Siikakala/kompassi | 14cdcd966ab689d762cc885e28b6d15465c216f0 | [
"CC-BY-3.0"
] | null | null | null | import json
import logging
from django.conf import settings
import requests
from requests import HTTPError
from requests.auth import HTTPBasicAuth
logger = logging.getLogger('kompassi')
# HTTP basic-auth credentials identifying this application to Crowd.
AUTH = HTTPBasicAuth(
    settings.KOMPASSI_CROWD_APPLICATION_NAME,
    settings.KOMPASSI_CROWD_APPLICATION_PASSWORD,
)
# Every Crowd REST call sends and expects JSON.
HEADERS = {
    'Content-Type': 'application/json',
    'Accept': 'application/json',
}
class CrowdError(RuntimeError):
    """Raised when a Crowd REST API call fails with an HTTP error status."""
    pass
def crowd_request(method, url, params=None, body=None, ignore_status_codes=()):
    """Perform an authenticated JSON request against the Crowd REST API.

    :param method: HTTP method name, e.g. 'GET' or 'POST'.
    :param url: path appended to settings.KOMPASSI_CROWD_BASE_URL.
    :param params: optional dict of query string parameters.
    :param body: optional JSON-serializable request body.
    :param ignore_status_codes: status codes treated as success (no raise).
    :raises CrowdError: for any other HTTP error response.
    :returns: None.
    """
    # The defaults were previously mutable ({} and []); replaced with
    # None/() to avoid the shared-mutable-default pitfall. requests treats
    # params=None the same as an empty dict.
    url = '{base_url}{url}'.format(base_url=settings.KOMPASSI_CROWD_BASE_URL, url=url)
    response = requests.request(
        method=method,
        url=url,
        auth=AUTH,
        data=json.dumps(body) if body else None,
        headers=HEADERS,
        params=params,
    )
    if response.status_code in ignore_status_codes:
        return
    try:
        response.raise_for_status()
    except HTTPError as e:
        logger.exception(response.text)
        raise CrowdError(e)
def user_to_crowd(user, password=None):
    """Map a user object to a Crowd user document, optionally with password."""
    doc = {
        'name': user.username,
        'first-name': user.first_name,
        'last-name': user.last_name,
        'email': user.email,
        'active': True,
    }
    if password is not None:
        doc['password'] = {'value': password}
    return doc
def change_user_password(user, password):
    """Set a new Crowd password for the given user."""
    return crowd_request(
        method='PUT',
        url='/user/password',
        params={'username': user.username},
        body={'value': password},
    )
def ensure_group_exists(group_name):
    """Create the Crowd group; HTTP 400 responses are tolerated (e.g. when
    the group already exists)."""
    return crowd_request(
        'POST',
        '/group',
        {},
        {
            "name": group_name,
            "type": "GROUP",
            "active": True,
        },
        ignore_status_codes=[400],
    )
def ensure_user_group_membership(user, group_name, should_belong_to_group=True):
    """Add or remove the user's direct membership to match the desired state."""
    action = (
        ensure_user_is_member_of_group
        if should_belong_to_group
        else ensure_user_is_not_member_of_group
    )
    action(user, group_name)
def ensure_user_is_member_of_group(user, group_name):
    """Add the user as a direct member of the group; HTTP 409 (presumably
    already a member) is tolerated."""
    return crowd_request(
        method='POST',
        url='/user/group/direct',
        params={'username': user.username},
        body={'name': group_name},
        ignore_status_codes=[409],
    )
def ensure_user_is_not_member_of_group(user, group_name):
    """Remove the user's direct group membership; HTTP 404 is tolerated."""
    query = {'username': user.username, 'groupname': group_name}
    return crowd_request(
        'DELETE',
        '/user/group/direct',
        query,
        ignore_status_codes=[404],
    )
def create_user(user, password):
    """Create the user in Crowd with the given initial password."""
    return crowd_request(
        method='POST',
        url='/user',
        params={},
        body=user_to_crowd(user, password),
    )
def update_user(user):
    """Push the user's current profile fields to Crowd (no password change)."""
    return crowd_request(
        method='PUT',
        url='/user',
        params={'username': user.username},
        body=user_to_crowd(user),
    )
| 21.105263 | 86 | 0.627004 |
9a6dca42c705169dccbf31c338995256af437bcb | 8,971 | py | Python | rl_algorithms/common/abstract/distributed_logger.py | medipixel/reinforcement_learning_examples | c5f7d1d60dcefb3050d75c5c657207183bd8db65 | [
"MIT"
] | 11 | 2018-12-18T13:46:48.000Z | 2019-02-11T02:03:29.000Z | rl_algorithms/common/abstract/distributed_logger.py | medipixel/rl_baselines | c5f7d1d60dcefb3050d75c5c657207183bd8db65 | [
"MIT"
] | 35 | 2019-01-19T06:09:26.000Z | 2019-02-11T04:15:44.000Z | rl_algorithms/common/abstract/distributed_logger.py | medipixel/reinforcement_learning_examples | c5f7d1d60dcefb3050d75c5c657207183bd8db65 | [
"MIT"
] | null | null | null | """Base class for loggers use in distributed training.
- Author: Chris Yoon
- Contact: chris.yoon@medipixel.io
"""
from abc import ABC, abstractmethod
from collections import deque
import os
import shutil
from typing import Dict, List
import gym
import numpy as np
import plotly.graph_objects as go
import pyarrow as pa
import torch
import wandb
import zmq
from rl_algorithms.common.env.atari_wrappers import atari_env_generator
import rl_algorithms.common.env.utils as env_utils
from rl_algorithms.common.helper_functions import numpy2floattensor, smoothen_graph
from rl_algorithms.common.networks.brain import Brain
from rl_algorithms.utils.config import ConfigDict
class DistributedLogger(ABC):
    """Base class for loggers use in distributed training.
    Attributes:
        log_cfg (ConfigDict): configuration for saving log and checkpoint
        comm_config (ConfigDict): configs for communication
        backbone (ConfigDict): backbone configs for building network
        head (ConfigDict): head configs for building network
        brain (Brain): logger brain for evaluation
        update_step (int): tracker for learner update step
        device (torch.device): device, cpu by default
        log_info_queue (deque): queue for storing log info received from learner
        env (gym.Env): gym environment for running test
    """
    def __init__(
        self,
        log_cfg: ConfigDict,
        comm_cfg: ConfigDict,
        backbone: ConfigDict,
        head: ConfigDict,
        env_name: str,
        is_atari: bool,
        state_size: int,
        output_size: int,
        max_update_step: int,
        episode_num: int,
        max_episode_steps: int,
        interim_test_num: int,
        is_log: bool,
        is_render: bool,
    ):
        self.log_cfg = log_cfg
        self.comm_cfg = comm_cfg
        self.device = torch.device("cpu")  # Logger only runs on cpu
        # The head config is mutated in place before building the network.
        head.configs.state_size = state_size
        head.configs.output_size = output_size
        self.brain = Brain(backbone, head).to(self.device)
        self.env_name = env_name
        self.is_atari = is_atari
        self.max_update_step = max_update_step
        self.episode_num = episode_num
        self.max_episode_steps = max_episode_steps
        self.interim_test_num = interim_test_num
        self.is_log = is_log
        self.is_render = is_render
        self.update_step = 0
        # Bounded queue: only the most recent 100 learner messages are kept.
        self.log_info_queue = deque(maxlen=100)
        self._init_env()
    # pylint: disable=attribute-defined-outside-init
    def _init_env(self):
        """Initialize gym environment."""
        if self.is_atari:
            self.env = atari_env_generator(self.env_name, self.max_episode_steps)
        else:
            self.env = gym.make(self.env_name)
            # set_env may also clamp/return the effective max episode steps.
            self.env, self.max_episode_steps = env_utils.set_env(
                self.env, self.max_episode_steps
            )
    @abstractmethod
    def load_params(self, path: str):
        # Shared precondition for subclasses: the checkpoint path must exist.
        if not os.path.exists(path):
            raise Exception(
                f"[ERROR] the input path does not exist. Wrong path: {path}"
            )
    # pylint: disable=attribute-defined-outside-init
    def init_communication(self):
        """Initialize inter-process communication sockets."""
        # PULL socket bound locally; the learner PUSHes log payloads to it.
        ctx = zmq.Context()
        self.pull_socket = ctx.socket(zmq.PULL)
        self.pull_socket.bind(f"tcp://127.0.0.1:{self.comm_cfg.learner_logger_port}")
    @abstractmethod
    def select_action(self, state: np.ndarray):
        pass
    @abstractmethod
    def write_log(self, log_value: dict):
        pass
    # pylint: disable=no-self-use
    @staticmethod
    def _preprocess_state(state: np.ndarray, device: torch.device) -> torch.Tensor:
        """Convert a numpy state to a float tensor on the given device."""
        state = numpy2floattensor(state, device)
        return state
    def set_wandb(self):
        """Set configuration for wandb logging."""
        wandb.init(
            project=self.env_name,
            name=f"{self.log_cfg.agent}/{self.log_cfg.curr_time}",
        )
        additional_log = dict(
            episode_num=self.episode_num,
            max_episode_steps=self.max_episode_steps,
        )
        wandb.config.update(additional_log)
        # Preserve the experiment config file alongside the wandb run.
        shutil.copy(self.log_cfg.cfg_path, os.path.join(wandb.run.dir, "config.yaml"))
    def recv_log_info(self):
        """Receive info from learner."""
        # Non-blocking receive: zmq.Again simply means nothing is pending.
        received = False
        try:
            log_info_id = self.pull_socket.recv(zmq.DONTWAIT)
            received = True
        except zmq.Again:
            pass
        if received:
            self.log_info_queue.append(log_info_id)
    def run(self):
        """Run main logging loop; continuously receive data and log."""
        if self.is_log:
            self.set_wandb()
        while self.update_step < self.max_update_step:
            self.recv_log_info()
            if self.log_info_queue:  # if non-empty
                log_info_id = self.log_info_queue.pop()
                # NOTE(review): pa.deserialize is deprecated in recent
                # pyarrow releases — confirm the pinned version supports it.
                log_info = pa.deserialize(log_info_id)
                state_dict = log_info["state_dict"]
                log_value = log_info["log_value"]
                self.update_step = log_value["update_step"]
                # Pull the learner's weights into the local brain, then
                # evaluate and log the resulting average score.
                self.synchronize(state_dict)
                avg_score = self.test(self.update_step)
                log_value["avg_score"] = avg_score
                self.write_log(log_value)
    def write_worker_log(self, worker_logs: List[dict], worker_update_interval: int):
        """Log the mean scores of each episode per update step to wandb."""
        # NOTE: Worker plots are passed onto wandb.log as matplotlib.pyplot
        # since wandb doesn't support logging multiple lines to single plot
        self.set_wandb()
        # Plot individual workers
        fig = go.Figure()
        worker_id = 0
        for worker_log in worker_logs:
            fig.add_trace(
                go.Scatter(
                    x=list(worker_log.keys()),
                    y=smoothen_graph(list(worker_log.values())),
                    mode="lines",
                    name=f"Worker {worker_id}",
                    line=dict(width=2),
                )
            )
            worker_id = worker_id + 1
        # Plot mean scores
        logged_update_steps = list(
            range(0, self.max_update_step + 1, worker_update_interval)
        )
        mean_scores = []
        try:
            # Average only the workers that reported a score at this step.
            for step in logged_update_steps:
                scores_for_step = []
                for worker_log in worker_logs:
                    if step in list(worker_log):
                        scores_for_step.append(worker_log[step])
                mean_scores.append(np.mean(scores_for_step))
        except Exception as e:
            print(f"[Error] {e}")
        fig.add_trace(
            go.Scatter(
                x=logged_update_steps,
                y=mean_scores,
                mode="lines+markers",
                name="Mean scores",
                line=dict(width=5),
            )
        )
        # Write to wandb
        wandb.log({"Worker scores": fig})
    def test(self, update_step: int, interim_test: bool = True):
        """Test the agent."""
        avg_score = self._test(update_step, interim_test)
        # termination
        self.env.close()
        return avg_score
    def _test(self, update_step: int, interim_test: bool) -> float:
        """Common test routine."""
        # Interim tests run fewer episodes than the final evaluation.
        if interim_test:
            test_num = self.interim_test_num
        else:
            test_num = self.episode_num
        self.brain.eval()
        scores = []
        for i_episode in range(test_num):
            state = self.env.reset()
            done = False
            score = 0
            step = 0
            while not done:
                if self.is_render:
                    self.env.render()
                action = self.select_action(state)
                next_state, reward, done, _ = self.env.step(action)
                state = next_state
                score += reward
                step += 1
            scores.append(score)
            if interim_test:
                print(
                    "[INFO] update step: %d\ttest %d\tstep: %d\ttotal score: %d"
                    % (update_step, i_episode, step, score)
                )
            else:
                print(
                    "[INFO] test %d\tstep: %d\ttotal score: %d"
                    % (i_episode, step, score)
                )
        return np.mean(scores)
    def synchronize(self, state_dict: Dict[str, np.ndarray]):
        """Copy parameters from numpy arrays."""
        # Only parameters whose names appear in the received state_dict are
        # updated; others keep their current values.
        param_name_list = list(state_dict.keys())
        for logger_named_param in self.brain.named_parameters():
            logger_param_name = logger_named_param[0]
            if logger_param_name in param_name_list:
                new_param = numpy2floattensor(
                    state_dict[logger_param_name], self.device
                )
                logger_named_param[1].data.copy_(new_param)
| 32.860806 | 86 | 0.590012 |
34ee78f0eb09de4243abf714b06fd56f6a118e1a | 52,410 | py | Python | applications/StructuralMechanicsApplication/tests/test_constitutive_law.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 778 | 2017-01-27T16:29:17.000Z | 2022-03-30T03:01:51.000Z | applications/StructuralMechanicsApplication/tests/test_constitutive_law.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 6,634 | 2017-01-15T22:56:13.000Z | 2022-03-31T15:03:36.000Z | applications/StructuralMechanicsApplication/tests/test_constitutive_law.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 224 | 2017-02-07T14:12:49.000Z | 2022-03-06T23:09:34.000Z | import KratosMultiphysics
import KratosMultiphysics.StructuralMechanicsApplication as StructuralMechanicsApplication
import KratosMultiphysics.KratosUnittest as KratosUnittest
from KratosMultiphysics.kratos_utilities import CheckIfApplicationsAvailable
if CheckIfApplicationsAvailable("ConstitutiveLawsApplication"):
from KratosMultiphysics import ConstitutiveLawsApplication
import math
class TestConstitutiveLaw(KratosUnittest.TestCase):
    def setUp(self):
        # No shared fixtures: each test builds its own model part and CL.
        pass
    @staticmethod
    def _create_geometry(model_part, dim):
        """Create a simplex geometry: a unit triangle (2D) or tetrahedron (3D).

        Nodes are created directly on model_part; returns [geometry, nnodes]
        and raises for any other dimension.
        """
        # Create new nodes
        node1 = model_part.CreateNewNode(1, 0.0, 0.0, 0.0)
        node2 = model_part.CreateNewNode(2, 1.0, 0.0, 0.0)
        node3 = model_part.CreateNewNode(3, 0.0, 1.0, 0.0)
        if (dim == 2):
            nnodes = 3
            # Allocate a geometry
            geom = KratosMultiphysics.Triangle2D3(node1,node2,node3)
        elif (dim == 3):
            nnodes = 4
            node4 = model_part.CreateNewNode(4, 0.0, 0.0, 1.0)
            # Allocate a geometry
            geom = KratosMultiphysics.Tetrahedra3D4(node1,node2,node3,node4)
        else:
            raise Exception("Error: bad dimension value: ", dim)
        return [geom, nnodes]
    def _set_cl_parameters(self, cl_options, F, detF, strain_vector, stress_vector, constitutive_matrix, N, DN_DX, model_part, properties, geom):
        """Assemble and sanity-check a ConstitutiveLawParameters object.

        All containers (strain/stress vectors, constitutive matrix, F) are
        passed by reference: the CL writes results back into them.
        """
        # Setting the parameters - note that a constitutive law may not need them all!
        cl_params = KratosMultiphysics.ConstitutiveLawParameters()
        cl_params.SetOptions(cl_options)
        cl_params.SetDeformationGradientF(F)
        cl_params.SetDeterminantF(detF)
        cl_params.SetStrainVector(strain_vector)
        cl_params.SetStressVector(stress_vector)
        cl_params.SetConstitutiveMatrix(constitutive_matrix)
        cl_params.SetShapeFunctionsValues(N)
        cl_params.SetShapeFunctionsDerivatives(DN_DX)
        cl_params.SetProcessInfo(model_part.ProcessInfo)
        cl_params.SetMaterialProperties(properties)
        cl_params.SetElementGeometry(geom)
        ## Do all sort of checks
        cl_params.CheckAllParameters() # Can not use this until the geometry is correctly exported to python
        cl_params.CheckMechanicalVariables()
        cl_params.CheckShapeFunctions()
        return cl_params
def _cl_check(self, cl, properties, geom, model_part, dim):
cl.Check(properties, geom, model_part.ProcessInfo)
if(cl.WorkingSpaceDimension() != dim):
raise Exception("Mismatch between the WorkingSpaceDimension of the Constitutive Law and the dimension of the space in which the test is performed")
def _set_cl_options(self, dict_options):
cl_options = KratosMultiphysics.Flags()
if ("USE_ELEMENT_PROVIDED_STRAIN" in dict_options):
cl_options.Set(KratosMultiphysics.ConstitutiveLaw.USE_ELEMENT_PROVIDED_STRAIN, dict_options["USE_ELEMENT_PROVIDED_STRAIN"])
if ("COMPUTE_STRESS" in dict_options):
cl_options.Set(KratosMultiphysics.ConstitutiveLaw.COMPUTE_STRESS, dict_options["COMPUTE_STRESS"])
if ("COMPUTE_CONSTITUTIVE_TENSOR" in dict_options):
cl_options.Set(KratosMultiphysics.ConstitutiveLaw.COMPUTE_CONSTITUTIVE_TENSOR, dict_options["COMPUTE_CONSTITUTIVE_TENSOR"])
if ("COMPUTE_STRAIN_ENERGY" in dict_options):
cl_options.Set(KratosMultiphysics.ConstitutiveLaw.COMPUTE_STRAIN_ENERGY, dict_options["COMPUTE_STRAIN_ENERGY"])
if ("ISOCHORIC_TENSOR_ONLY" in dict_options):
cl_options.Set(KratosMultiphysics.ConstitutiveLaw.ISOCHORIC_TENSOR_ONLY, dict_options["ISOCHORIC_TENSOR_ONLY"])
if ("VOLUMETRIC_TENSOR_ONLY" in dict_options):
cl_options.Set(KratosMultiphysics.ConstitutiveLaw.VOLUMETRIC_TENSOR_ONLY, dict_options["VOLUMETRIC_TENSOR_ONLY"])
if ("FINALIZE_MATERIAL_RESPONSE" in dict_options):
cl_options.Set(KratosMultiphysics.ConstitutiveLaw.FINALIZE_MATERIAL_RESPONSE, dict_options["FINALIZE_MATERIAL_RESPONSE"])
# From here below it should be an otput not an input
if ("FINITE_STRAINS" in dict_options):
cl_options.Set(KratosMultiphysics.ConstitutiveLaw.FINITE_STRAINS, dict_options["FINITE_STRAINS"])
if ("INFINITESIMAL_STRAINS" in dict_options):
cl_options.Set(KratosMultiphysics.ConstitutiveLaw.INFINITESIMAL_STRAINS, dict_options["INFINITESIMAL_STRAINS"])
if ("PLANE_STRAIN_LAW" in dict_options):
cl_options.Set(KratosMultiphysics.ConstitutiveLaw.PLANE_STRAIN_LAW, dict_options["PLANE_STRAIN_LAW"])
if ("PLANE_STRESS_LAW" in dict_options):
cl_options.Set(KratosMultiphysics.ConstitutiveLaw.PLANE_STRESS_LAW, dict_options["PLANE_STRESS_LAW"])
if ("AXISYMMETRIC_LAW" in dict_options):
cl_options.Set(KratosMultiphysics.ConstitutiveLaw.AXISYMMETRIC_LAW, dict_options["AXISYMMETRIC_LAW"])
if ("U_P_LAW" in dict_options):
cl_options.Set(KratosMultiphysics.ConstitutiveLaw.U_P_LAW, dict_options["U_P_LAW"])
if ("ISOTROPIC" in dict_options):
cl_options.Set(KratosMultiphysics.ConstitutiveLaw.ISOTROPIC, dict_options["ISOTROPIC"])
if ("ANISOTROPIC" in dict_options):
cl_options.Set(KratosMultiphysics.ConstitutiveLaw.ANISOTROPIC, dict_options["ANISOTROPIC"])
return cl_options
    def _print_cl_output(self, cl, cl_params, properties, geom, N, model_part):
        """Debug helper: print stress/strain/C for PK2, Kirchhoff and Cauchy.

        properties, geom, N and model_part are accepted for call-site symmetry
        but unused here.
        """
        print("The Material Response PK2")
        cl.CalculateMaterialResponsePK2(cl_params)
        print("Stress = ", cl_params.GetStressVector())
        print("Strain = ", cl_params.GetStrainVector())
        print("C = ", cl_params.GetConstitutiveMatrix())
        cl.FinalizeMaterialResponsePK2(cl_params)
        print("\nThe Material Response Kirchhoff")
        cl.CalculateMaterialResponseKirchhoff(cl_params)
        print("Stress = ", cl_params.GetStressVector())
        print("Strain = ", cl_params.GetStrainVector())
        print("C = ", cl_params.GetConstitutiveMatrix())
        cl.FinalizeMaterialResponseKirchhoff(cl_params)
        print("\nThe Material Response Cauchy")
        cl.CalculateMaterialResponseCauchy(cl_params)
        print("Stress = ", cl_params.GetStressVector())
        print("Strain = ", cl_params.GetStrainVector())
        print("C = ", cl_params.GetConstitutiveMatrix())
        cl.FinalizeMaterialResponseCauchy(cl_params)
    def _generic_constitutive_law_test(self, model_part, deformation_test):
        """Shared harness: drive a CL through a deformation history and
        compare Cauchy stresses against the scenario's reference values.

        deformation_test supplies the CL factory, the deformation gradient per
        timestep and the expected stresses.
        """
        # Define geometry
        [geom, nnodes] = self._create_geometry(model_part, deformation_test.cl.dim)
        N = KratosMultiphysics.Vector(nnodes)
        DN_DX = KratosMultiphysics.Matrix(nnodes, deformation_test.cl.dim)
        # Material properties
        properties = deformation_test.cl.create_properties(model_part)
        # Construct a constitutive law
        cl = deformation_test.cl.create_constitutive_Law()
        self._cl_check(cl, properties, geom, model_part, deformation_test.cl.dim)
        # Set the parameters to be employed
        dict_options = {'USE_ELEMENT_PROVIDED_STRAIN': False,
                        'COMPUTE_STRESS': True,
                        'COMPUTE_CONSTITUTIVE_TENSOR': True,
                        'FINITE_STRAINS': True,
                        'ISOTROPIC': True,
                        }
        cl_options = self._set_cl_options(dict_options)
        # Define deformation gradient
        F = deformation_test.get_init_deformation_gradientF()
        detF = 1.0
        # Zero-initialize strain/stress/C; the CL fills them on each call.
        stress_vector = KratosMultiphysics.Vector(cl.GetStrainSize())
        strain_vector = KratosMultiphysics.Vector(cl.GetStrainSize())
        constitutive_matrix = KratosMultiphysics.Matrix(cl.GetStrainSize(),cl.GetStrainSize())
        for i in range(0, cl.GetStrainSize()):
            stress_vector[i] = 0.0
            strain_vector[i] = 0.0
            for j in range(0, cl.GetStrainSize()):
                constitutive_matrix[i,j] = 0.0
        # Setting the parameters - note that a constitutive law may not need them all!
        cl_params = self._set_cl_parameters(cl_options, F, detF, strain_vector, stress_vector, constitutive_matrix, N, DN_DX, model_part, properties, geom)
        cl.InitializeMaterial(properties, geom, N)
        # Check the results
        deformation_test.initialize_reference_stress(cl.GetStrainSize())
        for i in range(deformation_test.nr_timesteps):
            deformation_test.set_deformation(cl_params, i)
            # Chauchy
            cl.CalculateMaterialResponseCauchy(cl_params)
            cl.FinalizeMaterialResponseCauchy(cl_params)
            reference_stress = deformation_test.get_reference_stress(i)
            stress = cl_params.GetStressVector()
            tolerance = 1.0e-4
            # Relative comparison, skipping components near zero.
            for j in range(cl.GetStrainSize()):
                if (abs(stress[j]) > tolerance):
                    self.assertAlmostEqual((reference_stress[j] - stress[j])/stress[j], 0.0, msg=("Error checking solution " + str(stress[j]) + " different from " + str(reference_stress[j]) + " with tolerance of " + str(tolerance)), delta=tolerance)
    def test_Uniaxial_KirchhoffSaintVenant_3D(self):
        """Run the generic harness with the uniaxial Kirchhoff-Saint-Venant 3D scenario."""
        self.skipTestIfApplicationsNotAvailable("ConstitutiveLawsApplication")
        # Keep a reference to the Model: it owns the model part's lifetime.
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("test")
        deformation_test = UniaxialKirchhoffSaintVenant3D(0.05)
        self._generic_constitutive_law_test(model_part, deformation_test)
    def test_Shear_KirchhoffSaintVenant_3D(self):
        """Run the generic harness with the simple-shear Kirchhoff-Saint-Venant 3D scenario."""
        self.skipTestIfApplicationsNotAvailable("ConstitutiveLawsApplication")
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("test")
        deformation_test = SimpleShearKirchhoffSaintVenant3D(0.02)
        self._generic_constitutive_law_test(model_part, deformation_test)
    def test_Shear_Plus_Strech_KirchhoffSaintVenant_3D(self):
        """Run the generic harness with the shear-plus-stretch Kirchhoff-Saint-Venant 3D scenario."""
        self.skipTestIfApplicationsNotAvailable("ConstitutiveLawsApplication")
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("test")
        deformation_test = ShearPlusStrechKirchhoffSaintVenant3D()
        self._generic_constitutive_law_test(model_part, deformation_test)
    def test_Uniaxial_HyperElastic_3D(self):
        """Run the generic harness with the uniaxial hyperelastic 3D scenario."""
        self.skipTestIfApplicationsNotAvailable("ConstitutiveLawsApplication")
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("test")
        deformation_test = UniaxialHyperElastic3D(0.2)
        self._generic_constitutive_law_test(model_part, deformation_test)
    def test_Shear_HyperElastic_3D(self):
        """Run the generic harness with the simple-shear hyperelastic 3D scenario."""
        self.skipTestIfApplicationsNotAvailable("ConstitutiveLawsApplication")
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("test")
        deformation_test = SimpleShearHyperElastic3D(0.2)
        self._generic_constitutive_law_test(model_part, deformation_test)
    def test_Shear_Plus_Strech_HyperElastic_3D(self):
        """Run the generic harness with the shear-plus-stretch hyperelastic 3D scenario."""
        self.skipTestIfApplicationsNotAvailable("ConstitutiveLawsApplication")
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("test")
        deformation_test = ShearPlusStrechHyperElastic3D()
        self._generic_constitutive_law_test(model_part, deformation_test)
    def test_Uniaxial_Linear_Elastic_3D(self):
        """Run the generic harness with the uniaxial linear-elastic 3D scenario."""
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("test")
        deformation_test = UniaxialLinearElastic3D(0.2)
        self._generic_constitutive_law_test(model_part, deformation_test)
    def test_Shear_Linear_Elastic_3D(self):
        """Run the generic harness with the simple-shear linear-elastic 3D scenario."""
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("test")
        deformation_test = SimpleShearLinearElastic3D(0.2)
        self._generic_constitutive_law_test(model_part, deformation_test)
    def test_Shear_Plus_Strech_Linear_Elastic_3D(self):
        """Run the generic harness with the shear-plus-stretch linear-elastic 3D scenario."""
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("test")
        deformation_test = ShearPlusStrechLinearElastic3D()
        self._generic_constitutive_law_test(model_part, deformation_test)
    def test_Uniaxial_Linear_Elastic_Plane_Stress_2D(self):
        """Run the generic harness with the uniaxial linear-elastic plane-stress 2D scenario."""
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("test")
        deformation_test = UniaxialLinearElasticPlaneStress2D(0.2)
        self._generic_constitutive_law_test(model_part, deformation_test)
    def test_Shear_Linear_Elastic_Plane_Stress_2D(self):
        """Run the generic harness with the simple-shear linear-elastic plane-stress 2D scenario."""
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("test")
        deformation_test = SimpleShearLinearElasticPlaneStress2D(0.2)
        self._generic_constitutive_law_test(model_part, deformation_test)
    def test_Uniaxial_Linear_Elastic_Plane_Stress_Uncoupled_Shear_2D(self):
        """Run the generic harness with the uniaxial plane-stress uncoupled-shear 2D scenario."""
        self.skipTestIfApplicationsNotAvailable("ConstitutiveLawsApplication")
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("test")
        deformation_test = UniaxialElasticPlaneStressUncoupledShear2D(0.2)
        self._generic_constitutive_law_test(model_part, deformation_test)
    def test_Shear_Linear_Elastic_Plane_Stress_Uncoupled_Shear_2D(self):
        """Run the generic harness with the simple-shear plane-stress uncoupled-shear 2D scenario."""
        self.skipTestIfApplicationsNotAvailable("ConstitutiveLawsApplication")
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("test")
        deformation_test = SimpleShearElasticPlaneStressUncoupledShear2D(0.2)
        self._generic_constitutive_law_test(model_part, deformation_test)
def test_J2_Plasticity_3D(self):
self.skipTestIfApplicationsNotAvailable("ConstitutiveLawsApplication")
def _generic_constitutive_law_test(model_part, deformation_test):
# Define geometry
[geom, nnodes] = self._create_geometry(model_part, deformation_test.cl.dim)
N = KratosMultiphysics.Vector(nnodes)
DN_DX = KratosMultiphysics.Matrix(nnodes, deformation_test.cl.dim)
# Material properties
properties = deformation_test.cl.create_properties(model_part)
# Construct a constitutive law
cl = deformation_test.cl.create_constitutive_Law()
self._cl_check(cl, properties, geom, model_part, deformation_test.cl.dim)
# Set the parameters to be employed
dict_options = {'USE_ELEMENT_PROVIDED_STRAIN': False,
'COMPUTE_STRESS': True,
'COMPUTE_CONSTITUTIVE_TENSOR': True,
'FINITE_STRAINS': True,
'ISOTROPIC': True,
}
cl_options = self._set_cl_options(dict_options)
# Define deformation gradient
F = deformation_test.get_init_deformation_gradientF()
detF = 1.0
stress_vector = KratosMultiphysics.Vector(cl.GetStrainSize())
strain_vector = KratosMultiphysics.Vector(cl.GetStrainSize())
constitutive_matrix = KratosMultiphysics.Matrix(cl.GetStrainSize(),cl.GetStrainSize())
for i in range(0, cl.GetStrainSize()):
stress_vector[i] = 0.0
strain_vector[i] = 0.0
for j in range(0, cl.GetStrainSize()):
constitutive_matrix[i,j] = 0.0
# Setting the parameters - note that a constitutive law may not need them all!
cl_params = self._set_cl_parameters(cl_options, F, detF, strain_vector, stress_vector, constitutive_matrix, N, DN_DX, model_part, properties, geom)
cl.InitializeMaterial(properties, geom, N)
# Check the results
deformation_test.initialize_reference_stress(cl.GetStrainSize())
for i in range(deformation_test.nr_timesteps):
deformation_test.set_deformation(cl_params, i)
# Chauchy
cl.CalculateMaterialResponseCauchy(cl_params)
cl.FinalizeMaterialResponseCauchy(cl_params)
reference_stress = deformation_test.get_reference_stress(i)
stress = cl_params.GetStressVector()
tolerance = 1.0e-4
for j in range(cl.GetStrainSize()):
if (abs(stress[j]) > tolerance):
self.assertAlmostEqual((reference_stress[j] - stress[j])/stress[j], 0.0, msg=("Error checking solution " + str(stress[j]) + " different from " + str(reference_stress[j]) + " with tolerance of " + str(tolerance)), delta=tolerance)
current_model = KratosMultiphysics.Model()
model_part = current_model.CreateModelPart("test")
deformation_test = DeformationSmallStrainJ2Plasticity3D()
_generic_constitutive_law_test(model_part, deformation_test)
    def test_J2_Plasticity_Plane_Strain_2D(self):
        """Drive the plane-strain J2 plasticity law through a strain ramp and
        compare the computed Cauchy stresses against tabulated references."""
        self.skipTestIfApplicationsNotAvailable("ConstitutiveLawsApplication")
        def _generic_constitutive_law_test(model_part, deformation_test):
            # Define geometry
            [geom, nnodes] = self._create_geometry(model_part, deformation_test.cl.dim)
            N = KratosMultiphysics.Vector(nnodes)
            DN_DX = KratosMultiphysics.Matrix(nnodes, deformation_test.cl.dim)
            # Material properties
            properties = deformation_test.cl.create_properties(model_part)
            # Construct a constitutive law
            cl = deformation_test.cl.create_constitutive_Law()
            self._cl_check(cl, properties, geom, model_part, deformation_test.cl.dim)
            # Set the parameters to be employed
            dict_options = {'USE_ELEMENT_PROVIDED_STRAIN': False,
                            'COMPUTE_STRESS': True,
                            'COMPUTE_CONSTITUTIVE_TENSOR': True,
                            'FINITE_STRAINS': True,
                            'ISOTROPIC': True,
                            }
            cl_options = self._set_cl_options(dict_options)
            # Define deformation gradient
            F = deformation_test.get_init_deformation_gradientF()
            detF = 1.0
            # Zero-initialize the stress/strain vectors and the tangent matrix.
            stress_vector = KratosMultiphysics.Vector(cl.GetStrainSize())
            strain_vector = KratosMultiphysics.Vector(cl.GetStrainSize())
            constitutive_matrix = KratosMultiphysics.Matrix(cl.GetStrainSize(),cl.GetStrainSize())
            for i in range(0, cl.GetStrainSize()):
                stress_vector[i] = 0.0
                strain_vector[i] = 0.0
                for j in range(0, cl.GetStrainSize()):
                    constitutive_matrix[i,j] = 0.0
            # Setting the parameters - note that a constitutive law may not need them all!
            cl_params = self._set_cl_parameters(cl_options, F, detF, strain_vector, stress_vector, constitutive_matrix, N, DN_DX, model_part, properties, geom)
            cl.InitializeMaterial(properties, geom, N)
            # Check the results
            deformation_test.initialize_reference_stress(cl.GetStrainSize())
            for i in range(deformation_test.nr_timesteps):
                deformation_test.set_deformation(cl_params, i)
                # Chauchy
                cl.CalculateMaterialResponseCauchy(cl_params)
                cl.FinalizeMaterialResponseCauchy(cl_params)
                reference_stress = deformation_test.get_reference_stress(i)
                stress = cl_params.GetStressVector()
                tolerance = 1.0e-4
                # NOTE(review): components with |stress| <= tolerance are not
                # checked at all -- a fully zero stress vector passes silently.
                for j in range(cl.GetStrainSize()):
                    if (abs(stress[j]) > tolerance):
                        self.assertAlmostEqual((reference_stress[j] - stress[j])/stress[j], 0.0, msg=("Error checking solution " + str(stress[j]) + " different from " + str(reference_stress[j]) + " with tolerance of " + str(tolerance)), delta=tolerance)
        # Build a throwaway model and run the generic driver on the 2D case.
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("test")
        deformation_test = DeformationSmallStrainJ2PlasticityPlaneStrain2D()
        _generic_constitutive_law_test(model_part, deformation_test)
    def test_Isotropic_Damage_Plane_Strain_2D(self):
        """Drive the plane-strain isotropic damage law through a deformation
        history and compare stresses against tabulated references."""
        self.skipTestIfApplicationsNotAvailable("ConstitutiveLawsApplication")
        def _generic_constitutive_law_test(model_part, deformation_test):
            # Define geometry
            [geom, nnodes] = self._create_geometry(model_part, deformation_test.cl.dim)
            N = KratosMultiphysics.Vector(nnodes)
            DN_DX = KratosMultiphysics.Matrix(nnodes, deformation_test.cl.dim)
            # Material properties
            properties = deformation_test.cl.create_properties(model_part)
            # Construct a constitutive law
            cl = deformation_test.cl.create_constitutive_Law()
            self._cl_check(cl, properties, geom, model_part, deformation_test.cl.dim)
            # Set the parameters to be employed
            dict_options = {'USE_ELEMENT_PROVIDED_STRAIN': False,
                            'COMPUTE_STRESS': True,
                            'COMPUTE_CONSTITUTIVE_TENSOR': True,
                            'FINITE_STRAINS': True,
                            'ISOTROPIC': True,
                            }
            cl_options = self._set_cl_options(dict_options)
            # Define deformation gradient
            F = deformation_test.get_init_deformation_gradientF()
            detF = 1.0
            # Zero-initialize the stress/strain vectors and the tangent matrix.
            stress_vector = KratosMultiphysics.Vector(cl.GetStrainSize())
            strain_vector = KratosMultiphysics.Vector(cl.GetStrainSize())
            constitutive_matrix = KratosMultiphysics.Matrix(cl.GetStrainSize(),cl.GetStrainSize())
            for i in range(0, cl.GetStrainSize()):
                stress_vector[i] = 0.0
                strain_vector[i] = 0.0
                for j in range(0, cl.GetStrainSize()):
                    constitutive_matrix[i,j] = 0.0
            # Setting the parameters - note that a constitutive law may not need them all!
            cl_params = self._set_cl_parameters(cl_options, F, detF, strain_vector, stress_vector, constitutive_matrix, N, DN_DX, model_part, properties, geom)
            cl.InitializeMaterial(properties, geom, N)
            # Check the results
            deformation_test.initialize_reference_stress(cl.GetStrainSize())
            for i in range(deformation_test.nr_timesteps):
                deformation_test.set_deformation(cl_params, i)
                # Chauchy
                cl.CalculateMaterialResponseCauchy(cl_params)
                cl.FinalizeMaterialResponseCauchy(cl_params)
                reference_stress = deformation_test.get_reference_stress(i)
                stress = cl_params.GetStressVector()
                tolerance = 1.0e-4
                # NOTE(review): components with |stress| <= tolerance are not
                # checked at all -- a fully zero stress vector passes silently.
                for j in range(cl.GetStrainSize()):
                    if (abs(stress[j]) > tolerance):
                        self.assertAlmostEqual((reference_stress[j] - stress[j])/stress[j], 0.0, msg=("Error checking solution " + str(stress[j]) + " different from " + str(reference_stress[j]) + " with tolerance of " + str(tolerance)), delta=tolerance)
        # Define a model
        current_model = KratosMultiphysics.Model()
        model_part = current_model.CreateModelPart("test")
        deformation_test = DeformationSmallStrainIsotropicDamagePlaneStrain2D()
        _generic_constitutive_law_test(model_part, deformation_test)
class Deformation():
    """Base class for the deformation scenarios applied to a constitutive law.

    Subclasses provide ``get_deformation_gradientF`` / ``get_determinantF``
    (and optionally a ``cl`` material wrapper with a ``dim`` attribute).
    """

    def __init__(self):
        # Number of pseudo time steps the driver loop will run.
        self.nr_timesteps = 100

    def get_init_deformation_gradientF(self):
        """Allocate and return the identity deformation gradient (undeformed)."""
        dim = self.cl.dim
        self.F = KratosMultiphysics.Matrix(dim, dim)
        for row in range(dim):
            for col in range(dim):
                self.F[row, col] = 1.0 if row == col else 0.0
        return self.F

    def initialize_reference_stress(self, strain_size):
        """Allocate and zero the reference stress vector."""
        self.reference_stress = KratosMultiphysics.Vector(strain_size)
        for component in range(strain_size):
            self.reference_stress[component] = 0.0

    def set_deformation(self, cl_params, i):
        """Push the deformation gradient and its determinant for step *i*."""
        cl_params.SetDeformationGradientF(self.get_deformation_gradientF(i))
        cl_params.SetDeterminantF(self.get_determinantF(i))
class UniaxialDeformation(Deformation):
    """Uniaxial stretch along x, growing linearly with the step index."""

    def __init__(self, deltaDef):
        Deformation.__init__(self)
        # Stretch increment applied per time step.
        self.deltaDef = deltaDef

    def get_deformation_gradientF(self, i):
        """Set F[0,0] to the current stretch and return F."""
        stretch = 1.0 + self.deltaDef * i
        self.F[0, 0] = stretch
        return self.F

    def get_determinantF(self, i):
        # Only F[0,0] deviates from identity, so det(F) equals the stretch.
        return 1.0 + self.deltaDef * i
class UniaxialKirchhoffSaintVenant3D(UniaxialDeformation):
    """Analytic Kirchhoff-Saint-Venant reference stresses for uniaxial stretch."""

    def __init__(self, deltaDef):
        UniaxialDeformation.__init__(self, deltaDef)
        self.cl = KirchhoffSaintVenant3D()

    def get_reference_stress(self, i):
        """Return the closed-form Cauchy stress vector for step *i*."""
        E = self.cl.young_modulus
        nu = self.cl.poisson_ratio
        # Lame constants from (E, nu).
        lame_lambda = (E * nu) / ((1.0 + nu) * (1.0 - 2.0 * nu))
        lame_mu = E / (2.0 * (1.0 + nu))
        detF = self.get_determinantF(i)
        green = detF ** 2.0 - 1.0  # twice the axial Green-Lagrange strain
        self.reference_stress[0] = ((lame_lambda * 0.5 + lame_mu) * green * (detF ** 2.0)) / detF
        self.reference_stress[1] = 0.5 * lame_lambda * green / detF
        self.reference_stress[2] = self.reference_stress[1]
        return self.reference_stress
class UniaxialHyperElastic3D(UniaxialDeformation):
    """Analytic hyperelastic (compressible Neo-Hookean form) reference
    stresses for uniaxial stretch."""

    def __init__(self, deltaDef):
        UniaxialDeformation.__init__(self, deltaDef)
        self.cl = HyperElastic3D()

    def get_reference_stress(self, i):
        """Return the closed-form Cauchy stress vector for step *i*."""
        E = self.cl.young_modulus
        nu = self.cl.poisson_ratio
        lame_lambda = (E * nu) / ((1.0 + nu) * (1.0 - 2.0 * nu))
        lame_mu = E / (2.0 * (1.0 + nu))
        detF = self.get_determinantF(i)
        volumetric = lame_lambda * math.log(detF)  # volumetric contribution
        self.reference_stress[0] = (volumetric + lame_mu * (detF ** 2.0 - 1.0)) / detF
        self.reference_stress[1] = volumetric / detF
        self.reference_stress[2] = self.reference_stress[1]
        return self.reference_stress
class UniaxialLinearElastic3D(UniaxialDeformation):
    """Analytic linear-elastic 3D reference stresses for uniaxial stretch,
    with the strain measured as Green-Lagrange: E_xx = (F00^2 - 1) / 2."""

    def __init__(self, deltaDef):
        UniaxialDeformation.__init__(self, deltaDef)
        self.cl = LinearElastic3D()

    def get_reference_stress(self, i):
        """Return the closed-form stress vector for step *i*."""
        nu = self.cl.poisson_ratio
        c0 = self.cl.young_modulus / ((1.0 + nu) * (1.0 - 2.0 * nu))
        F00 = self.get_deformation_gradientF(i)[0, 0]
        self.reference_stress[0] = c0 * (1.0 - nu) * (F00 ** 2.0 - 1.0) / 2.0
        self.reference_stress[1] = c0 * nu * (F00 ** 2.0 - 1.0) / 2.0
        self.reference_stress[2] = self.reference_stress[1]
        return self.reference_stress
class UniaxialLinearElasticPlaneStress2D(UniaxialDeformation):
    """Analytic plane-stress linear-elastic reference stresses for uniaxial
    stretch (Green-Lagrange strain measure)."""

    def __init__(self, deltaDef):
        UniaxialDeformation.__init__(self, deltaDef)
        self.cl = LinearElasticPlaneStress2D()

    def get_reference_stress(self, i):
        """Return the closed-form stress vector for step *i*."""
        nu = self.cl.poisson_ratio
        stiffness = self.cl.young_modulus / (1.0 - nu ** 2)
        F00 = self.get_deformation_gradientF(i)[0, 0]
        self.reference_stress[0] = stiffness * (F00 ** 2.0 - 1.0) / 2.0
        self.reference_stress[1] = stiffness * nu * (F00 ** 2.0 - 1.0) / 2.0
        return self.reference_stress
class UniaxialElasticPlaneStressUncoupledShear2D(UniaxialLinearElasticPlaneStress2D):
    """Same uniaxial reference solution as the plane-stress parent; only the
    constitutive law object is swapped for the uncoupled-shear variant."""

    def __init__(self, deltaDef):
        UniaxialLinearElasticPlaneStress2D.__init__(self, deltaDef)
        # Override the material wrapper; reference stresses are inherited
        # (under uniaxial stretch the shear term plays no role).
        self.cl = ElasticPlaneStressUncoupledShear2D()
class SimpleShearDeformation(Deformation):
    """Simple shear: F[0,1] grows linearly with the step index."""

    def __init__(self, deltaDef):
        Deformation.__init__(self)
        # Shear increment applied per time step.
        self.deltaDef = deltaDef

    def get_deformation_gradientF(self, i):
        """Set the current shear component and return F."""
        self.F[0, 1] = self.deltaDef * i
        return self.F

    def get_determinantF(self, i):
        # Simple shear is isochoric: det(F) == 1.
        return 1.0
class SimpleShearKirchhoffSaintVenant3D(SimpleShearDeformation):
    """Analytic Kirchhoff-Saint-Venant reference stresses under simple shear."""

    def __init__(self, deltaDef):
        SimpleShearDeformation.__init__(self, deltaDef)
        self.cl = KirchhoffSaintVenant3D()

    def get_reference_stress(self, i):
        """Return the closed-form stress vector (polynomial in the shear)."""
        E = self.cl.young_modulus
        nu = self.cl.poisson_ratio
        lame_lambda = (E * nu) / ((1.0 + nu) * (1.0 - 2.0 * nu))
        lame_mu = E / (2.0 * (1.0 + nu))
        gamma = self.get_deformation_gradientF(i)[0, 1]
        half_lambda = 0.5 * lame_lambda
        self.reference_stress[0] = (half_lambda + 2 * lame_mu) * (gamma) ** 2.0 + (half_lambda + lame_mu) * (gamma) ** 4.0
        self.reference_stress[1] = (half_lambda + lame_mu) * (gamma) ** 2.0
        self.reference_stress[2] = half_lambda * (gamma) ** 2.0
        self.reference_stress[3] = lame_mu * (gamma) + (half_lambda + lame_mu) * (gamma) ** 3.0
        return self.reference_stress
class SimpleShearHyperElastic3D(SimpleShearDeformation):
    """Analytic hyperelastic reference stresses under simple shear."""

    def __init__(self, deltaDef):
        SimpleShearDeformation.__init__(self, deltaDef)
        self.cl = HyperElastic3D()

    def get_reference_stress(self, i):
        """Return the closed-form stress vector for step *i*."""
        lame_mu = self.cl.young_modulus / (2.0 * (1.0 + self.cl.poisson_ratio))
        gamma = self.deltaDef * i
        # Only the normal x-component and the xy shear are non-zero.
        self.reference_stress[0] = lame_mu * gamma ** 2.0
        self.reference_stress[3] = lame_mu * gamma
        return self.reference_stress
class SimpleShearLinearElastic3D(SimpleShearDeformation):
    """Analytic linear-elastic 3D reference stresses under simple shear
    (Green-Lagrange strain measure)."""

    def __init__(self, deltaDef):
        SimpleShearDeformation.__init__(self, deltaDef)
        self.cl = LinearElastic3D()

    def get_reference_stress(self, i):
        """Return the closed-form stress vector for step *i*."""
        E = self.cl.young_modulus
        nu = self.cl.poisson_ratio
        c0 = E / ((1.0 + nu) * (1.0 - 2.0 * nu))
        gamma = self.get_deformation_gradientF(i)[0, 1]
        self.reference_stress[0] = c0 * nu * (gamma ** 2.0) / 2.0
        self.reference_stress[1] = c0 * (1.0 - nu) * (gamma ** 2.0) / 2.0
        self.reference_stress[2] = self.reference_stress[0]
        # Shear term: G * gamma with G = E / (2 (1 + nu)).
        self.reference_stress[3] = E / ((1.0 + nu) * 2.0) * gamma
        return self.reference_stress
class SimpleShearLinearElasticPlaneStress2D(SimpleShearDeformation):
    """Analytic plane-stress linear-elastic reference stresses under simple
    shear (Green-Lagrange strain measure)."""

    def __init__(self, deltaDef):
        SimpleShearDeformation.__init__(self, deltaDef)
        self.cl = LinearElasticPlaneStress2D()

    def get_reference_stress(self, i):
        """Return the closed-form stress vector for step *i*."""
        E = self.cl.young_modulus
        nu = self.cl.poisson_ratio
        c0 = E / (1.0 - nu ** 2)
        gamma = self.get_deformation_gradientF(i)[0, 1]
        self.reference_stress[0] = c0 * nu * (gamma ** 2.0) / 2.0
        self.reference_stress[1] = c0 * (gamma ** 2.0) / 2.0
        # Shear term: G * gamma with G = E / (2 (1 + nu)).
        self.reference_stress[2] = E / ((1.0 + nu) * 2.0) * gamma
        return self.reference_stress
class SimpleShearElasticPlaneStressUncoupledShear2D(SimpleShearDeformation):
    """Uncoupled-shear plane-stress law: the effective shear modulus is a
    quartic polynomial in |gamma12|."""

    def __init__(self, deltaDef):
        SimpleShearDeformation.__init__(self, deltaDef)
        self.cl = ElasticPlaneStressUncoupledShear2D()

    def get_reference_stress(self, i):
        """Return the closed-form stress vector for step *i*."""
        law = self.cl
        c0 = law.young_modulus / (1.0 - law.poisson_ratio ** 2)
        gamma = self.get_deformation_gradientF(i)[0, 1]
        abs_gamma = abs(gamma)
        self.reference_stress[0] = c0 * law.poisson_ratio * (gamma ** 2.0) / 2.0
        self.reference_stress[1] = c0 * (gamma ** 2.0) / 2.0
        # Effective shear modulus: polynomial terms summed in ascending order.
        effective_shear = (law.shear_modulus
                           + law.shear_modulus_gamma12 * abs_gamma
                           + law.shear_modulus_gamma12_2 * abs_gamma ** 2
                           + law.shear_modulus_gamma12_3 * abs_gamma ** 3
                           + law.shear_modulus_gamma12_4 * abs_gamma ** 4)
        self.reference_stress[2] = effective_shear * gamma
        return self.reference_stress
class ShearPlusStrechDeformation(Deformation):
    """In-plane rotation about z combined with out-of-plane shear terms.

    NOTE(review): "Strech" (sic) follows the existing class-name spelling;
    renaming would break external references.
    """

    def __init__(self):
        Deformation.__init__(self)
        # Shear amplitudes and rotation increment per step.
        self.x1beta = 1.0
        self.x2beta = 1.0
        self.x3beta = math.pi / 200

    def get_deformation_gradientF(self, i):
        """Fill the rotation block and the x/y-z shear column, return F."""
        c = math.cos(self.x3beta * i)
        s = math.sin(self.x3beta * i)
        self.F[0, 0] = c
        self.F[0, 1] = -s
        self.F[1, 0] = s
        self.F[1, 1] = c
        self.F[0, 2] = - self.x1beta * s - self.x2beta * c
        self.F[1, 2] = self.x1beta * c - self.x2beta * s
        return self.F

    def get_determinantF(self, i):
        # Rotation plus this shear pattern preserves volume: det(F) == 1.
        return 1.0
class ShearPlusStrechKirchhoffSaintVenant3D(ShearPlusStrechDeformation):
    """Analytic Kirchhoff-Saint-Venant stresses for the rotation-plus-shear
    motion. The long closed-form expressions below were presumably generated
    symbolically (e.g. with a CAS) -- left untouched on purpose."""

    def __init__(self):
        ShearPlusStrechDeformation.__init__(self)
        self.cl = KirchhoffSaintVenant3D()

    def get_reference_stress(self, i):
        """Return the analytic stress vector for step ``i`` (Voigt notation)."""
        # Lame constants from Young's modulus and Poisson's ratio.
        lame_lambda = (self.cl.young_modulus * self.cl.poisson_ratio) / ((1.0 + self.cl.poisson_ratio) * (1.0 - 2.0 * self.cl.poisson_ratio))
        lame_mu = self.cl.young_modulus / (2.0 * (1.0 + self.cl.poisson_ratio))
        x1beta = self.x1beta
        x2beta = self.x2beta
        x3beta = self.x3beta
        self.reference_stress[0]= math.cos(x3beta * i)*(x2beta*lame_mu*(x2beta*math.cos(x3beta * i) + x1beta*math.sin(x3beta * i)) + (lame_lambda*math.cos(x3beta * i)*(x1beta**2 + x2beta**2))/2) + (x2beta*math.cos(x3beta * i) + x1beta*math.sin(x3beta * i))*((lame_lambda/2 + lame_mu)*(x1beta**2 + x2beta**2)*(x2beta*math.cos(x3beta * i) + x1beta*math.sin(x3beta * i)) + x2beta*lame_mu*math.cos(x3beta * i) + x1beta*lame_mu*math.sin(x3beta * i)) + math.sin(x3beta * i)*((lame_lambda*math.sin(x3beta * i)*(x1beta**2 + x2beta**2))/2 + x1beta*lame_mu*(x2beta*math.cos(x3beta * i) + x1beta*math.sin(x3beta * i)))
        self.reference_stress[1]= math.cos(x3beta * i)*(x1beta*lame_mu*(x1beta*math.cos(x3beta * i) - x2beta*math.sin(x3beta * i)) + (lame_lambda*math.cos(x3beta * i)*(x1beta**2 + x2beta**2))/2) + (x1beta*math.cos(x3beta * i) - x2beta*math.sin(x3beta * i))*((lame_lambda/2 + lame_mu)*(x1beta**2 + x2beta**2)*(x1beta*math.cos(x3beta * i) - x2beta*math.sin(x3beta * i)) + x1beta*lame_mu*math.cos(x3beta * i) - x2beta*lame_mu*math.sin(x3beta * i)) + math.sin(x3beta * i)*((lame_lambda*math.sin(x3beta * i)*(x1beta**2 + x2beta**2))/2 - x2beta*lame_mu*(x1beta*math.cos(x3beta * i) - x2beta*math.sin(x3beta * i)))
        self.reference_stress[2]=(lame_lambda/2 + lame_mu)*(x1beta**2 + x2beta**2)
        self.reference_stress[3]= math.sin(x3beta * i)*(x2beta*lame_mu*(x2beta*math.cos(x3beta * i) + x1beta*math.sin(x3beta * i)) + (lame_lambda*math.cos(x3beta * i)*(x1beta**2 + x2beta**2))/2) - math.cos(x3beta * i)*((lame_lambda*math.sin(x3beta * i)*(x1beta**2 + x2beta**2))/2 + x1beta*lame_mu*(x2beta*math.cos(x3beta * i) + x1beta*math.sin(x3beta * i))) - (x1beta*math.cos(x3beta * i) - x2beta*math.sin(x3beta * i))*((lame_lambda/2 + lame_mu)*(x1beta**2 + x2beta**2)*(x2beta*math.cos(x3beta * i) + x1beta*math.sin(x3beta * i)) + x2beta*lame_mu*math.cos(x3beta * i) + x1beta*lame_mu*math.sin(x3beta * i))
        self.reference_stress[4]=(lame_lambda/2 + lame_mu)*(x1beta**2 + x2beta**2)*(x1beta*math.cos(x3beta * i) - x2beta*math.sin(x3beta * i)) + x1beta*lame_mu*math.cos(x3beta * i) - x2beta*lame_mu*math.sin(x3beta * i)
        self.reference_stress[5]=- (lame_lambda/2 + lame_mu)*(x1beta**2 + x2beta**2)*(x2beta*math.cos(x3beta * i) + x1beta*math.sin(x3beta * i)) - x2beta*lame_mu*math.cos(x3beta * i) - x1beta*lame_mu*math.sin(x3beta * i)
        return self.reference_stress
class ShearPlusStrechHyperElastic3D(ShearPlusStrechDeformation):
    """Analytic hyperelastic stresses for the rotation-plus-shear motion."""

    def __init__(self):
        ShearPlusStrechDeformation.__init__(self)
        self.cl = HyperElastic3D()

    def get_reference_stress(self, i):
        """Return the analytic stress vector for step *i* (Voigt notation)."""
        lame_mu = self.cl.young_modulus / (2.0 * (1.0 + self.cl.poisson_ratio))
        c = math.cos(i * self.x3beta)
        s = math.sin(i * self.x3beta)
        shear_a = self.x2beta * c + self.x1beta * s
        shear_b = self.x1beta * c - self.x2beta * s
        self.reference_stress[0] = shear_a ** 2.0
        self.reference_stress[1] = shear_b ** 2.0
        self.reference_stress[3] = shear_a * (-shear_b)
        self.reference_stress[4] = shear_b
        self.reference_stress[5] = -shear_a
        # Every component scales with the shear modulus.
        self.reference_stress *= lame_mu
        return self.reference_stress
class ShearPlusStrechLinearElastic3D(ShearPlusStrechDeformation):
    """Analytic linear-elastic stresses for the rotation-plus-shear motion."""

    def __init__(self):
        ShearPlusStrechDeformation.__init__(self)
        self.cl = LinearElastic3D()

    def get_reference_stress(self, i):
        """Return the analytic stress vector; independent of the rotation step."""
        E = self.cl.young_modulus
        nu = self.cl.poisson_ratio
        c0 = E / ((1.0 + nu) * (1.0 - 2.0 * nu))
        c1 = E / (2.0 * (1.0 + nu))  # shear modulus
        x1beta = self.x1beta
        x2beta = self.x2beta
        self.reference_stress[0] = c0 * nu * (x1beta ** 2.0 + x2beta ** 2.0) / 2.0
        self.reference_stress[1] = self.reference_stress[0]
        self.reference_stress[2] = c0 * (1.0 - nu) * (x1beta ** 2.0 + x2beta ** 2.0) / 2.0
        self.reference_stress[4] = x2beta * c1
        self.reference_stress[5] = -c1 * x1beta
        return self.reference_stress
class DeformationSmallStrainJ2Plasticity(Deformation):
    """Base driver for small-strain J2 plasticity: the strain (not F) is
    imposed, ramped linearly over the time steps."""

    def __init__(self):
        Deformation.__init__(self)
        self.nr_timesteps = 10

    def get_deformation_gradientF(self, i):
        # Strain is imposed directly; F keeps its initial (identity) value.
        return self.F

    def get_determinantF(self, i):
        return 1.0

    def set_deformation(self, cl_params, i):
        """Impose the ramped strain (fraction (i+1)/n of initial_strain)."""
        self.strain = (i + 1) / self.nr_timesteps * self.initial_strain
        cl_params.SetStrainVector(self.strain)
class DeformationSmallStrainJ2Plasticity3D(DeformationSmallStrainJ2Plasticity):
    """3D J2-plasticity case: imposed strain ramp with tabulated reference
    stresses.

    The ``get_deformation_gradientF`` / ``get_determinantF`` overrides that
    were byte-identical to the base class have been removed.
    """

    # Reference stresses per time step, Voigt components (xx, yy, zz, xy, yz, xz).
    # Values presumably come from an independent return-mapping computation;
    # kept verbatim from the original table -- TODO confirm their provenance.
    _REFERENCE_STRESSES = (
        (4.03846, 4.03846, 2.42308, 0.80769, 0.0, 0.80769),
        (8.07692, 8.07692, 4.84615, 1.61538, 0.0, 1.61538),
        (11.6595, 11.6595, 8.18099, 1.73926, 0.0, 1.73926),
        (15.1595, 15.1595, 11.681, 1.73926, 0.0, 1.73926),
        (18.6595, 18.6595, 15.181, 1.73926, 0.0, 1.73926),
        (22.1595, 22.1595, 18.681, 1.73927, 0.0, 1.73927),
        (25.6595, 25.6595, 22.181, 1.73927, 0.0, 1.73927),
        (29.1595, 29.1595, 25.681, 1.73928, 0.0, 1.73928),
        (32.6595, 32.6595, 29.181, 1.73928, 0.0, 1.73928),
        (36.1595, 36.1595, 32.681, 1.73929, 0.0, 1.73929),
    )

    def __init__(self):
        DeformationSmallStrainJ2Plasticity.__init__(self)
        self.cl = SmallStrainJ2Plasticity3D()

    def initialize_reference_stress(self, strain_size):
        """Set the imposed strain target and load the reference stress table."""
        self.initial_strain = KratosMultiphysics.Vector(strain_size)
        self.initial_strain[0] = 0.001
        self.initial_strain[1] = 0.001
        self.initial_strain[2] = 0.0
        self.initial_strain[3] = 0.001
        self.initial_strain[4] = 0.0
        self.initial_strain[5] = 0.001
        self.reference_stress = []
        for row in self._REFERENCE_STRESSES:
            vector = KratosMultiphysics.Vector(strain_size)
            for component, value in enumerate(row):
                vector[component] = value
            self.reference_stress.append(vector)

    def get_reference_stress(self, i):
        """Return the tabulated reference stress vector for step *i*."""
        return self.reference_stress[i]
class DeformationSmallStrainJ2PlasticityPlaneStrain2D(DeformationSmallStrainJ2Plasticity):
    """Plane-strain J2-plasticity case: imposed strain ramp with tabulated
    reference stresses.

    The ``get_deformation_gradientF`` / ``get_determinantF`` overrides that
    were byte-identical to the base class have been removed.
    """

    # Reference stresses per time step, Voigt components (xx, yy, zz, xy).
    # Values kept verbatim from the original table -- TODO confirm provenance.
    _REFERENCE_STRESSES = (
        (4.03846, 4.03846, 2.42308, 0.807692),
        (8.07692, 8.07692, 4.84615, 1.61538),
        (11.8859, 11.8859, 7.72826, 2.07881),
        (15.3859, 15.3859, 11.2283, 2.07881),
        (18.8859, 18.8859, 14.7282, 2.07882),
        (22.3859, 22.3859, 18.2282, 2.07882),
        (25.8859, 25.8859, 21.7282, 2.07882),
        (29.3859, 29.3859, 25.2282, 2.07883),
        (32.8859, 32.8859, 28.7282, 2.07883),
        (36.3859, 36.3859, 32.2282, 2.07884),
    )

    def __init__(self):
        DeformationSmallStrainJ2Plasticity.__init__(self)
        self.cl = SmallStrainJ2PlasticityPlaneStrain2D()

    def initialize_reference_stress(self, strain_size):
        """Set the imposed strain target and load the reference stress table."""
        self.initial_strain = KratosMultiphysics.Vector(strain_size)
        self.initial_strain[0] = 0.001
        self.initial_strain[1] = 0.001
        self.initial_strain[2] = 0.0
        self.initial_strain[3] = 0.001
        self.reference_stress = []
        for row in self._REFERENCE_STRESSES:
            vector = KratosMultiphysics.Vector(strain_size)
            for component, value in enumerate(row):
                vector[component] = value
            self.reference_stress.append(vector)

    def get_reference_stress(self, i):
        """Return the tabulated reference stress vector for step *i*."""
        return self.reference_stress[i]
class DeformationSmallStrainIsotropicDamagePlaneStrain2D(Deformation):
    """Plane-strain isotropic-damage case: imposed strain ramp with tabulated
    reference stresses.

    FIX: this class defined ``initial_strain`` and a per-step reference table
    but inherited ``Deformation.set_deformation``, which only pushes the
    (identity) deformation gradient -- so no strain was ever imposed, the
    computed stresses stayed zero, and the driver's ``abs(stress) > tol``
    guard silently skipped every assertion. ``set_deformation`` now ramps
    the strain exactly like the J2 drivers; the table matches this ramp
    (e.g. step 0: c0*eps = 3000/(1.3*0.4) * 0.0001 = 0.57692).
    """

    # Reference stresses per time step, Voigt components (xx, yy, xy).
    _REFERENCE_STRESSES = (
        (0.57692, 0.57692, 0.11538),
        (1.15384, 1.15384, 0.23077),
        (1.73076, 1.73076, 0.34615),
        (2.00123, 2.00123, 0.40025),
        (2.17431, 2.17431, 0.43486),
        (2.34738, 2.34738, 0.46948),
        (2.52046, 2.52046, 0.50409),
        (2.69354, 2.69354, 0.53871),
        (2.80484, 2.80484, 0.56097),
        (2.80484, 2.80484, 0.56097),
    )

    def __init__(self):
        Deformation.__init__(self)
        self.nr_timesteps = 10
        self.cl = SmallStrainIsotropicDamagePlaneStrain2D()

    def get_deformation_gradientF(self, i):
        # Strain is imposed directly; F keeps its initial (identity) value.
        return self.F

    def get_determinantF(self, i):
        return 1.0

    def set_deformation(self, cl_params, i):
        """Impose the ramped strain (fraction (i+1)/n of initial_strain)."""
        self.strain = (i + 1) / self.nr_timesteps * self.initial_strain
        cl_params.SetStrainVector(self.strain)

    def initialize_reference_stress(self, strain_size):
        """Set the imposed strain target and load the reference stress table."""
        self.initial_strain = KratosMultiphysics.Vector(strain_size)
        self.initial_strain[0] = 0.001
        self.initial_strain[1] = 0.001
        self.initial_strain[2] = 0.001
        self.reference_stress = []
        for row in self._REFERENCE_STRESSES:
            vector = KratosMultiphysics.Vector(strain_size)
            for component, value in enumerate(row):
                vector[component] = value
            self.reference_stress.append(vector)

    def get_reference_stress(self, i):
        """Return the tabulated reference stress vector for step *i*."""
        return self.reference_stress[i]
class DeformationSmallStrainIsotropicPlasticity3D(Deformation):
    """3D isotropic-damage case with imposed strain ramp.

    NOTE(review): the class name says "Plasticity" but it wraps
    ``SmallStrainIsotropicDamage3D`` -- presumably a naming slip; the name is
    kept because external callers may reference it.

    FIX: like the plane-strain damage driver, this class defined
    ``initial_strain`` and a reference table but inherited
    ``Deformation.set_deformation``, which never imposes the strain (F stays
    identity), leaving all stresses zero and every assertion silently
    skipped. ``set_deformation`` now ramps the strain like the J2 drivers;
    the table matches this ramp (step 0: xx = 0.57692 = c0 * 0.0001).
    """

    # Reference stresses per time step, Voigt components (xx, yy, zz, xy, yz, xz).
    _REFERENCE_STRESSES = (
        (0.57692, 0.57692, 0.34615, 0.11538, 0.0, 0.11538),
        (1.15384, 1.15384, 0.69231, 0.23077, 0.0, 0.23077),
        (1.73076, 1.73076, 1.03850, 0.34615, 0.0, 0.34615),
        (1.94550, 1.94550, 1.16730, 0.38910, 0.0, 0.38910),
        (2.11858, 2.11858, 1.27120, 0.42372, 0.0, 0.42372),
        (2.29166, 2.29166, 1.37500, 0.45833, 0.0, 0.45833),
        (2.46473, 2.46473, 1.47880, 0.49295, 0.0, 0.49295),
        (2.63781, 2.63781, 1.58270, 0.52756, 0.0, 0.52756),
        (2.68543, 2.68543, 1.61130, 0.53709, 0.0, 0.53709),
        (2.68543, 2.68543, 1.61130, 0.53709, 0.0, 0.53709),
    )

    def __init__(self):
        Deformation.__init__(self)
        self.nr_timesteps = 10
        self.cl = SmallStrainIsotropicDamage3D()

    def get_deformation_gradientF(self, i):
        # Strain is imposed directly; F keeps its initial (identity) value.
        return self.F

    def get_determinantF(self, i):
        return 1.0

    def set_deformation(self, cl_params, i):
        """Impose the ramped strain (fraction (i+1)/n of initial_strain)."""
        self.strain = (i + 1) / self.nr_timesteps * self.initial_strain
        cl_params.SetStrainVector(self.strain)

    def initialize_reference_stress(self, strain_size):
        """Set the imposed strain target and load the reference stress table."""
        self.initial_strain = KratosMultiphysics.Vector(strain_size)
        self.initial_strain[0] = 0.001
        self.initial_strain[1] = 0.001
        self.initial_strain[2] = 0.0
        self.initial_strain[3] = 0.001
        self.initial_strain[4] = 0.0
        self.initial_strain[5] = 0.001
        self.reference_stress = []
        for row in self._REFERENCE_STRESSES:
            vector = KratosMultiphysics.Vector(strain_size)
            for component, value in enumerate(row):
                vector[component] = value
            self.reference_stress.append(vector)

    def get_reference_stress(self, i):
        """Return the tabulated reference stress vector for step *i*."""
        return self.reference_stress[i]
class LinearElastic():
    """Material-data holder for a linear elastic law (steel-like defaults)."""

    def __init__(self):
        self.young_modulus = 200e9
        self.poisson_ratio = 0.3

    def create_properties(self, model_part):
        """Fill and return Properties(0) of *model_part* with E and nu."""
        properties = model_part.Properties[0]
        properties.SetValue(KratosMultiphysics.YOUNG_MODULUS, self.young_modulus)
        properties.SetValue(KratosMultiphysics.POISSON_RATIO, self.poisson_ratio)
        return properties
class KirchhoffSaintVenant3D(LinearElastic):
    """Data wrapper and factory for the 3D Kirchhoff-Saint-Venant law."""

    def __init__(self):
        LinearElastic.__init__(self)
        self.dim = 3

    @staticmethod
    def create_constitutive_Law():
        """Instantiate the Kratos constitutive-law object."""
        return ConstitutiveLawsApplication.KirchhoffSaintVenant3DLaw()
class HyperElastic3D(LinearElastic):
    """Data wrapper and factory for the 3D hyperelastic law."""

    def __init__(self):
        LinearElastic.__init__(self)
        self.dim = 3

    @staticmethod
    def create_constitutive_Law():
        """Instantiate the Kratos constitutive-law object."""
        return ConstitutiveLawsApplication.HyperElastic3DLaw()
class LinearElastic3D(LinearElastic):
    """Data wrapper and factory for the 3D linear elastic law."""

    def __init__(self):
        LinearElastic.__init__(self)
        self.dim = 3

    @staticmethod
    def create_constitutive_Law():
        """Instantiate the Kratos constitutive-law object."""
        return StructuralMechanicsApplication.LinearElastic3DLaw()
class LinearElasticPlaneStress2D(LinearElastic):
    """Data wrapper and factory for the plane-stress linear elastic law."""

    def __init__(self):
        LinearElastic.__init__(self)
        self.dim = 2

    @staticmethod
    def create_constitutive_Law():
        """Instantiate the Kratos constitutive-law object."""
        return StructuralMechanicsApplication.LinearElasticPlaneStress2DLaw()
class ElasticPlaneStressUncoupledShear2D(LinearElasticPlaneStress2D):
    """Plane-stress law whose shear modulus is a quartic polynomial in
    |gamma12| (coefficients of order 0..4)."""

    def __init__(self):
        LinearElasticPlaneStress2D.__init__(self)
        self.shear_modulus = 0.2e6 # shear_modulus = 75e9
        self.shear_modulus_gamma12 = -1.6e6
        self.shear_modulus_gamma12_2 = 6.4e6
        self.shear_modulus_gamma12_3 = -9.8e6
        self.shear_modulus_gamma12_4 = 6.7e6

    def create_properties(self, model_part):
        """Extend the base elastic properties with the shear-polynomial values."""
        properties = LinearElastic.create_properties(self, model_part)
        shear_values = (
            (KratosMultiphysics.SHEAR_MODULUS, self.shear_modulus),
            (KratosMultiphysics.SHEAR_MODULUS_GAMMA12, self.shear_modulus_gamma12),
            (KratosMultiphysics.SHEAR_MODULUS_GAMMA12_2, self.shear_modulus_gamma12_2),
            (KratosMultiphysics.SHEAR_MODULUS_GAMMA12_3, self.shear_modulus_gamma12_3),
            (KratosMultiphysics.SHEAR_MODULUS_GAMMA12_4, self.shear_modulus_gamma12_4),
        )
        for variable, value in shear_values:
            properties.SetValue(variable, value)
        return properties

    @staticmethod
    def create_constitutive_Law():
        """Instantiate the Kratos constitutive-law object."""
        return ConstitutiveLawsApplication.ElasticPlaneStressUncoupledShear2DLaw()
class SmallStrainJ2Plasticity3D():
    """Material data and factory for the 3D small-strain J2 plasticity law."""

    def __init__(self):
        self.dim = 3
        self.young_modulus = 21000
        self.poisson_ratio = 0.3
        self.yield_stress = 5.5
        self.isotropic_hardening_modulus = 0.12924
        self.exponential_saturation_yield_stress = 5.5
        self.hardening_exponent = 1.0

    def create_properties(self, model_part):
        """Fill Properties(0) with the elastic and hardening parameters."""
        properties = model_part.Properties[0]
        material_values = (
            (KratosMultiphysics.YOUNG_MODULUS, self.young_modulus),
            (KratosMultiphysics.POISSON_RATIO, self.poisson_ratio),
            (KratosMultiphysics.YIELD_STRESS, self.yield_stress),
            (KratosMultiphysics.ISOTROPIC_HARDENING_MODULUS, self.isotropic_hardening_modulus),
            (ConstitutiveLawsApplication.EXPONENTIAL_SATURATION_YIELD_STRESS, self.exponential_saturation_yield_stress),
            (KratosMultiphysics.HARDENING_EXPONENT, self.hardening_exponent),
        )
        for variable, value in material_values:
            properties.SetValue(variable, value)
        return properties

    @staticmethod
    def create_constitutive_Law():
        """Instantiate the Kratos constitutive-law object."""
        return ConstitutiveLawsApplication.SmallStrainJ2Plasticity3DLaw()
class SmallStrainJ2PlasticityPlaneStrain2D():
    """Material data and factory for the plane-strain small-strain J2
    plasticity law (same parameters as the 3D variant)."""

    def __init__(self):
        self.dim = 2
        self.young_modulus = 21000
        self.poisson_ratio = 0.3
        self.yield_stress = 5.5
        self.isotropic_hardening_modulus = 0.12924
        self.exponential_saturation_yield_stress = 5.5
        self.hardening_exponent = 1.0

    def create_properties(self, model_part):
        """Fill Properties(0) with the elastic and hardening parameters."""
        properties = model_part.Properties[0]
        material_values = (
            (KratosMultiphysics.YOUNG_MODULUS, self.young_modulus),
            (KratosMultiphysics.POISSON_RATIO, self.poisson_ratio),
            (KratosMultiphysics.YIELD_STRESS, self.yield_stress),
            (KratosMultiphysics.ISOTROPIC_HARDENING_MODULUS, self.isotropic_hardening_modulus),
            (ConstitutiveLawsApplication.EXPONENTIAL_SATURATION_YIELD_STRESS, self.exponential_saturation_yield_stress),
            (KratosMultiphysics.HARDENING_EXPONENT, self.hardening_exponent),
        )
        for variable, value in material_values:
            properties.SetValue(variable, value)
        return properties

    @staticmethod
    def create_constitutive_Law():
        """Instantiate the Kratos constitutive-law object."""
        return ConstitutiveLawsApplication.SmallStrainJ2PlasticityPlaneStrain2DLaw()
class SmallStrainIsotropicDamagePlaneStrain2D():
    """Material data and factory for the plane-strain isotropic damage law."""

    def __init__(self):
        self.dim = 2
        self.young_modulus = 3000
        self.poisson_ratio = 0.3

    def create_properties(self, model_part):
        """Fill Properties(0): elasticity plus the hardening-curve data."""
        properties = model_part.Properties[0]
        properties.SetValue(KratosMultiphysics.YOUNG_MODULUS, self.young_modulus)
        properties.SetValue(KratosMultiphysics.POISSON_RATIO, self.poisson_ratio)
        # HARDENING_CURVE = 1 -- curve-type id interpreted by the application;
        # TODO confirm its exact meaning against the law's documentation.
        properties.SetValue(ConstitutiveLawsApplication.HARDENING_CURVE, 1)
        stress_limits = KratosMultiphysics.Vector(2)
        stress_limits[0] = 2.0
        stress_limits[1] = 3.0
        properties.SetValue(ConstitutiveLawsApplication.STRESS_LIMITS, stress_limits)
        hardening_parameters = KratosMultiphysics.Vector(2)
        hardening_parameters[0] = 0.3
        hardening_parameters[1] = 0.
        properties.SetValue(ConstitutiveLawsApplication.HARDENING_PARAMETERS, hardening_parameters)
        return properties

    @staticmethod
    def create_constitutive_Law():
        """Instantiate the Kratos constitutive-law object."""
        return ConstitutiveLawsApplication.SmallStrainIsotropicDamagePlaneStrain2DLaw()
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    KratosUnittest.main()
| 49.677725 | 607 | 0.673536 |
3bec5b5e7f4b296055df1719e93b9d2599472a82 | 60,246 | py | Python | kubernetes/client/apis/storage_v1alpha1_api.py | redjohn/python | 5e512ff564c244c50cab780d821542ed56aa965a | [
"Apache-2.0"
] | 1 | 2019-04-14T23:51:35.000Z | 2019-04-14T23:51:35.000Z | kubernetes/client/apis/storage_v1alpha1_api.py | redjohn/python | 5e512ff564c244c50cab780d821542ed56aa965a | [
"Apache-2.0"
] | null | null | null | kubernetes/client/apis/storage_v1alpha1_api.py | redjohn/python | 5e512ff564c244c50cab780d821542ed56aa965a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class StorageV1alpha1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        # Auto-generated (Swagger Codegen): do not hand-edit the logic.
        # Fall back to a default ApiClient when none is supplied.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
def create_volume_attachment(self, body, **kwargs):
"""
create a VolumeAttachment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_volume_attachment(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1alpha1VolumeAttachment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1alpha1VolumeAttachment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_volume_attachment_with_http_info(body, **kwargs)
else:
(data) = self.create_volume_attachment_with_http_info(body, **kwargs)
return data
def create_volume_attachment_with_http_info(self, body, **kwargs):
"""
create a VolumeAttachment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_volume_attachment_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1alpha1VolumeAttachment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1alpha1VolumeAttachment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_volume_attachment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_volume_attachment`")
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1alpha1/volumeattachments', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1VolumeAttachment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_volume_attachment(self, **kwargs):
"""
delete collection of VolumeAttachment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_volume_attachment(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_volume_attachment_with_http_info(**kwargs)
else:
(data) = self.delete_collection_volume_attachment_with_http_info(**kwargs)
return data
def delete_collection_volume_attachment_with_http_info(self, **kwargs):
"""
delete collection of VolumeAttachment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_volume_attachment_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_volume_attachment" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1alpha1/volumeattachments', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_volume_attachment(self, name, **kwargs):
"""
delete a VolumeAttachment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_volume_attachment(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the VolumeAttachment (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_volume_attachment_with_http_info(name, **kwargs)
else:
(data) = self.delete_volume_attachment_with_http_info(name, **kwargs)
return data
def delete_volume_attachment_with_http_info(self, name, **kwargs):
"""
delete a VolumeAttachment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_volume_attachment_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the VolumeAttachment (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'pretty', 'body', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_volume_attachment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_volume_attachment`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1alpha1/volumeattachments/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1alpha1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_volume_attachment(self, **kwargs):
"""
list or watch objects of kind VolumeAttachment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_volume_attachment(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1alpha1VolumeAttachmentList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_volume_attachment_with_http_info(**kwargs)
else:
(data) = self.list_volume_attachment_with_http_info(**kwargs)
return data
def list_volume_attachment_with_http_info(self, **kwargs):
"""
list or watch objects of kind VolumeAttachment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_volume_attachment_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1alpha1VolumeAttachmentList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_volume_attachment" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1alpha1/volumeattachments', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1VolumeAttachmentList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_volume_attachment(self, name, body, **kwargs):
"""
partially update the specified VolumeAttachment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_volume_attachment(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the VolumeAttachment (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1alpha1VolumeAttachment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_volume_attachment_with_http_info(name, body, **kwargs)
else:
(data) = self.patch_volume_attachment_with_http_info(name, body, **kwargs)
return data
def patch_volume_attachment_with_http_info(self, name, body, **kwargs):
"""
partially update the specified VolumeAttachment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_volume_attachment_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the VolumeAttachment (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1alpha1VolumeAttachment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'body', 'pretty', 'dry_run', 'field_manager', 'force']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_volume_attachment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_volume_attachment`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_volume_attachment`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
if 'force' in params:
query_params.append(('force', params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1alpha1/volumeattachments/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1VolumeAttachment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_volume_attachment(self, name, **kwargs):
"""
read the specified VolumeAttachment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_volume_attachment(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the VolumeAttachment (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1alpha1VolumeAttachment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_volume_attachment_with_http_info(name, **kwargs)
else:
(data) = self.read_volume_attachment_with_http_info(name, **kwargs)
return data
def read_volume_attachment_with_http_info(self, name, **kwargs):
"""
read the specified VolumeAttachment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_volume_attachment_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the VolumeAttachment (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:return: V1alpha1VolumeAttachment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_volume_attachment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_volume_attachment`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1alpha1/volumeattachments/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1VolumeAttachment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_volume_attachment(self, name, body, **kwargs):
"""
replace the specified VolumeAttachment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_volume_attachment(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the VolumeAttachment (required)
:param V1alpha1VolumeAttachment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1alpha1VolumeAttachment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_volume_attachment_with_http_info(name, body, **kwargs)
else:
(data) = self.replace_volume_attachment_with_http_info(name, body, **kwargs)
return data
def replace_volume_attachment_with_http_info(self, name, body, **kwargs):
"""
replace the specified VolumeAttachment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_volume_attachment_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the VolumeAttachment (required)
:param V1alpha1VolumeAttachment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1alpha1VolumeAttachment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_volume_attachment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_volume_attachment`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_volume_attachment`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/storage.k8s.io/v1alpha1/volumeattachments/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1VolumeAttachment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 64.296692 | 1,390 | 0.652641 |
49eb92c54d2dec5992906faf0003895412a6f8e2 | 11,496 | py | Python | menpo/transform/test/h_align_test.py | yuxiang-zhou/menpo | 01deaf3808cbe7a3d9db5542ac9d9f53cd81743a | [
"BSD-3-Clause"
] | 1 | 2021-04-20T00:36:57.000Z | 2021-04-20T00:36:57.000Z | menpo/transform/test/h_align_test.py | yuxiang-zhou/menpo | 01deaf3808cbe7a3d9db5542ac9d9f53cd81743a | [
"BSD-3-Clause"
] | 1 | 2019-03-09T16:01:46.000Z | 2019-03-09T16:01:46.000Z | menpo/transform/test/h_align_test.py | yuxiang-zhou/menpo | 01deaf3808cbe7a3d9db5542ac9d9f53cd81743a | [
"BSD-3-Clause"
] | 1 | 2020-05-01T09:55:57.000Z | 2020-05-01T09:55:57.000Z | import numpy as np
from numpy.testing import assert_allclose, raises
from menpo.shape import PointCloud
from menpo.transform import (Affine, AlignmentAffine,
Similarity, AlignmentSimilarity,
Rotation, AlignmentRotation,
Translation, AlignmentTranslation,
UniformScale, AlignmentUniformScale)
# TODO check composition works correctly on all alignment methods
# AFFINE
def test_align_2d_affine():
    # A known affine transform should be recovered exactly by alignment.
    h = np.eye(3)
    h[:2, :2] = [[1, -6], [-3, 2]]
    h[:2, 2] = [7, -8]
    affine = Affine(h)
    src = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    tgt = affine.apply(src)
    estimate = AlignmentAffine(src, tgt)
    assert_allclose(affine.h_matrix, estimate.h_matrix)
def test_align_2d_affine_compose_target():
    # Composing a translation into the transform should update its target.
    src = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    tgt = UniformScale(2.0, n_dims=2).apply(src)
    base = AlignmentAffine(src, tgt)
    composed = base.copy()
    composed.compose_after_from_vector_inplace(np.array([0, 0, 0, 0, 1, 1.]))
    expected = base.compose_after(Translation([1, 1.])).apply(src)
    assert_allclose(composed.target.points, expected.points)
def test_align_2d_affine_set_target():
    # set_target should re-estimate the transform to map source -> target.
    h = np.eye(3)
    h[:2, :2] = [[1, -6], [-3, 2]]
    h[:2, 2] = [7, -8]
    affine = Affine(h)
    src = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    tgt = affine.apply(src)
    estimate = AlignmentAffine(src, src)
    estimate.set_target(tgt)
    assert_allclose(affine.h_matrix, estimate.h_matrix)
def test_align_2d_affine_as_non_alignment():
    # Converting drops the alignment wrapper but keeps the h_matrix.
    h = np.eye(3)
    h[:2, :2] = [[1, -6], [-3, 2]]
    h[:2, 2] = [7, -8]
    affine = Affine(h)
    src = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    tgt = affine.apply(src)  # computed as in the original test (unused)
    estimate = AlignmentAffine(src, src)
    plain = estimate.as_non_alignment()
    assert_allclose(plain.h_matrix, estimate.h_matrix)
    assert type(plain) == Affine
# TODO check from_vector, from_vector_inplace works correctly
# SIMILARITY
def test_align_2d_similarity():
    # A known similarity transform should be recovered by alignment.
    h = np.eye(3)
    h[:2, :2] = [[2, -6], [6, 2]]
    h[:2, 2] = [7, -8]
    similarity = Similarity(h)
    src = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    tgt = similarity.apply(src)
    estimate = AlignmentSimilarity(src, tgt)
    assert_allclose(similarity.h_matrix, estimate.h_matrix)
def test_align_2d_similarity_set_target():
    # set_target should re-estimate the similarity (mirroring allowed).
    h = np.eye(3)
    h[:2, :2] = [[2, -6], [6, 2]]
    h[:2, 2] = [7, -8]
    similarity = Similarity(h)
    src = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    tgt = similarity.apply(src)
    estimate = AlignmentSimilarity(src, src, allow_mirror=True)
    estimate.set_target(tgt)
    assert_allclose(similarity.h_matrix, estimate.h_matrix)
# ROTATION
def test_align_2d_rotation():
    # A 90-degree rotation should be recovered by alignment.
    rotation = Rotation(np.array([[0, 1], [-1, 0]]))
    src = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    tgt = rotation.apply(src)
    estimate = AlignmentRotation(src, tgt)
    assert_allclose(rotation.h_matrix, estimate.h_matrix, atol=1e-14)
def test_align_2d_rotation_allow_mirror():
    # With mirroring allowed the target is matched exactly; without it the
    # closest proper rotation is returned instead.
    s_init = PointCloud(np.array([[-1., 1.], [1., 1.], [1., -1.], [-1., -1.]]))
    s_trg = PointCloud(np.array([[1., -1.], [1., 1.], [-1., 1.], [-1., -1.]]))
    mirrored = AlignmentRotation(s_init, s_trg, allow_mirror=True).apply(s_init)
    assert_allclose(mirrored.points, s_trg.points, atol=1e-14)
    proper = AlignmentRotation(s_init, s_trg, allow_mirror=False).apply(s_init)
    expected = np.array([[-1., -1.], [-1., 1.], [1., 1.], [1., -1.]])
    assert_allclose(proper.points, expected, atol=1e-14)
def test_align_2d_rotation_set_target():
    # set_target should re-estimate the rotation to map source -> target.
    rotation = Rotation(np.array([[0, 1], [-1, 0]]))
    src = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    tgt = rotation.apply(src)
    estimate = AlignmentRotation(src, src)
    estimate.set_target(tgt)
    assert_allclose(rotation.h_matrix, estimate.h_matrix, atol=1e-14)
def test_align_2d_rotation_set_rotation_matrix():
    # Setting the rotation matrix directly should move the target to match.
    rotation = Rotation(np.array([[0, 1], [-1, 0]]))
    src = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    tgt = rotation.apply(src)
    estimate = AlignmentRotation(src, src)
    estimate.set_rotation_matrix(rotation.rotation_matrix)
    assert_allclose(tgt.points, estimate.target.points, atol=1e-14)
# UNIFORM SCALE
def test_align_2d_uniform_scale():
    # A known uniform scale should be recovered by alignment.
    scale = UniformScale(2.5, 2)
    src = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    tgt = scale.apply(src)
    estimate = AlignmentUniformScale(src, tgt)
    assert_allclose(scale.h_matrix, estimate.h_matrix)
def test_align_2d_uniform_scale_set_target():
    # set_target should re-estimate the scale to map source -> target.
    scale = UniformScale(2.5, 2)
    src = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    tgt = scale.apply(src)
    estimate = AlignmentUniformScale(src, src)
    estimate.set_target(tgt)
    assert_allclose(scale.h_matrix, estimate.h_matrix)
# TRANSLATION
def test_align_2d_translation():
    # A known translation should be recovered by alignment.
    translation = Translation(np.array([1, 2]))
    src = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    tgt = translation.apply(src)
    estimate = AlignmentTranslation(src, tgt)
    assert_allclose(translation.h_matrix, estimate.h_matrix)
def test_align_2d_translation_set_target():
    # set_target should re-estimate the translation to map source -> target.
    translation = Translation(np.array([1, 2]))
    src = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    tgt = translation.apply(src)
    estimate = AlignmentTranslation(src, src)
    estimate.set_target(tgt)
    assert_allclose(translation.h_matrix, estimate.h_matrix)
def test_align_2d_translation_from_vector_inplace():
    # Updating the parameter vector in place should update the target.
    t_vec = np.array([1, 2])
    translation = Translation(t_vec)
    src = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    tgt = translation.apply(src)
    estimate = AlignmentTranslation(src, src)
    estimate._from_vector_inplace(t_vec)
    assert_allclose(tgt.points, estimate.target.points)
def test_align_2d_translation_from_vector():
    # from_vector returns a new transform; the original must be untouched.
    t_vec = np.array([1, 2])
    translation = Translation(t_vec)
    src = PointCloud(np.array([[0, 1], [1, 1], [-1, -5], [3, -5]]))
    tgt = translation.apply(src)
    estimate = AlignmentTranslation(src, src)
    new_est = estimate.from_vector(t_vec)
    # The original estimate still maps source onto itself ...
    assert_allclose(estimate.source.points, src.points)
    assert_allclose(estimate.target.points, src.points)
    # ... while the new estimate maps source onto the translated target.
    assert_allclose(new_est.source.points, src.points)
    assert_allclose(new_est.target.points, tgt.points)
| 37.203883 | 79 | 0.560282 |
568cf30358df7417820d9ed0508e3ef4b95ec64e | 4,883 | py | Python | cs15211/PrisonCellsAfterNDays.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | 1 | 2021-07-05T01:53:30.000Z | 2021-07-05T01:53:30.000Z | cs15211/PrisonCellsAfterNDays.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | null | null | null | cs15211/PrisonCellsAfterNDays.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | 1 | 2018-01-08T07:14:08.000Z | 2018-01-08T07:14:08.000Z | __source__ = 'https://leetcode.com/problems/prison-cells-after-n-days/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 957. Prison Cells After N Days
#
# There are 8 prison cells in a row, and each cell is either occupied or vacant.
#
# Each day, whether the cell is occupied or vacant changes according to the following rules:
#
# If a cell has two adjacent neighbors that are both occupied or both vacant,
# then the cell becomes occupied.
# Otherwise, it becomes vacant.
#
# (Note that because the prison is a row,
# the first and the last cells in the row can't have two adjacent neighbors.)
#
# We describe the current state of the prison in the following way:
# cells[i] == 1 if the i-th cell is occupied, else cells[i] == 0.
#
# Given the initial state of the prison,
# return the state of the prison after N days (and N such changes described above.)
#
# Example 1:
#
# Input: cells = [0,1,0,1,1,0,0,1], N = 7
# Output: [0,0,1,1,0,0,0,0]
# Explanation:
# The following table summarizes the state of the prison on each day:
# Day 0: [0, 1, 0, 1, 1, 0, 0, 1]
# Day 1: [0, 1, 1, 0, 0, 0, 0, 0]
# Day 2: [0, 0, 0, 0, 1, 1, 1, 0]
# Day 3: [0, 1, 1, 0, 0, 1, 0, 0]
# Day 4: [0, 0, 0, 0, 0, 1, 0, 0]
# Day 5: [0, 1, 1, 1, 0, 1, 0, 0]
# Day 6: [0, 0, 1, 0, 1, 1, 0, 0]
# Day 7: [0, 0, 1, 1, 0, 0, 0, 0]
#
# Example 2:
#
# Input: cells = [1,0,0,1,0,0,1,0], N = 1000000000
# Output: [0,0,1,1,1,1,1,0]
#
# Note:
#
# cells.length == 8
# cells[i] is in {0, 1}
# 1 <= N <= 10^9
#
import unittest
# 60ms 19.12%
class Solution(object):
    def prisonAfterNDays(self, cells, N):
        """
        Return the state of the 8 prison cells after N days.

        A middle cell becomes occupied (1) the next day iff both of its
        neighbours are currently in the same state; the two end cells have
        only one neighbour and therefore always become vacant (0).

        There are at most 2**8 distinct states, so the day-to-day sequence
        must eventually cycle.  We remember, for each state, how many days
        remained when it was first seen; on the first repeat the difference
        is the cycle length and N is reduced modulo it, so even
        N = 10**9 terminates after a handful of iterations.

        :type cells: List[int]
        :type N: int
        :rtype: List[int]
        """
        def nextday(state):
            # Cells 0 and 7 always turn vacant (the condition 0 < i < 7 fails).
            # Bug fix: the original used Python-2-only `xrange`.
            return [int(0 < i < 7 and state[i - 1] == state[i + 1])
                    for i in range(8)]

        seen = {}  # state tuple -> days remaining when first encountered
        while N > 0:
            c = tuple(cells)
            if c in seen:
                # seen[c] - N is the cycle length; skip whole cycles at once.
                N %= seen[c] - N
            seen[c] = N
            if N >= 1:
                N -= 1
                cells = nextday(cells)
        return cells
class TestMethods(unittest.TestCase):
    """Minimal smoke test so the module can be executed under unittest."""
    def test_Local(self):
        # Trivial placeholder assertion; always passes.
        self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/prison-cells-after-n-days/solution/
#
# BruteForce
# TLE
class Solution {
public int[] prisonAfterNDays(int[] cells, int N) {
while (N > 0) {
N--;
int[] cells2 = new int[8];
for (int i = 1; i < 7; ++i)
cells2[i] = cells[i - 1] == cells[i + 1] ? 1 : 0;
cells = cells2;
}
return cells;
}
}
Approach 1: Simulation
Complexity Analysis
Time Complexity: O(2^N), where N is the number of cells in the prison.
Space Complexity: O(2^N * N)
# 18ms 33.75%
class Solution {
public int[] prisonAfterNDays(int[] cells, int N) {
Map<Integer, Integer> seen = new HashMap();
// state = integer representing state of prison
int state = 0;
for (int i = 0; i < 8; ++i) {
if (cells[i] > 0)
state ^= 1 << i;
}
// While days remaining, simulate a day
while (N > 0) {
// If this is a cycle, fast forward by
// seen.get(state) - N, the period of the cycle.
if (seen.containsKey(state)) {
N %= seen.get(state) - N;
}
seen.put(state, N);
if (N >= 1) {
N--;
state = nextDay(state);
}
}
// Convert the state back to the required answer.
int[] ans = new int[8];
for (int i = 0; i < 8; ++i) {
if (((state >> i) & 1) > 0) {
ans[i] = 1;
}
}
return ans;
}
public int nextDay(int state) {
int ans = 0;
// We only loop from 1 to 6 because 0 and 7 are impossible,
// as those cells only have one neighbor.
for (int i = 1; i <= 6; ++i) {
if (((state >> (i-1)) & 1) == ((state >> (i+1)) & 1)) {
ans ^= 1 << i;
}
}
return ans;
}
}
https://leetcode.com/problems/prison-cells-after-n-days/discuss/205684/JavaPython-Find-the-Loop-or-Mod-14
Well, the length of loop can be 1, 7, or 14.
So once we enter the loop, every 14 steps must be the same state.
The length of cells is even,
so for any state, we can find a previous state.
So all states are in a loop.
# 9ms 97.09%
class Solution {
public int[] prisonAfterNDays(int[] cells, int N) {
for (N = (N - 1) % 14 + 1; N > 0; N--) {
int[] cells2 = new int[8];
for (int i = 1; i < 7; i++) {
cells2[i] = cells[i - 1] == cells[i + 1] ? 1: 0;
}
cells = cells2;
}
return cells;
}
}
'''
| 27.587571 | 105 | 0.5171 |
10c4b2d29f9bfe72969dddcaaafa11bb08ab6d12 | 2,224 | py | Python | libs/ConfigHelpers.py | loi219/RootTheBox | e17d5420b71d313d6f5f5e69e3f83defb34578a6 | [
"Apache-2.0"
] | 2 | 2020-04-19T18:50:40.000Z | 2020-09-19T18:37:10.000Z | libs/ConfigHelpers.py | loi219/RootTheBox | e17d5420b71d313d6f5f5e69e3f83defb34578a6 | [
"Apache-2.0"
] | null | null | null | libs/ConfigHelpers.py | loi219/RootTheBox | e17d5420b71d313d6f5f5e69e3f83defb34578a6 | [
"Apache-2.0"
] | 1 | 2020-02-13T12:06:27.000Z | 2020-02-13T12:06:27.000Z | import logging
import imghdr
import hashlib
from base64 import b64decode
from tornado.options import options
from datetime import datetime
from past.builtins import basestring
from libs.XSSImageCheck import is_xss_image
from libs.ValidationError import ValidationError
def save_config():
    """Serialize the current tornado ``options`` back into the config file.

    Writes a banner, a documentation link and a timestamp, then one
    ``key = value`` line per option, grouped by option group.  String values
    are quoted; everything else relies on its ``__str__``.
    """
    logging.info("Saving current config to: %s" % options.config)
    # Resolve the string type once up front (py2 `basestring` vs py3 `str`)
    # instead of re-evaluating the try/except for every option.
    try:
        # python2
        string_types = basestring
    except NameError:
        # python 3
        string_types = str
    with open(options.config, "w") as fp:
        fp.write("##########################")
        fp.write(" Root the Box Config File ")
        fp.write("##########################\n")
        fp.write(
            "# Documentation: %s\n"
            % "https://github.com/moloch--/RootTheBox/wiki/Configuration-File-Details"
        )
        fp.write("# Last updated: %s\n" % datetime.now())
        for section in options.groups():
            # Shitty work around for Tornado 4.1
            if "rootthebox.py" in section.lower() or section == "":
                continue
            fp.write("\n# [ %s ]\n" % section.title())
            for name, value in list(options.group_dict(section).items()):
                if isinstance(value, string_types):
                    # Str/Unicode needs to have quotes
                    fp.write('%s = "%s"\n' % (name, value))
                else:
                    # Int/Bool/List use __str__
                    fp.write("%s = %s\n" % (name, value))
def save_config_image(b64_data):
    """Decode a base64 image, validate it, and store it under files/story/.

    Returns the relative file name on success; raises ValidationError when
    the payload is too large, has an unsupported format, or looks like an
    XSS-carrying image.
    """
    image_data = bytearray(b64decode(b64_data))
    # Reject anything of 4 MiB or more before doing further work.
    if len(image_data) >= (2048 * 2048):
        raise ValidationError("The image is too large")
    ext = imghdr.what("", h=image_data)
    if ext not in ["png", "jpeg", "gif", "bmp"] or is_xss_image(image_data):
        raise ValidationError(
            "Invalid image format, avatar must be: .png .jpeg .gif or .bmp"
        )
    # Content-addressed name: SHA-1 of the bytes plus the sniffed extension.
    file_name = "story/%s.%s" % (hashlib.sha1(image_data).hexdigest(), ext)
    with open("files/" + file_name, "wb") as fp:
        fp.write(image_data)
    return file_name
| 36.459016 | 86 | 0.53732 |
34e55bbdaf8965f9795b02888443f43e232a88cc | 2,095 | py | Python | setup.py | sethwoodworth/python-prompt-toolkit | dc3223534f224dc3bb37c108f271f57b2fba96d1 | [
"BSD-3-Clause"
] | 1 | 2020-03-12T06:45:06.000Z | 2020-03-12T06:45:06.000Z | setup.py | sethwoodworth/python-prompt-toolkit | dc3223534f224dc3bb37c108f271f57b2fba96d1 | [
"BSD-3-Clause"
] | null | null | null | setup.py | sethwoodworth/python-prompt-toolkit | dc3223534f224dc3bb37c108f271f57b2fba96d1 | [
"BSD-3-Clause"
] | 1 | 2019-06-09T23:34:42.000Z | 2019-06-09T23:34:42.000Z | #!/usr/bin/env python
import os
import re
from setuptools import find_packages, setup
# Load the long description for PyPI from the README next to this setup.py.
with open(os.path.join(os.path.dirname(__file__), "README.rst")) as f:
    long_description = f.read()
def get_version(package):
    """
    Return package version as listed in `__version__` in `__init__.py`.
    """
    init_path = os.path.join(os.path.dirname(__file__), package, "__init__.py")
    with open(init_path, "rb") as handle:
        source = handle.read().decode("utf-8")
    match = re.search("__version__ = ['\"]([^'\"]+)['\"]", source)
    return match.group(1)
# Package metadata; the runtime dependency list is intentionally minimal.
setup(
    name="prompt_toolkit",
    author="Jonathan Slenders",
    version=get_version("prompt_toolkit"),
    url="https://github.com/prompt-toolkit/python-prompt-toolkit",
    description="Library for building powerful interactive command lines in Python",
    long_description=long_description,
    long_description_content_type="text/x-rst",
    packages=find_packages("."),
    install_requires=["wcwidth",],
    # We require Python 3.6.1 for two reasons:
    # - Syntax for variable annotations - PEP 526.
    # - Asynchronous generators - PEP 525.
    # Also, 3.6.0 doesn't have `typing.AsyncGenerator` yet. 3.6.1 does.
    # Python 3.7 is suggested, because:
    # - Context variables - PEP 567
    #   (The current application is derived from a context variable.)
    # There is no intention to support Python 3.5, because prompt_toolkit 2.0
    # does run fine on any older Python version starting from Python 2.6, and
    # it is possible to write code that runs both against prompt_toolkit
    # version 2 and 3.
    python_requires=">=3.6.1",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python",
        "Topic :: Software Development",
    ],
)
| 37.410714 | 84 | 0.656802 |
61647f020901a9446b9dc7ac7641948d548ebd02 | 32,541 | py | Python | syllogistic/2020_bischofberger/modular_models/models/basic_models/psycop.py | monthie/cogmods | 62af4b8bf2effb77f26a8877d6a89949164d83f0 | [
"MIT"
] | null | null | null | syllogistic/2020_bischofberger/modular_models/models/basic_models/psycop.py | monthie/cogmods | 62af4b8bf2effb77f26a8877d6a89949164d83f0 | [
"MIT"
] | 11 | 2020-05-04T09:05:29.000Z | 2021-04-08T13:22:34.000Z | syllogistic/2020_bischofberger/modular_models/models/basic_models/psycop.py | monthie/cogmods | 62af4b8bf2effb77f26a8877d6a89949164d83f0 | [
"MIT"
] | 12 | 2020-05-02T09:36:14.000Z | 2021-06-22T08:10:45.000Z | # coding=utf-8
import os
import random
import sys
from collections import namedtuple
from enum import Enum
import ccobra
from anytree import AnyNode, LevelOrderIter
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../..")))
from modular_models.util import sylutil
from modular_models.models.basic_models.interface import SyllogisticReasoningModel
class PSYCOP(SyllogisticReasoningModel):
""" PSYCOP model according to Rips (1994). """
    def __init__(self):
        """Set default parameter values and the grid searched during fitting."""
        SyllogisticReasoningModel.__init__(self)
        # Propensity to guess instead of replying NVC if no conclusion is found
        self.params["guess"] = 0.0
        # Whether or not existential implicatures are added to the forward propositions
        self.params["premise_implicatures_existential"] = True
        # Whether or not gricean implicatures are added to the forward propositions
        self.params["premise_implicatures_grice"] = True
        # Whether or not proving conclusion implicatures is required to prove a conclusion
        self.params["conclusion_implicatures"] = False
        # Availability of rules
        self.params["rule_transitivity"] = True
        self.params["rule_exclusivity"] = True
        self.params["rule_conversion"] = True
        self.params["rule_fw_and_elimination"] = True
        self.params["rule_bw_and_introduction"] = True
        self.params["rule_bw_conjunctive_syllogism"] = True
        self.params["rule_bw_if_elimination"] = True
        self.params["rule_bw_not_introduction"] = True
        # Candidate values tried for each parameter when fitting the model.
        self.param_grid["guess"] = [0.0, 1.0]
        self.param_grid["premise_implicatures_existential"] = [True, False]
        self.param_grid["premise_implicatures_grice"] = [True, False]
        self.param_grid["conclusion_implicatures"] = [False, True]
        self.param_grid["rule_transitivity"] = [True, False]
        self.param_grid["rule_exclusivity"] = [True, False]
        self.param_grid["rule_conversion"] = [True, False]
        self.param_grid["rule_fw_and_elimination"] = [True, False]
        self.param_grid["rule_bw_and_introduction"] = [True, False]
        self.param_grid["rule_bw_conjunctive_syllogism"] = [True, False]
        self.param_grid["rule_bw_if_elimination"] = [True, False]
        self.param_grid["rule_bw_not_introduction"] = [True, False]
    class Prop:
        """ abstract representation of a categorical proposition like a syllogistic premise or
        conclusion.

        Example:
            All A are B = Prop(PT.implies, Prop(PT.atomic, Atom("A", 936), None), Prop(PT.atomic, Atom("B", 936), None))
        """
        def __init__(self, type, arg1, arg2):
            # proposition type like atom or conjunction
            self.type = type
            # v1/v2 are child Props, except for atomic nodes where v1 is an
            # Atom and v2 is None; unary negation also leaves v2 as None.
            self.v1 = arg1
            self.v2 = arg2

        def __repr__(self):
            # Atoms render as Pred(var_id): "a"/"â" for (hatted) names, "x"
            # for variables; compound nodes render recursively with the
            # connective spelled out.
            if self.type == PSYCOP.PT.atomic:
                if self.v1.is_name:
                    if self.v1.hat:
                        var = "â"
                    else:
                        var = "a"
                else:
                    var = "x"
                return self.v1.predicate + "(" + var + "_" + str(self.v1.arg_id) + ")"
            elif self.type == PSYCOP.PT.negation:
                return "NOT (" + self.v1.__repr__() + ")"
            elif self.type == PSYCOP.PT.implies:
                return "(" + self.v1.__repr__() + " -> " + self.v2.__repr__() + ")"
            elif self.type == PSYCOP.PT.conjunction:
                return "(" + self.v1.__repr__() + " AND " + self.v2.__repr__() + ")"
# proposition type
PT = Enum("PT", "atomic negation implies conjunction")
""" representation of an atom = predicate + argument. Additional info (hat, name) is required by
PSYCOP, example:
Red(â) = Atom("Red", i, True, True) where i identifies â """
Atom = namedtuple("Atom", "predicate arg_id is_name hat")
# unique identifier for objects
max_id = -1
def get_fresh_id(self):
self.max_id = self.max_id + 1
return self.max_id
def get_atomic_proposition(self, predicate, arg_id, is_name, hat):
return self.Prop(self.PT.atomic, self.Atom(predicate, arg_id, is_name, hat), None)
    def encode_proposition(self, p, hat=False):
        """Translate a premise string like "Aac" (mood + two terms) into a
        Prop tree. All atoms of one proposition share a fresh argument id.

        >>> m = PSYCOP()
        >>> m.encode_proposition("Aac")
        (A(x_0) -> C(x_0))
        >>> m.encode_proposition("Iac")
        (A(a_1) AND C(a_1))
        """
        i = self.get_fresh_id()
        if p[0] == "A":
            # A(x) -> B(x)
            return self.Prop(self.PT.implies,
                             self.get_atomic_proposition(p[1].upper(), i, False, hat),
                             self.get_atomic_proposition(p[2].upper(), i, False, hat))
        elif p[0] == "E":
            # not (A(x) and B(x))
            return self.Prop(self.PT.negation,
                             self.Prop(self.PT.conjunction,
                                       self.get_atomic_proposition(p[1].upper(), i, False, hat),
                                       self.get_atomic_proposition(p[2].upper(), i, False, hat)),
                             None)
        elif p[0] == "I":
            # A(a) and B(a)
            return self.Prop(self.PT.conjunction,
                             self.get_atomic_proposition(p[1].upper(), i, True, hat),
                             self.get_atomic_proposition(p[2].upper(), i, True, hat))
        else:
            # A(a) and not B(a)  (the remaining mood, "O")
            return self.Prop(self.PT.conjunction,
                             self.get_atomic_proposition(p[1].upper(), i, True, hat),
                             self.Prop(self.PT.negation,
                                       self.get_atomic_proposition(p[2].upper(), i, True, hat),
                                       None))
def encode_premises(self, syllogism, ex_implicatures=True, grice_implicatures=False):
""" Encode premises as propositions, possibly adding implicatures """
to = sylutil.term_order(syllogism[2])
premises = []
pr = []
for i in [0, 1]:
pr.append(syllogism[i] + to[i])
pr = sylutil.add_implicatures(pr, existential=ex_implicatures, gricean=grice_implicatures)
for p in pr:
premises.append(self.encode_proposition(p, True))
return premises
    def isomorphic(self, p1, p2, same_nameness=False):
        """ same_nameness = True <-> "notational variant", see p. 197

        Structural equality of two Prop/Atom trees: same shape and same
        predicates; argument ids are ignored.

        NOTE(review): same_nameness is not forwarded into the recursive
        calls, so for Prop-wrapped atoms it has no effect — verify against
        Rips p. 197.

        >>> m = PSYCOP()
        >>> a0 = m.Prop(m.PT.atomic, m.Atom("A", 0, False, False), None)
        >>> a1 = m.Prop(m.PT.atomic, m.Atom("A", 1, False, False), None)
        >>> b = m.Prop(m.PT.atomic, m.Atom("B", 2, False, False), None)
        >>> p1 = m.Prop(m.PT.implies, a0, b)
        >>> p2 = m.Prop(m.PT.implies, a1, b)
        >>> m.isomorphic(p1,p2)
        True
        >>> m.isomorphic(m.Prop(m.PT.negation, p1, None),m.Prop(m.PT.negation, p2, None))
        True
        >>> m.isomorphic(p1,m.Prop(m.PT.negation, p2, None))
        False
        >>> p3 = m.Prop(m.PT.conjunction, a1, b)
        >>> m.isomorphic(p1,p3)
        False
        """
        # Both empty children (e.g. the None slot of unary nodes) match.
        if p1 is None and p2 is None:
            return True
        if p1 is None or p2 is None:
            return False
        # Leaf case: compare predicates (and name-ness if requested).
        if type(p1) is self.Atom and type(p2) is self.Atom:
            if p1.predicate == p2.predicate:
                if same_nameness:
                    if p1.is_name == p2.is_name:
                        return True
                    return False
                return True
            return False
        if type(p1) is self.Atom or type(p2) is self.Atom:
            return False
        # Inner node: same connective, then recurse on both children.
        if p1.type == p2.type:
            return self.isomorphic(p1.v1, p2.v1) and self.isomorphic(p1.v2, p2.v2)
        return False
def contains_isomorphic_proposition(self, domain, p):
for pd in domain:
if self.isomorphic(pd, p):
return True
return False
def atom_prop_replace_properties(self, p, new_arg_id=None, new_is_name=None, new_hat=None):
if new_arg_id is None:
new_arg_id = p.v1.arg_id
if new_is_name is None:
new_is_name = p.v1.is_name
if new_hat is None:
new_hat = p.v1.hat
return self.Prop(self.PT.atomic,
self.Atom(p.v1.predicate, new_arg_id, new_is_name, new_hat), None)
def prop_replace_properties(self, p, new_arg_id=None, new_is_name=None, new_hat=None):
if p.type == self.PT.negation:
return self.Prop(self.PT.negation,
self.atom_prop_replace_properties(p.v1, new_arg_id, new_is_name,
new_hat), None)
return self.atom_prop_replace_properties(p, new_arg_id, new_is_name, new_hat)
    def rule_transitivity(self, p1, p2, domain):
        """ PSYCOP transitivity rule

        From (A(x) -> B(x)) and (B(x) -> C(x)) over variables, derive
        (A(x) -> C(x)) with a fresh argument id, unless `domain` already
        holds an isomorphic proposition.

        >>> m = PSYCOP()
        >>> i = m.get_fresh_id()
        >>> a = m.Prop(m.PT.atomic, m.Atom("A", i, False, False), None)
        >>> b = m.Prop(m.PT.atomic, m.Atom("B", i, False, False), None)
        >>> c = m.Prop(m.PT.atomic, m.Atom("C", i, False, False), None)
        >>> p1 = m.Prop(m.PT.implies, a, b)
        >>> p2 = m.Prop(m.PT.implies, b, c)
        >>> m.rule_transitivity(p1, p2, set())
        [(A(x_1) -> C(x_1))]
        """
        if p1.type == self.PT.implies and p2.type == self.PT.implies:
            if p1.v1.type == self.PT.atomic and p1.v2.type == self.PT.atomic and \
                    p2.v1.type == self.PT.atomic and p2.v2.type == self.PT.atomic:
                # Each implication must relate the same argument on both sides.
                if p1.v1.v1.arg_id == p1.v2.v1.arg_id and p2.v1.v1.arg_id == p2.v2.v1.arg_id:
                    # Only applies to variables, not names.
                    if not p1.v1.v1.is_name and not p1.v2.v1.is_name and not p2.v1.v1.is_name and not p2.v2.v1.is_name:
                        # Middle term must match: consequent of p1 == antecedent of p2.
                        if p1.v2.v1.predicate == p2.v1.v1.predicate:
                            i = self.get_fresh_id()
                            p = self.Prop(self.PT.implies,
                                          self.atom_prop_replace_properties(p1.v1, i),
                                          self.atom_prop_replace_properties(p2.v2, i))
                            if not self.contains_isomorphic_proposition(domain, p):
                                return [p]
        return []
    def rule_exclusivity(self, p1, p2, domain):
        """ PSYCOP exclusivity rule

        From (A(x) -> B(x)) and NOT(B(x) AND C(x)) over variables, derive
        NOT(A(x) AND C(x)) with a fresh argument id, unless `domain`
        already holds an isomorphic proposition.

        >>> m = PSYCOP()
        >>> i = m.get_fresh_id()
        >>> j = m.get_fresh_id()
        >>> ai = m.Prop(m.PT.atomic, m.Atom("A", i, False, False), None)
        >>> bi = m.Prop(m.PT.atomic, m.Atom("B", i, False, False), None)
        >>> bj = m.Prop(m.PT.atomic, m.Atom("B", j, False, False), None)
        >>> cj = m.Prop(m.PT.atomic, m.Atom("C", j, False, False), None)
        >>> p1 = m.Prop(m.PT.implies, ai, bi)
        >>> p2 = m.Prop(m.PT.negation, m.Prop(m.PT.conjunction, bj, cj), None)
        >>> m.rule_exclusivity(p1, p2, set())
        [NOT ((A(x_2) AND C(x_2)))]
        """
        if p1.type == self.PT.implies and p2.type == self.PT.negation:
            if p2.v1.type == self.PT.conjunction:
                if p1.v1.type == self.PT.atomic and p1.v2.type == self.PT.atomic:
                    if p2.v1.v1.type == self.PT.atomic and p2.v1.v2.type == self.PT.atomic:
                        # Same argument within each premise.
                        if p1.v1.v1.arg_id == p1.v2.v1.arg_id and p2.v1.v1.v1.arg_id == p2.v1.v2.v1.arg_id:
                            # Variables only.
                            if not p1.v1.v1.is_name and not p1.v2.v1.is_name and not p2.v1.v1.v1.is_name and not p2.v1.v2.v1.is_name:
                                # Middle term: consequent of p1 == left conjunct of p2.
                                if p1.v2.v1.predicate == p2.v1.v1.v1.predicate:
                                    i = self.get_fresh_id()
                                    p = self.Prop(self.PT.negation,
                                                  self.Prop(self.PT.conjunction,
                                                            self.atom_prop_replace_properties(p1.v1,
                                                                                              i),
                                                            self.atom_prop_replace_properties(
                                                                p2.v1.v2, i)),
                                                  None)
                                    if not self.contains_isomorphic_proposition(domain, p):
                                        return [p]
        return []
    def rule_conversion(self, p, domain):
        """ PSYCOP conversion rule

        From NOT(A(x) AND B(x)) derive NOT(B(x) AND A(x)) with a fresh
        argument id, unless `domain` already holds an isomorphic
        proposition.

        >>> m = PSYCOP()
        >>> i = m.get_fresh_id()
        >>> a = m.Prop(m.PT.atomic, m.Atom("A", i, False, False), None)
        >>> b = m.Prop(m.PT.atomic, m.Atom("B", i, False, False), None)
        >>> p = m.Prop(m.PT.negation, m.Prop(m.PT.conjunction, a, b), None)
        >>> m.rule_conversion(p, set())
        [NOT ((B(x_1) AND A(x_1)))]
        """
        if p.type == self.PT.negation:
            if p.v1.type == self.PT.conjunction:
                if p.v1.v1.type == self.PT.atomic and p.v1.v2.type == self.PT.atomic:
                    i = self.get_fresh_id()
                    # Swap the two conjuncts under the negation.
                    p_new = self.Prop(self.PT.negation,
                                      self.Prop(self.PT.conjunction,
                                                self.atom_prop_replace_properties(p.v1.v2, i),
                                                self.atom_prop_replace_properties(p.v1.v1, i)),
                                      None)
                    if not self.contains_isomorphic_proposition(domain, p_new):
                        return [p_new]
        return []
def get_leftmost_atom(self, p):
""" Returns leftmost atom in p. """
if p.type == self.PT.atomic:
return p.v1
else:
return self.get_leftmost_atom(p.v1)
    def matching(self, p, g):
        """True iff proposition `p` can be matched against goal `g`:
        structurally isomorphic, and their arguments are compatible per
        PSYCOP's matching conditions (numbered cases below follow the
        original implementation's labels).
        """
        if self.isomorphic(p, g):
            # note: the leftmost atom is equal to any atom in the proposition
            pa, ga = self.get_leftmost_atom(p), self.get_leftmost_atom(g)
            if pa == ga:
                # Propositions are equal
                return True
            if not pa.is_name and not ga.is_name:
                # Matching 1: variable against variable.
                return True
            if pa.is_name and ga.is_name and not ga.hat:
                # Matching 2: name against unhatted name.
                return True
            if not pa.is_name and ga.is_name:
                # Matching 4: variable against name.
                return True  # ?
        return False
def rule_forward_and_elimination(self, p):
if p.type == self.PT.conjunction:
return [p.v1, p.v2]
return []
    def rule_backward_and_introduction(self, g):
        """Backward AND-introduction: to prove g = P AND Q, prove both
        conjuncts — structurally the same decomposition as forward
        AND-elimination, so the implementation is reused.
        """
        return self.rule_forward_and_elimination(g)
    def rule_backward_conjunctive_syllogism(self, p, g):
        """Backward conjunctive syllogism: given p = NOT(A AND B) and the
        goal g = NOT(A), the remaining conjunct B (instantiated with g's
        argument properties) becomes a new subgoal, and symmetrically for
        g = NOT(B).

        >>> m = PSYCOP()
        >>> i = m.get_fresh_id()
        >>> a = m.Prop(m.PT.atomic, m.Atom("A", i, False, False), None)
        >>> b = m.Prop(m.PT.atomic, m.Atom("B", i, False, False), None)
        >>> prop = m.Prop(m.PT.negation, m.Prop(m.PT.conjunction, a, b), None)
        >>> m.rule_backward_conjunctive_syllogism(prop, m.Prop(m.PT.negation, a, None))
        [B(x_0)]
        """
        if g.type == self.PT.negation and p.type == self.PT.negation:
            # g = NOT(A(x))
            if p.v1.type == self.PT.conjunction:
                # p = NOT(A(x) AND B(x))
                if self.matching(p.v1.v1, g.v1):
                    return [self.atom_prop_replace_properties(p.v1.v2, new_arg_id=g.v1.v1.arg_id,
                                                              new_is_name=g.v1.v1.is_name,
                                                              new_hat=g.v1.v1.hat)]
                elif self.matching(p.v1.v2, g.v1):
                    return [self.atom_prop_replace_properties(p.v1.v1, new_arg_id=g.v1.v1.arg_id,
                                                              new_is_name=g.v1.v1.is_name,
                                                              new_hat=g.v1.v1.hat)]
        return []
def rule_backward_if_elimination(self, p, g):
"""
>>> m = PSYCOP()
>>> i = m.get_fresh_id()
>>> a = m.Prop(m.PT.atomic, m.Atom("A", i, False, False), None)
>>> b = m.Prop(m.PT.atomic, m.Atom("B", i, False, False), None)
>>> m.rule_backward_if_elimination(m.Prop(m.PT.implies, a, b), b)
[A(x_0)]
"""
if p.type == self.PT.implies:
# p = IF A(x) THEN B(x)
if self.matching(p.v2, g):
return [self.atom_prop_replace_properties(p.v1, new_arg_id=g.v1.arg_id,
new_is_name=g.v1.is_name,
new_hat=g.v1.hat)]
return None
    def rule_backward_not_introduction(self, g):
        """Backward NOT-introduction (reductio): to prove g = NOT P, suppose
        P and try to derive a contradiction s AND NOT s for some atomic
        subformula s.  Returns (supposition, subgoals) or (None, None).

        Relies on self.subformulas having been set by the caller
        (predict / conclusions_positive_checks do this before proving).
        """
        new_subgoals = []
        if g.type == self.PT.negation:
            # Only attempt the reductio if the supposed P occurs (as a
            # notational variant) among the known atomic subformulas.
            if any(self.isomorphic(g.v1, s, True) for s in self.subformulas):
                for s in self.subformulas:
                    new_subgoals.append(
                        self.Prop(self.PT.conjunction, s, self.Prop(self.PT.negation, s, None)))
                new_subgoals = self.remove_duplicates(new_subgoals)
                return g.v1, new_subgoals
        return None, None
def tentative_conclusion_mood(self, syllogism):
if "E" in syllogism:
return "E"
elif "O" in syllogism:
return "O"
elif "I" in syllogism:
return "I"
return "A"
def flatten(self, list):
return [element for sublist in list for element in sublist]
    def apply_backward_rules(self, fw_propositions, g_node):
        """Apply all enabled backward rules to the goal in `g_node`.

        Returns four parallel lists: the new subgoals, the forward
        proposition each match came from (None for rule-generated
        subgoals), a tag naming the producing rule, and the supposition
        attached to each subgoal (only NOT-introduction produces one).
        """
        g = g_node.goal
        new_subgoals = []
        by_which_rule = []
        matched_propositions = []
        suppositions = []
        # Direct matches of the goal against known propositions.
        for p in fw_propositions:
            if self.matching(p, g):
                matched_propositions.append(p)
                new_subgoals.append(p)
                by_which_rule.append("by-match")
                suppositions.append(None)
        if self.params["rule_bw_and_introduction"]:
            r = self.rule_backward_and_introduction(g)  # applies iff g = P AND Q
            if r:
                new_subgoals.extend(r)
                matched_propositions.extend([None] * len(r))
                by_which_rule.extend(["by-ai"] * len(r))
                suppositions.extend([None] * len(r))
        for p in fw_propositions:
            if self.params["rule_bw_conjunctive_syllogism"]:
                r = self.rule_backward_conjunctive_syllogism(p, g)  # applies iff g = NOT P
                if r:
                    new_subgoals.extend(r)
                    matched_propositions.extend([None] * len(r))
                    by_which_rule.extend(["by-cs"] * len(r))
                    suppositions.extend([None] * len(r))
            if self.params["rule_bw_if_elimination"]:
                r = self.rule_backward_if_elimination(p, g)  # applies iff p = IF P THEN g => g = A(x)
                if r:
                    new_subgoals.extend(r)
                    matched_propositions.extend([None] * len(r))
                    by_which_rule.extend(["by-ie"] * len(r))
                    suppositions.extend([None] * len(r))
        # NOT-introduction is only tried when no supposition is active yet.
        if not g_node.suppositions:
            if self.params["rule_bw_not_introduction"]:
                supposition, r = self.rule_backward_not_introduction(g)  # g = NOT P
                if r:
                    new_subgoals.extend(r)
                    matched_propositions.extend([None] * len(r))
                    by_which_rule.extend(["by-ni"] * len(r))
                    suppositions.extend([supposition] * len(r))
        return new_subgoals, matched_propositions, by_which_rule, suppositions
    def solve_disjunctive_tree(self, root_node, fw_propositions, right_conjunct=None):
        """Breadth-first backward search: the goal is proven if ANY branch
        of the subgoal tree succeeds (disjunctive semantics).

        When called on the left conjunct of a conjunction, `right_conjunct`
        is given and every successful match also records an instantiated
        alternative for the right conjunct.  Returns
        (branch_satisfied, right_conjunct_alternatives).
        """
        right_conjunct_alternatives = None
        if right_conjunct is not None:
            right_conjunct_alternatives = []
        root = AnyNode(goal=root_node.goal, exhausted=False, suppositions=root_node.suppositions)
        current_node = root
        branch_sat = False
        while True:
            if current_node.goal.type == self.PT.conjunction:
                # Conjunctive goals are delegated to the conjunction solver.
                current_node.exhausted = True
                if self.solve_conjunction_tree(current_node, fw_propositions):
                    branch_sat = True
            else:
                new_subgoals, matched_props, b, suppositions = self.apply_backward_rules(
                    fw_propositions + current_node.suppositions, current_node)
                current_node.exhausted = True
                for i, sg in enumerate(new_subgoals):
                    mp = matched_props[i]
                    supp = suppositions[i]
                    if mp == current_node.goal:
                        # Direct match closes this branch.
                        pa = self.get_leftmost_atom(mp)
                        if right_conjunct is not None:
                            right_conjunct_alternatives.append(
                                self.prop_replace_properties(right_conjunct.goal, pa.arg_id,
                                                             pa.is_name, pa.hat))
                        branch_sat = True
                    elif all(m != current_node.goal for m in matched_props):
                        # No direct match anywhere: expand with new subgoal nodes.
                        if supp is None:
                            supp = []
                        else:
                            supp = [supp]
                        AnyNode(goal=sg, parent=current_node, exhausted=False,
                                suppositions=supp + current_node.suppositions)
            # Pick the next unexhausted node in level order; stop when none remain.
            for c in LevelOrderIter(root):
                if not c.exhausted:
                    current_node = c
                    break
            if current_node.exhausted:
                return branch_sat, right_conjunct_alternatives
    def solve_conjunction_tree(self, conjunction_node, fw_propositions):
        """Prove a conjunctive goal: either match the whole conjunction
        directly, or prove the left conjunct and then prove the right
        conjunct under each instantiation produced by the left proof.
        """
        root = AnyNode(goal=conjunction_node.goal, exhausted=True,
                       suppositions=conjunction_node.suppositions)
        current_node = root
        # the arguments of root.goal are either atomic or negation
        new_subgoals, matched_props, _, _ = self.apply_backward_rules(fw_propositions, current_node)
        if any(p is not None for p in matched_props):
            # direct match of the conjunction
            return True
        if len(new_subgoals) != 2:
            return False  # ?
        left_conjunct = AnyNode(goal=new_subgoals[0], parent=root, exhausted=False,
                                suppositions=root.suppositions)
        right_conjunct = AnyNode(goal=new_subgoals[1], parent=root, exhausted=True,
                                 suppositions=root.suppositions)
        left_branch_sat, conjunct2_alternatives = self.solve_disjunctive_tree(left_conjunct,
                                                                              fw_propositions,
                                                                              right_conjunct)
        if not left_branch_sat:
            return False
        # Try the right conjunct under each instantiation found on the left.
        for c in conjunct2_alternatives:
            alternative_node = AnyNode(goal=c, suppositions=root.suppositions)
            right_branch_sat, _ = self.solve_disjunctive_tree(alternative_node, fw_propositions)
            if right_branch_sat:
                return True
        return False
def run_backward_rules(self, fw_propositions, conclusion):
ret, _ = self.solve_disjunctive_tree(AnyNode(goal=conclusion, suppositions=[]),
fw_propositions, None)
return ret
    def remove_duplicates(self, propositions):
        """ Removes isomorphic propositions where both involve variables """
        propositions_copy = list(propositions)
        uniques = []
        while True:
            duplicates = []
            if len(propositions_copy) == 0:
                return uniques
            # Keep the first remaining proposition; drop everything that is
            # isomorphic to it, provided neither side involves names.
            p1 = propositions_copy[0]
            for p2 in propositions_copy:
                if self.isomorphic(p1, p2):
                    if not (self.get_leftmost_atom(p1).is_name or self.get_leftmost_atom(
                            p2).is_name):
                        duplicates.append(p2)
            uniques.append(p1)
            propositions_copy.remove(p1)
            # List comprehension used purely for its removal side effect.
            [propositions_copy.remove(x) for x in duplicates if x in propositions_copy]
def run_forward_rules(self, fw_propositions):
while True:
new_propositions = []
for p1 in fw_propositions:
for p2 in fw_propositions:
if self.params["rule_fw_and_elimination"]:
new_propositions.extend(self.rule_forward_and_elimination(p1))
if self.params["rule_transitivity"]:
new_propositions.extend(self.rule_transitivity(p1, p2, fw_propositions))
if self.params["rule_exclusivity"]:
new_propositions.extend(self.rule_exclusivity(p1, p2, fw_propositions))
if self.params["rule_exclusivity"]:
new_propositions.extend(self.rule_conversion(p1, fw_propositions))
if set(fw_propositions) == set(fw_propositions + new_propositions):
# exhausted all possibilities: no more rules apply.
break
fw_propositions = sylutil.uniquify_keep_order(fw_propositions + new_propositions)
return self.remove_duplicates(fw_propositions)
    def proposition_to_string(self, p):
        """Map a Prop tree back to a syllogistic string like "Aab", or None
        if the tree does not have one of the four canonical shapes.
        """
        if p.type == self.PT.negation:
            # E: NOT(A(x) AND B(x)) over variables.
            if p.v1.type == self.PT.conjunction:
                if p.v1.v1.type == self.PT.atomic and p.v1.v2.type == self.PT.atomic:
                    if not p.v1.v1.v1.is_name and not p.v1.v2.v1.is_name:
                        return "E" + p.v1.v1.v1.predicate.lower() + p.v1.v2.v1.predicate.lower()
        elif p.type == self.PT.conjunction:
            if p.v1.type == self.PT.atomic:
                # I: A(a) AND B(a) over names.
                if p.v2.type == self.PT.atomic:
                    if p.v1.v1.is_name and p.v2.v1.is_name:
                        return "I" + p.v1.v1.predicate.lower() + p.v2.v1.predicate.lower()
                # O: A(a) AND NOT B(a) over names.
                elif p.v2.type == self.PT.negation:
                    if p.v2.v1.type == self.PT.atomic:
                        if p.v1.v1.is_name and p.v2.v1.v1.is_name:
                            return "O" + p.v1.v1.predicate.lower() + p.v2.v1.v1.predicate.lower()
        elif p.type == self.PT.implies:
            # A: A(x) -> B(x) over variables.
            if p.v1.type == self.PT.atomic and p.v2.type == self.PT.atomic:
                if not p.v1.v1.is_name and not p.v2.v1.is_name:
                    return "A" + p.v1.v1.predicate.lower() + p.v2.v1.predicate.lower()
        return None
    def extract_ac_conclusions(self, propositions):
        """Keep only propositions that decode to conclusions relating the
        end terms a and c (in either order).

        >>> m = PSYCOP()
        >>> i = m.get_fresh_id()
        >>> a = m.Prop(m.PT.atomic, m.Atom("A", i, False, False), None)
        >>> b = m.Prop(m.PT.atomic, m.Atom("B", i, False, False), None)
        >>> c = m.Prop(m.PT.atomic, m.Atom("C", i, False, False), None)
        >>> p1 = m.Prop(m.PT.implies, a, b)
        >>> p2 = m.Prop(m.PT.implies, b, c)
        >>> p3 = m.Prop(m.PT.implies, a, c)
        >>> m.extract_ac_conclusions({p1, p2, p3})
        ['Aac']
        >>> m.extract_ac_conclusions({p1, p2})
        []
        """
        prop_ac = []
        for p in propositions:
            s = self.proposition_to_string(p)
            if s is not None:
                # s[1:] are the two term letters; order-insensitive check.
                if {s[1], s[2]} == {"a", "c"}:
                    prop_ac.append(s)
        return prop_ac
def extract_atomic_subformulas(self, p):
if p.type == self.PT.atomic:
return [p]
elif p.type == self.PT.negation:
return self.extract_atomic_subformulas(p.v1)
else:
return self.extract_atomic_subformulas(p.v1) + self.extract_atomic_subformulas(p.v2)
def extract_all_atomic_subformulas(self, propositions):
subformulas = []
for p in propositions:
subformulas.extend(self.extract_atomic_subformulas(p))
return subformulas
def heuristic(self, syllogism):
return {"AA": "A",
"AI": "I",
"AE": "E",
"AO": "O",
"EI": "E",
"EE": "E",
"EO": "E",
"II": "I",
"IO": "O",
"OO": "O",
}[''.join(sorted(syllogism[:2]))]
    def conclusions_positive_checks(self, syllogism, additional_premises=[]):
        """Return every conclusion (out of the eight candidates) that is
        derivable by forward rules or provable by backward search; ["NVC"]
        if none is.  `additional_premises` are premise strings appended
        before reasoning.

        NOTE: the mutable default argument is safe here because it is only
        iterated, never mutated.
        """
        premises = self.encode_premises(syllogism,
                                        ex_implicatures=self.params["premise_implicatures_existential"],
                                        grice_implicatures=self.params["premise_implicatures_grice"])
        for p in additional_premises:
            premises.append(self.encode_proposition(p, True))
        # 1. Try to get conclusions by applying forward rules
        fw_propositions = self.run_forward_rules(premises)
        fw_conclusions = []
        for prop in fw_propositions:
            for c in ccobra.syllogistic.RESPONSES:
                conclusion = self.encode_proposition(c, hat=False)
                if self.proposition_to_string(conclusion) == self.proposition_to_string(prop):
                    fw_conclusions.append(c)
        checked_conclusions = fw_conclusions
        # 2. Try to prove each remaining candidate with the backward rules.
        for concl in ccobra.syllogistic.RESPONSES:
            tc_enc = self.encode_proposition(concl, hat=False)
            self.subformulas = self.extract_all_atomic_subformulas(premises + [tc_enc])
            success = self.run_backward_rules(fw_propositions, tc_enc)
            if success:
                checked_conclusions.append(concl)
        checked_conclusions = checked_conclusions if len(checked_conclusions) != 0 else ["NVC"]
        return checked_conclusions
    def predict(self, syllogism):
        """Predict conclusion(s) for a syllogism: forward derivations first;
        otherwise try to prove a heuristic tentative conclusion backward;
        otherwise guess (with probability params["guess"]) or answer NVC.
        """
        premises = self.encode_premises(syllogism,
                                        ex_implicatures=self.params["premise_implicatures_existential"],
                                        grice_implicatures=self.params["premise_implicatures_grice"])
        # 1. Try to get conclusions by applying forward rules
        fw_propositions = self.run_forward_rules(premises)
        fw_conclusions = []
        for prop in fw_propositions:
            for c in ccobra.syllogistic.RESPONSES:
                conclusion = self.encode_proposition(c, hat=False)
                if self.proposition_to_string(conclusion) == self.proposition_to_string(prop):
                    fw_conclusions.append(c)
        if len(fw_conclusions) != 0:
            return fw_conclusions
        # 2. Backward-prove a tentative conclusion; term order is random.
        ac = "ac" if random.random() < 0.5 else "ca"
        tentative_conclusion = self.heuristic(syllogism) + ac
        tc_enc = self.encode_proposition(tentative_conclusion, hat=False)
        self.subformulas = self.extract_all_atomic_subformulas(premises + [tc_enc])
        success = self.run_backward_rules(fw_propositions, tc_enc)
        if success:
            if self.params["conclusion_implicatures"]:
                # Additionally require the conclusion's implicature to be provable.
                c_impl = sylutil.add_implicatures([tentative_conclusion], True, True)[1]
                conclusion_impl = self.encode_proposition(c_impl, hat=False)
                self.subformulas = self.extract_all_atomic_subformulas(premises + [conclusion_impl])
                success_impl = self.run_backward_rules(fw_propositions, conclusion_impl)
                if success_impl:
                    return [tentative_conclusion]
            else:
                return [tentative_conclusion]
        # 3. Nothing provable: either guess among all responses or reply NVC.
        if random.random() < self.params["guess"]:
            return ["Aac", "Aca", "Iac", "Ica", "Eac", "Eca", "Oac", "Oca"]
        return ["NVC"]
| 45.703652 | 134 | 0.524815 |
583e61ff639070fb6fe875d64d15c06a9de26980 | 1,861 | py | Python | vk_api/enums.py | dypick/vk_api | 718ca74989ceaccb33f268f1afa66c936da700e8 | [
"Apache-2.0"
] | 5 | 2020-03-28T23:31:56.000Z | 2020-08-01T16:51:58.000Z | vk_api/enums.py | TyFoonCS/vk_api | ae6584d5b8a6b5e2593f60289bfd3be823ef6916 | [
"Apache-2.0"
] | 1 | 2021-05-03T19:21:36.000Z | 2021-05-03T19:21:36.000Z | vk_api/enums.py | TyFoonCS/vk_api | ae6584d5b8a6b5e2593f60289bfd3be823ef6916 | [
"Apache-2.0"
] | 4 | 2020-05-10T06:58:10.000Z | 2020-09-03T14:26:49.000Z | # -*- coding: utf-8 -*-
"""
:authors: python273
:license: Apache License, Version 2.0, see LICENSE file
:copyright: (c) 2019 python273
"""
from enum import IntEnum
class VkUserPermissions(IntEnum):
    """
    Enumeration of user access permissions (scopes).

    A combined permission mask is obtained by bitwise OR-ing (x | y)
    the individual permissions.

    See the VK API documentation for details: https://vk.com/dev/permissions
    """

    #: The user allowed sending notifications to them
    #: (for flash/iframe applications).
    #: Does not work with this library.
    NOTIFY = 1

    #: Access to friends.
    FRIEND = 2

    #: Access to photos.
    PHOTOS = 2**2

    #: Access to audio recordings.
    #: Without access to the closed audio API this permission only
    #: allows uploading audio.
    AUDIO = 2**3

    #: Access to video recordings.
    VIDEO = 2**4

    #: Access to stories.
    STORIES = 2**6

    #: Access to wiki pages.
    PAGES = 2**7

    #: Adds a link to the application in the left-hand menu.
    ADD_LINK = 2**8

    #: Access to the user's status.
    STATUS = 2**10

    #: Access to the user's notes.
    NOTES = 2**11

    #: Access to the extended methods for working with messages.
    MESSAGES = 2**12

    #: Access to the basic and extended methods for working with the wall.
    WALL = 2**13

    #: Access to the extended methods of the advertising API.
    ADS = 2**15

    #: Access to the API at any time. Recommended when working with
    #: this library.
    OFFLINE = 2**16

    #: Access to documents.
    DOCS = 2**17

    #: Access to the user's groups.
    GROUPS = 2**18

    #: Access to notifications about replies to the user.
    NOTIFICATIONS = 2**19

    #: Access to statistics of the user's groups and applications for
    #: which they are an administrator.
    STATS = 2**20

    #: Access to the user's email.
    EMAIL = 2**22

    #: Access to market (products).
    MARKET = 2**27
6fd815cf502aa7cc9aa3d464eb3879a274bd16b9 | 47,925 | py | Python | aesara/scan/basic.py | hs2361/aesara | 16f98e4fd69db92e0c2cde9dd97a0d005235deea | [
"BSD-3-Clause"
] | 1 | 2021-11-30T06:38:39.000Z | 2021-11-30T06:38:39.000Z | aesara/scan/basic.py | fonnesbeck/aesara | 02378861f1a77135f2556018630092a09262ea76 | [
"BSD-3-Clause"
] | null | null | null | aesara/scan/basic.py | fonnesbeck/aesara | 02378861f1a77135f2556018630092a09262ea76 | [
"BSD-3-Clause"
] | null | null | null | __docformat__ = "restructedtext en"
__authors__ = (
"Razvan Pascanu "
"Frederic Bastien "
"James Bergstra "
"Pascal Lamblin "
"PyMC Developers"
)
__copyright__ = "(c) 2010, Universite de Montreal"
import logging
from collections import OrderedDict
import numpy as np
import aesara.tensor as aet
from aesara.compile import SharedVariable, ops
from aesara.compile.function import function
from aesara.compile.mode import Mode
from aesara.configdefaults import config
from aesara.graph.basic import Constant, Variable, clone_replace, graph_inputs
from aesara.graph.fg import MissingInputError
from aesara.graph.op import get_test_value
from aesara.graph.utils import TestValueError
from aesara.scan import utils
from aesara.scan.op import Scan
from aesara.scan.utils import safe_new, traverse
from aesara.tensor.exceptions import NotScalarConstantError
from aesara.tensor.math import minimum
from aesara.tensor.shape import shape_padleft
from aesara.tensor.type import TensorType, integer_dtypes
from aesara.updates import OrderedUpdates
_logger = logging.getLogger("aesara.scan.basic")
def scan(
fn,
sequences=None,
outputs_info=None,
non_sequences=None,
n_steps=None,
truncate_gradient=-1,
go_backwards=False,
mode=None,
name=None,
profile=False,
allow_gc=None,
strict=False,
return_list=False,
):
"""This function constructs and applies a Scan op to the provided
arguments.
Parameters
----------
fn
``fn`` is a function that describes the operations involved in one
step of ``scan``. ``fn`` should construct variables describing the
output of one iteration step. It should expect as input aesara
variables representing all the slices of the input sequences
and previous values of the outputs, as well as all other arguments
given to scan as ``non_sequences``. The order in which scan passes
these variables to ``fn`` is the following :
* all time slices of the first sequence
* all time slices of the second sequence
* ...
* all time slices of the last sequence
* all past slices of the first output
* all past slices of the second output
* ...
* all past slices of the last output
* all other arguments (the list given as `non_sequences` to
scan)
The order of the sequences is the same as the one in the list
`sequences` given to scan. The order of the outputs is the same
as the order of ``outputs_info``. For any sequence or output the
order of the time slices is the same as the one in which they have
been given as taps. For example if one writes the following :
.. code-block:: python
scan(fn, sequences = [ dict(input= Sequence1, taps = [-3,2,-1])
, Sequence2
, dict(input = Sequence3, taps = 3) ]
, outputs_info = [ dict(initial = Output1, taps = [-3,-5])
, dict(initial = Output2, taps = None)
, Output3 ]
, non_sequences = [ Argument1, Argument2])
``fn`` should expect the following arguments in this given order:
#. ``Sequence1[t-3]``
#. ``Sequence1[t+2]``
#. ``Sequence1[t-1]``
#. ``Sequence2[t]``
#. ``Sequence3[t+3]``
#. ``Output1[t-3]``
#. ``Output1[t-5]``
#. ``Output3[t-1]``
#. ``Argument1``
#. ``Argument2``
The list of ``non_sequences`` can also contain shared variables
used in the function, though ``scan`` is able to figure those
out on its own so they can be skipped. For the clarity of the
code we recommend though to provide them to scan. To some extend
``scan`` can also figure out other ``non sequences`` (not shared)
even if not passed to scan (but used by `fn`). A simple example of
this would be :
.. code-block:: python
import aesara.tensor as aet
W = aet.matrix()
W_2 = W**2
def f(x):
return aet.dot(x,W_2)
The function is expected to return two things. One is a list of
outputs ordered in the same order as ``outputs_info``, with the
difference that there should be only one output variable per
output initial state (even if no tap value is used). Secondly
`fn` should return an update dictionary (that tells how to
update any shared variable after each iteration step). The
dictionary can optionally be given as a list of tuples. There is
no constraint on the order of these two list, ``fn`` can return
either ``(outputs_list, update_dictionary)`` or
``(update_dictionary, outputs_list)`` or just one of the two (in
case the other is empty).
To use ``scan`` as a while loop, the user needs to change the
function ``fn`` such that also a stopping condition is returned.
To do so, he/she needs to wrap the condition in an ``until`` class.
The condition should be returned as a third element, for example:
.. code-block:: python
...
return [y1_t, y2_t], {x:x+1}, until(x < 50)
Note that a number of steps (considered in here as the maximum
number of steps ) is still required even though a condition is
passed (and it is used to allocate memory if needed). = {}):
sequences
``sequences`` is the list of Aesara variables or dictionaries
describing the sequences ``scan`` has to iterate over. If a
sequence is given as wrapped in a dictionary, then a set of optional
information can be provided about the sequence. The dictionary
should have the following keys:
* ``input`` (*mandatory*) -- Aesara variable representing the
sequence.
* ``taps`` -- Temporal taps of the sequence required by ``fn``.
They are provided as a list of integers, where a value ``k``
impiles that at iteration step ``t`` scan will pass to ``fn``
the slice ``t+k``. Default value is ``[0]``
Any Aesara variable in the list ``sequences`` is automatically
wrapped into a dictionary where ``taps`` is set to ``[0]``
outputs_info
``outputs_info`` is the list of Aesara variables or dictionaries
describing the initial state of the outputs computed
recurrently. When this initial states are given as dictionary
optional information can be provided about the output corresponding
to these initial states. The dictionary should have the following
keys:
* ``initial`` -- Aesara variable that represents the initial
state of a given output. In case the output is not computed
recursively (think of a map) and does not require an initial
state this field can be skipped. Given that (only) the previous
time step of the output is used by ``fn``, the initial state
**should have the same shape** as the output and **should not
involve a downcast** of the data type of the output. If multiple
time taps are used, the initial state should have one extra
dimension that should cover all the possible taps. For example
if we use ``-5``, ``-2`` and ``-1`` as past taps, at step 0,
``fn`` will require (by an abuse of notation) ``output[-5]``,
``output[-2]`` and ``output[-1]``. This will be given by
the initial state, which in this case should have the shape
(5,)+output.shape. If this variable containing the initial
state is called ``init_y`` then ``init_y[0]`` *corresponds to*
``output[-5]``. ``init_y[1]`` *correponds to* ``output[-4]``,
``init_y[2]`` corresponds to ``output[-3]``, ``init_y[3]``
coresponds to ``output[-2]``, ``init_y[4]`` corresponds to
``output[-1]``. While this order might seem strange, it comes
natural from splitting an array at a given point. Assume that
we have a array ``x``, and we choose ``k`` to be time step
``0``. Then our initial state would be ``x[:k]``, while the
output will be ``x[k:]``. Looking at this split, elements in
``x[:k]`` are ordered exactly like those in ``init_y``.
* ``taps`` -- Temporal taps of the output that will be pass to
``fn``. They are provided as a list of *negative* integers,
where a value ``k`` implies that at iteration step ``t`` scan
will pass to ``fn`` the slice ``t+k``.
``scan`` will follow this logic if partial information is given:
* If an output is not wrapped in a dictionary, ``scan`` will wrap
it in one assuming that you use only the last step of the output
(i.e. it makes your tap value list equal to [-1]).
* If you wrap an output in a dictionary and you do not provide any
taps but you provide an initial state it will assume that you are
using only a tap value of -1.
* If you wrap an output in a dictionary but you do not provide any
initial state, it assumes that you are not using any form of
taps.
* If you provide a ``None`` instead of a variable or a empty
dictionary ``scan`` assumes that you will not use any taps for
this output (like for example in case of a map)
If ``outputs_info`` is an empty list or None, ``scan`` assumes
that no tap is used for any of the outputs. If information is
provided just for a subset of the outputs an exception is
raised (because there is no convention on how scan should map
the provided information to the outputs of ``fn``)
non_sequences
``non_sequences`` is the list of arguments that are passed to
``fn`` at each steps. One can opt to exclude variable
used in ``fn`` from this list as long as they are part of the
computational graph, though for clarity we encourage not to do so.
n_steps
``n_steps`` is the number of steps to iterate given as an int
or Aesara scalar. If any of the input sequences do not have
enough elements, scan will raise an error. If the *value is 0* the
outputs will have *0 rows*. If n_steps is not provided, ``scan`` will
figure out the amount of steps it should run given its input
sequences. ``n_steps`` < 0 is not supported anymore.
truncate_gradient
``truncate_gradient`` is the number of steps to use in truncated
BPTT. If you compute gradients through a scan op, they are
computed using backpropagation through time. By providing a
different value then -1, you choose to use truncated BPTT instead
of classical BPTT, where you go for only ``truncate_gradient``
number of steps back in time.
go_backwards
``go_backwards`` is a flag indicating if ``scan`` should go
backwards through the sequences. If you think of each sequence
as indexed by time, making this flag True would mean that
``scan`` goes back in time, namely that for any sequence it
starts from the end and goes towards 0.
name
When profiling ``scan``, it is crucial to provide a name for any
instance of ``scan``. The profiler will produce an overall
profile of your code as well as profiles for the computation of
one step of each instance of ``scan``. The ``name`` of the instance
appears in those profiles and can greatly help to disambiguate
information.
mode
It is recommended to leave this argument to None, especially
when profiling ``scan`` (otherwise the results are not going to
be accurate). If you prefer the computations of one step of
``scan`` to be done differently then the entire function, you
can use this parameter to describe how the computations in this
loop are done (see ``aesara.function`` for details about
possible values and their meaning).
profile
Flag or string. If true, or different from the empty string, a
profile object will be created and attached to the inner graph of
scan. In case ``profile`` is True, the profile object will have the
name of the scan instance, otherwise it will have the passed string.
Profile object collect (and print) information only when running the
inner graph with the new cvm linker ( with default modes,
other linkers this argument is useless)
allow_gc
Set the value of allow gc for the internal graph of scan. If
set to None, this will use the value of config.scan__allow_gc.
The full scan behavior related to allocation is determined by
this value and the Aesara flag allow_gc. If the flag allow_gc
is True (default) and this scan parameter allow_gc is False
(default), then we let scan allocate all intermediate memory
on the first iteration, those are not garbage collected them
during that first iteration (this is determined by the scan
allow_gc). This speed up allocation of the following
iteration. But we free all those temp allocation at the end of
all iterations (this is what the Aesara flag allow_gc mean).
If you use preallocate and this scan is on GPU, the speed up
from the scan allow_gc is small. If you are missing memory,
disable the scan allow_gc could help you run graph that
request much memory.
strict
If true, all the shared variables used in ``fn`` must be provided as a
part of ``non_sequences`` or ``sequences``.
return_list
If True, will always return a list, even if there is only 1 output.
Returns
-------
tuple
Tuple of the form (outputs, updates); ``outputs`` is either a
Aesara variable or a list of Aesara variables representing the
outputs of ``scan`` (in the same order as in ``outputs_info``).
``updates`` is a subclass of dictionary specifying the update rules for
all shared variables used in scan.
This dictionary should be passed to ``aesara.function`` when you compile
your function. The change compared to a normal dictionary is that we
validate that keys are SharedVariable and addition of those dictionary
are validated to be consistent.
"""
# General observation : this code is executed only once, at creation
# of the computational graph, so we don't yet need to be smart about
# anything (to speed things up)
##
# Step 1. Wrap all inputs in dictionaries and add default values
##
# check if inputs are just single variables instead of lists
def wrap_into_list(x):
"""
Wrap the input into a list if it is not already a list.
"""
if x is None:
return []
elif not isinstance(x, (list, tuple)):
return [x]
else:
return list(x)
seqs = wrap_into_list(sequences)
outs_info = wrap_into_list(outputs_info)
# Make sure we get rid of numpy arrays or ints or anything like that
# passed as inputs to scan
non_seqs = []
for elem in wrap_into_list(non_sequences):
if not isinstance(elem, Variable):
non_seqs.append(aet.as_tensor_variable(elem))
else:
non_seqs.append(elem)
# If we provided a known number of steps ( before compilation)
# and if that number is 1 or -1, then we can skip the Scan Op,
# and just apply the inner function once
# To do that we check here to see the nature of n_steps
n_fixed_steps = None
if isinstance(n_steps, (float, int)):
n_fixed_steps = int(n_steps)
else:
try:
n_fixed_steps = aet.get_scalar_constant_value(n_steps)
except NotScalarConstantError:
n_fixed_steps = None
# Check n_steps is an int
if hasattr(n_steps, "dtype") and str(n_steps.dtype) not in integer_dtypes:
raise ValueError(f" n_steps must be an int. dtype provided is {n_steps.dtype}")
# compute number of sequences and number of outputs
n_seqs = len(seqs)
n_outs = len(outs_info)
return_steps = OrderedDict()
# wrap sequences in a dictionary if they are not already dictionaries
for i in range(n_seqs):
if not isinstance(seqs[i], dict):
seqs[i] = OrderedDict([("input", seqs[i]), ("taps", [0])])
elif seqs[i].get("taps", None) is not None:
seqs[i]["taps"] = wrap_into_list(seqs[i]["taps"])
elif seqs[i].get("taps", None) is None:
# seqs dictionary does not have the ``taps`` key
seqs[i]["taps"] = [0]
# wrap outputs info in a dictionary if they are not already in one
for i in range(n_outs):
if outs_info[i] is not None:
if isinstance(outs_info[i], dict):
if outs_info[i].get("return_steps", None) is not None:
raise DeprecationWarning(
"Using `return_steps` has been deprecated. "
"Simply select the entries you need using a "
"subtensor. Scan will optimize memory "
"consumption, so do not worry about that."
)
# END
if not isinstance(outs_info[i], dict):
# by default any output has a tap value of -1
outs_info[i] = OrderedDict([("initial", outs_info[i]), ("taps", [-1])])
elif (
outs_info[i].get("initial", None) is None
and outs_info[i].get("taps", None) is not None
):
# ^ no initial state but taps provided
raise ValueError(
"If you are using slices of an output "
"you need to provide a initial state "
f"for it: {outs_info[i]}"
)
elif (
outs_info[i].get("initial", None) is not None
and outs_info[i].get("taps", None) is None
):
# ^ initial state but taps not provided
if "taps" in outs_info[i]:
# ^ explicitly provided a None for taps
_logger.warning(
f"Output {getattr(outs_info[i]['initial'], 'name', 'None')} (index {i}) has a initial "
"state but taps is explicitly set to None ",
)
outs_info[i]["taps"] = [-1]
elif outs_info[i].get("taps", None) is not None:
# Check that taps are valid (< 0 and all dfferent)
taps = outs_info[i]["taps"]
if len(taps) > len(set(taps)):
raise ValueError(
("All the taps must be different in " " `outputs_info`"),
outs_info[i],
)
for t in taps:
if t >= 0:
raise ValueError(
("All the tap values must be " "smaller than 0."),
outs_info[i],
)
else:
# if a None is provided as the output info we replace it
# with an empty OrdereDict() to simplify handling
outs_info[i] = OrderedDict()
##
# Step 2. Generate inputs and outputs of the inner functions
# for compiling a dummy function (Iteration #1)
##
# create aesara inputs for the recursive function
# note : this is a first batch of possible inputs that will
# be compiled in a dummy function; we used this dummy
# function to detect shared variables and their updates
# and to construct a new and complete list of inputs and
# outputs
n_seqs = 0
scan_seqs = [] # Variables passed as inputs to the scan op
inner_seqs = [] # Variables passed as inputs to the inner function
inner_slices = [] # Actual slices if scan is removed from the picture
# go through sequences picking up time slices as needed
for i, seq in enumerate(seqs):
# Note that you can have something like no taps for
# a sequence, though is highly unlikely in practice
if "taps" in seq:
# go through the indicated slice
mintap = np.min(seq["taps"])
maxtap = np.max(seq["taps"])
# We cut the sequence such that seq[i] to correspond to
# seq[i-k]. For the purposes of cutting the sequences, we
# need to pretend tap 0 is used to avoid cutting the sequences
# too long if the taps are all lower or all higher than 0.
maxtap_proxy = max(maxtap, 0)
mintap_proxy = min(mintap, 0)
for k in seq["taps"]:
# create one slice of the input
# Later on, if we decide not to use scan because we are
# going for just one step, it makes things easier if we
# compute the correct outputs here. This way we can use
# the output of the lambda expression directly to replace
# the output of scan.
# If not we need to use copies, that will be replaced at
# each frame by the corresponding slice
actual_slice = seq["input"][k - mintap_proxy]
_seq_val = aet.as_tensor_variable(seq["input"])
_seq_val_slice = _seq_val[k - mintap_proxy]
nw_slice = _seq_val_slice.type()
# Try to transfer test_value to the new variable
if config.compute_test_value != "off":
try:
nw_slice.tag.test_value = get_test_value(_seq_val_slice)
except TestValueError:
if config.compute_test_value != "ignore":
# No need to print a warning or raise an error now,
# it will be done when fn will be called.
_logger.warning(
(
"Cannot compute test value for "
"the inner function of scan, input value "
"missing {}"
).format(_seq_val_slice)
)
# Add names to slices for debugging and pretty printing ..
# that is if the input already has a name
if getattr(seq["input"], "name", None) is not None:
if k > 0:
nw_name = seq["input"].name + f"[t+{int(k)}]"
elif k == 0:
nw_name = seq["input"].name + "[t]"
else:
nw_name = seq["input"].name + f"[t{int(k)}]"
nw_slice.name = nw_name
start = k - mintap_proxy
nw_name = None
if k == maxtap_proxy:
nw_seq = seq["input"][start:]
if getattr(seq["input"], "name", None) is not None:
nw_name = seq["input"].name + f"[{int(start)}:]"
else:
end = -(maxtap_proxy - k)
nw_seq = seq["input"][start:end]
if getattr(seq["input"], "name", None) is not None:
nw_name = seq["input"].name + f"[{int(start)}:{int(end)}]"
if go_backwards:
nw_seq = nw_seq[::-1]
scan_seqs.append(nw_seq)
inner_seqs.append(nw_slice)
inner_slices.append(actual_slice)
n_seqs += 1
# Add names -- it helps a lot when debugging
if nw_name is not None:
nw_seq.name = nw_name
# Since we've added all sequences now we need to level them up based on
# n_steps or their different shapes
lengths_vec = []
for seq in scan_seqs:
lengths_vec.append(seq.shape[0])
if not utils.isNaN_or_Inf_or_None(n_steps):
# ^ N_steps should also be considered
lengths_vec.append(aet.as_tensor(n_steps))
if len(lengths_vec) == 0:
# ^ No information about the number of steps
raise ValueError(
"No information about the number of steps "
"provided. Either provide a value for "
"n_steps argument of scan or provide an input "
"sequence"
)
# If the user has provided the number of steps, do that regardless ( and
# raise an error if the sequences are not long enough )
if utils.isNaN_or_Inf_or_None(n_steps):
actual_n_steps = lengths_vec[0]
for contestant in lengths_vec[1:]:
actual_n_steps = minimum(actual_n_steps, contestant)
else:
actual_n_steps = aet.as_tensor(n_steps)
scan_seqs = [seq[:actual_n_steps] for seq in scan_seqs]
# Conventions :
# mit_mot = multiple input taps, multiple output taps ( only provided
# by the gradient function )
# mit_sot = multiple input taps, single output tap (t + 0)
# sit_sot = single input tap, single output tap (t + 0)
# nit_sot = no input tap, single output tap (t + 0)
# MIT_MOT -- not provided by the user only by the grad function
n_mit_mot = 0
n_mit_mot_outs = 0
mit_mot_scan_inputs = []
mit_mot_inner_inputs = []
mit_mot_inner_outputs = []
mit_mot_out_slices = []
# SIT_SOT -- provided by the user
n_mit_sot = 0
mit_sot_scan_inputs = []
mit_sot_inner_inputs = []
mit_sot_inner_slices = []
mit_sot_inner_outputs = []
mit_sot_return_steps = OrderedDict()
mit_sot_tap_array = []
mit_sot_rightOrder = []
n_sit_sot = 0
sit_sot_scan_inputs = []
sit_sot_inner_inputs = []
sit_sot_inner_slices = []
sit_sot_inner_outputs = []
sit_sot_return_steps = OrderedDict()
sit_sot_rightOrder = []
# go through outputs picking up time slices as needed
for i, init_out in enumerate(outs_info):
# Note that our convention dictates that if an output uses
# just the previous time step, as a initial state we will only
# provide a tensor of the same dimension as one time step; This
# makes code much cleaner for those who do not use taps. Otherwise
# they would always had to shape_padleft the initial state ..
# which is ugly
if init_out.get("taps", None) == [-1]:
actual_arg = init_out["initial"]
if not isinstance(actual_arg, Variable):
actual_arg = aet.as_tensor_variable(actual_arg)
arg = safe_new(actual_arg)
if isinstance(arg, Constant):
# safe new returns a clone of the constants, but that is not
# what we need for initial states
arg = arg.type()
# Try to transfer test_value to the new variable
if config.compute_test_value != "off":
try:
arg.tag.test_value = get_test_value(actual_arg)
except TestValueError:
if config.compute_test_value != "ignore":
_logger.warning(
(
"Cannot compute test value for the "
"inner function of scan, test value missing: {}"
).format(actual_arg)
)
if getattr(init_out["initial"], "name", None) is not None:
arg.name = init_out["initial"].name + "[t-1]"
# We need now to allocate space for storing the output and copy
# the initial state over. We do this using the expand function
# defined in scan utils
sit_sot_scan_inputs.append(
utils.expand_empty(
aet.unbroadcast(shape_padleft(actual_arg), 0),
actual_n_steps,
)
)
sit_sot_inner_slices.append(actual_arg)
if i in return_steps:
sit_sot_return_steps[n_sit_sot] = return_steps[i]
sit_sot_inner_inputs.append(arg)
sit_sot_rightOrder.append(i)
n_sit_sot += 1
elif init_out.get("taps", None):
if np.any(np.array(init_out.get("taps", [])) > 0):
# Make sure we do not have requests for future values of a
# sequence we can not provide such values
raise ValueError("Can not use future taps of outputs", init_out)
# go through the taps
mintap = abs(np.min(init_out["taps"]))
mit_sot_tap_array.append(init_out["taps"])
# Sequence
mit_sot_scan_inputs.append(
utils.expand_empty(init_out["initial"][:mintap], actual_n_steps)
)
if i in return_steps:
mit_sot_return_steps[n_mit_sot] = return_steps[i]
mit_sot_rightOrder.append(i)
n_mit_sot += 1
for k in init_out["taps"]:
# create a new slice
actual_nw_slice = init_out["initial"][k + mintap]
_init_out_var = aet.as_tensor_variable(init_out["initial"])
_init_out_var_slice = _init_out_var[k + mintap]
nw_slice = _init_out_var_slice.type()
# Try to transfer test_value to the new variable
if config.compute_test_value != "off":
try:
nw_slice.tag.test_value = get_test_value(_init_out_var_slice)
except TestValueError:
if config.compute_test_value != "ignore":
_logger.warning(
(
"Cannot compute test value for "
"the inner function of scan, test value "
"missing: {}"
).format(_init_out_var_slice)
)
# give it a name or debugging and pretty printing
if getattr(init_out["initial"], "name", None) is not None:
if k > 0:
nw_slice.name = init_out["initial"].name + f"[t+{int(k)}]"
elif k == 0:
nw_slice.name = init_out["initial"].name + "[t]"
else:
nw_slice.name = init_out["initial"].name + f"[t{int(k)}]"
mit_sot_inner_inputs.append(nw_slice)
mit_sot_inner_slices.append(actual_nw_slice)
# NOTE: there is another case, in which we do not want to provide
# any previous value of the output to the inner function (i.e.
# a map); in that case we do not have to do anything ..
# Re-order args
max_mit_sot = np.max([-1] + mit_sot_rightOrder) + 1
max_sit_sot = np.max([-1] + sit_sot_rightOrder) + 1
n_elems = np.max([max_mit_sot, max_sit_sot])
_ordered_args = [[] for x in range(n_elems)]
offset = 0
for idx in range(n_mit_sot):
n_inputs = len(mit_sot_tap_array[idx])
if n_fixed_steps in [1, -1]:
_ordered_args[mit_sot_rightOrder[idx]] = mit_sot_inner_slices[
offset : offset + n_inputs
]
else:
_ordered_args[mit_sot_rightOrder[idx]] = mit_sot_inner_inputs[
offset : offset + n_inputs
]
offset += n_inputs
for idx in range(n_sit_sot):
if n_fixed_steps in [1, -1]:
_ordered_args[sit_sot_rightOrder[idx]] = [sit_sot_inner_slices[idx]]
else:
_ordered_args[sit_sot_rightOrder[idx]] = [sit_sot_inner_inputs[idx]]
ordered_args = []
for ls in _ordered_args:
ordered_args += ls
if n_fixed_steps in [1, -1]:
args = inner_slices + ordered_args + non_seqs
else:
args = inner_seqs + ordered_args + non_seqs
# add only the non-shared variables and non-constants to the arguments of
# the dummy function [ a function should not get shared variables or
# constants as input ]
dummy_args = [
arg
for arg in args
if (not isinstance(arg, SharedVariable) and not isinstance(arg, Constant))
]
# when we apply the lambda expression we get a mixture of update rules
# and outputs that needs to be separated
condition, outputs, updates = utils.get_updates_and_outputs(fn(*args))
if condition is not None:
as_while = True
else:
as_while = False
##
# Step 3. Check if we actually need scan and remove it if we don't
##
if n_fixed_steps in [1, -1]:
# We do not need to use the scan op anymore, so we can just return
# the outputs and updates we have
if condition is not None:
_logger.warning(
(
"When the number of steps is fixed and equal "
"to 1, the provided stopping condition, {} is ignored",
).format(condition)
)
for pos, inner_out in enumerate(outputs):
# we need to see if we need to pad our sequences with an
# unbroadcastable dimension; case example : we return an
# output for which we want all intermediate. If n_steps is 1
# then, if we return the output as given by the innner function
# this will represent only a slice and it will have one
# dimension less.
if isinstance(inner_out.type, TensorType) and return_steps.get(pos, 0) != 1:
outputs[pos] = aet.unbroadcast(shape_padleft(inner_out), 0)
if return_list is not True and len(outputs) == 1:
outputs = outputs[0]
return (outputs, updates)
##
# Step 4. Compile the dummy function
##
# We can now compile a dummy function just to see what shared variable
# we have and what are their update rules (note that the user has
# the option not to pass the shared variable to scan, so we need to
# pick them manually and add them to scan)
# make the compilation as fast as possible by not applying any
# optimization or conversion to C [ note this region is not important
# for performance so we can do stuff as unoptimal as we wish ]
# extract still missing inputs (there still might be so) and add them
# as non sequences at the end of our args
if condition is not None:
outputs.append(condition)
fake_nonseqs = [x.type() for x in non_seqs]
fake_outputs = clone_replace(
outputs, replace=OrderedDict(zip(non_seqs, fake_nonseqs))
)
all_inputs = filter(
lambda x: (
isinstance(x, Variable)
and not isinstance(x, SharedVariable)
and not isinstance(x, Constant)
),
graph_inputs(fake_outputs),
)
extra_inputs = [x for x in all_inputs if x not in args + fake_nonseqs]
non_seqs += extra_inputs
# Note we do not use all_inputs directly since the order of variables
# in args is quite important
dummy_args += extra_inputs
dummy_outs = outputs
# Perform a try-except to provide a meaningful error message to the
# user if inputs of the inner function are missing.
try:
dummy_f = function(
dummy_args,
dummy_outs,
updates=updates,
mode=Mode(linker="py", optimizer=None),
on_unused_input="ignore",
profile=False,
)
except MissingInputError as err:
msg = (
"\nPlease pass this variable to the scan's inner function. Do "
"not forget to also pass it to the `non_sequences` attribute "
"of scan."
)
raise MissingInputError(err.args[0] + msg)
##
# Step 5. Re-arange inputs of scan into a more strict order
##
# Step 5.0 Check the outputs of the dummy function to see if they
# match with user provided data
# if the number of outputs to the function does not match the number of
# assumed outputs until now (provided by the user) there can be
# only one explanation: No information is provided for any of the
# outputs (i.e. we are dealing with a map)
tmp_dummy_f_outs = len(dummy_f.maker.outputs)
if as_while:
tmp_dummy_f_outs -= 1
if not (tmp_dummy_f_outs == n_outs or outs_info == []):
raise ValueError(
"Please provide None as outputs_info for "
"any output that does not feed back into "
"scan (i.e. it behaves like a map) "
)
if outs_info == []:
n_outs = len(dummy_f.maker.outputs)
if as_while:
n_outs = n_outs - 1
outs_info = [OrderedDict() for x in range(n_outs)]
# Step 5.1 Outputs with taps different then -1
for i, out in enumerate(outs_info):
if "taps" in out and out["taps"] != [-1]:
mit_sot_inner_outputs.append(outputs[i])
# Step 5.2 Outputs with tap equal to -1
for i, out in enumerate(outs_info):
if "taps" in out and out["taps"] == [-1]:
sit_sot_inner_outputs.append(outputs[i])
# Step 5.3 Outputs that correspond to update rules of shared variables
givens = OrderedDict()
n_shared_outs = 0
shared_scan_inputs = []
shared_inner_inputs = []
shared_inner_outputs = []
sit_sot_shared = []
for input in dummy_f.maker.expanded_inputs:
if isinstance(input.variable, SharedVariable) and input.update:
new_var = safe_new(input.variable)
if getattr(input.variable, "name", None) is not None:
new_var.name = input.variable.name + "_copy"
if isinstance(new_var.type, ops.expandable_types):
sit_sot_inner_inputs.append(new_var)
sit_sot_scan_inputs.append(
utils.expand_empty(
aet.unbroadcast(shape_padleft(input.variable), 0),
actual_n_steps,
)
)
tensor_update = aet.as_tensor_variable(input.update)
sit_sot_inner_outputs.append(tensor_update)
# Not that pos is not a negative index. The sign of pos is used
# as a flag to indicate if this output should be part of the
# update rules or part of the standard outputs of scan.
# If `pos` is positive than it corresponds to the standard
# outputs of scan and it refers to output of index `pos`. If `pos`
# is negative that it corresponds to update rules of scan and it
# refers to update rule of index -1 - `pos`.
sit_sot_rightOrder.append(-1 - len(sit_sot_shared))
sit_sot_shared.append(input.variable)
givens[input.variable] = new_var
else:
shared_inner_inputs.append(new_var)
shared_scan_inputs.append(input.variable)
shared_inner_outputs.append(input.update)
givens[input.variable] = new_var
n_shared_outs += 1
n_sit_sot = len(sit_sot_inner_inputs)
# Step 5.4 Outputs with no taps used in the input
n_nit_sot = 0
nit_sot_inner_outputs = []
nit_sot_return_steps = OrderedDict()
nit_sot_rightOrder = []
for i, out in enumerate(outs_info):
if "taps" not in out:
nit_sot_inner_outputs.append(outputs[i])
if i in return_steps:
nit_sot_return_steps[n_nit_sot] = return_steps[i]
nit_sot_rightOrder.append(i)
n_nit_sot += 1
# Step 5.5 all other arguments including extra inputs
other_scan_args = []
other_inner_args = []
other_scan_args += [
arg
for arg in non_seqs
if (not isinstance(arg, SharedVariable) and not isinstance(arg, Constant))
]
# Step 5.6 all shared variables with no update rules
other_inner_args += [
safe_new(arg, "_copy")
for arg in non_seqs
if (not isinstance(arg, SharedVariable) and not isinstance(arg, Constant))
]
givens.update(OrderedDict(zip(other_scan_args, other_inner_args)))
if strict:
non_seqs_set = set(non_sequences if non_sequences is not None else [])
other_shared_scan_args = [
arg.variable
for arg in dummy_f.maker.expanded_inputs
if (
isinstance(arg.variable, SharedVariable)
and not arg.update
and arg.variable in non_seqs_set
)
]
other_shared_inner_args = [
safe_new(arg.variable, "_copy")
for arg in dummy_f.maker.expanded_inputs
if (
isinstance(arg.variable, SharedVariable)
and not arg.update
and arg.variable in non_seqs_set
)
]
else:
other_shared_scan_args = [
arg.variable
for arg in dummy_f.maker.expanded_inputs
if (isinstance(arg.variable, SharedVariable) and not arg.update)
]
other_shared_inner_args = [
safe_new(arg.variable, "_copy")
for arg in dummy_f.maker.expanded_inputs
if (isinstance(arg.variable, SharedVariable) and not arg.update)
]
givens.update(OrderedDict(zip(other_shared_scan_args, other_shared_inner_args)))
##
# Step 6. Re-order the outputs and clone them replacing things
# using the givens
##
inner_inputs = (
inner_seqs
+ mit_mot_inner_inputs
+ mit_sot_inner_inputs
+ sit_sot_inner_inputs
+ shared_inner_inputs
+ other_shared_inner_args
+ other_inner_args
)
inner_outs = (
mit_mot_inner_outputs
+ mit_sot_inner_outputs
+ sit_sot_inner_outputs
+ nit_sot_inner_outputs
+ shared_inner_outputs
)
if condition is not None:
inner_outs.append(condition)
# gpuarray is imported here, instead of being imported on top of
# the file because that would force on the user some dependencies that we
# might do not want to. Currently we are working on removing the
# dependencies on sandbox code completeley.
from aesara import gpuarray
if gpuarray.pygpu_activated:
# very often we end up in this situation when we want to
# replace w with w_copy, where w is a GPU variable
# and w_copy is TensorType. This is caused because shared
# variables are put on GPU right away >:| ,
new_givens = OrderedDict()
for w, w_copy in givens.items():
if isinstance(w.type, gpuarray.GpuArrayType) and isinstance(
w_copy.type, TensorType
):
for o in inner_outs:
new_givens = traverse(o, w, w_copy, new_givens)
else:
new_givens[w] = w_copy
else:
new_givens = givens
new_outs = clone_replace(inner_outs, replace=new_givens)
##
# Step 7. Create the Scan Op
##
tap_array = mit_sot_tap_array + [[-1] for x in range(n_sit_sot)]
if allow_gc is None:
allow_gc = config.scan__allow_gc
info = OrderedDict()
info["tap_array"] = tap_array
info["n_seqs"] = n_seqs
info["n_mit_mot"] = n_mit_mot
info["n_mit_mot_outs"] = n_mit_mot_outs
info["mit_mot_out_slices"] = mit_mot_out_slices
info["n_mit_sot"] = n_mit_sot
info["n_sit_sot"] = n_sit_sot
info["n_shared_outs"] = n_shared_outs
info["n_nit_sot"] = n_nit_sot
info["truncate_gradient"] = truncate_gradient
info["name"] = name
info["mode"] = mode
info["destroy_map"] = OrderedDict()
info["gpua"] = False
info["as_while"] = as_while
info["profile"] = profile
info["allow_gc"] = allow_gc
info["strict"] = strict
local_op = Scan(inner_inputs, new_outs, info)
##
# Step 8. Compute the outputs using the scan op
##
_scan_inputs = (
scan_seqs
+ mit_mot_scan_inputs
+ mit_sot_scan_inputs
+ sit_sot_scan_inputs
+ shared_scan_inputs
+ [actual_n_steps for x in range(n_nit_sot)]
+ other_shared_scan_args
+ other_scan_args
)
scan_inputs = []
for arg in [actual_n_steps] + _scan_inputs:
try:
arg = aet.as_tensor_variable(arg)
except TypeError:
# This happens for Random States for e.g. but it is a good way
# to make sure all inputs are tensors.
pass
scan_inputs += [arg]
scan_outs = local_op(*scan_inputs)
if type(scan_outs) not in (list, tuple):
scan_outs = [scan_outs]
##
# Step 9. Figure out which outs are update rules for shared variables
# and so on ...
##
update_map = OrderedUpdates()
def remove_dimensions(outs, steps_return, offsets=None):
    """Slice scan outputs down to what the caller asked for.

    Parameters
    ----------
    outs : list
        Raw outputs of the Scan op, one entry per output.
    steps_return : dict
        Maps an output position to how many trailing time steps the
        caller requested; a request of exactly 1 also drops the leading
        time dimension.
    offsets : list, optional
        Per-output count of leading entries (initial taps) to strip from
        outputs that are returned in full.
    """
    trimmed = []
    for pos, out in enumerate(outs):
        if pos in steps_return:
            nsteps = steps_return[pos]
            # A single requested step returns the bare last entry,
            # without the time dimension.
            trimmed.append(out[-nsteps:] if nsteps > 1 else out[-1])
        elif offsets is None:
            trimmed.append(out)
        else:
            # Drop the initial-state padding kept at the front.
            trimmed.append(out[offsets[pos]:])
    return trimmed
offset = n_mit_mot
offsets = [abs(np.min(x)) for x in mit_sot_tap_array]
mit_sot_outs = remove_dimensions(
scan_outs[offset : offset + n_mit_sot], mit_sot_return_steps, offsets
)
offset += n_mit_sot
offsets = [1 for x in range(n_sit_sot)]
sit_sot_outs = remove_dimensions(
scan_outs[offset : offset + n_sit_sot], sit_sot_return_steps, offsets
)
offset += n_sit_sot
nit_sot_outs = remove_dimensions(
scan_outs[offset : offset + n_nit_sot], nit_sot_return_steps
)
offset += n_nit_sot
for idx, update_rule in enumerate(scan_outs[offset : offset + n_shared_outs]):
update_map[shared_scan_inputs[idx]] = update_rule
_scan_out_list = mit_sot_outs + sit_sot_outs + nit_sot_outs
# Step 10. I need to reorder the outputs to be in the order expected by
# the user
rightOrder = mit_sot_rightOrder + sit_sot_rightOrder + nit_sot_rightOrder
scan_out_list = [None] * len(rightOrder)
for idx, pos in enumerate(rightOrder):
if pos >= 0:
scan_out_list[pos] = _scan_out_list[idx]
else:
# Not that pos is not a negative index. The sign of pos is used
# as a flag to indicate if this output should be part of the
# update rules or part of the standard outputs of scan.
# If `pos` is positive than it corresponds to the standard
# outputs of scan and it refers to output of index `pos`. If `pos`
# is negative that it corresponds to update rules of scan and it
# refers to update rule of index -1 - `pos`.
update_map[sit_sot_shared[abs(pos) - 1]] = _scan_out_list[idx][-1]
scan_out_list = [x for x in scan_out_list if x is not None]
if return_list is not True and len(scan_out_list) == 1:
scan_out_list = scan_out_list[0]
elif len(scan_out_list) == 0:
scan_out_list = None
return (scan_out_list, update_map)
| 41.529463 | 111 | 0.5971 |
c78c4b1b218a97eab0f4f481f5404431189560b4 | 184,513 | py | Python | distributed/scheduler.py | dazza-codes/distributed | 0bed9fe57fa6c0f9416b337aa816024a6bb31acf | [
"BSD-3-Clause"
] | null | null | null | distributed/scheduler.py | dazza-codes/distributed | 0bed9fe57fa6c0f9416b337aa816024a6bb31acf | [
"BSD-3-Clause"
] | null | null | null | distributed/scheduler.py | dazza-codes/distributed | 0bed9fe57fa6c0f9416b337aa816024a6bb31acf | [
"BSD-3-Clause"
] | null | null | null | import asyncio
from collections import defaultdict, deque, OrderedDict
from collections.abc import Mapping, Set
from datetime import timedelta
from functools import partial
from inspect import isawaitable
import itertools
import json
import logging
import math
from numbers import Number
import operator
import os
import pickle
import random
import warnings
import weakref
import psutil
import sortedcontainers
try:
from cytoolz import frequencies, merge, pluck, merge_sorted, first, merge_with
except ImportError:
from toolz import frequencies, merge, pluck, merge_sorted, first, merge_with
from toolz import valmap, second, compose, groupby
from tornado.ioloop import IOLoop
import dask
from .batched import BatchedSend
from .comm import (
normalize_address,
resolve_address,
get_address_host,
unparse_host_port,
)
from .comm.addressing import addresses_from_user_args
from .core import rpc, connect, send_recv, clean_exception, CommClosedError
from .diagnostics.plugin import SchedulerPlugin
from . import profile
from .metrics import time
from .node import ServerNode
from .preloading import preload_modules
from .proctitle import setproctitle
from .security import Security
from .utils import (
All,
ignoring,
get_fileno_limit,
log_errors,
key_split,
validate_key,
no_default,
parse_timedelta,
parse_bytes,
PeriodicCallback,
shutting_down,
key_split_group,
empty_context,
tmpfile,
format_bytes,
format_time,
TimeoutError,
)
from .utils_comm import scatter_to_workers, gather_from_workers, retry_operation
from .utils_perf import enable_gc_diagnosis, disable_gc_diagnosis
from . import versions as version_module
from .publish import PublishExtension
from .queues import QueueExtension
from .recreate_exceptions import ReplayExceptionScheduler
from .lock import LockExtension
from .pubsub import PubSubSchedulerExtension
from .stealing import WorkStealing
from .variable import VariableExtension
logger = logging.getLogger(__name__)
LOG_PDB = dask.config.get("distributed.admin.pdb-on-err")
DEFAULT_DATA_SIZE = dask.config.get("distributed.scheduler.default-data-size")
DEFAULT_EXTENSIONS = [
LockExtension,
PublishExtension,
ReplayExceptionScheduler,
QueueExtension,
VariableExtension,
PubSubSchedulerExtension,
]
ALL_TASK_STATES = {"released", "waiting", "no-worker", "processing", "erred", "memory"}
class ClientState:
"""
A simple object holding information about a client.
.. attribute:: client_key: str
A unique identifier for this client. This is generally an opaque
string generated by the client itself.
.. attribute:: wants_what: {TaskState}
A set of tasks this client wants kept in memory, so that it can
download its result when desired. This is the reverse mapping of
:class:`TaskState.who_wants`.
Tasks are typically removed from this set when the corresponding
object in the client's space (for example a ``Future`` or a Dask
collection) gets garbage-collected.
"""
__slots__ = ("client_key", "wants_what", "last_seen", "versions")
def __init__(self, client, versions=None):
self.client_key = client
self.wants_what = set()
self.last_seen = time()
self.versions = versions or {}
def __repr__(self):
return "<Client %r>" % (self.client_key,)
def __str__(self):
return self.client_key
class WorkerState:
"""
A simple object holding information about a worker.
.. attribute:: address
This worker's unique key. This can be its connected address
(such as ``'tcp://127.0.0.1:8891'``) or an alias (such as ``'alice'``).
.. attribute:: processing: {TaskState: cost}
A dictionary of tasks that have been submitted to this worker.
Each task state is asssociated with the expected cost in seconds
of running that task, summing both the task's expected computation
time and the expected communication time of its result.
Multiple tasks may be submitted to a worker in advance and the worker
will run them eventually, depending on its execution resources
(but see :doc:`work-stealing`).
All the tasks here are in the "processing" state.
This attribute is kept in sync with :attr:`TaskState.processing_on`.
.. attribute:: has_what: {TaskState}
The set of tasks which currently reside on this worker.
All the tasks here are in the "memory" state.
This is the reverse mapping of :class:`TaskState.who_has`.
.. attribute:: nbytes: int
The total memory size, in bytes, used by the tasks this worker
holds in memory (i.e. the tasks in this worker's :attr:`has_what`).
.. attribute:: nthreads: int
The number of CPU threads made available on this worker.
.. attribute:: resources: {str: Number}
The available resources on this worker like ``{'gpu': 2}``.
These are abstract quantities that constrain certain tasks from
running at the same time on this worker.
.. attribute:: used_resources: {str: Number}
The sum of each resource used by all tasks allocated to this worker.
The numbers in this dictionary can only be less or equal than
those in this worker's :attr:`resources`.
.. attribute:: occupancy: Number
The total expected runtime, in seconds, of all tasks currently
processing on this worker. This is the sum of all the costs in
this worker's :attr:`processing` dictionary.
.. attribute:: status: str
The current status of the worker, either ``'running'`` or ``'closed'``
.. attribute:: nanny: str
Address of the associated Nanny, if present
.. attribute:: last_seen: Number
The last time we received a heartbeat from this worker, in local
scheduler time.
.. attribute:: actors: {TaskState}
A set of all TaskStates on this worker that are actors. This only
includes those actors whose state actually lives on this worker, not
actors to which this worker has a reference.
"""
# XXX need a state field to signal active/removed?
__slots__ = (
"actors",
"address",
"bandwidth",
"extra",
"has_what",
"last_seen",
"local_directory",
"memory_limit",
"metrics",
"name",
"nanny",
"nbytes",
"nthreads",
"occupancy",
"pid",
"processing",
"resources",
"services",
"status",
"time_delay",
"used_resources",
"versions",
)
def __init__(
self,
address=None,
pid=0,
name=None,
nthreads=0,
memory_limit=0,
local_directory=None,
services=None,
versions=None,
nanny=None,
extra=None,
):
self.address = address
self.pid = pid
self.name = name
self.nthreads = nthreads
self.memory_limit = memory_limit
self.local_directory = local_directory
self.services = services or {}
self.versions = versions or {}
self.nanny = nanny
self.status = "running"
self.nbytes = 0
self.occupancy = 0
self.metrics = {}
self.last_seen = 0
self.time_delay = 0
self.bandwidth = parse_bytes(dask.config.get("distributed.scheduler.bandwidth"))
self.actors = set()
self.has_what = set()
self.processing = {}
self.resources = {}
self.used_resources = {}
self.extra = extra or {}
def __hash__(self):
return hash(self.address)
def __eq__(self, other):
return type(self) == type(other) and self.address == other.address
@property
def host(self):
return get_address_host(self.address)
def clean(self):
""" Return a version of this object that is appropriate for serialization """
ws = WorkerState(
address=self.address,
pid=self.pid,
name=self.name,
nthreads=self.nthreads,
memory_limit=self.memory_limit,
local_directory=self.local_directory,
services=self.services,
nanny=self.nanny,
extra=self.extra,
)
ws.processing = {ts.key for ts in self.processing}
return ws
def __repr__(self):
return "<Worker %r, name: %s, memory: %d, processing: %d>" % (
self.address,
self.name,
len(self.has_what),
len(self.processing),
)
def identity(self):
return {
"type": "Worker",
"id": self.name,
"host": self.host,
"resources": self.resources,
"local_directory": self.local_directory,
"name": self.name,
"nthreads": self.nthreads,
"memory_limit": self.memory_limit,
"last_seen": self.last_seen,
"services": self.services,
"metrics": self.metrics,
"nanny": self.nanny,
**self.extra,
}
@property
def ncores(self):
warnings.warn("WorkerState.ncores has moved to WorkerState.nthreads")
return self.nthreads
class TaskState:
"""
A simple object holding information about a task.
.. attribute:: key: str
The key is the unique identifier of a task, generally formed
from the name of the function, followed by a hash of the function
and arguments, like ``'inc-ab31c010444977004d656610d2d421ec'``.
.. attribute:: prefix: TaskPrefix
The broad class of tasks to which this task belongs like "inc" or
"read_csv"
.. attribute:: run_spec: object
A specification of how to run the task. The type and meaning of this
value is opaque to the scheduler, as it is only interpreted by the
worker to which the task is sent for executing.
As a special case, this attribute may also be ``None``, in which case
the task is "pure data" (such as, for example, a piece of data loaded
in the scheduler using :meth:`Client.scatter`). A "pure data" task
cannot be computed again if its value is lost.
.. attribute:: priority: tuple
The priority provides each task with a relative ranking which is used
to break ties when many tasks are being considered for execution.
This ranking is generally a 2-item tuple. The first (and dominant)
item corresponds to when it was submitted. Generally, earlier tasks
take precedence. The second item is determined by the client, and is
a way to prioritize tasks within a large graph that may be important,
such as if they are on the critical path, or good to run in order to
release many dependencies. This is explained further in
:doc:`Scheduling Policy <scheduling-policies>`.
.. attribute:: state: str
This task's current state. Valid states include ``released``,
``waiting``, ``no-worker``, ``processing``, ``memory``, ``erred``
and ``forgotten``. If it is ``forgotten``, the task isn't stored
in the ``tasks`` dictionary anymore and will probably disappear
soon from memory.
.. attribute:: dependencies: {TaskState}
The set of tasks this task depends on for proper execution. Only
tasks still alive are listed in this set. If, for whatever reason,
this task also depends on a forgotten task, the
:attr:`has_lost_dependencies` flag is set.
A task can only be executed once all its dependencies have already
been successfully executed and have their result stored on at least
one worker. This is tracked by progressively draining the
:attr:`waiting_on` set.
.. attribute:: dependents: {TaskState}
The set of tasks which depend on this task. Only tasks still alive
are listed in this set.
This is the reverse mapping of :attr:`dependencies`.
.. attribute:: has_lost_dependencies: bool
Whether any of the dependencies of this task has been forgotten.
For memory consumption reasons, forgotten tasks are not kept in
memory even though they may have dependent tasks. When a task is
forgotten, therefore, each of its dependents has their
:attr:`has_lost_dependencies` attribute set to ``True``.
If :attr:`has_lost_dependencies` is true, this task cannot go
into the "processing" state anymore.
.. attribute:: waiting_on: {TaskState}
The set of tasks this task is waiting on *before* it can be executed.
This is always a subset of :attr:`dependencies`. Each time one of the
dependencies has finished processing, it is removed from the
:attr:`waiting_on` set.
Once :attr:`waiting_on` becomes empty, this task can move from the
"waiting" state to the "processing" state (unless one of the
dependencies errored out, in which case this task is instead
marked "erred").
.. attribute:: waiters: {TaskState}
The set of tasks which need this task to remain alive. This is always
a subset of :attr:`dependents`. Each time one of the dependents
has finished processing, it is removed from the :attr:`waiters`
set.
Once both :attr:`waiters` and :attr:`who_wants` become empty, this
task can be released (if it has a non-empty :attr:`run_spec`) or
forgotten (otherwise) by the scheduler, and by any workers
in :attr:`who_has`.
.. note:: Counter-intuitively, :attr:`waiting_on` and
:attr:`waiters` are not reverse mappings of each other.
.. attribute:: who_wants: {ClientState}
The set of clients who want this task's result to remain alive.
This is the reverse mapping of :attr:`ClientState.wants_what`.
When a client submits a graph to the scheduler it also specifies
which output tasks it desires, such that their results are not released
from memory.
Once a task has finished executing (i.e. moves into the "memory"
or "erred" state), the clients in :attr:`who_wants` are notified.
Once both :attr:`waiters` and :attr:`who_wants` become empty, this
task can be released (if it has a non-empty :attr:`run_spec`) or
forgotten (otherwise) by the scheduler, and by any workers
in :attr:`who_has`.
.. attribute:: who_has: {WorkerState}
The set of workers who have this task's result in memory.
It is non-empty iff the task is in the "memory" state. There can be
more than one worker in this set if, for example, :meth:`Client.scatter`
or :meth:`Client.replicate` was used.
This is the reverse mapping of :attr:`WorkerState.has_what`.
.. attribute:: processing_on: WorkerState (or None)
If this task is in the "processing" state, which worker is currently
processing it. Otherwise this is ``None``.
This attribute is kept in sync with :attr:`WorkerState.processing`.
.. attribute:: retries: int
The number of times this task can automatically be retried in case
of failure. If a task fails executing (the worker returns with
an error), its :attr:`retries` attribute is checked. If it is
equal to 0, the task is marked "erred". If it is greater than 0,
the :attr:`retries` attribute is decremented and execution is
attempted again.
.. attribute:: nbytes: int (or None)
The number of bytes, as determined by ``sizeof``, of the result
of a finished task. This number is used for diagnostics and to
help prioritize work.
.. attribute:: type: str
The type of the object as a string. Only present for tasks that have
been computed.
.. attribute:: exception: object
If this task failed executing, the exception object is stored here.
Otherwise this is ``None``.
.. attribute:: traceback: object
If this task failed executing, the traceback object is stored here.
Otherwise this is ``None``.
.. attribute:: exception_blame: TaskState (or None)
If this task or one of its dependencies failed executing, the
failed task is stored here (possibly itself). Otherwise this
is ``None``.
.. attribute:: suspicious: int
The number of times this task has been involved in a worker death.
Some tasks may cause workers to die (such as calling ``os._exit(0)``).
When a worker dies, all of the tasks on that worker are reassigned
to others. This combination of behaviors can cause a bad task to
catastrophically destroy all workers on the cluster, one after
another. Whenever a worker dies, we mark each task currently
processing on that worker (as recorded by
:attr:`WorkerState.processing`) as suspicious.
If a task is involved in three deaths (or some other fixed constant)
then we mark the task as ``erred``.
.. attribute:: host_restrictions: {hostnames}
A set of hostnames where this task can be run (or ``None`` if empty).
Usually this is empty unless the task has been specifically restricted
to only run on certain hosts. A hostname may correspond to one or
several connected workers.
.. attribute:: worker_restrictions: {worker addresses}
A set of complete worker addresses where this can be run (or ``None``
if empty). Usually this is empty unless the task has been specifically
restricted to only run on certain workers.
Note this is tracking worker addresses, not worker states, since
the specific workers may not be connected at this time.
.. attribute:: resource_restrictions: {resource: quantity}
Resources required by this task, such as ``{'gpu': 1}`` or
``{'memory': 1e9}`` (or ``None`` if empty). These are user-defined
names and are matched against the contents of each
:attr:`WorkerState.resources` dictionary.
.. attribute:: loose_restrictions: bool
If ``False``, each of :attr:`host_restrictions`,
:attr:`worker_restrictions` and :attr:`resource_restrictions` is
a hard constraint: if no worker is available satisfying those
restrictions, the task cannot go into the "processing" state and
will instead go into the "no-worker" state.
If ``True``, the above restrictions are mere preferences: if no worker
is available satisfying those restrictions, the task can still go
into the "processing" state and be sent for execution to another
connected worker.
.. attribute: actor: bool
Whether or not this task is an Actor.
.. attribute: group: TaskGroup
: The group of tasks to which this one belongs.
"""
__slots__ = (
# === General description ===
"actor",
# Key name
"key",
# Key prefix (see key_split())
"prefix",
# How to run the task (None if pure data)
"run_spec",
# Alive dependents and dependencies
"dependencies",
"dependents",
# Compute priority
"priority",
# Restrictions
"host_restrictions",
"worker_restrictions", # not WorkerStates but addresses
"resource_restrictions",
"loose_restrictions",
# === Task state ===
"_state",
# Whether some dependencies were forgotten
"has_lost_dependencies",
# If in 'waiting' state, which tasks need to complete
# before we can run
"waiting_on",
# If in 'waiting' or 'processing' state, which tasks needs us
# to complete before they can run
"waiters",
# In in 'processing' state, which worker we are processing on
"processing_on",
# If in 'memory' state, Which workers have us
"who_has",
# Which clients want us
"who_wants",
"exception",
"traceback",
"exception_blame",
"suspicious",
"retries",
"nbytes",
"type",
"group_key",
"group",
)
def __init__(self, key, run_spec):
self.key = key
self.run_spec = run_spec
self._state = None
self.exception = self.traceback = self.exception_blame = None
self.suspicious = self.retries = 0
self.nbytes = None
self.priority = None
self.who_wants = set()
self.dependencies = set()
self.dependents = set()
self.waiting_on = set()
self.waiters = set()
self.who_has = set()
self.processing_on = None
self.has_lost_dependencies = False
self.host_restrictions = None
self.worker_restrictions = None
self.resource_restrictions = None
self.loose_restrictions = False
self.actor = None
self.type = None
self.group_key = key_split_group(key)
self.group = None
@property
def state(self) -> str:
return self._state
@property
def prefix_key(self):
return self.prefix.name
@state.setter
def state(self, value: str):
self.group.states[self._state] -= 1
self.group.states[value] += 1
self._state = value
def add_dependency(self, other: "TaskState"):
""" Add another task as a dependency of this task """
self.dependencies.add(other)
self.group.dependencies.add(other.group)
other.dependents.add(self)
def get_nbytes(self) -> int:
nbytes = self.nbytes
return nbytes if nbytes is not None else DEFAULT_DATA_SIZE
def set_nbytes(self, nbytes: int):
old_nbytes = self.nbytes
diff = nbytes - (old_nbytes or 0)
self.group.nbytes_total += diff
self.group.nbytes_in_memory += diff
for ws in self.who_has:
ws.nbytes += diff
self.nbytes = nbytes
def __repr__(self):
return "<Task %r %s>" % (self.key, self.state)
def validate(self):
try:
for cs in self.who_wants:
assert isinstance(cs, ClientState), (repr(cs), self.who_wants)
for ws in self.who_has:
assert isinstance(ws, WorkerState), (repr(ws), self.who_has)
for ts in self.dependencies:
assert isinstance(ts, TaskState), (repr(ts), self.dependencies)
for ts in self.dependents:
assert isinstance(ts, TaskState), (repr(ts), self.dependents)
validate_task_state(self)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
class TaskGroup:
""" Collection tracking all tasks within a group
Keys often have a structure like ``("x-123", 0)``
A group takes the first section, like ``"x-123"``
.. attribute:: name: str
The name of a group of tasks.
For a task like ``("x-123", 0)`` this is the text ``"x-123"``
.. attribute:: states: Dict[str, int]
The number of tasks in each state,
like ``{"memory": 10, "processing": 3, "released": 4, ...}``
.. attribute:: dependencies: Set[TaskGroup]
The other TaskGroups on which this one depends
.. attribute:: nbytes_total: int
The total number of bytes that this task group has produced
.. attribute:: nbytes_in_memory: int
The number of bytes currently stored by this TaskGroup
.. attribute:: duration: float
The total amount of time spent on all tasks in this TaskGroup
.. attribute:: types: Set[str]
The result types of this TaskGroup
See also
--------
TaskPrefix
"""
def __init__(self, name):
self.name = name
self.states = {state: 0 for state in ALL_TASK_STATES}
self.states["forgotten"] = 0
self.dependencies = set()
self.nbytes_total = 0
self.nbytes_in_memory = 0
self.duration = 0
self.types = set()
def add(self, ts):
# self.tasks.add(ts)
self.states[ts.state] += 1
ts.group = self
def __repr__(self):
return (
"<"
+ (self.name or "no-group")
+ ": "
+ ", ".join(
"%s: %d" % (k, v) for (k, v) in sorted(self.states.items()) if v
)
+ ">"
)
def __len__(self):
return sum(self.states.values())
class TaskPrefix:
""" Collection tracking all tasks within a group
Keys often have a structure like ``("x-123", 0)``
A group takes the first section, like ``"x"``
.. attribute:: name: str
The name of a group of tasks.
For a task like ``("x-123", 0)`` this is the text ``"x"``
.. attribute:: states: Dict[str, int]
The number of tasks in each state,
like ``{"memory": 10, "processing": 3, "released": 4, ...}``
.. attribute:: duration_average: float
An exponentially weighted moving average duration of all tasks with this prefix
See Also
--------
TaskGroup
"""
def __init__(self, name):
self.name = name
self.groups = []
if self.name in dask.config.get("distributed.scheduler.default-task-durations"):
self.duration_average = parse_timedelta(
dask.config.get("distributed.scheduler.default-task-durations")[
self.name
]
)
else:
self.duration_average = None
@property
def states(self):
return merge_with(sum, [g.states for g in self.groups])
@property
def active(self):
return [
g
for g in self.groups
if any(v != 0 for k, v in g.states.items() if k != "forgotten")
]
@property
def active_states(self):
return merge_with(sum, [g.states for g in self.active])
def __repr__(self):
return (
"<"
+ self.name
+ ": "
+ ", ".join(
"%s: %d" % (k, v) for (k, v) in sorted(self.states.items()) if v
)
+ ">"
)
@property
def nbytes_in_memory(self):
return sum(tg.nbytes_in_memory for tg in self.groups)
@property
def nbytes_total(self):
return sum(tg.nbytes_total for tg in self.groups)
def __len__(self):
return sum(map(len, self.groups))
@property
def duration(self):
return sum(tg.duration for tg in self.groups)
@property
def types(self):
return set().union(*[tg.types for tg in self.groups])
class _StateLegacyMapping(Mapping):
"""
A mapping interface mimicking the former Scheduler state dictionaries.
"""
def __init__(self, states, accessor):
self._states = states
self._accessor = accessor
def __iter__(self):
return iter(self._states)
def __len__(self):
return len(self._states)
def __getitem__(self, key):
return self._accessor(self._states[key])
def __repr__(self):
return "%s(%s)" % (self.__class__, dict(self))
class _OptionalStateLegacyMapping(_StateLegacyMapping):
"""
Similar to _StateLegacyMapping, but a false-y value is interpreted
as a missing key.
"""
# For tasks etc.
def __iter__(self):
accessor = self._accessor
for k, v in self._states.items():
if accessor(v):
yield k
def __len__(self):
accessor = self._accessor
return sum(bool(accessor(v)) for v in self._states.values())
def __getitem__(self, key):
v = self._accessor(self._states[key])
if v:
return v
else:
raise KeyError
class _StateLegacySet(Set):
"""
Similar to _StateLegacyMapping, but exposes a set containing
all values with a true value.
"""
# For loose_restrictions
def __init__(self, states, accessor):
self._states = states
self._accessor = accessor
def __iter__(self):
return (k for k, v in self._states.items() if self._accessor(v))
def __len__(self):
return sum(map(bool, map(self._accessor, self._states.values())))
def __contains__(self, k):
st = self._states.get(k)
return st is not None and bool(self._accessor(st))
def __repr__(self):
return "%s(%s)" % (self.__class__, set(self))
def _legacy_task_key_set(tasks):
"""
Transform a set of task states into a set of task keys.
"""
return {ts.key for ts in tasks}
def _legacy_client_key_set(clients):
"""
Transform a set of client states into a set of client keys.
"""
return {cs.client_key for cs in clients}
def _legacy_worker_key_set(workers):
"""
Transform a set of worker states into a set of worker keys.
"""
return {ws.address for ws in workers}
def _legacy_task_key_dict(task_dict):
"""
Transform a dict of {task state: value} into a dict of {task key: value}.
"""
return {ts.key: value for ts, value in task_dict.items()}
def _task_key_or_none(task):
return task.key if task is not None else None
class Scheduler(ServerNode):
""" Dynamic distributed task scheduler
The scheduler tracks the current state of workers, data, and computations.
The scheduler listens for events and responds by controlling workers
appropriately. It continuously tries to use the workers to execute an ever
growing dask graph.
All events are handled quickly, in linear time with respect to their input
(which is often of constant size) and generally within a millisecond. To
accomplish this the scheduler tracks a lot of state. Every operation
maintains the consistency of this state.
The scheduler communicates with the outside world through Comm objects.
It maintains a consistent and valid view of the world even when listening
to several clients at once.
A Scheduler is typically started either with the ``dask-scheduler``
executable::
$ dask-scheduler
Scheduler started at 127.0.0.1:8786
Or within a LocalCluster a Client starts up without connection
information::
>>> c = Client() # doctest: +SKIP
>>> c.cluster.scheduler # doctest: +SKIP
Scheduler(...)
Users typically do not interact with the scheduler directly but rather with
the client object ``Client``.
**State**
The scheduler contains the following state variables. Each variable is
listed along with what it stores and a brief description.
* **tasks:** ``{task key: TaskState}``
Tasks currently known to the scheduler
* **unrunnable:** ``{TaskState}``
Tasks in the "no-worker" state
* **workers:** ``{worker key: WorkerState}``
Workers currently connected to the scheduler
* **idle:** ``{WorkerState}``:
Set of workers that are not fully utilized
* **saturated:** ``{WorkerState}``:
Set of workers that are not over-utilized
* **host_info:** ``{hostname: dict}``:
Information about each worker host
* **clients:** ``{client key: ClientState}``
Clients currently connected to the scheduler
* **services:** ``{str: port}``:
Other services running on this scheduler, like Bokeh
* **loop:** ``IOLoop``:
The running Tornado IOLoop
* **client_comms:** ``{client key: Comm}``
For each client, a Comm object used to receive task requests and
report task status updates.
* **stream_comms:** ``{worker key: Comm}``
For each worker, a Comm object from which we both accept stimuli and
report results
* **task_duration:** ``{key-prefix: time}``
Time we expect certain functions to take, e.g. ``{'sum': 0.25}``
"""
default_port = 8786
_instances = weakref.WeakSet()
def __init__(
    self,
    loop=None,
    delete_interval="500ms",
    synchronize_worker_interval="60s",
    services=None,
    service_kwargs=None,
    allowed_failures=None,
    extensions=None,
    validate=None,
    scheduler_file=None,
    security=None,
    worker_ttl=None,
    idle_timeout=None,
    interface=None,
    host=None,
    port=0,
    protocol=None,
    dashboard_address=None,
    preload=None,
    preload_argv=(),
    plugins=(),
    **kwargs
):
    """Initialize all scheduler state: configuration, task/worker/client
    bookkeeping, legacy attribute views, RPC handler tables, and the
    transition table.  Parameters that are ``None`` fall back to the
    ``distributed.scheduler.*`` dask config entries.
    """
    self._setup_logging(logger)
    # Attributes
    # Config fall-backs for values the caller did not supply.
    if allowed_failures is None:
        allowed_failures = dask.config.get("distributed.scheduler.allowed-failures")
    self.allowed_failures = allowed_failures
    if validate is None:
        validate = dask.config.get("distributed.scheduler.validate")
    self.validate = validate
    self.status = None
    self.proc = psutil.Process()
    self.delete_interval = parse_timedelta(delete_interval, default="ms")
    self.synchronize_worker_interval = parse_timedelta(
        synchronize_worker_interval, default="ms"
    )
    self.digests = None
    self.service_specs = services or {}
    self.service_kwargs = service_kwargs or {}
    self.services = {}
    self.scheduler_file = scheduler_file
    worker_ttl = worker_ttl or dask.config.get("distributed.scheduler.worker-ttl")
    self.worker_ttl = parse_timedelta(worker_ttl) if worker_ttl else None
    idle_timeout = idle_timeout or dask.config.get(
        "distributed.scheduler.idle-timeout"
    )
    if idle_timeout:
        self.idle_timeout = parse_timedelta(idle_timeout)
    else:
        self.idle_timeout = None
    self.time_started = time()
    self._lock = asyncio.Lock()
    # Running estimate of cluster bandwidth, refined by worker heartbeats.
    self.bandwidth = parse_bytes(dask.config.get("distributed.scheduler.bandwidth"))
    self.bandwidth_workers = defaultdict(float)
    self.bandwidth_types = defaultdict(float)
    if not preload:
        preload = dask.config.get("distributed.scheduler.preload")
    if not preload_argv:
        preload_argv = dask.config.get("distributed.scheduler.preload-argv")
    self.preload = preload
    self.preload_argv = preload_argv
    self.security = security or Security()
    assert isinstance(self.security, Security)
    self.connection_args = self.security.get_connection_args("scheduler")
    self.listen_args = self.security.get_listen_args("scheduler")
    if dashboard_address is not None:
        # Dashboard is optional: requires bokeh to be importable.
        try:
            from distributed.dashboard import BokehScheduler
        except ImportError:
            logger.debug("To start diagnostics web server please install Bokeh")
        else:
            self.service_specs[("dashboard", dashboard_address)] = (
                BokehScheduler,
                (service_kwargs or {}).get("dashboard", {}),
            )
    # Communication state
    self.loop = loop or IOLoop.current()
    self.client_comms = dict()
    self.stream_comms = dict()
    self._worker_coroutines = []
    self._ipython_kernel = None
    # Task state
    self.tasks = dict()
    self.task_groups = dict()
    self.task_prefixes = dict()
    # Legacy per-task attribute views (e.g. ``self.priority``) kept for
    # backward compatibility; each is a live mapping over ``self.tasks``.
    for old_attr, new_attr, wrap in [
        ("priority", "priority", None),
        ("dependencies", "dependencies", _legacy_task_key_set),
        ("dependents", "dependents", _legacy_task_key_set),
        ("retries", "retries", None),
    ]:
        func = operator.attrgetter(new_attr)
        if wrap is not None:
            func = compose(wrap, func)
        setattr(self, old_attr, _StateLegacyMapping(self.tasks, func))
    # Same idea, but these attributes may be absent on a TaskState.
    for old_attr, new_attr, wrap in [
        ("nbytes", "nbytes", None),
        ("who_wants", "who_wants", _legacy_client_key_set),
        ("who_has", "who_has", _legacy_worker_key_set),
        ("waiting", "waiting_on", _legacy_task_key_set),
        ("waiting_data", "waiters", _legacy_task_key_set),
        ("rprocessing", "processing_on", None),
        ("host_restrictions", "host_restrictions", None),
        ("worker_restrictions", "worker_restrictions", None),
        ("resource_restrictions", "resource_restrictions", None),
        ("suspicious_tasks", "suspicious", None),
        ("exceptions", "exception", None),
        ("tracebacks", "traceback", None),
        ("exceptions_blame", "exception_blame", _task_key_or_none),
    ]:
        func = operator.attrgetter(new_attr)
        if wrap is not None:
            func = compose(wrap, func)
        setattr(self, old_attr, _OptionalStateLegacyMapping(self.tasks, func))
    # Legacy set view.
    for old_attr, new_attr, wrap in [
        ("loose_restrictions", "loose_restrictions", None)
    ]:
        func = operator.attrgetter(new_attr)
        if wrap is not None:
            func = compose(wrap, func)
        setattr(self, old_attr, _StateLegacySet(self.tasks, func))
    self.generation = 0
    self._last_client = None
    self._last_time = 0
    self.unrunnable = set()
    self.n_tasks = 0
    self.task_metadata = dict()
    self.datasets = dict()
    # Prefix-keyed containers
    self.unknown_durations = defaultdict(set)
    # Client state
    self.clients = dict()
    for old_attr, new_attr, wrap in [
        ("wants_what", "wants_what", _legacy_task_key_set)
    ]:
        func = operator.attrgetter(new_attr)
        if wrap is not None:
            func = compose(wrap, func)
        setattr(self, old_attr, _StateLegacyMapping(self.clients, func))
    self.clients["fire-and-forget"] = ClientState("fire-and-forget")
    # Worker state
    self.workers = sortedcontainers.SortedDict()
    for old_attr, new_attr, wrap in [
        ("nthreads", "nthreads", None),
        ("worker_bytes", "nbytes", None),
        ("worker_resources", "resources", None),
        ("used_resources", "used_resources", None),
        ("occupancy", "occupancy", None),
        ("worker_info", "metrics", None),
        ("processing", "processing", _legacy_task_key_dict),
        ("has_what", "has_what", _legacy_task_key_set),
    ]:
        func = operator.attrgetter(new_attr)
        if wrap is not None:
            func = compose(wrap, func)
        setattr(self, old_attr, _StateLegacyMapping(self.workers, func))
    self.idle = sortedcontainers.SortedSet(key=operator.attrgetter("address"))
    self.saturated = set()
    self.total_nthreads = 0
    self.total_occupancy = 0
    self.host_info = defaultdict(dict)
    self.resources = defaultdict(dict)
    self.aliases = dict()
    self._task_state_collections = [self.unrunnable]
    self._worker_collections = [
        self.workers,
        self.host_info,
        self.resources,
        self.aliases,
    ]
    self.extensions = {}
    self.plugins = list(plugins)
    self.transition_log = deque(
        maxlen=dask.config.get("distributed.scheduler.transition-log-length")
    )
    self.log = deque(
        maxlen=dask.config.get("distributed.scheduler.transition-log-length")
    )
    self.worker_plugins = []
    # Message handler tables: stream messages from workers ...
    worker_handlers = {
        "task-finished": self.handle_task_finished,
        "task-erred": self.handle_task_erred,
        "release": self.handle_release_data,
        "release-worker-data": self.release_worker_data,
        "add-keys": self.add_keys,
        "missing-data": self.handle_missing_data,
        "long-running": self.handle_long_running,
        "reschedule": self.reschedule,
        "keep-alive": lambda *args, **kwargs: None,
    }
    # ... stream messages from clients ...
    client_handlers = {
        "update-graph": self.update_graph,
        "client-desires-keys": self.client_desires_keys,
        "update-data": self.update_data,
        "report-key": self.report_on_key,
        "client-releases-keys": self.client_releases_keys,
        "heartbeat-client": self.client_heartbeat,
        "close-client": self.remove_client,
        "restart": self.restart,
    }
    # ... and request/response RPC handlers.
    self.handlers = {
        "register-client": self.add_client,
        "scatter": self.scatter,
        "register-worker": self.add_worker,
        "unregister": self.remove_worker,
        "gather": self.gather,
        "cancel": self.stimulus_cancel,
        "retry": self.stimulus_retry,
        "feed": self.feed,
        "terminate": self.close,
        "broadcast": self.broadcast,
        "proxy": self.proxy,
        "ncores": self.get_ncores,
        "has_what": self.get_has_what,
        "who_has": self.get_who_has,
        "processing": self.get_processing,
        "call_stack": self.get_call_stack,
        "profile": self.get_profile,
        "performance_report": self.performance_report,
        "get_logs": self.get_logs,
        "logs": self.get_logs,
        "worker_logs": self.get_worker_logs,
        "nbytes": self.get_nbytes,
        "versions": self.versions,
        "add_keys": self.add_keys,
        "rebalance": self.rebalance,
        "replicate": self.replicate,
        "start_ipython": self.start_ipython,
        "run_function": self.run_function,
        "update_data": self.update_data,
        "set_resources": self.add_resources,
        "retire_workers": self.retire_workers,
        "get_metadata": self.get_metadata,
        "set_metadata": self.set_metadata,
        "heartbeat_worker": self.heartbeat_worker,
        "get_task_status": self.get_task_status,
        "get_task_stream": self.get_task_stream,
        "register_worker_plugin": self.register_worker_plugin,
        "adaptive_target": self.adaptive_target,
        "workers_to_close": self.workers_to_close,
        "subscribe_worker_status": self.subscribe_worker_status,
    }
    # Task state-machine: maps (start, finish) state pairs to the
    # transition method implementing that edge.
    self._transitions = {
        ("released", "waiting"): self.transition_released_waiting,
        ("waiting", "released"): self.transition_waiting_released,
        ("waiting", "processing"): self.transition_waiting_processing,
        ("waiting", "memory"): self.transition_waiting_memory,
        ("processing", "released"): self.transition_processing_released,
        ("processing", "memory"): self.transition_processing_memory,
        ("processing", "erred"): self.transition_processing_erred,
        ("no-worker", "released"): self.transition_no_worker_released,
        ("no-worker", "waiting"): self.transition_no_worker_waiting,
        ("released", "forgotten"): self.transition_released_forgotten,
        ("memory", "forgotten"): self.transition_memory_forgotten,
        ("erred", "forgotten"): self.transition_released_forgotten,
        ("erred", "released"): self.transition_erred_released,
        ("memory", "released"): self.transition_memory_released,
        ("released", "erred"): self.transition_released_erred,
    }
    # Keep half the file-descriptor budget for connections.
    connection_limit = get_fileno_limit() / 2
    self._start_address = addresses_from_user_args(
        host=host,
        port=port,
        interface=interface,
        protocol=protocol,
        security=security,
        default_port=self.default_port,
    )
    super(Scheduler, self).__init__(
        handlers=self.handlers,
        stream_handlers=merge(worker_handlers, client_handlers),
        io_loop=self.loop,
        connection_limit=connection_limit,
        deserialize=False,
        connection_args=self.connection_args,
        **kwargs
    )
    if self.worker_ttl:
        pc = PeriodicCallback(self.check_worker_ttl, self.worker_ttl, io_loop=loop)
        self.periodic_callbacks["worker-ttl"] = pc
    if self.idle_timeout:
        pc = PeriodicCallback(self.check_idle, self.idle_timeout / 4, io_loop=loop)
        self.periodic_callbacks["idle-timeout"] = pc
    if extensions is None:
        extensions = list(DEFAULT_EXTENSIONS)
        if dask.config.get("distributed.scheduler.work-stealing"):
            extensions.append(WorkStealing)
    for ext in extensions:
        ext(self)
    setproctitle("dask-scheduler [not started]")
    Scheduler._instances.add(self)
##################
# Administration #
##################
def __repr__(self):
return '<Scheduler: "%s" processes: %d cores: %d>' % (
self.address,
len(self.workers),
self.total_nthreads,
)
def identity(self, comm=None):
""" Basic information about ourselves and our cluster """
d = {
"type": type(self).__name__,
"id": str(self.id),
"address": self.address,
"services": {key: v.port for (key, v) in self.services.items()},
"workers": {
worker.address: worker.identity() for worker in self.workers.values()
},
}
return d
def get_worker_service_addr(self, worker, service_name, protocol=False):
"""
Get the (host, port) address of the named service on the *worker*.
Returns None if the service doesn't exist.
Parameters
----------
worker : address
service_name : str
Common services include 'bokeh' and 'nanny'
protocol : boolean
Whether or not to include a full address with protocol (True)
or just a (host, port) pair
"""
ws = self.workers[worker]
port = ws.services.get(service_name)
if port is None:
return None
elif protocol:
return "%(protocol)s://%(host)s:%(port)d" % {
"protocol": ws.address.split("://")[0],
"host": ws.host,
"port": port,
}
else:
return ws.host, port
async def start(self):
    """ Clear out old state and restart all running coroutines """
    enable_gc_diagnosis()
    self.clear_task_state()
    # Cancel any worker coroutines left over from a previous run.
    with ignoring(AttributeError):
        for c in self._worker_coroutines:
            c.cancel()
    if self.status != "running":
        # Bind every configured listen address before declaring ourselves up.
        for addr in self._start_address:
            await self.listen(addr, listen_args=self.listen_args)
            self.ip = get_address_host(self.listen_address)
            listen_ip = self.ip
            if listen_ip == "0.0.0.0":
                listen_ip = ""
        if self.address.startswith("inproc://"):
            listen_ip = "localhost"
        # Services listen on all addresses
        self.start_services(listen_ip)
        self.status = "running"
        for listener in self.listeners:
            logger.info(" Scheduler at: %25s", listener.contact_address)
        for k, v in self.services.items():
            logger.info("%11s at: %25s", k, "%s:%d" % (listen_ip, v.port))
    self.loop.add_callback(self.reevaluate_occupancy)
    if self.scheduler_file:
        # Publish our contact info to a file for out-of-band discovery.
        with open(self.scheduler_file, "w") as f:
            json.dump(self.identity(), f, indent=2)

        fn = self.scheduler_file  # remove file when we close the process

        def del_scheduler_file():
            if os.path.exists(fn):
                os.remove(fn)

        weakref.finalize(self, del_scheduler_file)
    preload_modules(self.preload, parameter=self, argv=self.preload_argv)
    await asyncio.gather(*[plugin.start(self) for plugin in self.plugins])
    self.start_periodic_callbacks()
    setproctitle("dask-scheduler [%s]" % (self.address,))
    return self
async def close(self, comm=None, fast=False, close_workers=False):
    """ Send cleanup signal to all coroutines then wait until finished

    See Also
    --------
    Scheduler.cleanup
    """
    # Idempotent: a second close just waits on the first.
    if self.status.startswith("clos"):
        await self.finished()
        return
    self.status = "closing"
    logger.info("Scheduler closing...")
    setproctitle("dask-scheduler [closing]")
    if close_workers:
        # Ask workers (via their nannies) to shut down gracefully, then
        # poll up to ~1 second for them to disappear.
        await self.broadcast(msg={"op": "close_gracefully"}, nanny=True)
        for worker in self.workers:
            self.worker_send(worker, {"op": "close"})
        for i in range(20):  # wait a second for send signals to clear
            if self.workers:
                await asyncio.sleep(0.05)
            else:
                break
    await asyncio.gather(*[plugin.close() for plugin in self.plugins])
    for pc in self.periodic_callbacks.values():
        pc.stop()
    self.periodic_callbacks.clear()
    self.stop_services()
    for ext in self.extensions.values():
        with ignoring(AttributeError):
            ext.teardown()
    logger.info("Scheduler closing all comms")
    futures = []
    for w, comm in list(self.stream_comms.items()):
        if not comm.closed():
            comm.send({"op": "close", "report": False})
            comm.send({"op": "close-stream"})
        with ignoring(AttributeError):
            futures.append(comm.close())
    for future in futures:  # TODO: do all at once
        await future
    for comm in self.client_comms.values():
        comm.abort()
    await self.rpc.close()
    self.status = "closed"
    self.stop()
    await super(Scheduler, self).close()
    setproctitle("dask-scheduler [closed]")
    disable_gc_diagnosis()
async def close_worker(self, stream=None, worker=None, safe=None):
    """ Remove a worker from the cluster

    This both removes the worker from our local state and also sends a
    signal to the worker to shut down.  This works regardless of whether or
    not the worker has a nanny process restarting it
    """
    logger.info("Closing worker %s", worker)
    with log_errors():
        self.log_event(worker, {"action": "close-worker"})
        nanny_addr = self.workers[worker].nanny
        address = nanny_addr or worker
        # NOTE(review): ``address`` is computed above but never used —
        # ``remove_worker`` below is called with the worker address, not
        # the nanny address.  Looks like dead code, but removing the
        # lookup would also remove a KeyError for unknown workers;
        # confirm intent before cleaning up.
        self.worker_send(worker, {"op": "close", "report": False})
        self.remove_worker(address=worker, safe=safe)
###########
# Stimuli #
###########
def heartbeat_worker(
    self,
    comm=None,
    address=None,
    resolve_address=True,
    now=None,
    resources=None,
    host_info=None,
    metrics=None,
):
    """Process a periodic heartbeat from a worker.

    Updates last-seen timestamps, folds the reported bandwidth numbers
    into exponential moving averages, and refreshes the worker's metrics,
    host info and resources.  Returns a status dict telling the worker
    its next heartbeat interval, or ``{"status": "missing"}`` if the
    worker is unknown.
    """
    address = self.coerce_address(address, resolve_address)
    address = normalize_address(address)
    if address not in self.workers:
        return {"status": "missing"}
    host = get_address_host(address)
    local_now = time()
    now = now or time()
    assert metrics
    host_info = host_info or {}
    self.host_info[host]["last-seen"] = local_now
    # Weight each worker's report by 1/N so all workers contribute
    # equally to the cluster-wide bandwidth EMA.
    frac = 1 / len(self.workers)
    self.bandwidth = (
        self.bandwidth * (1 - frac) + metrics["bandwidth"]["total"] * frac
    )
    # Per worker-pair bandwidth EMA (first observation seeds the average).
    for other, (bw, count) in metrics["bandwidth"]["workers"].items():
        if (address, other) not in self.bandwidth_workers:
            self.bandwidth_workers[address, other] = bw / count
        else:
            alpha = (1 - frac) ** count
            self.bandwidth_workers[address, other] = self.bandwidth_workers[
                address, other
            ] * alpha + bw * (1 - alpha)
    # Per data-type bandwidth EMA, same scheme as above.
    for typ, (bw, count) in metrics["bandwidth"]["types"].items():
        if typ not in self.bandwidth_types:
            self.bandwidth_types[typ] = bw / count
        else:
            alpha = (1 - frac) ** count
            self.bandwidth_types[typ] = self.bandwidth_types[typ] * alpha + bw * (
                1 - alpha
            )
    ws = self.workers[address]
    ws.last_seen = time()
    if metrics:
        ws.metrics = metrics
    if host_info:
        self.host_info[host].update(host_info)
    # Estimate clock skew / transit delay between worker and scheduler.
    delay = time() - now
    ws.time_delay = delay
    if resources:
        self.add_resources(worker=address, resources=resources)
    self.log_event(address, merge({"action": "heartbeat"}, metrics))
    return {
        "status": "OK",
        "time": time(),
        "heartbeat-interval": heartbeat_interval(len(self.workers)),
    }
async def add_worker(
    self,
    comm=None,
    address=None,
    keys=(),
    nthreads=None,
    name=None,
    resolve_address=True,
    nbytes=None,
    types=None,
    now=None,
    resources=None,
    host_info=None,
    memory_limit=None,
    metrics=None,
    pid=0,
    services=None,
    local_directory=None,
    versions=None,
    nanny=None,
    extra=None,
):
    """ Add a new worker to the cluster """
    with log_errors():
        address = self.coerce_address(address, resolve_address)
        address = normalize_address(address)
        host = get_address_host(address)
        ws = self.workers.get(address)
        if ws is not None:
            raise ValueError("Worker already exists %s" % ws)
        # Reject duplicate worker names before registering any state.
        if name in self.aliases:
            msg = {
                "status": "error",
                "message": "name taken, %s" % name,
                "time": time(),
            }
            if comm:
                await comm.write(msg)
            return
        self.workers[address] = ws = WorkerState(
            address=address,
            pid=pid,
            nthreads=nthreads,
            memory_limit=memory_limit,
            name=name,
            local_directory=local_directory,
            services=services,
            versions=versions,
            nanny=nanny,
            extra=extra,
        )
        if "addresses" not in self.host_info[host]:
            self.host_info[host].update({"addresses": set(), "nthreads": 0})
        self.host_info[host]["addresses"].add(address)
        self.host_info[host]["nthreads"] += nthreads
        self.total_nthreads += nthreads
        self.aliases[name] = address
        # Seed metrics/bandwidth state with the registration heartbeat.
        # NOTE(review): ``response`` is not used afterwards.
        response = self.heartbeat_worker(
            address=address,
            resolve_address=resolve_address,
            now=now,
            resources=resources,
            host_info=host_info,
            metrics=metrics,
        )
        # Do not need to adjust self.total_occupancy as self.occupancy[ws] cannot exist before this.
        self.check_idle_saturated(ws)
        # for key in keys:  # TODO
        #     self.mark_key_in_memory(key, [address])
        self.stream_comms[address] = BatchedSend(interval="5ms", loop=self.loop)
        if ws.nthreads > len(ws.processing):
            self.idle.add(ws)
        for plugin in self.plugins[:]:
            try:
                plugin.add_worker(scheduler=self, worker=address)
            except Exception as e:
                logger.exception(e)
        # The worker may arrive already holding data (e.g. after a
        # scheduler restart); move those tasks to memory.
        if nbytes:
            for key in nbytes:
                ts = self.tasks.get(key)
                if ts is not None and ts.state in ("processing", "waiting"):
                    recommendations = self.transition(
                        key,
                        "memory",
                        worker=address,
                        nbytes=nbytes[key],
                        typename=types[key],
                    )
                    self.transitions(recommendations)
        # Previously unrunnable tasks may now have a valid worker.
        recommendations = {}
        for ts in list(self.unrunnable):
            valid = self.valid_workers(ts)
            if valid is True or ws in valid:
                recommendations[ts.key] = "waiting"
        if recommendations:
            self.transitions(recommendations)
        self.log_event(address, {"action": "add-worker"})
        self.log_event("all", {"action": "add-worker", "worker": address})
        logger.info("Register worker %s", ws)
        msg = {
            "status": "OK",
            "time": time(),
            "heartbeat-interval": heartbeat_interval(len(self.workers)),
            "worker-plugins": self.worker_plugins,
        }
        version_warning = version_module.error_message(
            version_module.get_versions(),
            merge(
                {w: ws.versions for w, ws in self.workers.items()},
                {c: cs.versions for c, cs in self.clients.items() if cs.versions},
            ),
            versions,
            client_name="This Worker",
        )
        if version_warning:
            msg["warning"] = version_warning
        if comm:
            await comm.write(msg)
        # Hand the comm over to the long-lived worker message loop.
        await self.handle_worker(comm=comm, worker=address)
def update_graph(
    self,
    client=None,
    tasks=None,
    keys=None,
    dependencies=None,
    restrictions=None,
    priority=None,
    loose_restrictions=None,
    resources=None,
    submitting_task=None,
    retries=None,
    user_priority=0,
    actors=None,
    fifo_timeout=0,
):
    """
    Add new computations to the internal dask graph

    This happens whenever the Client calls submit, map, get, or compute.
    """
    start = time()
    fifo_timeout = parse_timedelta(fifo_timeout)
    keys = set(keys)
    if len(tasks) > 1:
        self.log_event(
            ["all", client], {"action": "update_graph", "count": len(tasks)}
        )
    # Remove aliases
    for k in list(tasks):
        if tasks[k] is k:
            del tasks[k]
    dependencies = dependencies or {}
    # Iterate to a fixed point: cancelling one task with lost
    # dependencies can invalidate others that depend on it.
    n = 0
    while len(tasks) != n:  # walk through new tasks, cancel any bad deps
        n = len(tasks)
        for k, deps in list(dependencies.items()):
            if any(
                dep not in self.tasks and dep not in tasks for dep in deps
            ):  # bad key
                logger.info("User asked for computation on lost data, %s", k)
                del tasks[k]
                del dependencies[k]
                if k in keys:
                    keys.remove(k)
                self.report({"op": "cancelled-key", "key": k}, client=client)
                self.client_releases_keys(keys=[k], client=client)
    # Remove any self-dependencies (happens on test_publish_bag() and others)
    for k, v in dependencies.items():
        deps = set(v)
        if k in deps:
            deps.remove(k)
        dependencies[k] = deps
    # Avoid computation that is already finished
    already_in_memory = set()  # tasks that are already done
    for k, v in dependencies.items():
        if v and k in self.tasks and self.tasks[k].state in ("memory", "erred"):
            already_in_memory.add(k)
    if already_in_memory:
        # Walk upward from finished tasks and drop any ancestor whose
        # dependents are all already done.
        dependents = dask.core.reverse_dict(dependencies)
        stack = list(already_in_memory)
        done = set(already_in_memory)
        while stack:  # remove unnecessary dependencies
            key = stack.pop()
            ts = self.tasks[key]
            try:
                deps = dependencies[key]
            except KeyError:
                deps = self.dependencies[key]
            for dep in deps:
                if dep in dependents:
                    child_deps = dependents[dep]
                else:
                    child_deps = self.dependencies[dep]
                if all(d in done for d in child_deps):
                    if dep in self.tasks and dep not in done:
                        done.add(dep)
                        stack.append(dep)
        for d in done:
            tasks.pop(d, None)
            dependencies.pop(d, None)
    # Get or create task states
    stack = list(keys)
    touched_keys = set()
    touched_tasks = []
    while stack:
        k = stack.pop()
        if k in touched_keys:
            continue
        # XXX Have a method get_task_state(self, k) ?
        ts = self.tasks.get(k)
        if ts is None:
            ts = self.new_task(k, tasks.get(k), "released")
        elif not ts.run_spec:
            ts.run_spec = tasks.get(k)
        touched_keys.add(k)
        touched_tasks.append(ts)
        stack.extend(dependencies.get(k, ()))
    self.client_desires_keys(keys=keys, client=client)
    # Add dependencies
    for key, deps in dependencies.items():
        ts = self.tasks.get(key)
        if ts is None or ts.dependencies:
            continue
        for dep in deps:
            dts = self.tasks[dep]
            ts.add_dependency(dts)
    # Compute priorities
    if isinstance(user_priority, Number):
        user_priority = {k: user_priority for k in tasks}
    # Add actors
    if actors is True:
        actors = list(keys)
    for actor in actors or []:
        self.tasks[actor].actor = True
    priority = priority or dask.order.order(
        tasks
    )  # TODO: define order wrt old graph
    if submitting_task:  # sub-tasks get better priority than parent tasks
        ts = self.tasks.get(submitting_task)
        if ts is not None:
            generation = ts.priority[0] - 0.01
        else:  # super-task already cleaned up
            generation = self.generation
    elif self._last_time + fifo_timeout < start:
        self.generation += 1  # older graph generations take precedence
        generation = self.generation
        self._last_time = start
    else:
        generation = self.generation
    # Final priority tuple: (user priority desc, submission generation,
    # intra-graph dask.order rank).
    for key in set(priority) & touched_keys:
        ts = self.tasks[key]
        if ts.priority is None:
            ts.priority = (-(user_priority.get(key, 0)), generation, priority[key])
    # Ensure all runnables have a priority
    runnables = [ts for ts in touched_tasks if ts.run_spec]
    for ts in runnables:
        if ts.priority is None and ts.run_spec:
            ts.priority = (self.generation, 0)
    if restrictions:
        # *restrictions* is a dict keying task ids to lists of
        # restriction specifications (either worker names or addresses)
        for k, v in restrictions.items():
            if v is None:
                continue
            ts = self.tasks.get(k)
            if ts is None:
                continue
            ts.host_restrictions = set()
            ts.worker_restrictions = set()
            for w in v:
                try:
                    w = self.coerce_address(w)
                except ValueError:
                    # Not a valid address, but perhaps it's a hostname
                    ts.host_restrictions.add(w)
                else:
                    ts.worker_restrictions.add(w)
        if loose_restrictions:
            for k in loose_restrictions:
                ts = self.tasks[k]
                ts.loose_restrictions = True
    if resources:
        for k, v in resources.items():
            if v is None:
                continue
            assert isinstance(v, dict)
            ts = self.tasks.get(k)
            if ts is None:
                continue
            ts.resource_restrictions = v
    if retries:
        for k, v in retries.items():
            assert isinstance(v, int)
            ts = self.tasks.get(k)
            if ts is None:
                continue
            ts.retries = v
    # Compute recommendations
    recommendations = OrderedDict()
    for ts in sorted(runnables, key=operator.attrgetter("priority"), reverse=True):
        if ts.state == "released" and ts.run_spec:
            recommendations[ts.key] = "waiting"
    # Tasks depending on an already-erred task inherit the blame and err
    # immediately.
    for ts in touched_tasks:
        for dts in ts.dependencies:
            if dts.exception_blame:
                ts.exception_blame = dts.exception_blame
                recommendations[ts.key] = "erred"
                break
    for plugin in self.plugins[:]:
        try:
            plugin.update_graph(
                self,
                client=client,
                tasks=tasks,
                keys=keys,
                restrictions=restrictions or {},
                dependencies=dependencies,
                priority=priority,
                loose_restrictions=loose_restrictions,
                resources=resources,
            )
        except Exception as e:
            logger.exception(e)
    self.transitions(recommendations)
    for ts in touched_tasks:
        if ts.state in ("memory", "erred"):
            self.report_on_key(ts.key, client=client)
    end = time()
    if self.digests is not None:
        self.digests["update-graph-duration"].add(end - start)
    # TODO: balance workers
def new_task(self, key, spec, state):
    """Create a TaskState for *key* (run spec *spec*, initial *state*)
    and register it with its TaskPrefix / TaskGroup bookkeeping."""
    ts = TaskState(key, spec)
    ts._state = state
    prefix_key = key_split(key)
    tp = self.task_prefixes.get(prefix_key)
    if tp is None:
        # First task seen with this prefix: create the prefix record.
        tp = self.task_prefixes[prefix_key] = TaskPrefix(prefix_key)
    ts.prefix = tp
    group_key = ts.group_key
    tg = self.task_groups.get(group_key)
    if tg is None:
        # First task in this group: create the group and attach it to
        # its prefix.
        tg = self.task_groups[group_key] = TaskGroup(group_key)
        tg.prefix = tp
        tp.groups.append(tg)
    tg.add(ts)
    self.tasks[key] = ts
    return ts
def stimulus_task_finished(self, key=None, worker=None, **kwargs):
    """Handle a worker's report that *key* finished; return transition
    recommendations (empty dict if nothing to do)."""
    logger.debug("Stimulus task finished %s, %s", key, worker)
    task = self.tasks.get(key)
    if task is None:
        return {}
    wstate = self.workers[worker]
    if task.state != "processing":
        # Stale report: the task already moved past processing.
        logger.debug(
            "Received already computed task, worker: %s, state: %s"
            ", key: %s, who_has: %s",
            worker,
            task.state,
            key,
            task.who_has,
        )
        if wstate not in task.who_has:
            # Worker holds data we no longer track there; tell it to drop it.
            self.worker_send(worker, {"op": "release-task", "key": key})
        return {}
    recommendations = self.transition(key, "memory", worker=worker, **kwargs)
    if task.state == "memory":
        assert wstate in task.who_has
    return recommendations
def stimulus_task_erred(
    self, key=None, worker=None, exception=None, traceback=None, **kwargs
):
    """Handle a worker's report that *key* raised; return transition
    recommendations (empty dict if the task is unknown or not processing)."""
    logger.debug("Stimulus task erred %s, %s", key, worker)
    task = self.tasks.get(key)
    if task is None:
        return {}
    if task.state != "processing":
        return {}
    remaining = task.retries
    if remaining > 0:
        # Consume one retry and put the task back in the queue.
        task.retries = remaining - 1
        return self.transition(key, "waiting")
    return self.transition(
        key,
        "erred",
        cause=key,
        exception=exception,
        traceback=traceback,
        worker=worker,
        **kwargs
    )
def stimulus_missing_data(
    self, cause=None, key=None, worker=None, ensure=True, **kwargs
):
    """ Mark that certain keys have gone missing.  Recover. """
    with log_errors():
        logger.debug("Stimulus missing data %s, %s", key, worker)
        ts = self.tasks.get(key)
        if ts is None or ts.state == "memory":
            return {}
        cts = self.tasks.get(cause)
        recommendations = OrderedDict()
        if cts is not None and cts.state == "memory":  # couldn't find this
            # Forget every replica of the missing cause so it gets
            # recomputed from scratch.
            for ws in cts.who_has:  # TODO: this behavior is extreme
                ws.has_what.remove(cts)
                ws.nbytes -= cts.get_nbytes()
            cts.who_has.clear()
            recommendations[cause] = "released"
        if key:
            recommendations[key] = "released"
        self.transitions(recommendations)
        if self.validate:
            assert cause not in self.who_has
        return {}
def stimulus_retry(self, comm=None, keys=None, client=None):
    """Retry erred keys: walk each key down through erred dependencies to
    the root failures and recommend those back to 'waiting'.  Returns the
    tuple of all keys visited."""
    logger.info("Client %s requests to retry %d keys", client, len(keys))
    if client:
        self.log_event(client, {"action": "retry", "count": len(keys)})
    pending = list(keys)
    seen = set()
    roots = []
    while pending:
        key = pending.pop()
        seen.add(key)
        erred_deps = [
            dts.key for dts in self.tasks[key].dependencies if dts.state == "erred"
        ]
        if erred_deps:
            # The failure originated further down; keep descending.
            pending.extend(erred_deps)
        else:
            roots.append(key)
    self.transitions({key: "waiting" for key in roots})
    if self.validate:
        for key in seen:
            assert not self.tasks[key].exception_blame
    return tuple(seen)
def remove_worker(self, comm=None, address=None, safe=False, close=True):
    """
    Remove worker from cluster

    We do this when a worker reports that it plans to leave or when it
    appears to be unresponsive.  This may send its tasks back to a released
    state.
    """
    with log_errors():
        if self.status == "closed":
            return
        address = self.coerce_address(address)
        if address not in self.workers:
            return "already-removed"
        host = get_address_host(address)
        ws = self.workers[address]
        self.log_event(
            ["all", address],
            {
                "action": "remove-worker",
                "worker": address,
                "processing-tasks": dict(ws.processing),
            },
        )
        logger.info("Remove worker %s", ws)
        if close:
            with ignoring(AttributeError, CommClosedError):
                self.stream_comms[address].send({"op": "close", "report": False})
        # Unwind every piece of per-worker scheduler state.
        self.remove_resources(address)
        self.host_info[host]["nthreads"] -= ws.nthreads
        self.host_info[host]["addresses"].remove(address)
        self.total_nthreads -= ws.nthreads
        if not self.host_info[host]["addresses"]:
            del self.host_info[host]
        self.rpc.remove(address)
        del self.stream_comms[address]
        del self.aliases[ws.name]
        self.idle.discard(ws)
        self.saturated.discard(ws)
        del self.workers[address]
        ws.status = "closed"
        self.total_occupancy -= ws.occupancy
        recommendations = OrderedDict()
        # Tasks that were running there go back to released — unless they
        # have now failed on too many workers, in which case they err with
        # KilledWorker.
        for ts in list(ws.processing):
            k = ts.key
            recommendations[k] = "released"
            if not safe:
                ts.suspicious += 1
                if ts.suspicious > self.allowed_failures:
                    del recommendations[k]
                    e = pickle.dumps(
                        KilledWorker(task=k, last_worker=ws.clean()), -1
                    )
                    r = self.transition(k, "erred", exception=e, cause=k)
                    recommendations.update(r)
        # Data held only on this worker must be recomputed (if it has a
        # run spec) or forgotten (pure scattered data).
        for ts in ws.has_what:
            ts.who_has.remove(ws)
            if not ts.who_has:
                if ts.run_spec:
                    recommendations[ts.key] = "released"
                else:  # pure data
                    recommendations[ts.key] = "forgotten"
        ws.has_what.clear()
        self.transitions(recommendations)
        for plugin in self.plugins[:]:
            try:
                plugin.remove_worker(scheduler=self, worker=address)
            except Exception as e:
                logger.exception(e)
        if not self.workers:
            logger.info("Lost all workers")
        # Drop bandwidth estimates involving the departed worker.
        for w in self.workers:
            self.bandwidth_workers.pop((address, w), None)
            self.bandwidth_workers.pop((w, address), None)

        def remove_worker_from_events():
            # If the worker isn't registered anymore after the delay, remove from events
            if address not in self.workers and address in self.events:
                del self.events[address]

        cleanup_delay = parse_timedelta(
            dask.config.get("distributed.scheduler.events-cleanup-delay")
        )
        self.loop.call_later(cleanup_delay, remove_worker_from_events)
        logger.debug("Removed worker %s", ws)
    return "OK"
def stimulus_cancel(self, comm, keys=None, client=None, force=False):
    """Cancel computation of every key in *keys* on behalf of *client*."""
    logger.info("Client %s requests to cancel %d keys", client, len(keys))
    if client:
        event = {"action": "cancel", "count": len(keys), "force": force}
        self.log_event(client, event)
    for k in keys:
        self.cancel_key(k, client, force=force)
def cancel_key(self, key, client, retries=5, force=False):
""" Cancel a particular key and all dependents """
# TODO: this should be converted to use the transition mechanism
ts = self.tasks.get(key)
try:
cs = self.clients[client]
except KeyError:
return
if ts is None or not ts.who_wants: # no key yet, lets try again in a moment
if retries:
self.loop.call_later(
0.2, lambda: self.cancel_key(key, client, retries - 1)
)
return
if force or ts.who_wants == {cs}: # no one else wants this key
for dts in list(ts.dependents):
self.cancel_key(dts.key, client, force=force)
logger.info("Scheduler cancels key %s. Force=%s", key, force)
self.report({"op": "cancelled-key", "key": key})
clients = list(ts.who_wants) if force else [cs]
for c in clients:
self.client_releases_keys(keys=[key], client=c.client_key)
def client_desires_keys(self, keys=None, client=None):
    """Record that *client* wants each key in *keys*, lazily creating
    ClientState / TaskState records as needed (publish, queues, etc.)."""
    cs = self.clients.get(client)
    if cs is None:
        # Client not registered yet (publish, queues, ...): create it.
        cs = self.clients[client] = ClientState(client)
    for key in keys:
        ts = self.tasks.get(key)
        if ts is None:
            # Unknown task: create a released placeholder.
            ts = self.new_task(key, None, "released")
        ts.who_wants.add(cs)
        cs.wants_what.add(ts)
        if ts.state in ("memory", "erred"):
            # Already-finished keys are reported back immediately.
            self.report_on_key(key, client=client)
def client_releases_keys(self, keys=None, client=None):
    """ Remove keys from client desired list """
    logger.debug("Client %s releases keys: %s", client, keys)
    cs = self.clients[client]
    # Phase 1: detach the client from each task; collect tasks nobody
    # wants any more.
    unwanted = set()
    for key in list(keys):
        ts = self.tasks.get(key)
        if ts is None or ts not in cs.wants_what:
            continue
        cs.wants_what.remove(ts)
        wanters = ts.who_wants
        wanters.remove(cs)
        if not wanters:
            unwanted.add(ts)
    # Phase 2: decide the fate of the now-unwanted tasks.
    recommendations = {}
    for ts in unwanted:
        if not ts.dependents:
            # No live dependents, can forget
            recommendations[ts.key] = "forgotten"
        elif ts.state != "erred" and not ts.waiters:
            recommendations[ts.key] = "released"
    self.transitions(recommendations)
    def client_heartbeat(self, client=None):
        """ Handle heartbeats from Client

        Only refreshes the client's ``last_seen`` timestamp; raises
        ``KeyError`` for unknown clients.
        """
        self.clients[client].last_seen = time()
###################
# Task Validation #
###################
    def validate_released(self, key):
        # Invariants for a "released" task: no waiters/dependers reference it,
        # no worker holds or computes it, and it is not marked unrunnable.
        ts = self.tasks[key]
        assert ts.state == "released"
        assert not ts.waiters
        assert not ts.waiting_on
        assert not ts.who_has
        assert not ts.processing_on
        # A released task must not appear in any dependency's waiter set
        assert not any(ts in dts.waiters for dts in ts.dependencies)
        assert ts not in self.unrunnable
    def validate_waiting(self, key):
        # Invariants for a "waiting" task: it has pending dependencies,
        # no worker holds or computes it yet, and it is not unrunnable.
        ts = self.tasks[key]
        assert ts.waiting_on
        assert not ts.who_has
        assert not ts.processing_on
        assert ts not in self.unrunnable
        for dts in ts.dependencies:
            # We are waiting on a dependency iff it's not stored
            assert bool(dts.who_has) + (dts in ts.waiting_on) == 1
            assert ts in dts.waiters  # XXX even if dts.who_has?
    def validate_processing(self, key):
        # Invariants for a "processing" task: it is assigned to exactly one
        # worker, has nothing left to wait on, and every dependency is in
        # memory somewhere while still tracking this task as a waiter.
        ts = self.tasks[key]
        assert not ts.waiting_on
        ws = ts.processing_on
        assert ws
        assert ts in ws.processing
        assert not ts.who_has
        for dts in ts.dependencies:
            assert dts.who_has
            assert ts in dts.waiters
    def validate_memory(self, key):
        # Invariants for a task in "memory": at least one worker holds it,
        # nothing is computing it, and dependent bookkeeping is consistent.
        ts = self.tasks[key]
        assert ts.who_has
        assert not ts.processing_on
        assert not ts.waiting_on
        assert ts not in self.unrunnable
        for dts in ts.dependents:
            # A dependent counts as a waiter exactly while it is in flight
            assert (dts in ts.waiters) == (dts.state in ("waiting", "processing"))
            assert ts not in dts.waiting_on
def validate_no_worker(self, key):
ts = self.tasks[key]
assert ts in self.unrunnable
assert not ts.waiting_on
assert ts in self.unrunnable
assert not ts.processing_on
assert not ts.who_has
for dts in ts.dependencies:
assert dts.who_has
    def validate_erred(self, key):
        # Invariants for an "erred" task: some task is blamed for the
        # exception and no worker still claims to hold the result.
        ts = self.tasks[key]
        assert ts.exception_blame
        assert not ts.who_has
    def validate_key(self, key, ts=None):
        """Run per-state invariant checks for a single task.

        Dispatches to ``validate_<state>`` based on the task's current
        state (dashes mapped to underscores).  Unknown keys are only
        logged; validation failures are re-raised (optionally dropping
        into pdb when LOG_PDB is set).
        """
        try:
            if ts is None:
                ts = self.tasks.get(key)
            if ts is None:
                logger.debug("Key lost: %s", key)
            else:
                ts.validate()
                try:
                    func = getattr(self, "validate_" + ts.state.replace("-", "_"))
                except AttributeError:
                    logger.error(
                        "self.validate_%s not found", ts.state.replace("-", "_")
                    )
                else:
                    func(key)
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb

                pdb.set_trace()
            raise
    def validate_state(self, allow_overlap=False):
        """Check global scheduler invariants across tasks, workers, clients.

        Verifies worker/comm registries agree, per-object bookkeeping is
        consistent, byte and occupancy accounting add up, and runs
        ``validate_key`` on every task.

        NOTE(review): ``allow_overlap`` is accepted but never used in this
        body — presumably consumed by the module-level ``validate_state``
        helper in other versions; confirm before removing.
        """
        validate_state(self.tasks, self.workers, self.clients)
        if not (set(self.workers) == set(self.stream_comms)):
            raise ValueError("Workers not the same in all collections")
        for w, ws in self.workers.items():
            assert isinstance(w, str), (type(w), w)
            assert isinstance(ws, WorkerState), (type(ws), ws)
            assert ws.address == w
            if not ws.processing:
                # A worker with nothing to do must be idle with no occupancy
                assert not ws.occupancy
                assert ws in self.idle
        for k, ts in self.tasks.items():
            assert isinstance(ts, TaskState), (type(ts), ts)
            assert ts.key == k
            self.validate_key(k, ts)
        for c, cs in self.clients.items():
            # client=None is often used in tests...
            assert c is None or isinstance(c, str), (type(c), c)
            assert isinstance(cs, ClientState), (type(cs), cs)
            assert cs.client_key == c
        # Worker nbytes counters must equal the sum of their stored tasks
        a = {w: ws.nbytes for w, ws in self.workers.items()}
        b = {
            w: sum(ts.get_nbytes() for ts in ws.has_what)
            for w, ws in self.workers.items()
        }
        assert a == b, (a, b)
        # Occupancy totals checked with a small float tolerance
        actual_total_occupancy = 0
        for worker, ws in self.workers.items():
            assert abs(sum(ws.processing.values()) - ws.occupancy) < 1e-8
            actual_total_occupancy += ws.occupancy
        assert abs(actual_total_occupancy - self.total_occupancy) < 1e-8, (
            actual_total_occupancy,
            self.total_occupancy,
        )
###################
# Manage Messages #
###################
    def report(self, msg, ts=None, client=None):
        """
        Publish updates to all listening Queues and Comms

        If the message contains a key then we only send the message to those
        comms that care about the key.

        Parameters
        ----------
        msg: dict
            Message to send; ``msg["key"]`` (if present) scopes delivery.
        ts: TaskState, optional
            Task whose interested clients should receive the message.
        client: str, optional
            Additional client id to always include.
        """
        comms = set()
        if client is not None:
            try:
                comms.add(self.client_comms[client])
            except KeyError:
                pass
        if ts is None and "key" in msg:
            ts = self.tasks.get(msg["key"])
        if ts is None:
            # Notify all clients
            comms |= set(self.client_comms.values())
        else:
            # Notify clients interested in key
            comms |= {
                self.client_comms[c.client_key]
                for c in ts.who_wants
                if c.client_key in self.client_comms
            }
        for c in comms:
            try:
                c.send(msg)
                # logger.debug("Scheduler sends message to client %s", msg)
            except CommClosedError:
                # Closed comms are only alarming while we're still running
                if self.status == "running":
                    logger.critical("Tried writing to closed comm: %s", msg)
    async def add_client(self, comm, client=None, versions=None):
        """ Add client to network

        We listen to all future messages from this Comm.

        Registers the client, notifies plugins, wraps the comm in a
        BatchedSend, emits a ``stream-start`` (possibly with a version
        mismatch warning), then blocks in ``handle_stream`` until the
        client disconnects, at which point state is torn down.
        """
        assert client is not None
        comm.name = "Scheduler->Client"
        logger.info("Receive client connection: %s", client)
        self.log_event(["all", client], {"action": "add-client", "client": client})
        self.clients[client] = ClientState(client, versions=versions)
        for plugin in self.plugins[:]:
            try:
                plugin.add_client(scheduler=self, client=client)
            except Exception as e:
                logger.exception(e)
        try:
            bcomm = BatchedSend(interval="2ms", loop=self.loop)
            bcomm.start(comm)
            self.client_comms[client] = bcomm
            msg = {"op": "stream-start"}
            # Warn the client if its package versions disagree with
            # the scheduler's or any worker's
            version_warning = version_module.error_message(
                version_module.get_versions(),
                {w: ws.versions for w, ws in self.workers.items()},
                versions,
            )
            if version_warning:
                msg["warning"] = version_warning
            bcomm.send(msg)
            try:
                await self.handle_stream(comm=comm, extra={"client": client})
            finally:
                self.remove_client(client=client)
                logger.debug("Finished handling client %s", client)
        finally:
            if not comm.closed():
                self.client_comms[client].send({"op": "stream-closed"})
            try:
                if not shutting_down():
                    await self.client_comms[client].close()
                    del self.client_comms[client]
                    if self.status == "running":
                        logger.info("Close client connection: %s", client)
            except TypeError:  # comm becomes None during GC
                pass
    def remove_client(self, client=None):
        """ Remove client from network

        Releases every key the client wanted, drops its state, notifies
        plugins, and schedules a delayed cleanup of its event log.
        """
        if self.status == "running":
            logger.info("Remove client %s", client)
        self.log_event(["all", client], {"action": "remove-client", "client": client})
        try:
            cs = self.clients[client]
        except KeyError:
            # XXX is this a legitimate condition?
            pass
        else:
            self.client_releases_keys(
                keys=[ts.key for ts in cs.wants_what], client=cs.client_key
            )
            del self.clients[client]
            for plugin in self.plugins[:]:
                try:
                    plugin.remove_client(scheduler=self, client=client)
                except Exception as e:
                    logger.exception(e)

        def remove_client_from_events():
            # If the client isn't registered anymore after the delay, remove from events
            if client not in self.clients and client in self.events:
                del self.events[client]

        cleanup_delay = parse_timedelta(
            dask.config.get("distributed.scheduler.events-cleanup-delay")
        )
        # Defer event-log removal so a quickly reconnecting client keeps it
        self.loop.call_later(cleanup_delay, remove_client_from_events)
    def send_task_to_worker(self, worker, key):
        """ Send a single computational task to a worker

        Builds a ``compute-task`` message with priority, duration
        estimate, resource/actor flags, dependency locations and sizes,
        and the run spec, then ships it over the worker's batched stream.
        """
        try:
            ts = self.tasks[key]
            msg = {
                "op": "compute-task",
                "key": key,
                "priority": ts.priority,
                "duration": self.get_task_duration(ts),
            }
            if ts.resource_restrictions:
                msg["resource_restrictions"] = ts.resource_restrictions
            if ts.actor:
                msg["actor"] = True
            deps = ts.dependencies
            if deps:
                # Tell the worker where to fetch each dependency from
                msg["who_has"] = {
                    dep.key: [ws.address for ws in dep.who_has] for dep in deps
                }
                msg["nbytes"] = {dep.key: dep.nbytes for dep in deps}
                if self.validate and deps:
                    assert all(msg["who_has"].values())
            task = ts.run_spec
            # run_spec may already be a dict of message fields, or an opaque task
            if type(task) is dict:
                msg.update(task)
            else:
                msg["task"] = task
            self.worker_send(worker, msg)
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb

                pdb.set_trace()
            raise
def handle_uncaught_error(self, **msg):
logger.exception(clean_exception(**msg)[1])
def handle_task_finished(self, key=None, worker=None, **msg):
if worker not in self.workers:
return
validate_key(key)
r = self.stimulus_task_finished(key=key, worker=worker, **msg)
self.transitions(r)
def handle_task_erred(self, key=None, **msg):
r = self.stimulus_task_erred(key=key, **msg)
self.transitions(r)
def handle_release_data(self, key=None, worker=None, client=None, **msg):
ts = self.tasks.get(key)
if ts is None:
return
ws = self.workers[worker]
if ts.processing_on != ws:
return
r = self.stimulus_missing_data(key=key, ensure=False, **msg)
self.transitions(r)
    def handle_missing_data(self, key=None, errant_worker=None, **kwargs):
        """Handle a report that *errant_worker* lacks data it should hold.

        Removes the replica record; if no worker holds the key anymore
        the task is released (recomputable) or forgotten (no run spec).
        Extra keyword arguments are accepted and ignored.
        """
        logger.debug("handle missing data key=%s worker=%s", key, errant_worker)
        self.log.append(("missing", key, errant_worker))
        ts = self.tasks.get(key)
        if ts is None or not ts.who_has:
            return
        if errant_worker in self.workers:
            ws = self.workers[errant_worker]
            if ws in ts.who_has:
                ts.who_has.remove(ws)
                ws.has_what.remove(ts)
                ws.nbytes -= ts.get_nbytes()
        if not ts.who_has:
            # Last replica gone: recompute if possible, otherwise forget
            if ts.run_spec:
                self.transitions({key: "released"})
            else:
                self.transitions({key: "forgotten"})
def release_worker_data(self, stream=None, keys=None, worker=None):
ws = self.workers[worker]
tasks = {self.tasks[k] for k in keys}
removed_tasks = tasks & ws.has_what
ws.has_what -= removed_tasks
recommendations = {}
for ts in removed_tasks:
ws.nbytes -= ts.get_nbytes()
wh = ts.who_has
wh.remove(ws)
if not wh:
recommendations[ts.key] = "released"
if recommendations:
self.transitions(recommendations)
    def handle_long_running(self, key=None, worker=None, compute_duration=None):
        """ A task has seceded from the thread pool

        We stop the task from being stolen in the future, and change task
        duration accounting as if the task has stopped.

        ``compute_duration``, if given, is folded into the task prefix's
        running duration average with a 0.5/0.5 exponential blend.
        """
        ts = self.tasks[key]
        if "stealing" in self.extensions:
            self.extensions["stealing"].remove_key_from_stealable(ts)
        ws = ts.processing_on
        if ws is None:
            logger.debug("Received long-running signal from duplicate task. Ignoring.")
            return
        if compute_duration:
            old_duration = ts.prefix.duration_average or 0
            new_duration = compute_duration
            if not old_duration:
                avg_duration = new_duration
            else:
                avg_duration = 0.5 * old_duration + 0.5 * new_duration
            ts.prefix.duration_average = avg_duration
        # Zero out this task's contribution to occupancy accounting;
        # order matters: subtract the current value before overwriting it
        ws.occupancy -= ws.processing[ts]
        self.total_occupancy -= ws.processing[ts]
        ws.processing[ts] = 0
        self.check_idle_saturated(ws)
    async def handle_worker(self, comm=None, worker=None):
        """
        Listen to responses from a single worker

        This is the main loop for scheduler-worker interaction

        Blocks in ``handle_stream`` until the connection ends, then aborts
        the batched stream and removes the worker if it is still registered.

        See Also
        --------
        Scheduler.handle_client: Equivalent coroutine for clients
        """
        comm.name = "Scheduler connection to worker"
        worker_comm = self.stream_comms[worker]
        worker_comm.start(comm)
        logger.info("Starting worker compute stream, %s", worker)
        try:
            await self.handle_stream(comm=comm, extra={"worker": worker})
        finally:
            if worker in self.stream_comms:
                # Connection dropped: tear the worker down
                worker_comm.abort()
                self.remove_worker(address=worker)
def add_plugin(self, plugin=None, idempotent=False, **kwargs):
"""
Add external plugin to scheduler
See https://distributed.readthedocs.io/en/latest/plugins.html
"""
if isinstance(plugin, type):
plugin = plugin(self, **kwargs)
if idempotent and any(isinstance(p, type(plugin)) for p in self.plugins):
return
self.plugins.append(plugin)
    def remove_plugin(self, plugin):
        """ Remove external plugin from scheduler

        Raises ``ValueError`` if the plugin instance is not registered.
        """
        self.plugins.remove(plugin)
def worker_send(self, worker, msg):
""" Send message to worker
This also handles connection failures by adding a callback to remove
the worker on the next cycle.
"""
try:
self.stream_comms[worker].send(msg)
except (CommClosedError, AttributeError):
self.loop.add_callback(self.remove_worker, address=worker)
############################
# Less common interactions #
############################
    async def scatter(
        self,
        comm=None,
        data=None,
        workers=None,
        client=None,
        broadcast=False,
        timeout=2,
    ):
        """ Send data out to workers

        Parameters
        ----------
        data: dict
            Mapping of key -> value to distribute.
        workers: list, optional
            Restrict placement to these worker addresses.
        broadcast: bool or int
            If True replicate to all targeted workers; if an int, to that
            many workers.
        timeout: number
            Seconds to wait for at least one worker to appear.

        See also
        --------
        Scheduler.broadcast:
        """
        start = time()
        # Wait (up to timeout) for at least one worker to be registered
        while not self.workers:
            await asyncio.sleep(0.2)
            if time() > start + timeout:
                raise TimeoutError("No workers found")
        if workers is None:
            nthreads = {w: ws.nthreads for w, ws in self.workers.items()}
        else:
            workers = [self.coerce_address(w) for w in workers]
            nthreads = {w: self.workers[w].nthreads for w in workers}
        assert isinstance(data, dict)
        keys, who_has, nbytes = await scatter_to_workers(
            nthreads, data, rpc=self.rpc, report=False
        )
        self.update_data(who_has=who_has, nbytes=nbytes, client=client)
        if broadcast:
            # broadcast=True means "to every worker"; an int caps the count
            if broadcast == True:  # noqa: E712
                n = len(nthreads)
            else:
                n = broadcast
            await self.replicate(keys=keys, workers=workers, n=n)
        self.log_event(
            [client, "all"], {"action": "scatter", "client": client, "count": len(data)}
        )
        return keys
    async def gather(self, comm=None, keys=None, serializers=None):
        """ Collect data in from workers

        Returns ``{"status": "OK", "data": ...}`` on success, otherwise an
        error payload listing the missing keys.  Workers that failed to
        deliver promised data are removed (allowed to reconnect) and the
        affected tasks are released for recomputation.
        """
        keys = list(keys)
        who_has = {}
        for key in keys:
            ts = self.tasks.get(key)
            if ts is not None:
                who_has[key] = [ws.address for ws in ts.who_has]
            else:
                who_has[key] = []
        data, missing_keys, missing_workers = await gather_from_workers(
            who_has, rpc=self.rpc, close=False, serializers=serializers
        )
        if not missing_keys:
            result = {"status": "OK", "data": data}
        else:
            missing_states = [
                (self.tasks[key].state if key in self.tasks else None)
                for key in missing_keys
            ]
            logger.exception(
                "Couldn't gather keys %s state: %s workers: %s",
                missing_keys,
                missing_states,
                missing_workers,
            )
            result = {"status": "error", "keys": missing_keys}
            with log_errors():
                # Remove suspicious workers from the scheduler but allow them to
                # reconnect.
                for worker in missing_workers:
                    self.remove_worker(address=worker, close=False)
                for key, workers in missing_keys.items():
                    # Task may already be gone if it was held by a
                    # `missing_worker`
                    ts = self.tasks.get(key)
                    logger.exception(
                        "Workers don't have promised key: %s, %s",
                        str(workers),
                        str(key),
                    )
                    if not workers or ts is None:
                        continue
                    for worker in workers:
                        ws = self.workers.get(worker)
                        if ws is not None and ts in ws.has_what:
                            # Drop the bogus replica record and its byte count
                            ws.has_what.remove(ts)
                            ts.who_has.remove(ws)
                            ws.nbytes -= ts.get_nbytes()
                            self.transitions({key: "released"})
        self.log_event("all", {"action": "gather", "count": len(keys)})
        return result
    def clear_task_state(self):
        # Empty every registered task-state collection (used by restart).
        # XXX what about nested state such as ClientState.wants_what
        # (see also fire-and-forget...)
        logger.info("Clear task state")
        for collection in self._task_state_collections:
            collection.clear()
async def restart(self, client=None, timeout=3):
""" Restart all workers. Reset local state. """
with log_errors():
n_workers = len(self.workers)
logger.info("Send lost future signal to clients")
for cs in self.clients.values():
self.client_releases_keys(
keys=[ts.key for ts in cs.wants_what], client=cs.client_key
)
nannies = {addr: ws.nanny for addr, ws in self.workers.items()}
for addr in list(self.workers):
try:
# Ask the worker to close if it doesn't have a nanny,
# otherwise the nanny will kill it anyway
self.remove_worker(address=addr, close=addr not in nannies)
except Exception as e:
logger.info(
"Exception while restarting. This is normal", exc_info=True
)
self.clear_task_state()
for plugin in self.plugins[:]:
try:
plugin.restart(self)
except Exception as e:
logger.exception(e)
logger.debug("Send kill signal to nannies: %s", nannies)
nannies = [
rpc(nanny_address, connection_args=self.connection_args)
for nanny_address in nannies.values()
if nanny_address is not None
]
resps = All(
[
nanny.restart(
close=True, timeout=timeout * 0.8, executor_wait=False
)
for nanny in nannies
]
)
try:
resps = await asyncio.wait_for(resps, timeout)
except TimeoutError:
logger.error(
"Nannies didn't report back restarted within "
"timeout. Continuuing with restart process"
)
else:
if not all(resp == "OK" for resp in resps):
logger.error(
"Not all workers responded positively: %s", resps, exc_info=True
)
finally:
await asyncio.gather(*[nanny.close_rpc() for nanny in nannies])
await self.start()
self.log_event([client, "all"], {"action": "restart", "client": client})
start = time()
while time() < start + 10 and len(self.workers) < n_workers:
await asyncio.sleep(0.01)
self.report({"op": "restart"})
    async def broadcast(
        self,
        comm=None,
        msg=None,
        workers=None,
        hosts=None,
        nanny=False,
        serializers=None,
    ):
        """ Broadcast message to workers, return all results

        Parameters
        ----------
        msg: dict
            Operation message sent to each target.
        workers: list, optional
            Explicit worker addresses; defaults to all workers unless
            *hosts* is given.
        hosts: list, optional
            Host names whose workers should also be targeted.
        nanny: bool
            If True, send to each worker's nanny address instead.

        Returns
        -------
        dict mapping worker address -> response.
        """
        if workers is None or workers is True:
            if hosts is None:
                workers = list(self.workers)
            else:
                workers = []
        if hosts is not None:
            for host in hosts:
                if host in self.host_info:
                    workers.extend(self.host_info[host]["addresses"])
        # TODO replace with worker_list
        if nanny:
            addresses = [self.workers[w].nanny for w in workers]
        else:
            addresses = workers

        async def send_message(addr):
            # One short-lived connection per target; closed after the reply
            comm = await connect(
                addr, deserialize=self.deserialize, connection_args=self.connection_args
            )
            comm.name = "Scheduler Broadcast"
            resp = await send_recv(comm, close=True, serializers=serializers, **msg)
            return resp

        results = await All(
            [send_message(address) for address in addresses if address is not None]
        )
        return dict(zip(workers, results))
async def proxy(self, comm=None, msg=None, worker=None, serializers=None):
""" Proxy a communication through the scheduler to some other worker """
d = await self.broadcast(
comm=comm, msg=msg, workers=[worker], serializers=serializers
)
return d[worker]
    async def _delete_worker_data(self, worker_address, keys):
        """ Delete data from a worker and update the corresponding worker/task states

        Parameters
        ----------
        worker_address: str
            Worker address to delete keys from
        keys: List[str]
            List of keys to delete on the specified worker
        """
        # Tell the worker to drop the data (retried on transient failures)
        await retry_operation(
            self.rpc(addr=worker_address).delete_data, keys=list(keys), report=False
        )
        # Then update scheduler-side replica and byte accounting
        ws = self.workers[worker_address]
        tasks = {self.tasks[key] for key in keys}
        ws.has_what -= tasks
        for ts in tasks:
            ts.who_has.remove(ws)
            ws.nbytes -= ts.get_nbytes()
        self.log_event(ws.address, {"action": "remove-worker-data", "keys": keys})
    async def rebalance(self, comm=None, keys=None, workers=None):
        """ Rebalance keys so that each worker stores roughly equal bytes

        **Policy**

        This orders the workers by what fraction of bytes of the existing keys
        they have. It walks down this list from most-to-least. At each worker
        it sends the largest results it can find and sends them to the least
        occupied worker until either the sender or the recipient are at the
        average expected load.

        Returns ``{"status": "OK"}`` on success, or a ``missing-data``
        payload if keys had no replicas or transfers failed.
        """
        with log_errors():
            async with self._lock:
                if keys:
                    tasks = {self.tasks[k] for k in keys}
                    missing_data = [ts.key for ts in tasks if not ts.who_has]
                    if missing_data:
                        return {"status": "missing-data", "keys": missing_data}
                else:
                    tasks = set(self.tasks.values())
                if workers:
                    workers = {self.workers[w] for w in workers}
                    workers_by_task = {ts: ts.who_has & workers for ts in tasks}
                else:
                    workers = set(self.workers.values())
                    workers_by_task = {ts: ts.who_has for ts in tasks}
                # Invert to worker -> set of tasks it holds
                tasks_by_worker = {ws: set() for ws in workers}
                for k, v in workers_by_task.items():
                    for vv in v:
                        tasks_by_worker[vv].add(k)
                worker_bytes = {
                    ws: sum(ts.get_nbytes() for ts in v)
                    for ws, v in tasks_by_worker.items()
                }
                avg = sum(worker_bytes.values()) / len(worker_bytes)
                sorted_workers = list(
                    map(first, sorted(worker_bytes.items(), key=second, reverse=True))
                )
                # Heaviest workers send; lightest workers receive
                recipients = iter(reversed(sorted_workers))
                recipient = next(recipients)
                msgs = []  # (sender, recipient, key)
                for sender in sorted_workers[: len(workers) // 2]:
                    sender_keys = {
                        ts: ts.get_nbytes() for ts in tasks_by_worker[sender]
                    }
                    sender_keys = iter(
                        sorted(sender_keys.items(), key=second, reverse=True)
                    )
                    try:
                        while worker_bytes[sender] > avg:
                            while (
                                worker_bytes[recipient] < avg
                                and worker_bytes[sender] > avg
                            ):
                                ts, nb = next(sender_keys)
                                if ts not in tasks_by_worker[recipient]:
                                    tasks_by_worker[recipient].add(ts)
                                    # tasks_by_worker[sender].remove(ts)
                                    msgs.append((sender, recipient, ts))
                                    worker_bytes[sender] -= nb
                                    worker_bytes[recipient] += nb
                            if worker_bytes[sender] > avg:
                                recipient = next(recipients)
                    except StopIteration:
                        break
                # Group planned moves per recipient and per sender
                to_recipients = defaultdict(lambda: defaultdict(list))
                to_senders = defaultdict(list)
                for sender, recipient, ts in msgs:
                    to_recipients[recipient.address][ts.key].append(sender.address)
                    to_senders[sender.address].append(ts.key)
                result = await asyncio.gather(
                    *(
                        retry_operation(self.rpc(addr=r).gather, who_has=v)
                        for r, v in to_recipients.items()
                    )
                )
                for r, v in to_recipients.items():
                    self.log_event(r, {"action": "rebalance", "who_has": v})
                self.log_event(
                    "all",
                    {
                        "action": "rebalance",
                        "total-keys": len(tasks),
                        "senders": valmap(len, to_senders),
                        "recipients": valmap(len, to_recipients),
                        "moved_keys": len(msgs),
                    },
                )
                if not all(r["status"] == "OK" for r in result):
                    return {
                        "status": "missing-data",
                        "keys": sum([r["keys"] for r in result if "keys" in r], []),
                    }
                # All copies succeeded: record new replicas, then delete
                # the originals from the senders
                for sender, recipient, ts in msgs:
                    assert ts.state == "memory"
                    ts.who_has.add(recipient)
                    recipient.has_what.add(ts)
                    recipient.nbytes += ts.get_nbytes()
                    self.log.append(
                        ("rebalance", ts.key, time(), sender.address, recipient.address)
                    )
                await asyncio.gather(
                    *(self._delete_worker_data(r, v) for r, v in to_senders.items())
                )
                return {"status": "OK"}
    async def replicate(
        self,
        comm=None,
        keys=None,
        n=None,
        workers=None,
        branching_factor=2,
        delete=True,
        lock=True,
    ):
        """ Replicate data throughout cluster

        This performs a tree copy of the data throughout the network
        individually on each piece of data.

        Parameters
        ----------
        keys: Iterable
            list of keys to replicate
        n: int
            Number of replications we expect to see within the cluster
        branching_factor: int, optional
            The number of workers that can copy data in each generation.
            The larger the branching factor, the more data we copy in
            a single step, but the more a given worker risks being
            swamped by data requests.
        delete: bool
            If True, first trim replicas above the target count.
        lock: bool
            If True, serialize against other rebalance/replicate calls.

        See also
        --------
        Scheduler.rebalance
        """
        assert branching_factor > 0
        async with self._lock if lock else empty_context:
            workers = {self.workers[w] for w in self.workers_list(workers)}
            if n is None:
                n = len(workers)
            else:
                n = min(n, len(workers))
            if n == 0:
                raise ValueError("Can not use replicate to delete data")
            tasks = {self.tasks[k] for k in keys}
            missing_data = [ts.key for ts in tasks if not ts.who_has]
            if missing_data:
                return {"status": "missing-data", "keys": missing_data}
            # Delete extraneous data
            if delete:
                del_worker_tasks = defaultdict(set)
                for ts in tasks:
                    del_candidates = ts.who_has & workers
                    if len(del_candidates) > n:
                        # Randomly choose which surplus replicas to drop
                        for ws in random.sample(
                            del_candidates, len(del_candidates) - n
                        ):
                            del_worker_tasks[ws].add(ts)
                await asyncio.gather(
                    *(
                        self._delete_worker_data(ws.address, [t.key for t in tasks])
                        for ws, tasks in del_worker_tasks.items()
                    )
                )
            # Copy not-yet-filled data
            while tasks:
                gathers = defaultdict(dict)
                for ts in list(tasks):
                    n_missing = n - len(ts.who_has & workers)
                    if n_missing <= 0:
                        # Already replicated enough
                        tasks.remove(ts)
                        continue
                    # Each generation can at most multiply the replica
                    # count by (1 + branching_factor)
                    count = min(n_missing, branching_factor * len(ts.who_has))
                    assert count > 0
                    for ws in random.sample(workers - ts.who_has, count):
                        gathers[ws.address][ts.key] = [
                            wws.address for wws in ts.who_has
                        ]
                results = await asyncio.gather(
                    *(
                        retry_operation(self.rpc(addr=w).gather, who_has=who_has)
                        for w, who_has in gathers.items()
                    )
                )
                for w, v in zip(gathers, results):
                    if v["status"] == "OK":
                        self.add_keys(worker=w, keys=list(gathers[w]))
                    else:
                        logger.warning("Communication failed during replication: %s", v)
                    self.log_event(w, {"action": "replicate-add", "keys": gathers[w]})
            self.log_event(
                "all",
                {
                    "action": "replicate",
                    "workers": list(workers),
                    "key-count": len(keys),
                    "branching-factor": branching_factor,
                },
            )
    def workers_to_close(
        self,
        comm=None,
        memory_ratio=None,
        n=None,
        key=None,
        minimum=None,
        target=None,
        attribute="address",
    ):
        """
        Find workers that we can close with low cost

        This returns a list of workers that are good candidates to retire.
        These workers are not running anything and are storing
        relatively little data relative to their peers. If all workers are
        idle then we still maintain enough workers to have enough RAM to store
        our data, with a comfortable buffer.

        This is for use with systems like ``distributed.deploy.adaptive``.

        Parameters
        ----------
        memory_ratio: Number
            Amount of extra space we want to have for our stored data.
            Defaults to 2, or that we want to have twice as much memory as we
            currently have data.
        n: int
            Number of workers to close
        minimum: int
            Minimum number of workers to keep around
        key: Callable(WorkerState)
            An optional callable mapping a WorkerState object to a group
            affiliation. Groups will be closed together. This is useful when
            closing workers must be done collectively, such as by hostname.
        target: int
            Target number of workers to have after we close
        attribute : str
            The attribute of the WorkerState object to return, like "address"
            or "name". Defaults to "address".

        Examples
        --------
        >>> scheduler.workers_to_close()
        ['tcp://192.168.0.1:1234', 'tcp://192.168.0.2:1234']

        Group workers by hostname prior to closing

        >>> scheduler.workers_to_close(key=lambda ws: ws.host)
        ['tcp://192.168.0.1:1234', 'tcp://192.168.0.1:4567']

        Remove two workers

        >>> scheduler.workers_to_close(n=2)

        Keep enough workers to have twice as much memory as we we need.

        >>> scheduler.workers_to_close(memory_ratio=2)

        Returns
        -------
        to_close: list of worker addresses that are OK to close

        See Also
        --------
        Scheduler.retire_workers
        """
        # Normalize n/target so exactly one notion of "how many" drives the loop
        if target is not None and n is None:
            n = len(self.workers) - target
        if n is not None:
            if n < 0:
                n = 0
            target = len(self.workers) - n
        if n is None and memory_ratio is None:
            memory_ratio = 2
        with log_errors():
            if not n and all(ws.processing for ws in self.workers.values()):
                return []
            if key is None:
                key = lambda ws: ws.address
            # A pickled key function is only accepted when pickle is allowed
            if isinstance(key, bytes) and dask.config.get(
                "distributed.scheduler.pickle"
            ):
                key = pickle.loads(key)
            groups = groupby(key, self.workers.values())
            limit_bytes = {
                k: sum(ws.memory_limit for ws in v) for k, v in groups.items()
            }
            group_bytes = {k: sum(ws.nbytes for ws in v) for k, v in groups.items()}
            limit = sum(limit_bytes.values())
            total = sum(group_bytes.values())

            def _key(group):
                # Sort so that idle, low-byte groups come last (popped first)
                is_idle = not any(ws.processing for ws in groups[group])
                bytes = -group_bytes[group]
                return (is_idle, bytes)

            idle = sorted(groups, key=_key)
            to_close = []
            n_remain = len(self.workers)
            while idle:
                group = idle.pop()
                if n is None and any(ws.processing for ws in groups[group]):
                    break
                if minimum and n_remain - len(groups[group]) < minimum:
                    break
                limit -= limit_bytes[group]
                if (n is not None and n_remain - len(groups[group]) >= target) or (
                    memory_ratio is not None and limit >= memory_ratio * total
                ):
                    to_close.append(group)
                    n_remain -= len(groups[group])
                else:
                    break
            result = [getattr(ws, attribute) for g in to_close for ws in groups[g]]
            if result:
                logger.debug("Suggest closing workers: %s", result)
            return result
    async def retire_workers(
        self,
        comm=None,
        workers=None,
        remove=True,
        close_workers=False,
        names=None,
        lock=True,
        **kwargs
    ):
        """ Gracefully retire workers from cluster

        Parameters
        ----------
        workers: list (optional)
            List of worker addresses to retire.
            If not provided we call ``workers_to_close`` which finds a good set
        workers_names: list (optional)
            List of worker names to retire.
        remove: bool (defaults to True)
            Whether or not to remove the worker metadata immediately or else
            wait for the worker to contact us
        close_workers: bool (defaults to False)
            Whether or not to actually close the worker explicitly from here.
            Otherwise we expect some external job scheduler to finish off the
            worker.
        **kwargs: dict
            Extra options to pass to workers_to_close to determine which
            workers we should drop

        Returns
        -------
        Dictionary mapping worker ID/address to dictionary of information about
        that worker for each retired worker.
        NOTE(review): some early-exit paths return an empty list instead of
        an empty dict — callers should treat both as "nothing retired".

        See Also
        --------
        Scheduler.workers_to_close
        """
        with log_errors():
            async with self._lock if lock else empty_context:
                if names is not None:
                    if names:
                        logger.info("Retire worker names %s", names)
                    names = set(map(str, names))
                    workers = [
                        ws.address
                        for ws in self.workers.values()
                        if str(ws.name) in names
                    ]
                if workers is None:
                    # Repeatedly ask workers_to_close; keys may move during
                    # replication, so retry on KeyError
                    while True:
                        try:
                            workers = self.workers_to_close(**kwargs)
                            if workers:
                                workers = await self.retire_workers(
                                    workers=workers,
                                    remove=remove,
                                    close_workers=close_workers,
                                    lock=False,
                                )
                            return workers
                        except KeyError:  # keys left during replicate
                            pass
                workers = {self.workers[w] for w in workers if w in self.workers}
                if not workers:
                    return []
                logger.info("Retire workers %s", workers)
                # Keys orphaned by retiring those workers
                keys = set.union(*[w.has_what for w in workers])
                keys = {ts.key for ts in keys if ts.who_has.issubset(workers)}
                other_workers = set(self.workers.values()) - workers
                if keys:
                    if other_workers:
                        # Ensure at least one replica survives elsewhere
                        logger.info("Moving %d keys to other workers", len(keys))
                        await self.replicate(
                            keys=keys,
                            workers=[ws.address for ws in other_workers],
                            n=1,
                            delete=False,
                            lock=False,
                        )
                    else:
                        return []
                worker_keys = {ws.address: ws.identity() for ws in workers}
                if close_workers and worker_keys:
                    await asyncio.gather(
                        *[self.close_worker(worker=w, safe=True) for w in worker_keys]
                    )
                if remove:
                    for w in worker_keys:
                        self.remove_worker(address=w, safe=True)
                self.log_event(
                    "all",
                    {
                        "action": "retire-workers",
                        "workers": worker_keys,
                        "moved-keys": len(keys),
                    },
                )
                self.log_event(list(worker_keys), {"action": "retired"})
                return worker_keys
def add_keys(self, comm=None, worker=None, keys=()):
"""
Learn that a worker has certain keys
This should not be used in practice and is mostly here for legacy
reasons. However, it is sent by workers from time to time.
"""
if worker not in self.workers:
return "not found"
ws = self.workers[worker]
for key in keys:
ts = self.tasks.get(key)
if ts is not None and ts.state == "memory":
if ts not in ws.has_what:
ws.nbytes += ts.get_nbytes()
ws.has_what.add(ts)
ts.who_has.add(ws)
else:
self.worker_send(
worker, {"op": "delete-data", "keys": [key], "report": False}
)
return "OK"
    def update_data(
        self, comm=None, who_has=None, nbytes=None, client=None, serializers=None
    ):
        """
        Learn that new data has entered the network from an external source

        Parameters
        ----------
        who_has: dict
            Mapping key -> list of worker addresses that hold it.
        nbytes: dict
            Mapping key -> byte size for the new data.
        client: str, optional
            Client to register as wanting the new keys.

        See Also
        --------
        Scheduler.mark_key_in_memory
        """
        with log_errors():
            who_has = {
                k: [self.coerce_address(vv) for vv in v] for k, v in who_has.items()
            }
            logger.debug("Update data %s", who_has)
            for key, workers in who_has.items():
                ts = self.tasks.get(key)
                if ts is None:
                    # Unknown key: create it directly in memory state
                    ts = self.new_task(key, None, "memory")
                ts.state = "memory"
                if key in nbytes:
                    ts.set_nbytes(nbytes[key])
                for w in workers:
                    ws = self.workers[w]
                    if ts not in ws.has_what:
                        ws.nbytes += ts.get_nbytes()
                    ws.has_what.add(ts)
                    ts.who_has.add(ws)
                self.report(
                    {"op": "key-in-memory", "key": key, "workers": list(workers)}
                )
            if client:
                self.client_desires_keys(keys=list(who_has), client=client)
    def report_on_key(self, key=None, ts=None, client=None):
        """Send the appropriate state message for a single key.

        Exactly one of *key* or *ts* must be given; emits ``cancelled-key``,
        ``key-in-memory``, or ``task-erred`` depending on the task's state.
        """
        assert (key is None) + (ts is None) == 1, (key, ts)
        if ts is None:
            try:
                ts = self.tasks[key]
            except KeyError:
                self.report({"op": "cancelled-key", "key": key}, client=client)
                return
        else:
            key = ts.key
        if ts.state == "forgotten":
            self.report({"op": "cancelled-key", "key": key}, ts=ts, client=client)
        elif ts.state == "memory":
            self.report({"op": "key-in-memory", "key": key}, ts=ts, client=client)
        elif ts.state == "erred":
            # Blame may point at a different (upstream) task's failure
            failing_ts = ts.exception_blame
            self.report(
                {
                    "op": "task-erred",
                    "key": key,
                    "exception": failing_ts.exception,
                    "traceback": failing_ts.traceback,
                },
                ts=ts,
                client=client,
            )
async def feed(
self, comm, function=None, setup=None, teardown=None, interval="1s", **kwargs
):
"""
Provides a data Comm to external requester
Caution: this runs arbitrary Python code on the scheduler. This should
eventually be phased out. It is mostly used by diagnostics.
"""
if not dask.config.get("distributed.scheduler.pickle"):
logger.warn(
"Tried to call 'feed' route with custom fucntions, but "
"pickle is disallowed. Set the 'distributed.scheduler.pickle'"
"config value to True to use the 'feed' route (this is mostly "
"commonly used with progress bars)"
)
return
import pickle
interval = parse_timedelta(interval)
with log_errors():
if function:
function = pickle.loads(function)
if setup:
setup = pickle.loads(setup)
if teardown:
teardown = pickle.loads(teardown)
state = setup(self) if setup else None
if isawaitable(state):
state = await state
try:
while self.status == "running":
if state is None:
response = function(self)
else:
response = function(self, state)
await comm.write(response)
await asyncio.sleep(interval)
except (EnvironmentError, CommClosedError):
pass
finally:
if teardown:
teardown(self, state)
def subscribe_worker_status(self, comm=None):
WorkerStatusPlugin(self, comm)
ident = self.identity()
for v in ident["workers"].values():
del v["metrics"]
del v["last_seen"]
return ident
def get_processing(self, comm=None, workers=None):
if workers is not None:
workers = set(map(self.coerce_address, workers))
return {w: [ts.key for ts in self.workers[w].processing] for w in workers}
else:
return {
w: [ts.key for ts in ws.processing] for w, ws in self.workers.items()
}
def get_who_has(self, comm=None, keys=None):
if keys is not None:
return {
k: [ws.address for ws in self.tasks[k].who_has]
if k in self.tasks
else []
for k in keys
}
else:
return {
key: [ws.address for ws in ts.who_has] for key, ts in self.tasks.items()
}
def get_has_what(self, comm=None, workers=None):
if workers is not None:
workers = map(self.coerce_address, workers)
return {
w: [ts.key for ts in self.workers[w].has_what]
if w in self.workers
else []
for w in workers
}
else:
return {w: [ts.key for ts in ws.has_what] for w, ws in self.workers.items()}
def get_ncores(self, comm=None, workers=None):
if workers is not None:
workers = map(self.coerce_address, workers)
return {w: self.workers[w].nthreads for w in workers if w in self.workers}
else:
return {w: ws.nthreads for w, ws in self.workers.items()}
    async def get_call_stack(self, comm=None, keys=None):
        """
        Return current call stacks from workers.

        If *keys* is given, walk those keys' dependency trees and query only
        the workers processing (transitively) relevant tasks; otherwise query
        every worker for all of its tasks.  Returns a dict
        ``{worker_address: call_stack}`` with empty responses dropped.
        """
        if keys is not None:
            # Depth-first walk: collect the "processing" tasks these keys
            # are (transitively) waiting on.
            stack = list(keys)
            processing = set()
            while stack:
                key = stack.pop()
                ts = self.tasks[key]
                if ts.state == "waiting":
                    stack.extend(dts.key for dts in ts.dependencies)
                elif ts.state == "processing":
                    processing.add(ts)
            # Group the interesting keys by the worker executing them
            workers = defaultdict(list)
            for ts in processing:
                if ts.processing_on:
                    workers[ts.processing_on.address].append(ts.key)
        else:
            # keys=None in the RPC means "all tasks on that worker"
            workers = {w: None for w in self.workers}
        if not workers:
            return {}
        results = await asyncio.gather(
            *(self.rpc(w).call_stack(keys=v) for w, v in workers.items())
        )
        # Drop workers that returned nothing
        response = {w: r for w, r in zip(workers, results) if r}
        return response
def get_nbytes(self, comm=None, keys=None, summary=True):
with log_errors():
if keys is not None:
result = {k: self.tasks[k].nbytes for k in keys}
else:
result = {
k: ts.nbytes
for k, ts in self.tasks.items()
if ts.nbytes is not None
}
if summary:
out = defaultdict(lambda: 0)
for k, v in result.items():
out[key_split(k)] += v
result = dict(out)
return result
def get_comm_cost(self, ts, ws):
"""
Get the estimated communication cost (in s.) to compute the task
on the given worker.
"""
return sum(dts.nbytes for dts in ts.dependencies - ws.has_what) / self.bandwidth
def get_task_duration(self, ts, default=0.5):
"""
Get the estimated computation cost of the given task
(not including any communication cost).
"""
duration = ts.prefix.duration_average
if duration is None:
self.unknown_durations[ts.prefix.name].add(ts)
return default
return duration
def run_function(self, stream, function, args=(), kwargs={}, wait=True):
""" Run a function within this process
See Also
--------
Client.run_on_scheduler:
"""
from .worker import run
self.log_event("all", {"action": "run-function", "function": function})
return run(self, stream, function=function, args=args, kwargs=kwargs, wait=wait)
def set_metadata(self, stream=None, keys=None, value=None):
try:
metadata = self.task_metadata
for key in keys[:-1]:
if key not in metadata or not isinstance(metadata[key], (dict, list)):
metadata[key] = dict()
metadata = metadata[key]
metadata[keys[-1]] = value
except Exception as e:
import pdb
pdb.set_trace()
def get_metadata(self, stream=None, keys=None, default=no_default):
metadata = self.task_metadata
for key in keys[:-1]:
metadata = metadata[key]
try:
return metadata[keys[-1]]
except KeyError:
if default != no_default:
return default
else:
raise
def get_task_status(self, stream=None, keys=None):
return {
key: (self.tasks[key].state if key in self.tasks else None) for key in keys
}
def get_task_stream(self, comm=None, start=None, stop=None, count=None):
from distributed.diagnostics.task_stream import TaskStreamPlugin
self.add_plugin(TaskStreamPlugin, idempotent=True)
ts = [p for p in self.plugins if isinstance(p, TaskStreamPlugin)][0]
return ts.collect(start=start, stop=stop, count=count)
async def register_worker_plugin(self, comm, plugin, name=None):
""" Registers a setup function, and call it on every worker """
self.worker_plugins.append(plugin)
responses = await self.broadcast(
msg=dict(op="plugin-add", plugin=plugin, name=name)
)
return responses
    #####################
    # State Transitions #
    #####################
    def _remove_from_processing(self, ts, send_worker_msg=None):
        """
        Remove *ts* from the set of processing tasks.

        Updates the worker's and cluster's occupancy bookkeeping, rechecks
        the worker's idle/saturated status and releases any reserved
        resources.  If *send_worker_msg* is given and the worker is still
        known, the message is forwarded to it.
        """
        ws = ts.processing_on
        ts.processing_on = None
        w = ws.address
        if w in self.workers:  # may have been removed
            duration = ws.processing.pop(ts)
            if not ws.processing:
                # Last task on this worker: zero occupancy exactly to avoid
                # floating-point drift from repeated +=/-= updates
                self.total_occupancy -= ws.occupancy
                ws.occupancy = 0
            else:
                self.total_occupancy -= duration
                ws.occupancy -= duration
            self.check_idle_saturated(ws)
            self.release_resources(ts, ws)
            if send_worker_msg:
                self.worker_send(w, send_worker_msg)
    def _add_to_memory(
        self, ts, ws, recommendations, type=None, typename=None, **kwargs
    ):
        """
        Add *ts* to the set of in-memory tasks.

        Records that worker *ws* now holds the result, wakes dependents
        whose inputs are now all available, and releases dependencies that
        are no longer needed.  Follow-up transitions are written into
        *recommendations* (key -> state).
        """
        if self.validate:
            assert ts not in ws.has_what
        ts.who_has.add(ws)
        ws.has_what.add(ts)
        ws.nbytes += ts.get_nbytes()
        deps = ts.dependents
        if len(deps) > 1:
            # Wake higher-priority dependents first
            deps = sorted(deps, key=operator.attrgetter("priority"), reverse=True)
        for dts in deps:
            s = dts.waiting_on
            if ts in s:
                s.discard(ts)
                if not s:  # new task ready to run
                    recommendations[dts.key] = "processing"
        for dts in ts.dependencies:
            s = dts.waiters
            s.discard(ts)
            if not s and not dts.who_wants:
                # Dependency no longer needed by anyone
                recommendations[dts.key] = "released"
        if not ts.waiters and not ts.who_wants:
            recommendations[ts.key] = "released"
        else:
            msg = {"op": "key-in-memory", "key": ts.key}
            if type is not None:
                msg["type"] = type
            self.report(msg)
        ts.state = "memory"
        ts.type = typename
        ts.group.types.add(typename)
        cs = self.clients["fire-and-forget"]
        if ts in cs.wants_what:
            # Fire-and-forget tasks are released as soon as they complete
            self.client_releases_keys(client="fire-and-forget", keys=[ts.key])
    def transition_released_waiting(self, key):
        """
        released -> waiting: start tracking *key*'s unmet dependencies.

        Returns a dict of recommended follow-up transitions.
        """
        try:
            ts = self.tasks[key]
            if self.validate:
                assert ts.run_spec
                assert not ts.waiting_on
                assert not ts.who_has
                assert not ts.processing_on
                assert not any(dts.state == "forgotten" for dts in ts.dependencies)
            if ts.has_lost_dependencies:
                return {key: "forgotten"}
            ts.state = "waiting"
            recommendations = OrderedDict()
            for dts in ts.dependencies:
                if dts.exception_blame:
                    # A dependency already failed: propagate the error
                    ts.exception_blame = dts.exception_blame
                    recommendations[key] = "erred"
                    return recommendations
            for dts in ts.dependencies:
                dep = dts.key
                if not dts.who_has:
                    # Dependency data is not in memory anywhere: wait on it
                    ts.waiting_on.add(dts)
                if dts.state == "released":
                    recommendations[dep] = "waiting"
                else:
                    dts.waiters.add(ts)
            ts.waiters = {dts for dts in ts.dependents if dts.state == "waiting"}
            if not ts.waiting_on:
                if self.workers:
                    recommendations[key] = "processing"
                else:
                    # No workers connected yet: park the task
                    self.unrunnable.add(ts)
                    ts.state = "no-worker"
            return recommendations
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb
                pdb.set_trace()
            raise
    def transition_no_worker_waiting(self, key):
        """
        no-worker -> waiting: re-queue a parked task (e.g. once workers
        have joined).  Returns a dict of recommended follow-up transitions.
        """
        try:
            ts = self.tasks[key]
            if self.validate:
                assert ts in self.unrunnable
                assert not ts.waiting_on
                assert not ts.who_has
                assert not ts.processing_on
            self.unrunnable.remove(ts)
            if ts.has_lost_dependencies:
                return {key: "forgotten"}
            recommendations = OrderedDict()
            for dts in ts.dependencies:
                dep = dts.key
                if not dts.who_has:
                    # Dependency data not in memory anywhere: wait on it
                    ts.waiting_on.add(dts)
                if dts.state == "released":
                    recommendations[dep] = "waiting"
                else:
                    dts.waiters.add(ts)
            ts.state = "waiting"
            if not ts.waiting_on:
                if self.workers:
                    recommendations[key] = "processing"
                else:
                    # Still no workers connected: park the task again
                    self.unrunnable.add(ts)
                    ts.state = "no-worker"
            return recommendations
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb
                pdb.set_trace()
            raise
def decide_worker(self, ts):
"""
Decide on a worker for task *ts*. Return a WorkerState.
"""
valid_workers = self.valid_workers(ts)
if not valid_workers and not ts.loose_restrictions and self.workers:
self.unrunnable.add(ts)
ts.state = "no-worker"
return None
if ts.dependencies or valid_workers is not True:
worker = decide_worker(
ts,
self.workers.values(),
valid_workers,
partial(self.worker_objective, ts),
)
elif self.idle:
if len(self.idle) < 20: # smart but linear in small case
worker = min(self.idle, key=operator.attrgetter("occupancy"))
else: # dumb but fast in large case
worker = self.idle[self.n_tasks % len(self.idle)]
else:
if len(self.workers) < 20: # smart but linear in small case
worker = min(
self.workers.values(), key=operator.attrgetter("occupancy")
)
else: # dumb but fast in large case
worker = self.workers.values()[self.n_tasks % len(self.workers)]
if self.validate:
assert worker is None or isinstance(worker, WorkerState), (
type(worker),
worker,
)
assert worker.address in self.workers
return worker
    def transition_waiting_processing(self, key):
        """
        waiting -> processing: choose a worker and dispatch the task to it.

        Returns an empty recommendations dict; if no worker is available
        the task is parked by ``decide_worker`` and nothing else happens.
        """
        try:
            ts = self.tasks[key]
            if self.validate:
                assert not ts.waiting_on
                assert not ts.who_has
                assert not ts.exception_blame
                assert not ts.processing_on
                assert not ts.has_lost_dependencies
                assert ts not in self.unrunnable
                assert all(dts.who_has for dts in ts.dependencies)
            ws = self.decide_worker(ts)
            if ws is None:
                return {}
            worker = ws.address
            # Occupancy estimate = expected compute time + transfer time
            duration = self.get_task_duration(ts)
            comm = self.get_comm_cost(ts, ws)
            ws.processing[ts] = duration + comm
            ts.processing_on = ws
            ws.occupancy += duration + comm
            self.total_occupancy += duration + comm
            ts.state = "processing"
            self.consume_resources(ts, ws)
            self.check_idle_saturated(ws)
            self.n_tasks += 1
            if ts.actor:
                ws.actors.add(ts)
            # logger.debug("Send job to worker: %s, %s", worker, key)
            self.send_task_to_worker(worker, key)
            return {}
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb
                pdb.set_trace()
            raise
    def transition_waiting_memory(self, key, nbytes=None, worker=None, **kwargs):
        """
        waiting -> memory: a worker reported data for a task the scheduler
        had not dispatched to it (e.g. scatter or a stealing race).
        """
        try:
            ws = self.workers[worker]
            ts = self.tasks[key]
            if self.validate:
                assert not ts.processing_on
                assert ts.waiting_on
                assert ts.state == "waiting"
            ts.waiting_on.clear()
            if nbytes is not None:
                ts.set_nbytes(nbytes)
            self.check_idle_saturated(ws)
            recommendations = OrderedDict()
            self._add_to_memory(ts, ws, recommendations, **kwargs)
            if self.validate:
                assert not ts.processing_on
                assert not ts.waiting_on
                assert ts.who_has
            return recommendations
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb
                pdb.set_trace()
            raise
    def transition_processing_memory(
        self,
        key,
        nbytes=None,
        type=None,
        typename=None,
        worker=None,
        startstops=None,
        **kwargs
    ):
        """
        processing -> memory: the assigned worker completed the task.

        Updates duration estimates from the reported compute start/stop
        times, then records the result as resident on *worker*.
        """
        try:
            ts = self.tasks[key]
            assert worker
            assert isinstance(worker, str)
            if self.validate:
                assert ts.processing_on
                ws = ts.processing_on
                assert ts in ws.processing
                assert not ts.waiting_on
                assert not ts.who_has, (ts, ts.who_has)
                assert not ts.exception_blame
                assert ts.state == "processing"
            ws = self.workers.get(worker)
            if ws is None:
                # Worker has since left: release and recompute elsewhere
                return {key: "released"}
            if ws != ts.processing_on:  # someone else has this task
                logger.info(
                    "Unexpected worker completed task, likely due to"
                    " work stealing. Expected: %s, Got: %s, Key: %s",
                    ts.processing_on,
                    ws,
                    key,
                )
                return {}
            if startstops:
                L = [
                    (startstop["start"], startstop["stop"])
                    for startstop in startstops
                    if startstop["action"] == "compute"
                ]
                if L:
                    compute_start, compute_stop = L[0]
                else:  # This is very rare
                    compute_start = compute_stop = None
            else:
                compute_start = compute_stop = None
            #############################
            # Update Timing Information #
            #############################
            if compute_start and ws.processing.get(ts, True):
                # Update average task duration for worker
                old_duration = ts.prefix.duration_average or 0
                new_duration = compute_stop - compute_start
                if not old_duration:
                    avg_duration = new_duration
                else:
                    # Exponential moving average over observed durations
                    avg_duration = 0.5 * old_duration + 0.5 * new_duration
                ts.prefix.duration_average = avg_duration
                ts.group.duration += new_duration
                # Retroactively fix estimates of same-prefix tasks that were
                # scheduled before any real duration was known
                for tts in self.unknown_durations.pop(ts.prefix.name, ()):
                    if tts.processing_on:
                        wws = tts.processing_on
                        old = wws.processing[tts]
                        comm = self.get_comm_cost(tts, wws)
                        wws.processing[tts] = avg_duration + comm
                        wws.occupancy += avg_duration + comm - old
                        self.total_occupancy += avg_duration + comm - old
            ############################
            # Update State Information #
            ############################
            if nbytes is not None:
                ts.set_nbytes(nbytes)
            recommendations = OrderedDict()
            self._remove_from_processing(ts)
            self._add_to_memory(ts, ws, recommendations, type=type, typename=typename)
            if self.validate:
                assert not ts.processing_on
                assert not ts.waiting_on
            return recommendations
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb
                pdb.set_trace()
            raise
    def transition_memory_released(self, key, safe=False):
        """
        memory -> released: drop the task's data from every worker.

        With ``safe=True`` the caller asserts no dependent still needs the
        data.  Actors cannot be recomputed, so a wanted actor errs instead.
        """
        try:
            ts = self.tasks[key]
            if self.validate:
                assert not ts.waiting_on
                assert not ts.processing_on
                if safe:
                    assert not ts.waiters
            if ts.actor:
                for ws in ts.who_has:
                    ws.actors.discard(ts)
                if ts.who_wants:
                    # Actors hold in-process state and cannot be recreated
                    ts.exception_blame = ts
                    ts.exception = "Worker holding Actor was lost"
                    return {ts.key: "erred"}  # don't try to recreate
            recommendations = OrderedDict()
            for dts in ts.waiters:
                if dts.state in ("no-worker", "processing"):
                    recommendations[dts.key] = "waiting"
                elif dts.state == "waiting":
                    dts.waiting_on.add(ts)
            # XXX factor this out?
            for ws in ts.who_has:
                ws.has_what.remove(ts)
                ws.nbytes -= ts.get_nbytes()
                ts.group.nbytes_in_memory -= ts.get_nbytes()
                self.worker_send(
                    ws.address, {"op": "delete-data", "keys": [key], "report": False}
                )
            ts.who_has.clear()
            ts.state = "released"
            self.report({"op": "lost-data", "key": key})
            if not ts.run_spec:  # pure data
                recommendations[key] = "forgotten"
            elif ts.has_lost_dependencies:
                recommendations[key] = "forgotten"
            elif ts.who_wants or ts.waiters:
                # Someone still needs the result: recompute it
                recommendations[key] = "waiting"
            if self.validate:
                assert not ts.waiting_on
            return recommendations
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb
                pdb.set_trace()
            raise
    def transition_released_erred(self, key):
        """
        released -> erred: mark the task failed and cascade the blame to
        dependents that have no data.  Clients are informed via report().
        """
        try:
            ts = self.tasks[key]
            if self.validate:
                with log_errors(pdb=LOG_PDB):
                    assert ts.exception_blame
                    assert not ts.who_has
                    assert not ts.waiting_on
                    assert not ts.waiters
            recommendations = {}
            failing_ts = ts.exception_blame
            for dts in ts.dependents:
                dts.exception_blame = failing_ts
                if not dts.who_has:
                    recommendations[dts.key] = "erred"
            self.report(
                {
                    "op": "task-erred",
                    "key": key,
                    "exception": failing_ts.exception,
                    "traceback": failing_ts.traceback,
                }
            )
            ts.state = "erred"
            # TODO: waiting data?
            return recommendations
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb
                pdb.set_trace()
            raise
    def transition_erred_released(self, key):
        """
        erred -> released: clear error state so the task can be retried;
        erred dependents are recommended back to waiting.
        """
        try:
            ts = self.tasks[key]
            if self.validate:
                with log_errors(pdb=LOG_PDB):
                    assert all(dts.state != "erred" for dts in ts.dependencies)
                    assert ts.exception_blame
                    assert not ts.who_has
                    assert not ts.waiting_on
                    assert not ts.waiters
            recommendations = OrderedDict()
            ts.exception = None
            ts.exception_blame = None
            ts.traceback = None
            for dep in ts.dependents:
                if dep.state == "erred":
                    recommendations[dep.key] = "waiting"
            self.report({"op": "task-retried", "key": key})
            ts.state = "released"
            return recommendations
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb
                pdb.set_trace()
            raise
    def transition_waiting_released(self, key):
        """
        waiting -> released: back out of a scheduled-but-not-running task,
        releasing dependencies that nobody else needs.
        """
        try:
            ts = self.tasks[key]
            if self.validate:
                assert not ts.who_has
                assert not ts.processing_on
            recommendations = {}
            for dts in ts.dependencies:
                s = dts.waiters
                if ts in s:
                    s.discard(ts)
                    if not s and not dts.who_wants:
                        # Dependency no longer needed by anyone
                        recommendations[dts.key] = "released"
            ts.waiting_on.clear()
            ts.state = "released"
            if ts.has_lost_dependencies:
                recommendations[key] = "forgotten"
            elif not ts.exception_blame and (ts.who_wants or ts.waiters):
                # Still wanted: go around again
                recommendations[key] = "waiting"
            else:
                ts.waiters.clear()
            return recommendations
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb
                pdb.set_trace()
            raise
    def transition_processing_released(self, key):
        """
        processing -> released: recall a running task from its worker
        (sends a release-task message).
        """
        try:
            ts = self.tasks[key]
            if self.validate:
                assert ts.processing_on
                assert not ts.who_has
                assert not ts.waiting_on
                assert self.tasks[key].state == "processing"
            self._remove_from_processing(
                ts, send_worker_msg={"op": "release-task", "key": key}
            )
            ts.state = "released"
            recommendations = OrderedDict()
            if ts.has_lost_dependencies:
                recommendations[key] = "forgotten"
            elif ts.waiters or ts.who_wants:
                recommendations[key] = "waiting"
            if recommendations.get(key) != "waiting":
                # Task is going away for good: drop dependency back-references
                for dts in ts.dependencies:
                    if dts.state != "released":
                        s = dts.waiters
                        s.discard(ts)
                        if not s and not dts.who_wants:
                            recommendations[dts.key] = "released"
                ts.waiters.clear()
            if self.validate:
                assert not ts.processing_on
            return recommendations
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb
                pdb.set_trace()
            raise
    def transition_processing_erred(
        self, key, cause=None, exception=None, traceback=None, **kwargs
    ):
        """
        processing -> erred: a worker reported the task failed.

        *cause* names the task to blame (may differ from *key* when the
        error arrived via a dependency).  The blame and "erred" state are
        propagated to all dependents.
        """
        try:
            ts = self.tasks[key]
            if self.validate:
                assert cause or ts.exception_blame
                assert ts.processing_on
                assert not ts.who_has
                assert not ts.waiting_on
            if ts.actor:
                ws = ts.processing_on
                ws.actors.remove(ts)
            self._remove_from_processing(ts)
            if exception is not None:
                ts.exception = exception
            if traceback is not None:
                ts.traceback = traceback
            if cause is not None:
                failing_ts = self.tasks[cause]
                ts.exception_blame = failing_ts
            else:
                failing_ts = ts.exception_blame
            recommendations = {}
            for dts in ts.dependents:
                dts.exception_blame = failing_ts
                recommendations[dts.key] = "erred"
            for dts in ts.dependencies:
                s = dts.waiters
                s.discard(ts)
                if not s and not dts.who_wants:
                    # Dependency no longer needed by anyone
                    recommendations[dts.key] = "released"
            ts.waiters.clear()  # do anything with this?
            ts.state = "erred"
            self.report(
                {
                    "op": "task-erred",
                    "key": key,
                    "exception": failing_ts.exception,
                    "traceback": failing_ts.traceback,
                }
            )
            cs = self.clients["fire-and-forget"]
            if ts in cs.wants_what:
                self.client_releases_keys(client="fire-and-forget", keys=[key])
            if self.validate:
                assert not ts.processing_on
            return recommendations
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb
                pdb.set_trace()
            raise
    def transition_no_worker_released(self, key):
        """
        no-worker -> released: unpark a task and detach it from its
        dependencies' waiter sets.
        """
        try:
            ts = self.tasks[key]
            if self.validate:
                assert self.tasks[key].state == "no-worker"
                assert not ts.who_has
                assert not ts.waiting_on
            self.unrunnable.remove(ts)
            ts.state = "released"
            for dts in ts.dependencies:
                dts.waiters.discard(ts)
            ts.waiters.clear()
            return {}
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb
                pdb.set_trace()
            raise
def remove_key(self, key):
ts = self.tasks.pop(key)
assert ts.state == "forgotten"
self.unrunnable.discard(ts)
for cs in ts.who_wants:
cs.wants_what.remove(ts)
ts.who_wants.clear()
ts.processing_on = None
ts.exception_blame = ts.exception = ts.traceback = None
if key in self.task_metadata:
del self.task_metadata[key]
    def _propagate_forgotten(self, ts, recommendations):
        """
        Mark *ts* forgotten and cascade the effects to its relatives.

        Dependents lose an input and (unless already finished) are
        recommended for forgetting; dependencies nobody else needs are
        recommended too.  Any in-memory copies are deleted from workers.
        """
        ts.state = "forgotten"
        key = ts.key
        for dts in ts.dependents:
            dts.has_lost_dependencies = True
            dts.dependencies.remove(ts)
            dts.waiting_on.discard(ts)
            if dts.state not in ("memory", "erred"):
                # Cannot compute task anymore
                recommendations[dts.key] = "forgotten"
        ts.dependents.clear()
        ts.waiters.clear()
        for dts in ts.dependencies:
            dts.dependents.remove(ts)
            s = dts.waiters
            s.discard(ts)
            if not dts.dependents and not dts.who_wants:
                # Task not needed anymore
                assert dts is not ts
                recommendations[dts.key] = "forgotten"
        ts.dependencies.clear()
        ts.waiting_on.clear()
        if ts.who_has:
            ts.group.nbytes_in_memory -= ts.get_nbytes()
        for ws in ts.who_has:
            ws.has_what.remove(ts)
            ws.nbytes -= ts.get_nbytes()
            w = ws.address
            if w in self.workers:  # in case worker has died
                self.worker_send(
                    w, {"op": "delete-data", "keys": [key], "report": False}
                )
        ts.who_has.clear()
    def transition_memory_forgotten(self, key):
        """
        memory -> forgotten: erase a completed task and its data entirely.
        """
        try:
            ts = self.tasks[key]
            if self.validate:
                assert ts.state == "memory"
                assert not ts.processing_on
                assert not ts.waiting_on
                if not ts.run_spec:
                    # It's ok to forget a pure data task
                    pass
                elif ts.has_lost_dependencies:
                    # It's ok to forget a task with forgotten dependencies
                    pass
                elif not ts.who_wants and not ts.waiters and not ts.dependents:
                    # It's ok to forget a task that nobody needs
                    pass
                else:
                    assert 0, (ts,)
            recommendations = {}
            if ts.actor:
                for ws in ts.who_has:
                    ws.actors.discard(ts)
            self._propagate_forgotten(ts, recommendations)
            self.report_on_key(ts=ts)
            self.remove_key(key)
            return recommendations
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb
                pdb.set_trace()
            raise
    def transition_released_forgotten(self, key):
        """
        released/erred -> forgotten: remove the last trace of a task.
        """
        try:
            ts = self.tasks[key]
            if self.validate:
                assert ts.state in ("released", "erred")
                assert not ts.who_has
                assert not ts.processing_on
                assert not ts.waiting_on, (ts, ts.waiting_on)
                if not ts.run_spec:
                    # It's ok to forget a pure data task
                    pass
                elif ts.has_lost_dependencies:
                    # It's ok to forget a task with forgotten dependencies
                    pass
                elif not ts.who_wants and not ts.waiters and not ts.dependents:
                    # It's ok to forget a task that nobody needs
                    pass
                else:
                    assert 0, (ts,)
            recommendations = {}
            self._propagate_forgotten(ts, recommendations)
            self.report_on_key(ts=ts)
            self.remove_key(key)
            return recommendations
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb
                pdb.set_trace()
            raise
    def transition(self, key, finish, *args, **kwargs):
        """ Transition a key from its current state to the finish state

        Looks up the handler in ``self._transitions``; when no direct edge
        exists, routes through the "released" state in two hops.  The
        resulting (start, finish) pair is appended to the transition log
        and plugins are notified.

        Examples
        --------
        >>> self.transition('x', 'waiting')
        {'x': 'processing'}

        Returns
        -------
        Dictionary of recommendations for future transitions

        See Also
        --------
        Scheduler.transitions: transitive version of this function
        """
        try:
            try:
                ts = self.tasks[key]
            except KeyError:
                return {}
            start = ts.state
            if start == finish:
                return {}
            if self.plugins:
                # Snapshot relatives in case the task gets forgotten below
                dependents = set(ts.dependents)
                dependencies = set(ts.dependencies)
            if (start, finish) in self._transitions:
                func = self._transitions[start, finish]
                recommendations = func(key, *args, **kwargs)
            elif "released" not in (start, finish):
                # No direct edge: go start -> released -> finish
                func = self._transitions["released", finish]
                assert not args and not kwargs
                a = self.transition(key, "released")
                if key in a:
                    # First hop overrode the target state
                    func = self._transitions["released", a[key]]
                b = func(key)
                a = a.copy()
                a.update(b)
                recommendations = a
                start = "released"
            else:
                raise RuntimeError(
                    "Impossible transition from %r to %r" % (start, finish)
                )
            finish2 = ts.state
            self.transition_log.append((key, start, finish2, recommendations, time()))
            if self.validate:
                logger.debug(
                    "Transitioned %r %s->%s (actual: %s). Consequence: %s",
                    key,
                    start,
                    finish2,
                    ts.state,
                    dict(recommendations),
                )
            if self.plugins:
                # Temporarily put back forgotten key for plugin to retrieve it
                if ts.state == "forgotten":
                    try:
                        ts.dependents = dependents
                        ts.dependencies = dependencies
                    except KeyError:
                        pass
                    self.tasks[ts.key] = ts
                for plugin in list(self.plugins):
                    try:
                        plugin.transition(key, start, finish2, *args, **kwargs)
                    except Exception:
                        logger.info("Plugin failed with exception", exc_info=True)
                if ts.state == "forgotten":
                    del self.tasks[ts.key]
            if ts.state == "forgotten":
                # Remove TaskGroup if all tasks are in the forgotten state
                tg = ts.group
                if not any(tg.states.get(s) for s in ALL_TASK_STATES):
                    ts.prefix.groups.remove(tg)
                    del self.task_groups[tg.name]
            return recommendations
        except Exception as e:
            logger.exception("Error transitioning %r from %r to %r", key, start, finish)
            if LOG_PDB:
                import pdb
                pdb.set_trace()
            raise
def transitions(self, recommendations):
""" Process transitions until none are left
This includes feedback from previous transitions and continues until we
reach a steady state
"""
keys = set()
recommendations = recommendations.copy()
while recommendations:
key, finish = recommendations.popitem()
keys.add(key)
new = self.transition(key, finish)
recommendations.update(new)
if self.validate:
for key in keys:
self.validate_key(key)
def story(self, *keys):
""" Get all transitions that touch one of the input keys """
keys = set(keys)
return [
t for t in self.transition_log if t[0] in keys or keys.intersection(t[3])
]
transition_story = story
def reschedule(self, key=None, worker=None):
""" Reschedule a task
Things may have shifted and this task may now be better suited to run
elsewhere
"""
try:
ts = self.tasks[key]
except KeyError:
logger.warning(
"Attempting to reschedule task {}, which was not "
"found on the scheduler. Aborting reschedule.".format(key)
)
return
if ts.state != "processing":
return
if worker and ts.processing_on.address != worker:
return
self.transitions({key: "released"})
    ##############################
    # Assigning Tasks to Workers #
    ##############################
    def check_idle_saturated(self, ws, occ=None):
        """ Update the status of the idle and saturated state

        The scheduler keeps track of workers that are ..
        - Saturated: have enough work to stay busy
        - Idle: do not have enough work to stay busy

        They are considered saturated if they both have enough tasks to occupy
        all of their threads, and if the expected runtime of those tasks is
        large enough.

        This is useful for load balancing and adaptivity.

        Parameters
        ----------
        ws : worker state to classify
        occ : occupancy override; defaults to ``ws.occupancy``
        """
        if self.total_nthreads == 0 or ws.status == "closed":
            return
        if occ is None:
            occ = ws.occupancy
        nc = ws.nthreads
        p = len(ws.processing)
        # Idle: fewer tasks than threads, or well below average cluster load
        avg = self.total_occupancy / self.total_nthreads
        if p < nc or occ / nc < avg / 2:
            self.idle.add(ws)
            self.saturated.discard(ws)
        else:
            self.idle.discard(ws)
            # Saturated: backlog beyond thread count AND the pending work is
            # both non-trivial (> 0.4s) and well above the cluster average.
            # Thresholds are tuned heuristics.
            pending = occ * (p - nc) / p / nc
            if p > nc and pending > 0.4 and pending > 1.9 * avg:
                self.saturated.add(ws)
            else:
                self.saturated.discard(ws)
    def valid_workers(self, ts):
        """ Return set of currently valid workers for key

        If all workers are valid then this returns ``True``.
        This checks tracks the following state:

        *  worker_restrictions
        *  host_restrictions
        *  resource_restrictions

        ``s`` acts as a lattice: it stays ``True`` (meaning "unrestricted")
        until the first restriction applies, after which it is a set of
        worker addresses that later restrictions intersect/extend.
        """
        s = True
        if ts.worker_restrictions:
            s = {w for w in ts.worker_restrictions if w in self.workers}
        if ts.host_restrictions:
            # Resolve the alias here rather than early, for the worker
            # may not be connected when host_restrictions is populated
            hr = [self.coerce_hostname(h) for h in ts.host_restrictions]
            # XXX need HostState?
            ss = [self.host_info[h]["addresses"] for h in hr if h in self.host_info]
            ss = set.union(*ss) if ss else set()
            if s is True:
                s = ss
            else:
                s |= ss
        if ts.resource_restrictions:
            # For each resource, the workers supplying enough of it
            w = {
                resource: {
                    w
                    for w, supplied in self.resources[resource].items()
                    if supplied >= required
                }
                for resource, required in ts.resource_restrictions.items()
            }
            # A worker must satisfy every required resource
            ww = set.intersection(*w.values())
            if s is True:
                s = ww
            else:
                s &= ww
        if s is True:
            return s
        else:
            # Map addresses back to WorkerState objects
            return {self.workers[w] for w in s}
def consume_resources(self, ts, ws):
if ts.resource_restrictions:
for r, required in ts.resource_restrictions.items():
ws.used_resources[r] += required
def release_resources(self, ts, ws):
if ts.resource_restrictions:
for r, required in ts.resource_restrictions.items():
ws.used_resources[r] -= required
#####################
# Utility functions #
#####################
def add_resources(self, stream=None, worker=None, resources=None):
ws = self.workers[worker]
if resources:
ws.resources.update(resources)
ws.used_resources = {}
for resource, quantity in ws.resources.items():
ws.used_resources[resource] = 0
self.resources[resource][worker] = quantity
return "OK"
def remove_resources(self, worker):
ws = self.workers[worker]
for resource, quantity in ws.resources.items():
del self.resources[resource][worker]
def coerce_address(self, addr, resolve=True):
"""
Coerce possible input addresses to canonical form.
*resolve* can be disabled for testing with fake hostnames.
Handles strings, tuples, or aliases.
"""
# XXX how many address-parsing routines do we have?
if addr in self.aliases:
addr = self.aliases[addr]
if isinstance(addr, tuple):
addr = unparse_host_port(*addr)
if not isinstance(addr, str):
raise TypeError("addresses should be strings or tuples, got %r" % (addr,))
if resolve:
addr = resolve_address(addr)
else:
addr = normalize_address(addr)
return addr
def coerce_hostname(self, host):
"""
Coerce the hostname of a worker.
"""
if host in self.aliases:
return self.workers[self.aliases[host]].host
else:
return host
def workers_list(self, workers):
"""
List of qualifying workers
Takes a list of worker addresses or hostnames.
Returns a list of all worker addresses that match
"""
if workers is None:
return list(self.workers)
out = set()
for w in workers:
if ":" in w:
out.add(w)
else:
out.update({ww for ww in self.workers if w in ww}) # TODO: quadratic
return list(out)
def start_ipython(self, comm=None):
"""Start an IPython kernel
Returns Jupyter connection info dictionary.
"""
from ._ipython_utils import start_ipython
if self._ipython_kernel is None:
self._ipython_kernel = start_ipython(
ip=self.ip, ns={"scheduler": self}, log=logger
)
return self._ipython_kernel.get_connection_info()
def worker_objective(self, ts, ws):
"""
Objective function to determine which worker should get the task
Minimize expected start time. If a tie then break with data storage.
"""
comm_bytes = sum(
[dts.get_nbytes() for dts in ts.dependencies if ws not in dts.who_has]
)
stack_time = ws.occupancy / ws.nthreads
start_time = comm_bytes / self.bandwidth + stack_time
if ts.actor:
return (len(ws.actors), start_time, ws.nbytes)
else:
return (start_time, ws.nbytes)
    async def get_profile(
        self,
        comm=None,
        workers=None,
        scheduler=False,
        server=False,
        merge_workers=True,
        start=None,
        stop=None,
        key=None,
    ):
        """
        Collect statistical profile data from workers, or (with
        ``scheduler=True``) from the scheduler's own event loop.

        Results are merged across workers unless ``merge_workers=False``,
        in which case a per-worker dict is returned.
        """
        if workers is None:
            workers = self.workers
        else:
            # Restrict to workers we actually know about
            workers = set(self.workers) & set(workers)
        if scheduler:
            return profile.get_profile(self.io_loop.profile, start=start, stop=stop)
        results = await asyncio.gather(
            *(
                self.rpc(w).profile(start=start, stop=stop, key=key, server=server)
                for w in workers
            )
        )
        if merge_workers:
            response = profile.merge(*results)
        else:
            response = dict(zip(workers, results))
        return response
    async def get_profile_metadata(
        self,
        comm=None,
        workers=None,
        merge_workers=True,
        start=None,
        stop=None,
        profile_cycle_interval=None,
    ):
        """
        Collect profile counts and per-key timings from workers, binned into
        windows of ``profile_cycle_interval`` seconds.

        Returns ``{"counts": [(time, count), ...],
                   "keys": {key: [[time, count], ...]}}``.
        """
        dt = profile_cycle_interval or dask.config.get(
            "distributed.worker.profile.cycle"
        )
        dt = parse_timedelta(dt, default="ms")
        if workers is None:
            workers = self.workers
        else:
            workers = set(self.workers) & set(workers)
        results = await asyncio.gather(
            *(self.rpc(w).profile_metadata(start=start, stop=stop) for w in workers)
        )
        counts = [v["counts"] for v in results]
        # Merge all workers' count streams and bin them into dt-sized windows
        counts = itertools.groupby(merge_sorted(*counts), lambda t: t[0] // dt * dt)
        counts = [(time, sum(pluck(1, group))) for time, group in counts]
        keys = set()
        for v in results:
            for t, d in v["keys"]:
                for k in d:
                    keys.add(k)
        keys = {k: [] for k in keys}
        groups1 = [v["keys"] for v in results]
        groups2 = list(merge_sorted(*groups1, key=first))
        last = 0
        for t, d in groups2:
            tt = t // dt * dt
            if tt > last:
                # New time window: start a fresh accumulator for every key
                last = tt
                for k, v in keys.items():
                    v.append([tt, 0])
            for k, v in d.items():
                keys[k][-1][1] += v
        return {"counts": counts, "keys": keys}
    async def performance_report(self, comm=None, start=None, code=""):
        """
        Build a standalone HTML performance report (bokeh) covering the
        time since *start*: profiles, task stream, bandwidth panels and a
        summary page.  *code* is the calling code to embed verbatim.

        Returns the HTML document as a string.
        """
        # Profiles
        compute, scheduler, workers = await asyncio.gather(
            *[
                self.get_profile(start=start),
                self.get_profile(scheduler=True, start=start),
                self.get_profile(server=True, start=start),
            ]
        )
        from . import profile

        def profile_to_figure(state):
            # Render one profile state into a bokeh figure
            data = profile.plot_data(state)
            figure, source = profile.plot_figure(data, sizing_mode="stretch_both")
            return figure

        compute, scheduler, workers = map(
            profile_to_figure, (compute, scheduler, workers)
        )
        # Task stream
        task_stream = self.get_task_stream(start=start)
        from .diagnostics.task_stream import rectangles
        from .dashboard.components.scheduler import task_stream_figure

        rects = rectangles(task_stream)
        source, task_stream = task_stream_figure(sizing_mode="stretch_both")
        source.data.update(rects)
        from distributed.dashboard.components.scheduler import (
            BandwidthWorkers,
            BandwidthTypes,
        )

        bandwidth_workers = BandwidthWorkers(self, sizing_mode="stretch_both")
        bandwidth_workers.update()
        bandwidth_types = BandwidthTypes(self, sizing_mode="stretch_both")
        bandwidth_types.update()
        from bokeh.models import Panel, Tabs, Div

        # HTML
        html = """
        <h1> Dask Performance Report </h1>

        <i> Select different tabs on the top for additional information </i>

        <h2> Duration: {time} </h2>

        <h2> Scheduler Information </h2>
        <ul>
        <li> Address: {address} </li>
        <li> Workers: {nworkers} </li>
        <li> Threads: {threads} </li>
        <li> Memory: {memory} </li>
        </ul>

        <h2> Calling Code </h2>
        <pre>
{code}
        </pre>
        """.format(
            time=format_time(time() - start),
            address=self.address,
            nworkers=len(self.workers),
            threads=sum(w.nthreads for w in self.workers.values()),
            memory=format_bytes(sum(w.memory_limit for w in self.workers.values())),
            code=code,
        )
        html = Div(text=html)
        html = Panel(child=html, title="Summary")
        compute = Panel(child=compute, title="Worker Profile (compute)")
        workers = Panel(child=workers, title="Worker Profile (administrative)")
        scheduler = Panel(child=scheduler, title="Scheduler Profile (administrative)")
        task_stream = Panel(child=task_stream, title="Task Stream")
        bandwidth_workers = Panel(
            child=bandwidth_workers.fig, title="Bandwidth (Workers)"
        )
        bandwidth_types = Panel(child=bandwidth_types.fig, title="Bandwidth (Types)")
        tabs = Tabs(
            tabs=[
                html,
                task_stream,
                compute,
                workers,
                scheduler,
                bandwidth_workers,
                bandwidth_types,
            ]
        )
        from bokeh.plotting import save, output_file

        with tmpfile(extension=".html") as fn:
            output_file(filename=fn, title="Dask Performance Report")
            save(tabs, filename=fn)
            with open(fn) as f:
                data = f.read()
        return data
async def get_worker_logs(self, comm=None, n=None, workers=None, nanny=False):
    """Collect log entries from a set of workers (or their nannies).

    Broadcasts a ``get_logs`` request to *workers* and returns the
    per-worker results keyed by worker address.
    """
    msg = {"op": "get_logs", "n": n}
    return await self.broadcast(msg=msg, workers=workers, nanny=nanny)
###########
# Cleanup #
###########
def reevaluate_occupancy(self, worker_index=0):
    """ Periodically reassess task duration time

    The expected duration of a task can change over time.  Unfortunately we
    don't have a good constant-time way to propagate the effects of these
    changes out to the summaries that they affect, like the total expected
    runtime of each of the workers, or what tasks are stealable.

    In this coroutine we walk through all of the workers and re-align their
    estimates with the current state of tasks.  We do this periodically
    rather than at every transition, and we only do it if the scheduler
    process isn't under load (using psutil.Process.cpu_percent()).  This
    lets us avoid this fringe optimization when we have better things to
    think about.
    """
    # Minimum delay (seconds) before rescheduling ourselves.
    DELAY = 0.1
    try:
        if self.status == "closed":
            return
        last = time()
        next_time = timedelta(seconds=DELAY)

        # Only do the work if the scheduler process is not busy.
        if self.proc.cpu_percent() < 50:
            workers = list(self.workers.values())
            for i in range(len(workers)):
                # Resume from where the previous invocation left off,
                # wrapping around the worker list round-robin style.
                ws = workers[worker_index % len(workers)]
                worker_index += 1
                try:
                    if ws is None or not ws.processing:
                        continue
                    self._reevaluate_occupancy_worker(ws)
                finally:
                    del ws  # lose ref

                # If we've been at this a while, back off: reschedule
                # proportionally to how long this pass took.
                duration = time() - last
                if duration > 0.005:  # 5ms since last release
                    next_time = timedelta(seconds=duration * 5)  # 25ms gap
                    break

        # Reschedule ourselves, carrying the rotation index forward.
        self.loop.add_timeout(
            next_time, self.reevaluate_occupancy, worker_index=worker_index
        )

    except Exception:
        logger.error("Error in reevaluate occupancy", exc_info=True)
        raise
def _reevaluate_occupancy_worker(self, ws):
    """Recompute the occupancy (expected busy time, in seconds) of one worker.

    Re-estimates the duration + communication cost of every task the worker
    is processing, updates the worker's and the scheduler's totals, and
    notifies the stealing extension if the estimate grew significantly.

    See reevaluate_occupancy
    """
    old = ws.occupancy

    # Removed dead local `nbytes = 0` that was never read.
    new = 0
    for ts in ws.processing:
        duration = self.get_task_duration(ts)
        comm = self.get_comm_cost(ts, ws)
        ws.processing[ts] = duration + comm
        new += duration + comm

    ws.occupancy = new
    self.total_occupancy += new - old
    self.check_idle_saturated(ws)

    # significant increase in duration: re-bucket the worker's tasks in the
    # stealing extension so steal decisions use the fresh estimates
    if (new > old * 1.3) and ("stealing" in self.extensions):
        steal = self.extensions["stealing"]
        for ts in ws.processing:
            steal.remove_key_from_stealable(ts)
            steal.put_key_in_stealable(ts)
def check_worker_ttl(self):
    """Remove workers whose last heartbeat is older than ``worker_ttl``.

    Any worker whose ``last_seen`` timestamp lags ``worker_ttl`` seconds
    behind now is assumed dead and removed from the scheduler.
    """
    now = time()
    # Snapshot the stale workers first: remove_worker mutates self.workers,
    # and removing entries while iterating over .values() would raise
    # "dictionary changed size during iteration".
    stale = [
        ws for ws in self.workers.values() if ws.last_seen < now - self.worker_ttl
    ]
    for ws in stale:
        logger.warning(
            "Worker failed to heartbeat within %s seconds. Closing: %s",
            self.worker_ttl,
            ws,
        )
        self.remove_worker(address=ws.address)
def check_idle(self):
    """Schedule scheduler shutdown if it has been idle past ``idle_timeout``.

    The scheduler counts as idle when no worker is processing anything and
    nothing is waiting to run; idleness is measured from the last task
    transition (or from startup if nothing ever ran).
    """
    busy = any(ws.processing for ws in self.workers.values())
    if busy or self.unrunnable:
        return

    # Reference point for the idle clock.
    if self.transition_log:
        reference = self.transition_log[-1][-1]
    else:
        reference = self.time_started

    if time() > reference + self.idle_timeout:
        self.loop.add_callback(self.close)
def adaptive_target(self, comm=None, target_duration="5s"):
    """ Desired number of workers based on the current workload

    This looks at the current running tasks and memory use, and returns a
    number of desired workers.  This is often used by adaptive scheduling.

    Parameters
    ----------
    target_duration: str
        A desired duration of time for computations to take.  This affects
        how rapidly the scheduler will ask to scale.

    See Also
    --------
    distributed.deploy.Adaptive
    """
    target_duration = parse_timedelta(target_duration)

    # CPU
    # Workers needed to finish the scheduled work within target_duration,
    # assuming one thread per worker.
    cpu = math.ceil(
        self.total_occupancy / target_duration
    )  # TODO: threads per worker

    # Avoid a few long tasks from asking for many cores
    # NOTE: for/else — the `else` runs only if we never broke out, i.e.
    # the total task count is <= cpu, in which case task count caps cpu.
    tasks_processing = 0
    for ws in self.workers.values():
        tasks_processing += len(ws.processing)

        if tasks_processing > cpu:
            break
    else:
        cpu = min(tasks_processing, cpu)

    # If work is queued but no workers exist, always ask for at least one.
    if self.unrunnable and not self.workers:
        cpu = max(1, cpu)

    # Memory
    # Heuristic: if cluster memory is more than 60% full, ask to double.
    limit_bytes = {addr: ws.memory_limit for addr, ws in self.workers.items()}
    worker_bytes = [ws.nbytes for ws in self.workers.values()]
    limit = sum(limit_bytes.values())
    total = sum(worker_bytes)
    if total > 0.6 * limit:
        memory = 2 * len(self.workers)
    else:
        memory = 0

    target = max(memory, cpu)

    if target >= len(self.workers):
        return target
    else:  # Scale down?
        to_close = self.workers_to_close()
        return len(self.workers) - len(to_close)
def decide_worker(ts, all_workers, valid_workers, objective):
    """
    Decide which worker should take task *ts*.

    We choose the worker that has the data on which *ts* depends.

    If several workers have dependencies then we choose the less-busy worker.

    Optionally provide *valid_workers* of where jobs are allowed to occur
    (if all workers are allowed to take the task, pass True instead).

    If the task requires data communication because no eligible worker has
    all the dependencies already, then we choose to minimize the number
    of bytes sent between workers.  This is determined by calling the
    *objective* function.
    """
    deps = ts.dependencies
    # Every dependency must already live somewhere.
    assert all(dts.who_has for dts in deps)
    if ts.actor:
        # Actors may be placed anywhere; data locality doesn't apply.
        candidates = all_workers
    else:
        # Mapping of worker -> how many of this task's deps it holds.
        candidates = frequencies([ws for dts in deps for ws in dts.who_has])
    if valid_workers is True:
        if not candidates:
            candidates = all_workers
    else:
        # Restrict to the allowed set; fall back to any allowed worker,
        # then (with loose restrictions) retry ignoring restrictions.
        candidates = valid_workers & set(candidates)
        if not candidates:
            candidates = valid_workers
            if not candidates:
                if ts.loose_restrictions:
                    return decide_worker(ts, all_workers, True, objective)
                else:
                    return None
    if not candidates:
        return None

    if len(candidates) == 1:
        return first(candidates)

    return min(candidates, key=objective)
def validate_task_state(ts):
    """
    Validate the given TaskState.

    Checks internal consistency (state vs. who_has/processing_on) and
    cross-links with dependencies, dependents, workers and clients.
    Raises AssertionError on the first inconsistency found.
    """
    assert ts.state in ALL_TASK_STATES or ts.state == "forgotten", ts

    if ts.waiting_on:
        assert ts.waiting_on.issubset(ts.dependencies), (
            "waiting not subset of dependencies",
            str(ts.waiting_on),
            str(ts.dependencies),
        )
    if ts.waiters:
        assert ts.waiters.issubset(ts.dependents), (
            "waiters not subset of dependents",
            str(ts.waiters),
            str(ts.dependents),
        )

    for dts in ts.waiting_on:
        assert not dts.who_has, ("waiting on in-memory dep", str(ts), str(dts))
        assert dts.state != "released", ("waiting on released dep", str(ts), str(dts))
    for dts in ts.dependencies:
        assert ts in dts.dependents, (
            "not in dependency's dependents",
            str(ts),
            str(dts),
            str(dts.dependents),
        )
        if ts.state in ("waiting", "processing"):
            # Each dependency is either still awaited or already in memory.
            assert dts in ts.waiting_on or dts.who_has, (
                "dep missing",
                str(ts),
                str(dts),
            )
        assert dts.state != "forgotten"

    for dts in ts.waiters:
        assert dts.state in ("waiting", "processing"), (
            "waiter not in play",
            str(ts),
            str(dts),
        )
    for dts in ts.dependents:
        assert ts in dts.dependencies, (
            "not in dependent's dependencies",
            str(ts),
            str(dts),
            str(dts.dependencies),
        )
        assert dts.state != "forgotten"

    # State flags must agree with the corresponding link attributes.
    assert (ts.processing_on is not None) == (ts.state == "processing")
    assert bool(ts.who_has) == (ts.state == "memory"), (ts, ts.who_has)

    if ts.state == "processing":
        assert all(dts.who_has for dts in ts.dependencies), (
            "task processing without all deps",
            str(ts),
            str(ts.dependencies),
        )
        assert not ts.waiting_on

    if ts.who_has:
        assert ts.waiters or ts.who_wants, (
            "unneeded task in memory",
            str(ts),
            str(ts.who_has),
        )
        if ts.run_spec:  # was computed
            assert ts.type
            assert isinstance(ts.type, str)
        assert not any(ts in dts.waiting_on for dts in ts.dependents)
        for ws in ts.who_has:
            assert ts in ws.has_what, (
                "not in who_has' has_what",
                str(ts),
                str(ws),
                str(ws.has_what),
            )

    if ts.who_wants:
        for cs in ts.who_wants:
            assert ts in cs.wants_what, (
                "not in who_wants' wants_what",
                str(ts),
                str(cs),
                str(cs.wants_what),
            )

    if ts.actor:
        # An in-memory actor lives on exactly one worker.
        if ts.state == "memory":
            assert sum([ts in ws.actors for ws in ts.who_has]) == 1
        if ts.state == "processing":
            assert ts in ts.processing_on.actors
def validate_worker_state(ws):
    """Check that worker state *ws* is consistent with the tasks it references.

    Raises AssertionError if a held task does not link back to this worker,
    or if an actor task is in an impossible state.
    """
    for task in ws.has_what:
        assert ws in task.who_has, (
            "not in has_what' who_has",
            str(ws),
            str(task),
            str(task.who_has),
        )

    for actor_task in ws.actors:
        assert actor_task.state in ("memory", "processing")
def validate_state(tasks, workers, clients):
    """
    Validate a current runtime state

    This performs a sequence of checks on the entire graph, running in about
    linear time.  This raises assert errors if anything doesn't check out.
    """
    for task_state in tasks.values():
        validate_task_state(task_state)

    for worker_state in workers.values():
        validate_worker_state(worker_state)

    for client_state in clients.values():
        for task_state in client_state.wants_what:
            assert client_state in task_state.who_wants, (
                "not in wants_what' who_wants",
                str(client_state),
                str(task_state),
                str(task_state.who_wants),
            )
# One-element list holding a rotating index.  The name suggests round-robin
# selection; the list wrapper allows in-place mutation by callers (usage is
# not visible in this chunk — confirm against the rest of the module).
_round_robin = [0]
def heartbeat_interval(n):
    """
    Interval in seconds that we desire heartbeats based on number of workers

    Larger clusters heartbeat less often to keep scheduler load bounded.
    """
    # (upper bound on n, interval) pairs, checked in ascending order.
    thresholds = ((10, 0.5), (49, 1), (199, 2))
    for limit, interval in thresholds:
        if n <= limit:
            return interval
    return 5
class KilledWorker(Exception):
    """Raised when a task's assigned worker died while running it."""

    def __init__(self, task, last_worker):
        super().__init__(task, last_worker)
        self.task = task
        self.last_worker = last_worker
class WorkerStatusPlugin(SchedulerPlugin):
    """
    A plugin to share worker status with a remote observer

    This is used in cluster managers to keep updated about the status of the
    scheduler.
    """

    def __init__(self, scheduler, comm):
        # Batch outgoing status messages over the provided comm.
        self.scheduler = scheduler
        self.bcomm = BatchedSend(interval="5ms")
        self.bcomm.start(comm)
        self.scheduler.add_plugin(self)

    def add_worker(self, worker=None, **kwargs):
        # Forward the worker's identity, minus volatile fields.
        info = self.scheduler.workers[worker].identity()
        info.pop("metrics")
        info.pop("last_seen")
        try:
            self.bcomm.send(["add", {"workers": {worker: info}}])
        except CommClosedError:
            # The observer went away; stop reporting.
            self.scheduler.remove_plugin(self)

    def remove_worker(self, worker=None, **kwargs):
        try:
            self.bcomm.send(["remove", worker])
        except CommClosedError:
            self.scheduler.remove_plugin(self)

    def teardown(self):
        self.bcomm.close()
| 33.621173 | 104 | 0.541176 |
77fd0796e0900e303291d107bc164abcee5183fd | 3,393 | py | Python | lib/surface/compute/instances/simulate_maintenance_event.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | [
"Apache-2.0"
] | null | null | null | lib/surface/compute/instances/simulate_maintenance_event.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | [
"Apache-2.0"
] | null | null | null | lib/surface/compute/instances/simulate_maintenance_event.py | bopopescu/Google-Cloud-SDK-1 | c4683bacb2f6192d8a816932e438a0493085469b | [
"Apache-2.0"
] | 1 | 2020-07-24T20:13:29.000Z | 2020-07-24T20:13:29.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for simulating maintenance events on virtual machine instances."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute.operations import poller
from googlecloudsdk.api_lib.util import waiter
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.instances import flags as instance_flags
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SimulateMaintenanceEvent(base.UpdateCommand):
  """Simulate maintenance of virtual machine instances."""

  @staticmethod
  def Args(parser):
    # Register the instance name/zone arguments and the --async flag.
    instance_flags.INSTANCES_ARG.AddArgument(parser)
    base.ASYNC_FLAG.AddToParser(parser)

  def Run(self, args):
    # NOTE(review): this code predates Python 3.7 — `args.async` and the
    # `async=True` keyword below are syntax errors on modern Python.
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client.apitools_client
    messages = holder.client.messages

    # Resolve the instance references (prompting for zone if needed).
    instance_refs = instance_flags.INSTANCES_ARG.ResolveAsResource(
        args,
        holder.resources,
        scope_lister=flags.GetDefaultScopeLister(holder.client))

    # One SimulateMaintenanceEvent request per instance, sent as a batch.
    requests = []
    for instance_ref in instance_refs:
      request = messages.ComputeInstancesSimulateMaintenanceEventRequest(
          **instance_ref.AsDict())
      requests.append((client.instances, 'SimulateMaintenanceEvent', request))

    errors_to_collect = []
    responses = holder.client.BatchRequests(requests, errors_to_collect)
    for r in responses:
      err = getattr(r, 'error', None)
      if err:
        errors_to_collect.append(poller.OperationErrors(err.errors))
    if errors_to_collect:
      raise core_exceptions.MultiError(errors_to_collect)

    operation_refs = [holder.resources.Parse(r.selfLink) for r in responses]

    # With --async, report the pending operations and return immediately.
    if args.async:
      for i, operation_ref in enumerate(operation_refs):
        log.UpdatedResource(
            operation_ref,
            kind='gce instance [{0}]'.format(instance_refs[i].Name()),
            async=True,
            details='Use [gcloud compute operations describe] command '
                    'to check the status of this operation.')
      return responses

    # Otherwise, block until every operation completes.
    operation_poller = poller.BatchPoller(holder.client, client.instances,
                                          instance_refs)
    return waiter.WaitFor(
        operation_poller,
        poller.OperationBatch(operation_refs),
        'Simulating maintenance on instance(s) [{0}]'
        .format(', '.join(i.SelfLink() for i in instance_refs)))
# Help text rendered by `gcloud ... simulate-maintenance-event --help`.
SimulateMaintenanceEvent.detailed_help = {
    'brief':
        'Simulate maintenance of virtual machine instances',
    'DESCRIPTION':
        """\
        *{command}* simulates a maintenance event on Google
        Compute Engine virtual machines.
        """,
}
79d0d8a9394e36815ae37722447d4cbc3d165c9a | 1,664 | py | Python | RandomForestParamSearchAbdA/num-estimators_200_min-sample-split_15_max-depth_None_max-leaf-nodes_2_random-state_0_class-weight_balanced.py | aparna-arr/DeepLearningChromatinStructure | f56d36b8fc8b01df407ed7a2526266c4d8e731d4 | [
"MIT"
] | 3 | 2021-07-26T02:06:39.000Z | 2022-03-20T13:00:25.000Z | RandomForestParamSearchAbdA/num-estimators_200_min-sample-split_15_max-depth_None_max-leaf-nodes_2_random-state_0_class-weight_balanced.py | aparna-arr/DeepLearningChromatinStructure | f56d36b8fc8b01df407ed7a2526266c4d8e731d4 | [
"MIT"
] | null | null | null | RandomForestParamSearchAbdA/num-estimators_200_min-sample-split_15_max-depth_None_max-leaf-nodes_2_random-state_0_class-weight_balanced.py | aparna-arr/DeepLearningChromatinStructure | f56d36b8fc8b01df407ed7a2526266c4d8e731d4 | [
"MIT"
] | 1 | 2021-06-09T16:04:52.000Z | 2021-06-09T16:04:52.000Z | #!/share/software/user/open/python/3.6.1/bin/python3
from src.ModelDriver import *
## MODIFY THESE PARAMS FOR SPECIFIC RUN ###
X_train = "/oak/stanford/groups/aboettig/Aparna/NNproject/clean_data/train_5.23.18_unbalanced_unaugmented_xyz.txt"
Y_train = "/oak/stanford/groups/aboettig/Aparna/NNproject/clean_data/train_5.23.18_unbalanced_unaugmented_rna_2.txt"
X_dev = "/oak/stanford/groups/aboettig/Aparna/NNproject/clean_data/dev_5.23.18_unbalanced_unaugmented_xyz.txt"
Y_dev = "/oak/stanford/groups/aboettig/Aparna/NNproject/clean_data/dev_5.23.18_unbalanced_unaugmented_rna_2.txt"
specific_info = "hyperparam-search-rf"
architecture = "rf"
num_estimators = 200
min_sample_split = 15
max_depth = None
max_leaf_nodes = 2
random_state = 0
class_weight = "balanced"
n_jobs = -1
writestr = "num-estimators_" + str(num_estimators) + "_" +\
"min-sample-split_" + str(min_sample_split) + "_" +\
"max-depth_" + str(max_depth) + "_" +\
"max-leaf-nodes_" + str(max_leaf_nodes) + "_" +\
"random-state_" + str(random_state) + "_" +\
"class-weight_" + class_weight
tag = writestr + "_" + specific_info
## END OF PARAMS TO MODIFY ##
PARAMETERS = {
"X_train" : X_train,
"Y_train" : Y_train,
"X_dev" : X_dev,
"Y_dev" : Y_dev,
"architecture" : architecture,
"num_estimators" : num_estimators,
"min_sample_split" : min_sample_split,
"max_depth" : max_depth,
"max_leaf_nodes" : max_leaf_nodes,
"random_state" : random_state,
"class_weight" : class_weight,
"n_jobs" : n_jobs,
"tag" : tag,
"print_cost" : True
}
modelDriver = ModelDriver(PARAMETERS)
modelDriver.load()
modelDriver.init_model()
out = modelDriver.run_model()
| 32.627451 | 116 | 0.729567 |
0dff1ffa8d893cebbbf24c5c9c1f17710d008069 | 9,494 | py | Python | uvicore/auth/user_providers/orm.py | uvicore/framework | 9c21b85e9e470c6d789899340332a9abd0b26ab1 | [
"MIT"
] | 11 | 2021-03-22T22:07:49.000Z | 2022-03-08T16:18:33.000Z | uvicore/auth/user_providers/orm.py | uvicore/framework | 9c21b85e9e470c6d789899340332a9abd0b26ab1 | [
"MIT"
] | 12 | 2021-03-04T05:51:24.000Z | 2021-09-22T05:16:18.000Z | uvicore/auth/user_providers/orm.py | uvicore/framework | 9c21b85e9e470c6d789899340332a9abd0b26ab1 | [
"MIT"
] | 2 | 2021-03-25T14:49:56.000Z | 2021-11-17T23:20:29.000Z | import uvicore
from uvicore.auth.user_info import UserInfo
from uvicore.support.hash import sha1
from uvicore.contracts import UserProvider
from uvicore.support.dumper import dump, dd
from uvicore.auth.support import password as pwd
from uvicore.http.request import HTTPConnection
from uvicore.typing import List, Union, Any, Dict
from uvicore.auth.models.user import User as UserModel
from uvicore.auth.models.group import Group
from datetime import datetime
@uvicore.service()
class Orm(UserProvider):
    """Retrieve and validate user from uvicore.auth ORM User model during Authentication middleware

    This is NOT a stateless user provider as it queries the user, groups, roles tables from a database.
    """

    # def __init__(self):
    #     # Only need for an __init__ override is to modify field mappings
    #     super().__init__()

    #     # Temp, until I add username to ORM model
    #     self.field_map['username'] = 'email'

    async def _retrieve_user(self,
        key_name: str,
        key_value: Any,
        request: HTTPConnection,
        *,
        password: str = None,

        # Parameters from auth config
        anonymous: bool = False,
        includes: List = None,

        # Must have kwargs for infinite allowed optional params, even if not used.
        **kwargs,
    ) -> UserInfo:
        """Retrieve user from backend

        Looks the user up by ``key_name=key_value`` (optionally verifying
        ``password``), resolves groups/roles/permissions, caches the result
        and returns a UserInfo.  Returns None when the user is not found,
        disabled, or the password does not verify.
        """
        # Cache store
        # Array store is much faster than redis and since this is called at the
        # middleware level on every request, we want it to be as performant as possible.
        # Set to None to use config default.
        cache_store = 'array'

        # Get password hash for cache key. Password is still required to pull the right cache key
        # or else someone could login with an invalid password for the duration of the cache
        password_hash = '/' + sha1(password) if password is not None else ''

        # Check if user already validated in cache, if so, skip DB hits!
        # Don't do a cache.has() becuase cache.get() already does it
        # and because of the array store _expire() it's a bit expensive.
        # Eliminating the duplicate has() saved me about 1500 req/sec on wrk benchmark
        cache_key = 'auth/user/' + str(key_value) + password_hash
        user = await uvicore.cache.store(cache_store).get(cache_key)
        if user:
            # User is already validated and cached
            # Retrieve user from cache, no password check required because cache key has password has in it
            #dump('Cached authentication middleware User found, load from cache!')
            return user

        # Interesting. With a heavy 'wrk' performance test you can see this hit
        # a dozen times on the first run. Because of await, while caching is
        # trying to take place, many other request are comming in and processing.
        # We actually hit the DB a dozen times before the first request is cached.
        # This is why we do 'wrk' at least twice, to "warm up" the cache. Also
        # with 'array` cache store and gunicorn, its actually one cache registery
        # per THREAD unlinke the shared redis backend which is just one cache. So
        # with gunicorn and array caching you will see at least N cache misses.
        # But even still you see several more due to the concurrency of wrk and the
        # time it takes for await to set the cache.
        dump('UNcached authentication middleware User, load from DB')

        # ORM is currently thworing a Warning: Truncated incorrect DOUBLE value: '='
        # when using actual bool as bit value. So I convert to '1' or '0' strings instead
        disabled = '1' if anonymous else '0'

        # Cache not found. Query user, validate password and convert to user class
        find_kwargs = {key_name: key_value}
        db_user = await (UserModel.query()
            .include(*includes)
            .where('disabled', disabled)
            #.show_writeonly(['password'])
            .show_writeonly(True)
            .find(**find_kwargs)
        )

        # User not found or disabled. Return None means not verified or found.
        if not db_user: return None

        # If we are checking passwords and the db_user has NO password, user cannot be logged into
        if password is not None and db_user.password is None: return None

        # If password, validate credentials
        if password is not None:
            if not pwd.verify(password, db_user.password):
                # Invalid password. Return None means not verified or found.
                return None

        # Get users groups->roles->permissions (roles linked to a group)
        groups = []
        roles = []
        permissions = []
        if 'groups' in includes:
            user_groups = db_user.groups
            if user_groups:
                for group in user_groups:
                    groups.append(group.name)
                    if not group.roles: continue
                    for role in group.roles:
                        roles.append(role.name)
                        if not role.permissions: continue
                        for permission in role.permissions:
                            permissions.append(permission.name)

        # Get users roles->permissions (roles linked directly to the user)
        if 'roles' in includes:
            user_roles = db_user.roles
            if user_roles:
                for role in user_roles:
                    roles.append(role.name)
                    if not role.permissions: continue
                    for permission in role.permissions:
                        permissions.append(permission.name)

        # Unique groups, roles and permissions (sets are unique)
        groups = sorted(list(set(groups)))
        roles = sorted(list(set(roles)))
        permissions = sorted(list(set(permissions)))

        # Set super admin, existence of 'admin' permission
        # Fixme, there is a 'superadmin' field on the roles table.
        # If user is in any role with superadmin=True they are a superadmin
        superadmin = False
        if 'admin' in permissions:
            # No need for any permissinos besides ['admin']
            permissions = ['admin']
            superadmin = True

        # Build UserInfo dataclass with REQUIRED fields
        user = UserInfo(
            id=db_user.id or '',
            uuid=db_user.uuid or '',
            sub=db_user.uuid or '',
            username=db_user.username or '',
            email=db_user.email or '',
            first_name=db_user.first_name or '',
            last_name=db_user.last_name or '',
            title=db_user.title or '',
            avatar=db_user.avatar_url or '',
            groups=groups or [],
            roles=roles or [],
            permissions=permissions or [],
            superadmin=superadmin or False,
            authenticated=not anonymous,
        )

        # Save to cache
        if anonymous and cache_store == 'array':
            # If anonymous user, set cache to NEVER expire. Why? Because
            # the anonymouse user will never change, no need to have it expire from cache.
            # This only works if cache store is 'array' because it will die when we kill
            # the program. If cache store is 'redis', we want it to expire in case anyone
            # changes what the anonymous user in the DB looks like at some point.
            await uvicore.cache.store(cache_store).put(cache_key, user, seconds=0)
        else:
            # User is a valid user, cache it using configs default TTL expire
            # Or user is anonymous but cache_store is redis, we need to expire in redis.
            await uvicore.cache.store(cache_store).put(cache_key, user)

        # Return to user
        return user

    async def create_user(self, request: HTTPConnection, **kwargs):
        """Create new user in backend

        Expects ``groups`` (list of group names) plus UserModel columns in
        kwargs; links the user to the matching Group rows.
        """
        # Pop groups from kwargs
        groups = kwargs.pop('groups')

        # Set other kwargs values
        kwargs['disabled'] = False
        kwargs['login_at'] = datetime.now()

        # Translate avatar
        kwargs['avatar_url'] = kwargs.pop('avatar')

        # Build user model
        user = UserModel(**kwargs)

        # Get actual groups in backend from groups array
        real_groups = await Group.query().where('name', 'in', groups).get()

        # Save user
        await user.save()

        # Link real_groups
        await user.link('groups', real_groups)

        # Return new backend user (not actual Auth user class)
        return user

    async def sync_user(self, request: HTTPConnection, **kwargs):
        """Sync user to backend

        Updates an existing user's columns from kwargs after a successful
        login, marking the user enabled and stamping login_at.
        """
        # Get username
        username = kwargs['username']

        # Get actual backend user
        user = await UserModel.query().show_writeonly(['password']).find(username=username)

        # If we have successfully logged in, we are not disabled
        user.disabled = False
        user.login_at = datetime.now()

        # Pop groups from kwargs
        # NOTE(review): groups is popped but never synced here — presumably
        # group sync is intentionally not implemented; confirm.
        groups = kwargs.pop('groups')

        # Remove other kwargs items
        del kwargs['creator_id']

        # Translate avatar
        kwargs['avatar_url'] = kwargs.pop('avatar')

        # Add all other kwargs to user
        for key, value in kwargs.items():
            setattr(user, key, value)

        # Save user
        await user.save()

        # Return new backend user (not actual Auth user class)
        return user
7de927cc4aa7e7a91438826a936d6e4a9a6e5ea9 | 16,463 | py | Python | analysis/tasks/files/treeMaker_cfg.py | cms-btv-pog/jet-tagging-sf | c418e13aa4eac5522818d5f5ad3db2a0c81ec52e | [
"Apache-2.0"
] | 3 | 2020-01-22T08:30:14.000Z | 2021-12-27T18:47:43.000Z | analysis/tasks/files/treeMaker_cfg.py | cms-btv-pog/jet-tagging-sf | c418e13aa4eac5522818d5f5ad3db2a0c81ec52e | [
"Apache-2.0"
] | null | null | null | analysis/tasks/files/treeMaker_cfg.py | cms-btv-pog/jet-tagging-sf | c418e13aa4eac5522818d5f5ad3db2a0c81ec52e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Config file to create jet tagging scale factor trees.
"""
import os
import FWCore.ParameterSet.Config as cms
from FWCore.PythonUtilities.LumiList import LumiList
from FWCore.ParameterSet.VarParsing import VarParsing
try:
# create options
options = VarParsing("python")
# set defaults of common options
options.setDefault("inputFiles", "root://xrootd-cms.infn.it//store/data/Run2017B/DoubleEG/MINIAOD/17Nov2017-v1/20000/065312BE-A3D5-E711-A0C7-0CC47A1E0DCC.root")
options.setDefault("outputFile", "output.root")
options.setDefault("maxEvents", -1)
# add custom options
options.register(
"campaign",
"",
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
"campaign which the dataset to process belongs to",
)
options.register(
"metaDataFile",
"",
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
"path to the meta data file to write",
)
options.register(
"globalTag",
"",
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
"the global tag to use",
)
options.register(
"lumiFile",
"",
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
"file for selecting runs and lumis",
)
options.register(
"isData",
False,
VarParsing.multiplicity.singleton,
VarParsing.varType.bool,
"input dataset contains real data",
)
options.register(
"leptonChannel",
"",
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
"the lepton channel name when running on real data",
)
options.register(
"eeTriggers",
[],
VarParsing.multiplicity.list,
VarParsing.varType.string,
"ee triggers to use",
)
options.register(
"emuTriggers",
[],
VarParsing.multiplicity.list,
VarParsing.varType.string,
"emu triggers to use",
)
options.register(
"mumuTriggers",
[],
VarParsing.multiplicity.list,
VarParsing.varType.string,
"mumu triggers to use",
)
options.register(
"eTriggers",
[],
VarParsing.multiplicity.list,
VarParsing.varType.string,
"e triggers to use",
)
options.register(
"muTriggers",
[],
VarParsing.multiplicity.list,
VarParsing.varType.string,
"mu triggers to use",
)
options.register(
"metFilters",
[],
VarParsing.multiplicity.list,
VarParsing.varType.string,
"MET filters to use",
)
options.register(
"jesFiles",
[],
VarParsing.multiplicity.list,
VarParsing.varType.string,
"txt files containing jes infos",
)
options.register(
"jesRanges",
[],
VarParsing.multiplicity.list,
VarParsing.varType.int,
"a flat list of range pairs",
)
options.register(
"jesUncFiles",
[],
VarParsing.multiplicity.list,
VarParsing.varType.string,
"txt files containing the combined jes uncertainty infos",
)
options.register(
"jesUncSrcFile",
"",
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
"txt file containing the per-source jes uncertainty infos",
)
options.register(
"jesUncSources",
[],
VarParsing.multiplicity.list,
VarParsing.varType.string,
"jes uncertainty sources to consider",
)
options.register(
"jerPtResolutionFile",
"",
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
"JER pt resolution file",
)
options.register(
"jerScaleFactorFile",
"",
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
"JER scale factor file",
)
options.register(
"deepCSVWP",
0.,
VarParsing.multiplicity.singleton,
VarParsing.varType.float,
"Working point to count number of deepcsv tagged jets",
)
options.register(
"deepJetWP",
0.,
VarParsing.multiplicity.singleton,
VarParsing.varType.float,
"Working point to count number of deepjet tagged jets",
)
options.register(
"reportEvery",
1000,
VarParsing.multiplicity.singleton,
VarParsing.varType.int,
"number of events after which a report message is written",
)
options.register(
"summary",
False,
VarParsing.multiplicity.singleton,
VarParsing.varType.bool,
"print a summary at the end?",
)
options.parseArguments()
# sanity checks
if options.isData and not options.leptonChannel:
raise Exception("a lepton channel is required when running on real data")
# create the process and a sequence for additional modules
process = cms.Process("JTSF")
seq = cms.Sequence()
miniAODProcess = "RECO" if options.isData else "PAT"
# some default collections
electronCollection = cms.InputTag("slimmedElectrons")
muonCollection = cms.InputTag("slimmedMuons")
metCollection = cms.InputTag("slimmedMETs")
jetCollection = cms.InputTag("slimmedJets")
metFilterBitsCollection = cms.InputTag("TriggerResults", "", miniAODProcess)
# message logger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = options.reportEvery
# source defintion
process.source = cms.Source("PoolSource", fileNames=cms.untracked.vstring(options.inputFiles))
# good run and lumi selection
if options.isData and options.lumiFile:
lumiList = LumiList(filename=options.lumiFile)
process.source.lumisToProcess = lumiList.getVLuminosityBlockRange()
# standard sequences with global tag
if options.globalTag:
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = options.globalTag
# standard and geometry sequences
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load("Configuration.StandardSequences.Services_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Geometry.CaloEventSetup.CaloTowerConstituents_cfi")
# electron ID on uncorrected electrons
# no option to configure the electron collection available here
# https://twiki.cern.ch/twiki/bin/view/CMS/EgammaPostRecoRecipes
from RecoEgamma.EgammaTools.EgammaPostRecoTools import setupEgammaPostRecoSeq
params = {
"isMiniAOD": True,
"applyEnergyCorrections": False,
"applyVIDOnCorrectedEgamma": False,
}
if options.campaign == "Run2_pp_13TeV_Legacy18":
params["era"] = "2018-Prompt"
elif options.campaign == "Run2_pp_13TeV_Legacy17":
params["era"] = "2017-Nov17ReReco"
elif options.campaign == "Run2_pp_13TeV_Legacy16":
params["runEnergyCorrections"] = False
params["era"] = "2016-Legacy"
elif options.campaign == "Run2_pp_13TeV_UltraLegacy17":
params["era"] = "2017-UL"
else:
raise ValueError("Unknown campaign {}".format(options.campaign))
setupEgammaPostRecoSeq(process, **params)
seq += process.egammaScaleSmearSeq
seq += process.egammaPostRecoSeq
electronCollection = cms.InputTag("slimmedElectrons", "", process.name_())
# electron energy calibration
from RecoEgamma.EgammaTools.calibratedEgammas_cff import calibratedPatElectrons
process.correctedElectrons = calibratedPatElectrons.clone(
src=electronCollection,
produceCalibratedObjs=cms.bool(True),
semiDeterministic=cms.bool(True),
)
seq += process.correctedElectrons
electronCollection = cms.InputTag("correctedElectrons", "", process.name_())
# updated MET Filter:
# https://twiki.cern.ch/twiki/bin/view/CMS/MissingETOptionalFiltersRun2
if options.campaign in ["Run2_pp_13TeV_Legacy18", "Run2_pp_13TeV_Legacy17"]:
process.load('RecoMET.METFilters.ecalBadCalibFilter_cfi')
baddetEcallist = cms.vuint32(
[872439604,872422825,872420274,872423218,
872423215,872416066,872435036,872439336,
872420273,872436907,872420147,872439731,
872436657,872420397,872439732,872439339,
872439603,872422436,872439861,872437051,
872437052,872420649,872422436,872421950,
872437185,872422564,872421566,872421695,
872421955,872421567,872437184,872421951,
872421694,872437056,872437057,872437313]
)
process.ecalBadCalibReducedMINIAODFilter = cms.EDFilter(
"EcalBadCalibFilter",
EcalRecHitSource = cms.InputTag("reducedEgamma:reducedEERecHits"),
ecalMinEt = cms.double(50.),
baddetEcal = baddetEcallist,
taggingMode = cms.bool(True),
debug = cms.bool(False)
)
seq += process.ecalBadCalibReducedMINIAODFilter
# MET correction
# https://twiki.cern.ch/twiki/bin/viewauth/CMS/MissingETUncertaintyPrescription
from PhysicsTools.PatUtils.tools.runMETCorrectionsAndUncertainties import runMetCorAndUncFromMiniAOD
params = {
"isData" : options.isData,
"jecUncFile" : os.path.basename(options.jesUncFiles[0]),
"electronColl" : electronCollection.value(),
"muonColl" : muonCollection.value(),
"jetCollUnskimmed" : jetCollection.value(),
}
if options.campaign == "Run2_pp_13TeV_Legacy17":
params["fixEE2017"] = True
params["fixEE2017Params"] = {"userawPt": True, "ptThreshold": 50.0, "minEtaThreshold": 2.65, "maxEtaThreshold": 3.139}
runMetCorAndUncFromMiniAOD(process, **params)
seq += process.fullPatMetSequence
metCollection = cms.InputTag("slimmedMETs", "", process.name_())
# add DeepJet discriminators
from PhysicsTools.PatAlgos.tools.jetTools import updateJetCollection
if options.campaign != "Run2_pp_13TeV_Legacy18":
updateJetCollection(
process,
jetSource = jetCollection,
pvSource = cms.InputTag('offlineSlimmedPrimaryVertices'),
svSource = cms.InputTag('slimmedSecondaryVertices'),
# Safe to always add 'L2L3Residual' as MC contains dummy L2L3Residual corrections (always set to 1)
jetCorrections = ('AK4PFchs', cms.vstring(['L1FastJet', 'L2Relative', 'L3Absolute', 'L2L3Residual']), 'None'),
btagDiscriminators = [
'pfDeepFlavourJetTags:probb',
'pfDeepFlavourJetTags:probbb',
'pfDeepFlavourJetTags:problepb',
'pfDeepFlavourJetTags:probc',
'pfDeepFlavourJetTags:probuds',
'pfDeepFlavourJetTags:probg'
],
postfix='NewDFTraining'
)
process.deepFlavour = cms.Task(
process.patJetCorrFactorsNewDFTraining,
process.updatedPatJetsNewDFTraining,
process.patJetCorrFactorsTransientCorrectedNewDFTraining,
process.updatedPatJetsTransientCorrectedNewDFTraining,
process.pfDeepFlavourJetTagsNewDFTraining,
process.pfDeepFlavourTagInfosNewDFTraining,
process.pfDeepCSVTagInfosNewDFTraining,
process.selectedUpdatedPatJetsNewDFTraining,
process.pfInclusiveSecondaryVertexFinderTagInfosNewDFTraining,
process.pfImpactParameterTagInfosNewDFTraining
)
seq.associate(process.deepFlavour)
jetCollection = cms.InputTag("selectedUpdatedPatJetsNewDFTraining", "", process.name_())
# L1 prefiring weight
if options.campaign.endswith(("16", "17")) and not options.isData:
applyL1Weights = True
if options.campaign.endswith("17"):
data_era = "2017BtoF"
elif options.campaign.endswith("16"):
data_era = "2016BtoH"
else:
raise ValueError("campaign {} should not have l1 prefiring weights applied".format(options.campaign))
from PhysicsTools.PatUtils.l1ECALPrefiringWeightProducer_cfi import l1ECALPrefiringWeightProducer
process.prefiringweight = l1ECALPrefiringWeightProducer.clone(
DataEra = cms.string(data_era),
UseJetEMPt = cms.bool(False),
PrefiringRateSystematicUncty = cms.double(0.2),
SkipWarnings = False)
seq += process.prefiringweight
else:
applyL1Weights = False
# deterministic seeds
process.load("PhysicsTools.PatUtils.deterministicSeeds_cfi")
process.deterministicSeeds.produceCollections = cms.bool(True)
process.deterministicSeeds.produceValueMaps = cms.bool(False)
process.deterministicSeeds.electronCollection = electronCollection
process.deterministicSeeds.muonCollection = muonCollection
#process.deterministicSeeds.tauCollection = tauCollection
#process.deterministicSeeds.photonCollection = photonCollection
process.deterministicSeeds.jetCollection = jetCollection
process.deterministicSeeds.METCollection = metCollection
seq += process.deterministicSeeds
# overwrite output collections
muonCollection = cms.InputTag("deterministicSeeds", "muonsWithSeed", process.name_())
jetCollection = cms.InputTag("deterministicSeeds", "jetsWithSeed", process.name_())
metCollection = cms.InputTag("deterministicSeeds", "METsWithSeed", process.name_())
electronCollection = cms.InputTag("deterministicSeeds", "electronsWithSeed", process.name_())
# load and configure the tree maker
process.load("JetTaggingSF.JetTaggingSF.treeMaker_cfi")
process.treeMaker.verbose = cms.untracked.bool(False)
process.treeMaker.outputFile = cms.string(options.__getattr__("outputFile", noTags=True))
process.treeMaker.campaign = cms.string(options.campaign)
process.treeMaker.metaDataFile = cms.string(options.metaDataFile)
process.treeMaker.isData = cms.bool(options.isData)
process.treeMaker.leptonChannel = cms.string(options.leptonChannel)
process.treeMaker.eeTriggers = cms.vstring(options.eeTriggers)
process.treeMaker.emuTriggers = cms.vstring(options.emuTriggers)
process.treeMaker.mumuTriggers = cms.vstring(options.mumuTriggers)
process.treeMaker.eTriggers = cms.vstring(options.eTriggers)
process.treeMaker.muTriggers = cms.vstring(options.muTriggers)
process.treeMaker.metFilters = cms.vstring(options.metFilters)
process.treeMaker.jesFiles = cms.vstring(options.jesFiles)
process.treeMaker.jesRanges = cms.vint32(options.jesRanges)
process.treeMaker.jesUncFiles = cms.vstring(options.jesUncFiles)
process.treeMaker.jesUncSrcFile = cms.string(options.jesUncSrcFile)
process.treeMaker.jesUncSources = cms.vstring(options.jesUncSources)
process.treeMaker.jerPtResolutionFile = cms.string(options.jerPtResolutionFile)
process.treeMaker.jerScaleFactorFile = cms.string(options.jerScaleFactorFile)
process.treeMaker.deepJetWP = cms.double(options.deepJetWP)
process.treeMaker.deepCSVWP = cms.double(options.deepCSVWP)
process.treeMaker.metFilterBitsCollection = metFilterBitsCollection
process.treeMaker.electronCollection = electronCollection
process.treeMaker.muonCollection = muonCollection
process.treeMaker.metCollection = metCollection
process.treeMaker.jetCollection = jetCollection
process.treeMaker.applyHEMFilter = cms.bool(True) if options.campaign == "Run2_pp_13TeV_Legacy18" else cms.bool(False)
process.treeMaker.applyL1Weights = applyL1Weights
# additional configuration
process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(options.maxEvents))
# process options
process.options = cms.untracked.PSet(
allowUnscheduled=cms.untracked.bool(True),
wantSummary=cms.untracked.bool(options.summary),
)
# tell the process what to run
process.p = cms.Path(seq + process.treeMaker)
except:
import traceback
traceback.print_exc()
raise
| 38.286047 | 164 | 0.675758 |
e841df00479b1c669ba48ebe800f21dff788b7fa | 1,376 | py | Python | zvt/fill_project.py | h521822/zvt | c6bcc2b340406da55d920a411f59ab8d4cc7e76d | [
"MIT"
] | 2 | 2021-02-09T05:55:38.000Z | 2021-07-26T00:06:46.000Z | zvt/fill_project.py | h521822/zvt | c6bcc2b340406da55d920a411f59ab8d4cc7e76d | [
"MIT"
] | null | null | null | zvt/fill_project.py | h521822/zvt | c6bcc2b340406da55d920a411f59ab8d4cc7e76d | [
"MIT"
] | 1 | 2021-07-22T02:48:31.000Z | 2021-07-22T02:48:31.000Z | # script to auto generate some files
from zvt.contract import IntervalLevel
from zvt.autocode.generator import gen_exports, gen_kdata_schema
from zvt.contract import AdjustType
def gen_kdata_schemas():
    """Generate the kdata (quote) schema modules for every entity type.

    Calls the zvt autocode generator once per entity type, in the same
    order as before: stock, block, then the two sina-provided types.
    """
    # Stock quotes: every interval level except tick, with both raw and
    # hfq-adjusted prices.
    gen_kdata_schema(
        pkg='zvt',
        providers=['joinquant'],
        entity_type='stock',
        levels=[level for level in IntervalLevel if level != IntervalLevel.LEVEL_TICK],
        adjust_types=[None, AdjustType.hfq],
        entity_in_submodule=True,
    )
    # Block (sector) quotes: daily, weekly and monthly levels.
    gen_kdata_schema(
        pkg='zvt',
        providers=['eastmoney'],
        entity_type='block',
        levels=[IntervalLevel.LEVEL_1DAY, IntervalLevel.LEVEL_1WEEK, IntervalLevel.LEVEL_1MON],
        entity_in_submodule=True,
    )
    # ETF and index quotes: daily level only, provided by sina.
    for sina_entity_type in ('etf', 'index'):
        gen_kdata_schema(
            pkg='zvt',
            providers=['sina'],
            entity_type=sina_entity_type,
            levels=[IntervalLevel.LEVEL_1DAY],
            entity_in_submodule=True,
        )
if __name__ == '__main__':
    # Manual codegen entry point: uncomment the generators you want to run.
    # Only the `factors` exports are regenerated by default.
    # zip_dir(ZVT_TEST_DATA_PATH, zip_file_name=DATA_SAMPLE_ZIP_PATH)
    # gen_exports('api')
    # gen_exports('domain')
    # gen_exports('informer')
    # gen_exports('utils')
    # gen_exports('trader')
    # gen_exports('autocode')
    gen_exports('factors')
    # gen_kdata_schemas()
81ed8fbf8f85e20d8ec174b27fdffe59ddbf36f0 | 6,394 | py | Python | api/applications/tests/tests_managing_countries_on_goods_type.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | null | null | null | api/applications/tests/tests_managing_countries_on_goods_type.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | null | null | null | api/applications/tests/tests_managing_countries_on_goods_type.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | null | null | null | from django.urls import reverse
from rest_framework import status
from api.applications.models import CountryOnApplication
from api.goodstype.models import GoodsType
from api.staticdata.countries.helpers import get_country
from api.staticdata.statuses.enums import CaseStatusEnum
from api.staticdata.statuses.libraries.get_case_status import get_case_status_by_status
from test_helpers.clients import DataTestClient
class GoodTypeCountriesManagementTests(DataTestClient):
    """Tests for assigning and removing destination countries on the goods
    types of a draft open application.

    Fixes over the previous revision: the deprecated ``assertEquals`` alias
    is replaced by ``assertEqual``, and ``assertTrue(x in y)`` is replaced
    by ``assertIn`` for clearer failure messages.
    """

    def setUp(self):
        """Create a draft open application with two goods types, three
        application-level destination countries, and the endpoints under test."""
        super().setUp()
        self.open_draft = self.create_draft_open_application(self.organisation)

        self.goods_types = GoodsType.objects.filter(application=self.open_draft).order_by("id")
        self.goods_type_1 = self.goods_types[0]
        self.goods_type_2 = self.goods_types[1]

        # Add countries to the draft
        self.country_1 = get_country("ES")
        self.country_2 = get_country("US")
        self.country_3 = get_country("FR")

        self.all_countries = [self.country_1, self.country_2, self.country_3]
        for country in self.all_countries:
            CountryOnApplication(application=self.open_draft, country=country).save()

        # Endpoint returning a single goods type.
        self.good_url = reverse(
            "applications:application_goodstype",
            kwargs={"pk": self.open_draft.id, "goodstype_pk": self.goods_type_1.id},
        )
        # Endpoint assigning countries to goods types.
        self.good_country_url = reverse(
            "applications:application_goodstype_assign_countries", kwargs={"pk": self.open_draft.id},
        )

    def test_all_countries_are_returned_for_goods_type(self):
        """
        Given a Good with no Countries assigned
        When a user requests the Good
        Then the correct Good with all countries assigned to the application is returned
        """
        response = self.client.get(self.good_url, **self.exporter_headers)

        self.assertEqual(len(response.json()["good"]["countries"]), self.open_draft.application_countries.count())

    def test_all_countries_for_goods_type_are_returned(self):
        """
        Given a Good with Countries already assigned
        When a user requests the Good
        Then the correct Good with all assigned Countries are returned
        """
        self.goods_type_1.countries.set(self.all_countries)

        response = self.client.get(self.good_url, **self.exporter_headers)

        returned_good = response.json()["good"]
        self.assertEqual(len(self.goods_type_1.countries.all()), len(returned_good["countries"]))

    def test_state_can_be_over_written(self):
        """
        Given a Good with Countries already assigned
        When a user removes a good-level Country owned by their Team from the Good
        Then only that Country is removed
        """
        self.goods_type_1.countries.set(self.all_countries)
        data = {str(self.goods_type_1.id): [self.country_1.id, self.country_2.id]}

        self.client.put(self.good_country_url, data, **self.exporter_headers)

        self.assertEqual(2, len(self.goods_type_1.countries.all()))
        self.assertIn(self.country_1, self.goods_type_1.countries.all())
        self.assertIn(self.country_2, self.goods_type_1.countries.all())

    def test_cannot_set_no_countries_on_good(self):
        """
        Tests that a user cannot set no countries on a good
        """
        data = {
            str(self.goods_type_1.id): [],
            str(self.goods_type_2.id): [self.country_3.id, self.country_1.id],
        }

        response = self.client.put(self.good_country_url, data, **self.exporter_headers)

        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_setting_countries_on_two_goods(self):
        """
        Tests setting multiple countries on multiple goods types simultaneously
        """
        data = {
            str(self.goods_type_1.id): [self.country_1.id, self.country_2.id],
            str(self.goods_type_2.id): [self.country_3.id, self.country_1.id],
        }

        response = self.client.put(self.good_country_url, data, **self.exporter_headers)
        response_data = response.json()

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response_data), 2)

    def test_goodstype_countries_black_box_data_persistence(self):
        """
        Tests that countries set via a PUT are returned on a subsequent GET.
        """
        data = {
            str(self.goods_type_1.id): [self.country_1.id, self.country_2.id],
            str(self.goods_type_2.id): [self.country_3.id, self.country_1.id],
        }

        self.client.put(self.good_country_url, data, **self.exporter_headers)

        response = self.client.get(self.good_url, data, **self.exporter_headers)
        countries = [x.get("id") for x in response.json()["good"]["countries"]]

        self.assertEqual(len(countries), 2)
        self.assertIn(self.country_1.id, countries)
        self.assertIn(self.country_2.id, countries)

    def test_invalid_request_data_returns_404(self):
        """
        404 with invalid request country key
        """
        data = {
            str(self.goods_type_1.id): [self.country_1.id, self.country_2.id],
            str(self.goods_type_2.id): ["sdffsdfds", self.country_1.id],
        }

        response = self.client.put(self.good_country_url, data, **self.exporter_headers)

        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_audit_entries_are_created(self):
        """
        Given a Good with Countries already assigned
        When a user assigns a new country to the good and removes the existing one
        Then two audit entries should be made showing the addition and removal
        """
        case = self.submit_application(self.open_draft)
        # The application must be in an editable state for the PUT to apply.
        case.status = get_case_status_by_status(CaseStatusEnum.APPLICANT_EDITING)
        case.save()
        self.goods_type_1.countries.set([self.country_1])
        data = {str(self.goods_type_1.id): [self.country_2.id]}

        self.client.put(self.good_country_url, data, **self.exporter_headers)

        response_data = self.client.get(reverse("cases:activity", kwargs={"pk": case.id}), **self.gov_headers).json()
        self.assertEqual(len(response_data["activity"]), 3)
        self.assertIn(f"added the destinations United States to '{self.goods_type_1.description}'", str(response_data))
        self.assertIn(f"removed the destinations Spain from '{self.goods_type_1.description}'", str(response_data))
add43ac8569be3594e22eb97119a64b8d7a7eeb4 | 8,663 | py | Python | src/aurora/security.py | heminsatya/aurora | cb3e7454450d016b23628f8a74ed4041716bf274 | [
"MIT"
] | 5 | 2021-12-27T17:14:42.000Z | 2022-02-05T19:09:12.000Z | src/aurora/security.py | heminsatya/aurora | cb3e7454450d016b23628f8a74ed4041716bf274 | [
"MIT"
] | null | null | null | src/aurora/security.py | heminsatya/aurora | cb3e7454450d016b23628f8a74ed4041716bf274 | [
"MIT"
] | 1 | 2022-01-14T17:32:00.000Z | 2022-01-14T17:32:00.000Z | ################
# Dependencies #
################
import importlib
from os import replace
from datetime import datetime, timedelta
from .helpers import route_url
from flask import make_response, jsonify, render_template, request as flask_request, abort as flask_abort, redirect as flask_redirect, session as flask_session
from werkzeug.security import check_password_hash, generate_password_hash
# Flask request/session proxies, re-exported under their usual names
request = flask_request
session = flask_session
# Fetch configuration module (project-level `config.py`)
config = importlib.import_module('config')
debug = getattr(config, "DEBUG")                # when True, errors raise instead of print+exit
default_lang = getattr(config, "DEFAULT_LANG")  # fallback language code
multi_lang = getattr(config, "MULTI_LANG")      # whether URLs carry a language prefix
languages = getattr(config, "LANGUAGES")        # supported language codes
# Fetch apps module (project-level `_apps.py`)
apps_module = importlib.import_module('_apps')
apps = getattr(apps_module, "apps")
def abort(code: int = 404):
    """Abort handling of the current request with an HTTP error page.

    :param code: HTTP status code (default 404).
    :return: The Flask abort response object.
    """
    return flask_abort(status=code)
def redirect(url: str, code: int = 302):
    """Issue an HTTP redirect to a (relative) URL.

    :param url: Target URL.
    :param code: HTTP redirect status code (default 302).
    :return: Flask redirect response object.
    """
    return flask_redirect(location=url, code=code)
def redirect_to(app: str, controller: str = None, code: int = 302):
    """Redirect to the URL registered for an app (and optional controller).

    :param app: The app name.
    :param controller: The app controller name (optional).
    :param code: HTTP redirect status code (default 302).
    :return: Flask redirect response object.
    """
    target = route_url(app, controller)
    return redirect(url=target, code=code)
def check_session(name: str) -> bool:
    """Return True if a session key named *name* exists.

    :param name: The session key to test.
    """
    # Membership test replaces the original if/else returning True/False.
    return name in session
def get_session(name: str):
    """Return the value stored in the session under key *name*.

    Raises KeyError when the key is absent (same as a plain lookup).
    """
    return session[name]
def set_session(name: str, value: str) -> None:
    """Store *value* in the session under key *name*."""
    session[name] = value
def unset_session(name: str) -> None:
    """Remove key *name* from the session; silent when the key is absent."""
    session.pop(name, None)
def check_cookie(name: str) -> bool:
    """Return True if the current request carries a cookie named *name*.

    :param name: The cookie name to test.
    """
    # Membership test replaces the original if/else returning True/False.
    return name in request.cookies
def get_cookie(name: str):
    """Return the value of cookie *name* from the current request.

    Returns None when the cookie is not present (dict.get semantics).
    """
    return request.cookies.get(name)
def set_cookie(name: str, value: str, data: dict = None, days: int = 30):
    """Build a response that sets cookie *name* to *value*.

    :param name: *Required* cookie name.
    :param value: *Required* cookie value.
    :param data: Optional response spec: ``{"type": "redirect"|"render"|"json"|"text",
                 "response": ...}`` describing the response the cookie rides on.
    :param days: Expiry in days from now (default 30).
    :return: Flask response object with the cookie attached.
    """
    # Avoid a shared mutable default argument.
    data = data or {}

    # BUG FIX: the original tested `not name and not value`, so a single
    # missing parameter slipped through; either one missing is an error.
    if not name or not value:
        # Produce error message
        error = 'Please provide the required parameters!'

        # Check debug mode
        if debug:
            # Raise error
            raise Exception(error)
        else:
            # Print error
            print(error)
            exit()

    # Build the response the cookie will be attached to.
    data_type = data.get("type") if data else None
    if data_type == "redirect":
        res = make_response(redirect(data["response"]))
    elif data_type == "render":
        res = make_response(render_template(data["response"]))
    elif data_type == "json":
        res = make_response(jsonify(data["response"]))
    elif data_type == "text":
        res = make_response(data["response"])
    else:
        # Default response; also covers an unrecognized data["type"], which
        # previously left `res` undefined and raised NameError.
        res = make_response("Cookie set successfully!")

    # expires in `days` days
    expire = datetime.utcnow() + timedelta(days=days)

    # Set cookie
    res.set_cookie(name, value, expires=expire)

    # Return response
    return res
def unset_cookie(name: str, data: dict = None):
    """Build a response that deletes cookie *name* (sets it to expire now).

    :param name: *Required* cookie name.
    :param data: Optional response spec: ``{"type": "redirect"|"render"|"json"|"text",
                 "response": ...}`` describing the response the deletion rides on.
    :return: Flask response object with the cookie cleared.
    """
    # Avoid a shared mutable default argument.
    data = data or {}

    # Check required params
    if not name:
        # Produce error message
        error = 'Please provide the required parameters!'

        # Check debug mode
        if debug:
            # Raise error
            raise Exception(error)
        else:
            # Print error
            print(error)
            exit()

    # Build the response the cookie removal will be attached to.
    data_type = data.get("type") if data else None
    if data_type == "redirect":
        res = make_response(redirect(data["response"]))
    elif data_type == "render":
        res = make_response(render_template(data["response"]))
    elif data_type == "json":
        res = make_response(jsonify(data["response"]))
    elif data_type == "text":
        res = make_response(data["response"])
    else:
        # Default response; also covers an unrecognized data["type"], which
        # previously left `res` undefined and raised NameError.
        res = make_response("Cookie unset successfully!")

    # unset cookie by expiring it immediately
    res.set_cookie(name, '', expires=0)

    # Return response
    return res
##
# @desc Finds active language
#
# @var active_lang: str - The active language code
#
# @return dict - {'active_language': code, 'LANGUAGE': url prefix or ''}
##
def find_lang():
    """Resolve the active language from URL, cookie, session or default.

    Precedence (multi-language mode): URL path segment, then cookie,
    then session, then the configured default. In single-language mode
    the default language is used and the URL prefix is empty.
    """
    path = request.path
    # First path segment, e.g. '/en/home' -> 'en'
    lang = path.split('/')[1]

    # Check multi language
    if multi_lang:
        # Check the language path
        if lang in languages:
            active_lang = lang
            LANGUAGE = '/' + active_lang
            set_session('active_lang', lang)
        elif check_cookie('active_lang'):
            # Fall back to a previously remembered cookie value
            active_lang = get_cookie('active_lang')
            LANGUAGE = '/' + active_lang
            set_session('active_lang', get_cookie('active_lang'))
        elif check_session('active_lang'):
            # Fall back to the current session value
            active_lang = get_session('active_lang')
            LANGUAGE = '/' + active_lang
        else:
            # Nothing known yet: use the configured default
            active_lang = default_lang
            LANGUAGE = '/' + active_lang
            set_session('active_lang', default_lang)
    else:
        # Single-language site: no URL prefix at all
        active_lang = default_lang
        LANGUAGE = ''

    # Return result
    return {
        'active_language': active_lang,
        'LANGUAGE': LANGUAGE,
    }
def login_required(app: str, controller: str = None, validate: str = 'user'):
    """Decorator factory: redirect users who are not logged in to the login page.

    :param app: *Required* app name of the login route.
    :param controller: Controller name of the login route (optional).
    :param validate: Session/cookie key whose presence marks a logged-in user.
    :return: Decorator that guards a view function.
    """
    # Fetch the route final url
    url = route_url(app, controller)

    def wrapper(inner):
        # functools.wraps preserves the wrapped view's metadata (__name__,
        # __doc__), which Flask uses for unique endpoint names.
        from functools import wraps

        @wraps(inner)
        def decorator(*args, **kwargs):
            # URL to return to after a successful login.
            next_url = request.url.replace(request.url_root, '/')

            # Promote a remember-me cookie into the session.
            if check_cookie(validate):
                set_session(validate, get_cookie(validate))

            # User is not logged-in: redirect to the (language-prefixed) login URL.
            if not check_session(validate):
                if multi_lang and check_session('active_lang'):
                    return redirect(f'''/{get_session('active_lang')}/{url}?next={next_url}''')
                return redirect(f'{url}?next={next_url}')

            # User is logged-in
            return inner(*args, **kwargs)

        return decorator
    return wrapper
def login_abort(app: str, controller: str = None, validate: str = 'user'):
    """Decorator factory: redirect already-logged-in users away from a view
    (e.g. away from login/registration pages).

    :param app: *Required* app name of the redirect target.
    :param controller: Controller name of the redirect target (optional).
    :param validate: Session/cookie key whose presence marks a logged-in user.
    :return: Decorator that guards a view function.
    """
    # Fetch the route final url
    url = route_url(app, controller)

    def wrapper(inner):
        # functools.wraps preserves the wrapped view's metadata (__name__,
        # __doc__), which Flask uses for unique endpoint names.
        from functools import wraps

        @wraps(inner)
        def decorator(*args, **kwargs):
            # Promote a remember-me cookie into the session.
            if check_cookie(validate):
                set_session(validate, get_cookie(validate))

            # User is logged-in: send them to the target route.
            if check_session(validate):
                return redirect(url)

            # User is not logged-in
            return inner(*args, **kwargs)

        return decorator
    return wrapper
def hash_password(password):
    """Return a salted hash of *password* suitable for storage."""
    hashed = generate_password_hash(password)
    return hashed
def check_password(hashed_password, requested_password):
    """Return True when *requested_password* matches *hashed_password*.

    :param hashed_password: Hashed password from the database.
    :param requested_password: Plain-text password supplied by the user.
    """
    # check_password_hash already returns a bool; no if/else needed.
    return check_password_hash(hashed_password, requested_password)
| 22.385013 | 159 | 0.609027 |
c1247b34e282848f7587ffee0ac98917ef24300c | 45 | py | Python | pyVcsa/exceptions.py | ToxicSamN/pyVcsa | b385fac1e2e5c37e77f42caf57149448fd5fa4b6 | [
"Apache-2.0"
] | null | null | null | pyVcsa/exceptions.py | ToxicSamN/pyVcsa | b385fac1e2e5c37e77f42caf57149448fd5fa4b6 | [
"Apache-2.0"
] | null | null | null | pyVcsa/exceptions.py | ToxicSamN/pyVcsa | b385fac1e2e5c37e77f42caf57149448fd5fa4b6 | [
"Apache-2.0"
] | null | null | null |
class ValidationError(Exception):
    """Raised when input validation fails."""
| 9 | 33 | 0.733333 |
8ab43f30476e8d8013912fab4907a7b92fc78a7b | 374 | py | Python | snmpsim/mltsplit.py | FCG-LLC/snmpsim | a55ecde4cde65d2364ea334ab85df4cd1bb21f3b | [
"BSD-2-Clause"
] | null | null | null | snmpsim/mltsplit.py | FCG-LLC/snmpsim | a55ecde4cde65d2364ea334ab85df4cd1bb21f3b | [
"BSD-2-Clause"
] | null | null | null | snmpsim/mltsplit.py | FCG-LLC/snmpsim | a55ecde4cde65d2364ea334ab85df4cd1bb21f3b | [
"BSD-2-Clause"
] | 1 | 2019-12-16T09:51:38.000Z | 2019-12-16T09:51:38.000Z | #
# This file is part of snmpsim software.
#
# Copyright (c) 2010-2017, Ilya Etingof <etingof@gmail.com>
# License: http://snmpsim.sf.net/license.html
#
# Like string.split but first tries to use composite separator as an
# escaping aid
def split(val, sep):
    """Split *val* on the longest run (up to three) of *sep* present in it.

    Like ``str.split``, but a repeated separator acts as an escaping aid:
    the longest repetition of *sep* found in the string is used as the
    actual delimiter. Returns ``[val]`` when the separator does not occur.
    """
    for repeat in (3, 2, 1):
        token = sep * repeat
        # `in` replaces the original `find(...) != -1` idiom.
        if token in val:
            return val.split(token)
    return [val]
| 24.933333 | 68 | 0.639037 |
158cc0f98ba61faef4edcfeda9a287828ad6bd45 | 271 | py | Python | mundo-2/ex060.py | pablocarracci/cev-python | dd6c2db80be84ec732fc5efd895e11d48d298258 | [
"MIT"
] | null | null | null | mundo-2/ex060.py | pablocarracci/cev-python | dd6c2db80be84ec732fc5efd895e11d48d298258 | [
"MIT"
] | null | null | null | mundo-2/ex060.py | pablocarracci/cev-python | dd6c2db80be84ec732fc5efd895e11d48d298258 | [
"MIT"
] | null | null | null | # Exercício 060 - Cálculo do Fatorial
from cev.utils import fatorial
# Read the number whose factorial will be shown (prompt is in Portuguese).
x = int(input('Digite um número para calcular seu Fatorial: '))
print(f'Calculando {x}! = ', end='')
# Print the expansion "x x x-1 x ... x 1 =" on the same output line.
for n in range(x, 0, -1):
    print(f'{n} x' if n > 1 else f'{n} =', end=' ')
# `fatorial` comes from the course helper package (cev.utils).
print(fatorial(x))
| 22.583333 | 63 | 0.627306 |
af65a059452e996c7ef4ef5228138a9f0c0d54ce | 3,578 | py | Python | tests/pytests/integration/modules/state/test_state_pillar_errors.py | agraul/salt-1 | b6665030d91fb7045467b4dc408169d5127aa9be | [
"Apache-2.0"
] | null | null | null | tests/pytests/integration/modules/state/test_state_pillar_errors.py | agraul/salt-1 | b6665030d91fb7045467b4dc408169d5127aa9be | [
"Apache-2.0"
] | null | null | null | tests/pytests/integration/modules/state/test_state_pillar_errors.py | agraul/salt-1 | b6665030d91fb7045467b4dc408169d5127aa9be | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import textwrap
import pytest
from saltfactories.utils.functional import StateResult
# Mark every test in this module as a slow test.
pytestmark = [
    pytest.mark.slow_test,
]
@pytest.fixture(scope="module")
def reset_pillar(salt_call_cli):
    """Module-scoped fixture: once every test using it has run, refresh the
    minion's in-memory pillar so later modules see clean pillar data."""
    try:
        # Run tests
        yield
    finally:
        # Refresh pillar once all tests are done.
        ret = salt_call_cli.run("saltutil.refresh_pillar", wait=True)
        assert ret.exitcode == 0
        assert ret.json is True
@pytest.fixture
def testfile_path(tmp_path, base_env_state_tree_root_dir):
    """Write an ``sls-id-test`` state into the base-env state tree that
    manages a file under ``tmp_path``; yield the managed file's path."""
    testfile = tmp_path / "testfile"
    # State that copies salt://testfile to the temp location.
    sls_contents = textwrap.dedent(
        """
        {}:
          file:
          - managed
          - source: salt://testfile
          - makedirs: true
        """.format(testfile)
    )
    with pytest.helpers.temp_file(
        "sls-id-test.sls", sls_contents, base_env_state_tree_root_dir
    ):
        yield testfile
@pytest.mark.usefixtures("testfile_path", "reset_pillar")
def test_state_apply_aborts_on_pillar_error(
    salt_cli,
    salt_minion,
    base_env_pillar_tree_root_dir,
):
    """
    state.apply must abort (exit code 1) when the pillar fails to render,
    reporting the render failure instead of applying the state.
    """
    # Top file assigning the broken 'basic' pillar to this minion only.
    pillar_top_file = textwrap.dedent(
        """
        base:
          '{}':
            - basic
        """
    ).format(salt_minion.id)
    # A bare scalar does not render to a dictionary -> pillar render error.
    basic_pillar_file = textwrap.dedent(
        """
        syntax_error
        """
    )

    with pytest.helpers.temp_file(
        "top.sls", pillar_top_file, base_env_pillar_tree_root_dir
    ), pytest.helpers.temp_file(
        "basic.sls", basic_pillar_file, base_env_pillar_tree_root_dir
    ):
        expected_comment = [
            "Pillar failed to render with the following messages:",
            "SLS 'basic' does not render to a dictionary",
        ]
        shell_result = salt_cli.run(
            "state.apply", "sls-id-test", minion_tgt=salt_minion.id
        )
        # Exit code 1 and the render-failure message: the state did not run.
        assert shell_result.exitcode == 1
        assert shell_result.json == expected_comment
@pytest.mark.usefixtures("testfile_path", "reset_pillar")
def test_state_apply_continues_after_pillar_error_is_fixed(
    salt_cli,
    salt_minion,
    base_env_pillar_tree_root_dir,
):
    """
    state.apply must succeed once a previously cached pillar render error
    has been fixed and the pillar re-rendered.
    """
    # Top file assigning the 'basic' pillar to this minion only.
    pillar_top_file = textwrap.dedent(
        """
        base:
          '{}':
            - basic
        """.format(salt_minion.id)
    )
    # Broken pillar: a bare scalar does not render to a dictionary.
    basic_pillar_file_error = textwrap.dedent(
        """
        syntax_error
        """
    )
    # Fixed pillar: proper key/value mapping.
    basic_pillar_file = textwrap.dedent(
        """
        syntax_error: Fixed!
        """
    )

    # save pillar render error in minion's in-memory pillar
    with pytest.helpers.temp_file(
        "top.sls", pillar_top_file, base_env_pillar_tree_root_dir
    ), pytest.helpers.temp_file(
        "basic.sls", basic_pillar_file_error, base_env_pillar_tree_root_dir
    ):
        shell_result = salt_cli.run(
            "saltutil.refresh_pillar", minion_tgt=salt_minion.id
        )
        assert shell_result.exitcode == 0

    # run state.apply with fixed pillar render error
    with pytest.helpers.temp_file(
        "top.sls", pillar_top_file, base_env_pillar_tree_root_dir
    ), pytest.helpers.temp_file(
        "basic.sls", basic_pillar_file, base_env_pillar_tree_root_dir
    ):
        shell_result = salt_cli.run(
            "state.apply", "sls-id-test", minion_tgt=salt_minion.id
        )
        assert shell_result.exitcode == 0
        state_result = StateResult(shell_result.json)
        # The managed file was created by the state.
        assert state_result.result is True
        assert state_result.changes == {"diff": "New file", "mode": "0644"}
| 27.106061 | 75 | 0.626887 |
713246f8288e0a40ac4e726029c213f3abb498ae | 3,370 | py | Python | uproot/write/sink/cursor.py | riga/uproot | 78de42f849079c35fd05ae22033e56f02492b6c1 | [
"BSD-3-Clause"
] | 1 | 2021-03-18T23:33:35.000Z | 2021-03-18T23:33:35.000Z | uproot/write/sink/cursor.py | riga/uproot | 78de42f849079c35fd05ae22033e56f02492b6c1 | [
"BSD-3-Clause"
] | 17 | 2020-01-28T22:33:27.000Z | 2021-06-10T21:05:49.000Z | uproot/write/sink/cursor.py | riga/uproot | 78de42f849079c35fd05ae22033e56f02492b6c1 | [
"BSD-3-Clause"
] | 1 | 2020-04-17T15:33:03.000Z | 2020-04-17T15:33:03.000Z | #!/usr/bin/env python
# Copyright (c) 2017, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import struct
class Cursor(object):
def __init__(self, index):
self.index = index
def skip(self, numbytes):
self.index += numbytes
def update_fields(self, sink, format, *args):
sink.write(format.pack(*args), self.index)
def write_fields(self, sink, format, *args):
self.update_fields(sink, format, *args)
self.index += format.size
@staticmethod
def length_string(string):
if len(string) < 255:
return len(string) + 1
else:
return len(string) + 5
@staticmethod
def length_strings(strings):
return sum(Cursor.length_string(x) for x in strings)
_format_byte = struct.Struct("B")
_format_byteint = struct.Struct(">Bi")
def update_string(self, sink, data):
if len(data) < 255:
sink.write(self._format_byte.pack(len(data)), self.index)
sink.write(data, self.index + 1)
else:
sink.write(self._format_byteint.pack(255, len(data)), self.index)
sink.write(data, self.index + 5)
def write_string(self, sink, data):
self.update_string(sink, data)
self.index += self.length_string(data)
def update_cstring(self, sink, data):
sink.write(data, self.index)
sink.write(b"\x00")
def write_cstring(self, sink, data):
self.update_cstring(sink, data)
self.index += len(data) + 1
def update_data(self, sink, data):
sink.write(data, self.index)
def write_data(self, sink, data):
self.update_data(sink, data)
self.index += len(data)
def update_array(self, sink, data):
sink.write(data.tostring(), self.index)
def write_array(self, sink, data):
self.update_array(sink, data)
self.index += data.nbytes
| 36.236559 | 80 | 0.690208 |
e3c7aecef7a4c674bbe1a5aa7f9ae7e8c9ce67b2 | 63 | py | Python | BOJ/21000~21999/21500~21599/21591.py | shinkeonkim/today-ps | f3e5e38c5215f19579bb0422f303a9c18c626afa | [
"Apache-2.0"
] | 2 | 2020-01-29T06:54:41.000Z | 2021-11-07T13:23:27.000Z | BOJ/21000~21999/21500~21599/21591.py | shinkeonkim/Today_PS | bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44 | [
"Apache-2.0"
] | null | null | null | BOJ/21000~21999/21500~21599/21591.py | shinkeonkim/Today_PS | bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44 | [
"Apache-2.0"
] | null | null | null | a,b,c,d=map(int,input().split())
print(int(a>=c+2 and b>=d+2))
| 21 | 32 | 0.587302 |
12b788b0334802d3dd2355e10dd5f60687580579 | 5,713 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations/_network_interface_load_balancers_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations/_network_interface_load_balancers_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations/_network_interface_load_balancers_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceLoadBalancersOperations:
    """NetworkInterfaceLoadBalancersOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2018_11_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client and (de)serialization helpers shared by every
        # operation in this group; all are injected by the service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        resource_group_name: str,
        network_interface_name: str,
        **kwargs
    ) -> AsyncIterable["models.NetworkInterfaceLoadBalancerListResult"]:
        """List all load balancers in a network interface.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkInterfaceLoadBalancerListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.NetworkInterfaceLoadBalancerListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.NetworkInterfaceLoadBalancerListResult"]
        # Status -> exception mapping; callers may extend/override via kwargs.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-11-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page: build the full ARM URL from the metadata template.
            # Subsequent pages: the service-provided next_link is used verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items) to the pager.
            deserialized = self._deserialize('NetworkInterfaceLoadBalancerListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch a single page; anything other than HTTP 200 is an error.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/loadBalancers'}  # type: ignore
| 48.82906 | 196 | 0.673377 |
e09227097c5829c808d4310e9ac5f8537d2371a1 | 7,266 | py | Python | tests/test_patch.py | softwarefactory-project/rdopkg | d7d63aa5142a1c00f96ef09d6451935113c9db85 | [
"Apache-2.0"
] | 12 | 2017-06-17T03:00:20.000Z | 2019-10-21T22:17:42.000Z | tests/test_patch.py | softwarefactory-project/rdopkg | d7d63aa5142a1c00f96ef09d6451935113c9db85 | [
"Apache-2.0"
] | 85 | 2017-06-13T09:43:51.000Z | 2022-02-10T16:24:48.000Z | tests/test_patch.py | openstack-packages/rdopkg | d7d63aa5142a1c00f96ef09d6451935113c9db85 | [
"Apache-2.0"
] | 6 | 2016-05-20T14:54:35.000Z | 2017-06-05T14:43:08.000Z | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from rdopkg.cli import rdopkg
from rdopkg.utils.git import git, git_branch
from rdopkg.utils import log
import test_common as common
from test_common import DIST_POSTFIX
import pytest
RPM_AVAILABLE = False
try:
import rpm # NOQA
RPM_AVAILABLE = True
except ImportError:
pass
def _test_patch(asset, version, dir):
    """Shared driver: run `rdopkg patch -l` on a test dist-git and verify it.

    :param asset: name of the spec fixture under tests assets
    :param version: (spec_version, spec_release_parts, spec_milestone) expected
                    to appear in the .spec after patching
    :param dir: temporary directory (pytest tmpdir) to build the fixture in
    """
    dist_path = common.prep_spec_test(dir, asset)
    log.log.setLevel(log.WARN)
    with dist_path.as_cwd():
        spec_version, spec_release_parts, spec_milestone = version
        # The patches branch is tagged with version (+ milestone when present).
        tag = spec_version
        if spec_milestone:
            tag += spec_milestone
        common.prep_patches_branch(tag=tag)
        commit_before = git('rev-parse', 'HEAD')
        common.add_patches()
        rdopkg('patch', '-l')
        # after: exactly one new commit with the expected spec version
        commit_after = git('rev-parse', 'HEAD')
        common.assert_spec_version(spec_version, spec_release_parts,
                                   spec_milestone)
        assert commit_before != commit_after, "No commit created"
        prev = git('rev-parse', 'HEAD~')
        assert prev == commit_before, "Multiple commits created"
@pytest.mark.skipif('RPM_AVAILABLE == False')
def test_patch_milestone(tmpdir):
_test_patch('milestone', ('1.2.3', ('0.4', '%{?milestone}',
DIST_POSTFIX), '.0rc2'), tmpdir)
@pytest.mark.skipif('RPM_AVAILABLE == False')
def test_patch_milestone_bug(tmpdir):
# make sure rdopkg removes unwanted '%global milestone %{?milestone}'
_test_patch('milestone-bug',
('1.2.3', ('0.4', '', DIST_POSTFIX), None),
tmpdir)
@pytest.mark.skipif('RPM_AVAILABLE == False')
def test_patch_remove(tmpdir):
dist_path = common.prep_spec_test(tmpdir, 'patched')
with dist_path.as_cwd():
common.prep_patches_branch()
common.add_patches()
commit_before = git('rev-parse', 'HEAD')
common.remove_patches(1)
rdopkg('patch', '-l')
commit_after = git('rev-parse', 'HEAD')
git_clean = git.is_clean()
common.norm_changelog()
common.assert_distgit(dist_path, 'patch-remove')
assert commit_before != commit_after, "New commit not created"
assert git_clean, "git not clean after action"
@pytest.mark.skipif('RPM_AVAILABLE == False')
def test_patch_add(tmpdir):
dist_path = common.prep_spec_test(tmpdir, 'patched')
with dist_path.as_cwd():
common.prep_patches_branch()
common.add_patches()
commit_before = git('rev-parse', 'HEAD')
common.add_n_patches(3)
rdopkg('patch', '-l')
commit_after = git('rev-parse', 'HEAD')
git_clean = git.is_clean()
common.norm_changelog()
common.assert_distgit(dist_path, 'patch-add')
assert commit_before != commit_after, "New commit not created"
assert git_clean, "git not clean after action"
@pytest.mark.skipif('RPM_AVAILABLE == False')
def test_patch_mix(tmpdir):
dist_path = common.prep_spec_test(tmpdir, 'patched')
with dist_path.as_cwd():
common.prep_patches_branch()
common.add_patches()
commit_before = git('rev-parse', 'HEAD')
common.remove_patches(1)
common.add_n_patches(3)
rdopkg('patch', '-l')
commit_after = git('rev-parse', 'HEAD')
git_clean = git.is_clean()
common.norm_changelog()
common.assert_distgit(dist_path, 'patch-mix')
assert commit_before != commit_after, "New commit not created"
assert git_clean, "git not clean after action"
def _test_patch_noop(tmpdir, distgit, cmd):
    """Shared driver: assert that *cmd* is a no-op on an up-to-date dist-git.

    The dist-git fixture is prepared so patch files already match the patches
    branch; running *cmd* must then create no commit and leave git clean.
    """
    dist_path = common.prep_spec_test(tmpdir, distgit)
    with dist_path.as_cwd():
        common.prep_patches_branch()
        common.add_patches()
        # regen patch files in order for hashes to match git
        rdopkg('update-patches', '--amend')
        commit_before = git('rev-parse', 'HEAD')
        rdopkg(*cmd)
        commit_after = git('rev-parse', 'HEAD')
        git_clean = git.is_clean()

    common.assert_distgit(dist_path, distgit)
    assert commit_before == commit_after, "New commit created for noop"
    assert git_clean, "git not clean after action"
def test_patch_noop(tmpdir):
_test_patch_noop(tmpdir, 'patched', ['patch', '-l'])
def test_patch_noop_detect(tmpdir):
_test_patch_noop(tmpdir,
'patched', ['patch', '-l', '--changelog', 'detect'])
def test_patch_noop_count(tmpdir):
_test_patch_noop(tmpdir,
'patched', ['patch', '-l', '--changelog', 'count'])
def test_patch_noop_plain(tmpdir):
_test_patch_noop(tmpdir, 'patched', ['patch', '-l', '-C', 'plain'])
@pytest.mark.skipif('RPM_AVAILABLE == False')
def test_patch_noop_no_bump(tmpdir):
_test_patch_noop(tmpdir, 'patched', ['patch', '-l', '--no-bump'])
def _test_patch_regen(tmpdir, distgit, distgit_after,
                      cmd, norm_changelog=True):
    """Shared driver: *cmd* must regenerate patches and create a new commit.

    :param distgit: fixture name used as the starting dist-git state
    :param distgit_after: fixture name the dist-git must match afterwards
    :param norm_changelog: normalize the %changelog before comparing, since
                           regenerated entries carry variable date/author text
    """
    dist_path = common.prep_spec_test(tmpdir, distgit)
    with dist_path.as_cwd():
        common.prep_patches_branch()
        common.add_patches()
        commit_before = git('rev-parse', 'HEAD')
        rdopkg(*cmd)
        commit_after = git('rev-parse', 'HEAD')
        git_clean = git.is_clean()
        if norm_changelog:
            common.norm_changelog()

    common.assert_distgit(dist_path, distgit_after)
    assert commit_before != commit_after, \
        "New commit not created after patch regen"
    assert git_clean, "git not clean after action"
@pytest.mark.skipif('RPM_AVAILABLE == False')
def test_patch_regen(tmpdir):
_test_patch_regen(tmpdir, 'patched', 'patched-regen', ['patch', '-l'])
@pytest.mark.skipif('RPM_AVAILABLE == False')
def test_patch_regen_detect(tmpdir):
_test_patch_regen(tmpdir, 'patched', 'patched-regen',
['patch', '-l', '-C', 'detect'])
@pytest.mark.skipif('RPM_AVAILABLE == False')
def test_patch_regen_count(tmpdir):
_test_patch_regen(tmpdir, 'patched', 'patched-regen',
['patch', '-l', '-C', 'count'])
@pytest.mark.skipif('RPM_AVAILABLE == False')
def test_patch_regen_plain(tmpdir):
_test_patch_regen(tmpdir, 'patched', 'patched-regen',
['patch', '-l', '--changelog', 'plain'])
def test_patch_regen_no_bump(tmpdir):
_test_patch_regen(tmpdir, 'patched', 'patched',
['patch', '-l', '--no-bump'], norm_changelog=False)
@pytest.mark.skipif('RPM_AVAILABLE == False')
def test_patch_unicode(tmpdir):
dist_path = common.prep_spec_test(tmpdir, 'patched')
with dist_path.as_cwd():
git('config', 'user.name', 'Přikrášlený Žluťoučký Kůň')
common.prep_patches_branch()
common.add_patches()
commit_before = git('rev-parse', 'HEAD')
with git_branch('master-patches'):
common.do_patch('foofile', '#to chceš',
"Přikrášlený Žluťoučký Kůň")
common.do_patch('foofile', '#to asi chceš', "Přikrášlení koně")
common.do_patch('foofile', '#to nechceš', "ěščřžýáí")
rdopkg('patch', '-l')
commit_after = git('rev-parse', 'HEAD')
git_clean = git.is_clean()
assert commit_before != commit_after, "New commit not created"
assert git_clean, "git not clean after action"
| 33.638889 | 75 | 0.645472 |
4e6d3879f0f0c9da8711d6817765b11e4d40512d | 2,519 | py | Python | setup.py | jordanrossetti/rtv | c6546b8e77463a5606ef56c86e054e248d197080 | [
"MIT"
] | null | null | null | setup.py | jordanrossetti/rtv | c6546b8e77463a5606ef56c86e054e248d197080 | [
"MIT"
] | null | null | null | setup.py | jordanrossetti/rtv | c6546b8e77463a5606ef56c86e054e248d197080 | [
"MIT"
] | 2 | 2018-05-01T21:40:39.000Z | 2018-05-02T20:43:35.000Z | import sys
import codecs
import setuptools
from version import __version__ as version
install_requires = [
'beautifulsoup4',
'decorator',
'kitchen',
'requests >=2.4.0', # https://github.com/michael-lazar/rtv/issues/325
'six',
]
tests_require = [
'coveralls',
'pytest>=3.1.0', # Pinned for the ``pytest.param`` method
'coverage',
'mock',
'pylint',
'vcrpy',
]
extras_require = {
'test': tests_require
}
# https://hynek.me/articles/conditional-python-dependencies/
if int(setuptools.__version__.split(".", 1)[0]) < 18:
assert "bdist_wheel" not in sys.argv
if sys.version_info[0:2] < (3, 6):
install_requires.append("mailcap-fix")
else:
# Building the bdist_wheel with conditional environment dependencies
# requires setuptools version > 18. For older setuptools versions this
# will raise an error.
extras_require.update({":python_version<'3.6'": ["mailcap-fix"]})
def long_description():
    """Return the package long description read from README.md (UTF-8).

    Uses a ``with`` block so the file handle is closed deterministically
    (the previous version leaked the handle until garbage collection).
    ``codecs.open`` is kept for Python 2.7 compatibility, which this
    package still declares support for.
    """
    with codecs.open('README.md', encoding='utf8') as f:
        return f.read()
setuptools.setup(
name='rtv',
version=version,
description='A simple terminal viewer for Reddit (Reddit Terminal Viewer)',
long_description=long_description(),
long_description_content_type='text/markdown',
url='https://github.com/michael-lazar/rtv',
author='Michael Lazar',
author_email='lazar.michael22@gmail.com',
license='MIT',
keywords='reddit terminal praw curses',
packages=[
'rtv',
'rtv.packages',
'rtv.packages.praw'
],
package_data={
'rtv': ['templates/*', 'themes/*'],
'rtv.packages.praw': ['praw.ini']
},
data_files=[("share/man/man1", ["rtv.1"])],
install_requires=install_requires,
tests_require=tests_require,
extras_require=extras_require,
entry_points={'console_scripts': ['rtv=rtv.__main__:main']},
classifiers=[
'Intended Audience :: End Users/Desktop',
'Environment :: Console :: Curses',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Terminals',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: News/Diary',
],
)
| 29.290698 | 79 | 0.631203 |
6901c2e2313c58a6c0cf21e64bdbfaf80c672cf8 | 5,974 | py | Python | plugin.video.deccandelight/resources/scrapers/gmala.py | arafathster/kodiworks | 8f66814c1ebe0cc4019a81f15d19882eb633d5e2 | [
"Apache-2.0"
] | 1 | 2018-11-25T18:08:19.000Z | 2018-11-25T18:08:19.000Z | plugin.video.deccandelight/resources/scrapers/gmala.py | arafathster/kodiworks | 8f66814c1ebe0cc4019a81f15d19882eb633d5e2 | [
"Apache-2.0"
] | null | null | null | plugin.video.deccandelight/resources/scrapers/gmala.py | arafathster/kodiworks | 8f66814c1ebe0cc4019a81f15d19882eb633d5e2 | [
"Apache-2.0"
] | 2 | 2018-11-04T20:08:04.000Z | 2018-12-15T01:03:03.000Z | '''
Hindi Geetmala deccandelight plugin
Copyright (C) 2016 Gujal
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from main import Scraper
from BeautifulSoup import BeautifulSoup, SoupStrainer
import urllib, re, requests
import HTMLParser
class gmala(Scraper):
    """Scraper for hindigeetmala.net (Hindi film song videos).

    Python 2 / Kodi plugin code: menu methods return ``(entries, next_mode)``
    tuples where each entry is ``(title, icon, url)`` and the integer selects
    the next navigation handler.
    """
    def __init__(self):
        Scraper.__init__(self)
        # Base URL of the site; 'ZZZZ' and 'MMMM7' are internal placeholders
        # split out later by get_top()/get_items().
        self.bu = 'http://www.hindigeetmala.net'
        self.icon = self.ipath + 'gmala.png'
        self.list = {'02Browse by Movie Titles': self.bu + '/ZZZZTitles',
                     '03Browse Yearwise': self.bu + '/ZZZZYearwise',
                     '04Browse by Singer': self.bu + '/ZZZZSinger',
                     '05[COLOR yellow]** Search by Singer **[/COLOR]': self.bu + '/search.php?type=1&value=MMMM7',
                     '06[COLOR yellow]** Search by Composer **[/COLOR]': self.bu + '/search.php?type=2&value=MMMM7',
                     '07[COLOR yellow]** Search by Movie **[/COLOR]': self.bu + '/search.php?type=3&value=MMMM7',
                     '08[COLOR yellow]** Search by Song **[/COLOR]': self.bu + '/search.php?type=8&value=MMMM7'}

    def get_menu(self):
        # (menu dict, next mode 4, plugin icon)
        return (self.list,4,self.icon)

    def get_top(self,iurl):
        """
        Get the list of Categories.
        :return: list
        """
        categories = []
        # iurl is '<base>ZZZZ<category>': split into page URL and category name.
        url = iurl.split('ZZZZ')[0]
        category = iurl.split('ZZZZ')[1]
        html = requests.get(url, headers=self.hdr).text
        # Index cells carry class names starting with 'h20'.
        mlink = SoupStrainer('td', {'class':re.compile('^h20')})
        items = BeautifulSoup(html, parseOnlyThese=mlink)
        for item in items:
            if category in item.span.text:
                letters = item.findAll('a')
                for letter in letters:
                    title = letter.text
                    url = self.bu + letter.get('href')
                    icon = self.icon
                    categories.append((title,icon,url))
        return (categories,5)

    def get_second(self,iurl):
        """
        Get the list of categories.
        :return: list
        """
        categories = []
        html = requests.get(iurl, headers=self.hdr).text
        mlink = SoupStrainer('table', {'class':'b1 w760 alcen'})
        itemclass = BeautifulSoup(html, parseOnlyThese=mlink)
        items = itemclass.findAll('td', {'class':'w25p h150'})
        for item in items:
            title = item.text
            url = self.bu + item.a.get('href')
            try:
                # Thumbnail is optional; fall back to the plugin icon.
                icon = self.bu + item.img.get('src')
            except:
                icon = self.icon
            categories.append((title,icon,url))
        # Pagination: look for a 'next' link and append a "Next Page" entry.
        plink = SoupStrainer('td', {'class':'vatop w140'})
        Paginator = BeautifulSoup(html, parseOnlyThese=plink)
        pages = Paginator.findAll('td')
        for page in pages:
            if 'next' in str(page):
                ppath = page.find('a')['href']
                if ppath[0] == '/':
                    purl = self.bu + ppath
                else:
                    # Relative link: resolve against the current page directory.
                    ptop = re.findall('(.+/)',iurl)[0]
                    purl = '%s%s'%(ptop,ppath)
                pgtxt = re.findall('(Page.*?)"',html)[0]
                # Only offer "next" when current page != last page.
                if pgtxt.split()[1] != pgtxt.split()[3]:
                    title = 'Next Page.. (Currently in %s)' % pgtxt
                    categories.append((title,self.nicon,purl))
        return (categories,7)

    def get_items(self,iurl):
        h = HTMLParser.HTMLParser()
        movies = []
        # A URL ending in '&value=' is a search template: ask the user and
        # append the URL-quoted query.
        if iurl[-7:] == '&value=':
            search_text = self.get_SearchQuery('Hindi Geetmala')
            search_text = urllib.quote_plus(search_text)
            iurl = iurl + search_text
        html = requests.get(iurl, headers=self.hdr).text
        mlink = SoupStrainer('tr', {'itemprop':'track'})
        items = BeautifulSoup(html, parseOnlyThese=mlink)
        for item in items:
            albumdiv = item.find('td', {'itemprop':'inAlbum'})
            try:
                # Prefix the song title with its album/movie when available.
                title = albumdiv.text + '-> '
            except:
                title = ''
            titlediv = item.find('td', {'class':'w185'})
            title += titlediv.find('span').text
            url = self.bu + titlediv.find('a')['href']
            icon = self.icon
            movies.append((title,icon,url))
        # Pagination: same scheme as get_second(), different cell class.
        plink = SoupStrainer('td', {'class':'vamid w140'})
        Paginator = BeautifulSoup(html, parseOnlyThese=plink)
        pages = Paginator.findAll('td')
        for page in pages:
            if 'next' in str(page):
                ppath = page.find('a')['href']
                if ppath[0] == '/':
                    purl = self.bu + ppath
                else:
                    ptop = re.findall('(.+/)',iurl)[0]
                    purl = '%s%s'%(ptop,ppath)
                pgtxt = re.findall('(Page.*?)"',html)[0]
                if pgtxt.split()[1] != pgtxt.split()[3]:
                    title = 'Next Page.. (Currently in %s)' % pgtxt
                    movies.append((title,self.nicon,purl))
        return (movies,9)

    def get_video(self,url):
        """Return the embedded iframe video URL for a song page, or ''."""
        html = requests.get(url, headers=self.hdr).text
        mlink = SoupStrainer('table', {'class':'b1 w760 alcen'})
        videoclass = BeautifulSoup(html, parseOnlyThese=mlink)
        try:
            link = videoclass.find('iframe')
            vidurl = link.get('src')
        except:
            # No iframe on the page: signal "no video" with an empty string.
            vidurl = ''
        return vidurl
| 39.562914 | 116 | 0.534985 |
aa77d9a78dbe15c7afb44eeb130ca4140c481738 | 104 | py | Python | ncluster/test.py | timotheecour/ncluster | 24baf049c2690505bf4dd63ec7d8822edb81b5a9 | [
"MIT"
] | 34 | 2018-09-08T15:41:43.000Z | 2020-05-15T14:06:45.000Z | ncluster/test.py | timotheecour/ncluster | 24baf049c2690505bf4dd63ec7d8822edb81b5a9 | [
"MIT"
] | 66 | 2019-05-19T18:46:53.000Z | 2019-09-16T00:48:25.000Z | ncluster/test.py | timotheecour/ncluster | 24baf049c2690505bf4dd63ec7d8822edb81b5a9 | [
"MIT"
] | 6 | 2019-10-01T07:28:52.000Z | 2022-02-05T02:45:18.000Z |
# Demo: printf-style "%" formatting next to the equivalent f-string specs.
print("%20s" % ('asdfasdf',))   # right-align in a 20-char field
print(f"{'asdfasdf':>20}")      # same result via f-string alignment spec
print("%5.2f" % (5.5,))         # fixed-point: width 5, 2 decimals
print(f"{5.5:5.2f}")            # same result via f-string format spec
| 14.857143 | 29 | 0.509615 |
2b07a54ba1ae97a97416ed9fcd48c7e51ce29ef6 | 246 | py | Python | wold2/assets.py | blurks/wold2 | 77272b5ee2e5330d01bfed1363d515c77fefa529 | [
"Apache-2.0"
] | 15 | 2016-08-26T17:55:09.000Z | 2022-02-03T03:06:34.000Z | wold2/assets.py | blurks/wold2 | 77272b5ee2e5330d01bfed1363d515c77fefa529 | [
"Apache-2.0"
] | 2 | 2018-01-24T15:31:01.000Z | 2018-03-12T09:30:45.000Z | wold2/assets.py | blurks/wold2 | 77272b5ee2e5330d01bfed1363d515c77fefa529 | [
"Apache-2.0"
] | 6 | 2015-12-06T22:02:08.000Z | 2022-02-02T16:29:32.000Z | import pathlib
from clld.web.assets import environment
import wold2
environment.append_path(
str(pathlib.Path(wold2.__file__).parent.joinpath('static')), url='/wold2:static/')
environment.load_path = list(reversed(environment.load_path))
| 22.363636 | 86 | 0.784553 |
2d80e8c57209b069bb55639664c98bd12040720d | 383 | py | Python | other/dingding/dingtalk/api/rest/OapiTdpTaskBasicDeleteRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | other/dingding/dingtalk/api/rest/OapiTdpTaskBasicDeleteRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | other/dingding/dingtalk/api/rest/OapiTdpTaskBasicDeleteRequest.py | hth945/pytest | 83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc | [
"Apache-2.0"
] | null | null | null | '''
Created by auto_sdk on 2020.12.23
'''
from dingtalk.api.base import RestApi
class OapiTdpTaskBasicDeleteRequest(RestApi):
    """Auto-generated request wrapper for dingtalk.oapi.tdp.task.basic.delete.

    Populate the three public attributes, then submit through the RestApi
    base-class machinery.
    """
    def __init__(self,url=None):
        RestApi.__init__(self,url)
        # Request parameters; set by the caller before the request is sent.
        self.microapp_agent_id = None
        self.operator_userid = None
        self.task_id = None

    def getHttpMethod(self):
        # This endpoint only accepts POST.
        return 'POST'

    def getapiname(self):
        # API method name used by RestApi to build the request.
        return 'dingtalk.oapi.tdp.task.basic.delete'
| 22.529412 | 46 | 0.75718 |
a24dbef39498754c27254978908a086eff827487 | 2,195 | py | Python | setup.py | kasimte/fastcore | 22b6857c94e638719f100793cd56c3fd12ecc816 | [
"Apache-2.0"
] | 1 | 2020-08-23T21:32:34.000Z | 2020-08-23T21:32:34.000Z | setup.py | kasimte/fastcore | 22b6857c94e638719f100793cd56c3fd12ecc816 | [
"Apache-2.0"
] | null | null | null | setup.py | kasimte/fastcore | 22b6857c94e638719f100793cd56c3fd12ecc816 | [
"Apache-2.0"
] | null | null | null | from pkg_resources import parse_version
from configparser import ConfigParser
import setuptools
assert parse_version(setuptools.__version__)>=parse_version('36.2')
# note: all settings are in settings.ini; edit there, not here
config = ConfigParser(delimiters=['='])
config.read('settings.ini')
cfg = config['DEFAULT']
cfg_keys = 'version description keywords author author_email'.split()
expected = cfg_keys + "lib_name user branch license status min_python audience language".split()
for o in expected: assert o in cfg, "missing expected setting: {}".format(o)
setup_cfg = {o:cfg[o] for o in cfg_keys}
licenses = {
'apache2': ('Apache Software License 2.0','OSI Approved :: Apache Software License'),
}
statuses = [ '1 - Planning', '2 - Pre-Alpha', '3 - Alpha',
'4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive' ]
py_versions = '2.0 2.1 2.2 2.3 2.4 2.5 2.6 2.7 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8'.split()
requirements = ['pip', 'packaging', 'wheel']
if cfg.get('requirements'): requirements += cfg.get('requirements','').split()
if cfg.get('pip_requirements'): requirements += cfg.get('pip_requirements','').split()
dev_requirements = cfg.get('dev_requirements','').split()
lic = licenses[cfg['license']]
min_python = cfg['min_python']
setuptools.setup(
name = cfg['lib_name'],
license = lic[0],
classifiers = [
'Development Status :: ' + statuses[int(cfg['status'])],
'Intended Audience :: ' + cfg['audience'].title(),
'License :: ' + lic[1],
'Natural Language :: ' + cfg['language'].title(),
] + ['Programming Language :: Python :: '+o for o in py_versions[py_versions.index(min_python):]],
url = 'https://github.com/{}/{}'.format(cfg['user'],cfg['lib_name']),
packages = setuptools.find_packages(),
include_package_data = True,
install_requires = requirements,
extras_require = {
'dev': dev_requirements
},
python_requires = '>=' + cfg['min_python'],
long_description = open('README.md').read(),
long_description_content_type = 'text/markdown',
zip_safe = False,
entry_points = { 'console_scripts': cfg.get('console_scripts','').split() },
**setup_cfg)
| 40.648148 | 102 | 0.665604 |
c40fa15787a01d277a6a2c4473df8c5941638a9e | 54,078 | py | Python | VirtualBox-5.0.0/src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/Common/Misc.py | egraba/vbox_openbsd | 6cb82f2eed1fa697d088cecc91722b55b19713c2 | [
"MIT"
] | 1 | 2015-04-30T14:18:45.000Z | 2015-04-30T14:18:45.000Z | VirtualBox-5.0.0/src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/Common/Misc.py | egraba/vbox_openbsd | 6cb82f2eed1fa697d088cecc91722b55b19713c2 | [
"MIT"
] | null | null | null | VirtualBox-5.0.0/src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/Common/Misc.py | egraba/vbox_openbsd | 6cb82f2eed1fa697d088cecc91722b55b19713c2 | [
"MIT"
] | null | null | null | ## @file
# Common routines used by all tools
#
# Copyright (c) 2007 - 2010, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import os
import sys
import string
import thread
import threading
import time
import re
import cPickle
import array
from UserDict import IterableUserDict
from UserList import UserList
from Common import EdkLogger as EdkLogger
from Common import GlobalData as GlobalData
from DataType import *
from BuildToolError import *
## Regular expression used to find out place holders in string template
gPlaceholderPattern = re.compile("\$\{([^$()\s]+)\}", re.MULTILINE|re.UNICODE)
## Dictionary used to store file time stamp for quick re-access
gFileTimeStampCache = {} # {file path : file time stamp}
## Dictionary used to store dependencies of files
gDependencyDatabase = {} # arch : {file path : [dependent files list]}
## callback routine for processing variable option
#
# This function can be used to process variable number of option values. The
# typical usage of it is specify architecure list on command line.
# (e.g. <tool> -a IA32 X64 IPF)
#
# @param Option Standard callback function parameter
# @param OptionString Standard callback function parameter
# @param Value Standard callback function parameter
# @param Parser Standard callback function parameter
#
# @retval
#
def ProcessVariableArgument(Option, OptionString, Value, Parser):
    """optparse callback that consumes a variable number of option values.

    Pulls arguments off Parser.rargs (in place) until the next option flag
    is seen, then stores the collected list on Parser.values under the
    option's dest name. E.g. "<tool> -a IA32 X64 IPF" yields ['IA32',
    'X64', 'IPF'].
    """
    assert Value is None
    Collected = []
    Remaining = Parser.rargs
    while Remaining:
        Candidate = Remaining[0]
        # Stop at the next flag: "--xxx" (long) or "-x..." (short, not "--").
        IsLongOption = Candidate[:2] == "--" and len(Candidate) > 2
        IsShortOption = (Candidate[:1] == "-" and len(Candidate) > 1
                         and Candidate[1] != "-")
        if IsLongOption or IsShortOption:
            break
        # Consume the value from the parser's pending-argument list.
        Collected.append(Remaining.pop(0))
    setattr(Parser.values, Option.dest, Collected)
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C structure style
#
# @param Guid The GUID string
#
# @retval string The GUID string in C structure style
#
def GuidStringToGuidStructureString(Guid):
    """Convert 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx' to C structure form.

    Returns e.g. '{0x...,0x...,0x...,{0x..,...,0x..}}' with the fourth
    registry-format group split into two bytes and the node field into six.
    Hex digit case is preserved from the input.
    """
    Parts = Guid.split('-')
    # Three leading integer fields.
    Fields = ['0x' + Parts[0], '0x' + Parts[1], '0x' + Parts[2]]
    # Eight trailing bytes: two from group 4, six from the node.
    Bytes = ['0x' + Parts[3][0:2], '0x' + Parts[3][2:4]]
    for Offset in range(0, 12, 2):
        Bytes.append('0x' + Parts[4][Offset:Offset + 2])
    return '{%s, {%s}}' % (', '.join(Fields), ', '.join(Bytes))
## Convert GUID structure in byte array to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in byte array
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureByteArrayToGuidString(GuidValue):
    """Convert a 16-byte GUID array string to registry format.

    @param GuidValue    string like "{0x78, 0x56, ..., 0xab}" holding the
                        16 raw GUID bytes in memory (little-endian) order

    @retval string      "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" (lowercase),
                        or '' if the input does not contain exactly 16
                        parseable hex bytes
    """
    guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
    guidValueList = guidValueString.split(",")
    if len(guidValueList) != 16:
        return ''
    try:
        # Fix: narrowed from a bare 'except' so that only malformed hex
        # yields '' instead of masking unrelated errors.
        Bytes = [int(Byte, 16) for Byte in guidValueList]
    except ValueError:
        return ''
    # The first three fields are stored little-endian, hence the byte swaps.
    return "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
            Bytes[3], Bytes[2], Bytes[1], Bytes[0],
            Bytes[5], Bytes[4],
            Bytes[7], Bytes[6],
            Bytes[8], Bytes[9],
            Bytes[10], Bytes[11], Bytes[12], Bytes[13], Bytes[14], Bytes[15]
            )
## Convert GUID string in C structure style to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureStringToGuidString(GuidValue):
    """Convert a C-structure GUID string to registry format.

    @param GuidValue    string like "{0x12345678, 0x1234, 0x1234,
                        {0xab, ..., 0xab}}" (11 comma-separated fields)

    @retval string      "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" (lowercase),
                        or '' if the input does not contain exactly 11
                        parseable hex fields
    """
    guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
    guidValueList = guidValueString.split(",")
    if len(guidValueList) != 11:
        return ''
    try:
        # Fix: narrowed from a bare 'except' so that only malformed hex
        # yields '' instead of masking unrelated errors.
        Fields = [int(Field, 16) for Field in guidValueList]
    except ValueError:
        return ''
    return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % tuple(Fields)
## Convert GUID string in C structure style to xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx format
#
def GuidStructureStringToGuidValueName(GuidValue):
    """Convert a C-structure GUID string to an underscore-joined value name.

    Returns 'xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx' (lowercase); raises a
    build-tool error via EdkLogger for inputs without exactly 11 fields.
    """
    Stripped = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "")
    FieldStrings = Stripped.split(",")
    if len(FieldStrings) != 11:
        EdkLogger.error(None, FORMAT_INVALID, "Invalid GUID value string [%s]" % GuidValue)
    Numbers = tuple(int(Field, 16) for Field in FieldStrings)
    return "%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x" % Numbers
## Create directories
#
# @param Directory The directory name
#
def CreateDirectory(Directory):
    """Create Directory, including any missing parent directories.

    @param Directory    path to create; None or a blank string is treated
                        as "nothing to do"

    @retval True        the directory exists (created here or already
                        present), or no work was needed
    @retval False       creation failed (e.g. permission denied)
    """
    if Directory is None or Directory.strip() == "":
        return True
    try:
        if not os.access(Directory, os.F_OK):
            os.makedirs(Directory)
    except OSError:
        # Fix: narrowed from a bare 'except' so only filesystem errors are
        # reported as failure instead of masking unrelated exceptions.
        return False
    return True
## Remove directories, including files and sub-directories in it
#
# @param Directory The directory name
#
def RemoveDirectory(Directory, Recursively=False):
    """Remove Directory; with Recursively=True, delete its contents first.

    Silently returns for None/blank/non-existent paths. NOTE(review): the
    recursive branch uses os.chdir, which changes the process-wide working
    directory while deleting — not safe if other threads depend on the cwd.
    """
    if Directory == None or Directory.strip() == "" or not os.path.exists(Directory):
        return
    if Recursively:
        CurrentDirectory = os.getcwd()
        os.chdir(Directory)
        for File in os.listdir("."):
            if os.path.isdir(File):
                RemoveDirectory(File, Recursively)
            else:
                os.remove(File)
        # Restore the caller's working directory before removing Directory.
        os.chdir(CurrentDirectory)
    os.rmdir(Directory)
## Check if given file is changed or not
#
# This method is used to check if a file is changed or not between two build
# actions. It makes use a cache to store files timestamp.
#
# @param File The path of file
#
# @retval True If the given file is changed, doesn't exist, or can't be
# found in timestamp cache
# @retval False If the given file is changed
#
def IsChanged(File):
    """Return True if File is new/changed since the last call, else False.

    Compares the file's modification time (index -2 of the os.stat tuple,
    i.e. st_mtime) against the module-level gFileTimeStampCache, updating
    the cache on a miss or mismatch. A non-existent file counts as changed.
    """
    if not os.path.exists(File):
        return True

    FileState = os.stat(File)
    TimeStamp = FileState[-2]
    if File in gFileTimeStampCache and TimeStamp == gFileTimeStampCache[File]:
        FileChanged = False
    else:
        FileChanged = True
        gFileTimeStampCache[File] = TimeStamp

    return FileChanged
## Store content in file
#
# This method is used to save file only when its content is changed. This is
# quite useful for "make" system to decide what will be re-built and what won't.
#
#   @param      File            The path of file
#   @param      Content         The new content of the file
#   @param      IsBinaryFile    The flag indicating if the file is binary file or not
#
#   @retval     True            If the file content is changed and the file is renewed
#   @retval     False           If the file content is the same
#
def SaveFileOnChange(File, Content, IsBinaryFile=True):
    if not IsBinaryFile:
        # text content is normalized to the platform line separator
        Content = Content.replace("\n", os.linesep)
    if os.path.exists(File):
        try:
            # 'with' guarantees the handle is closed (original leaked it)
            with open(File, "rb") as Fd:
                if Content == Fd.read():
                    return False
        except:
            EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)
    DirName = os.path.dirname(File)
    if not CreateDirectory(DirName):
        EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
    else:
        if DirName == '':
            DirName = os.getcwd()
        if not os.access(DirName, os.W_OK):
            EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)
    try:
        # VBox disabled the Windows-only PyUtility.SaveFileToDisk fast path
        # (to avoid a python25.dll dependency), so plain file I/O is always
        # used; the dead 'if False:' branch has been removed.
        with open(File, "wb") as Fd:
            Fd.write(Content)
    except IOError as X:
        # 'except ... as ...' replaces the Python2-only 'except IOError, X'
        EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s'%X)
    return True
## Make a Python object persistent on file system
#
#   @param      Data    The object to be stored in file
#   @param      File    The path of file to store the object
#
def DataDump(Data, File):
    Fd = None
    try:
        Fd = open(File, 'wb')
        # HIGHEST_PROTOCOL: compact binary pickle format
        cPickle.dump(Data, Fd, cPickle.HIGHEST_PROTOCOL)
    except:
        # best-effort: report the failure but do not abort (RaiseError=False)
        EdkLogger.error("", FILE_OPEN_FAILURE, ExtraData=File, RaiseError=False)
    finally:
        if Fd != None:
            Fd.close()
## Restore a Python object from a file
#
#   @param      File    The path of file stored the object
#
#   @retval     object  A python object
#   @retval     None    If failure in file operation or unpickling
#
def DataRestore(File):
    Data = None
    Fd = None
    try:
        Fd = open(File, 'rb')
        Data = cPickle.load(Fd)
    except Exception, e:
        # a corrupt/missing cache file is not fatal: log and return None
        EdkLogger.verbose("Failed to load [%s]\n\t%s" % (File, str(e)))
        Data = None
    finally:
        if Fd != None:
            Fd.close()
    return Data
## Retrieve and cache the real path name in file system
#
#   @param      Root    The root directory of path relative to
#
#   @retval     str     The path string if the path exists
#   @retval     None    If path doesn't exist
#
class DirCache:
    ## Constructor: prime the cache with the first-level entries of Root
    def __init__(self, Root):
        self._Root = Root
        # Per-instance caches. These used to be *class* attributes, so every
        # DirCache instance shared (and polluted) one cache even when built
        # for different root directories.
        self._CACHE_ = set()
        self._UPPER_CACHE_ = {}
        for F in os.listdir(Root):
            self._CACHE_.add(F)
            self._UPPER_CACHE_[F.upper()] = F

    # =[] operator: resolve Path (case-insensitively) against the cached root
    def __getitem__(self, Path):
        # strip the root prefix; an empty remainder means Path *is* the root
        Path = Path[len(os.path.commonprefix([Path, self._Root])):]
        if not Path:
            return self._Root
        if Path and Path[0] == os.path.sep:
            Path = Path[1:]
        # fast path: exact or case-insensitive hit on an already-cached entry
        if Path in self._CACHE_:
            return os.path.join(self._Root, Path)
        UpperPath = Path.upper()
        if UpperPath in self._UPPER_CACHE_:
            return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
        # find the deepest already-cached parent directory of Path
        IndexList = []
        LastSepIndex = -1
        SepIndex = Path.find(os.path.sep)
        while SepIndex > -1:
            Parent = UpperPath[:SepIndex]
            if Parent not in self._UPPER_CACHE_:
                break
            LastSepIndex = SepIndex
            SepIndex = Path.find(os.path.sep, LastSepIndex + 1)
        if LastSepIndex == -1:
            return None
        # walk the remaining components, filling the cache level by level
        Cwd = os.getcwd()
        os.chdir(self._Root)
        SepIndex = LastSepIndex
        while SepIndex > -1:
            Parent = Path[:SepIndex]
            ParentKey = UpperPath[:SepIndex]
            if ParentKey not in self._UPPER_CACHE_:
                os.chdir(Cwd)
                return None
            if Parent in self._CACHE_:
                ParentDir = Parent
            else:
                ParentDir = self._UPPER_CACHE_[ParentKey]
            for F in os.listdir(ParentDir):
                Dir = os.path.join(ParentDir, F)
                self._CACHE_.add(Dir)
                self._UPPER_CACHE_[Dir.upper()] = Dir
            SepIndex = Path.find(os.path.sep, SepIndex + 1)
        os.chdir(Cwd)
        if Path in self._CACHE_:
            return os.path.join(self._Root, Path)
        elif UpperPath in self._UPPER_CACHE_:
            return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
        return None
## Collect all files under a directory tree
#
#   @param      Root        Directory to walk
#   @param      SkipList    Directory names to prune from the walk
#   @param      FullPath    Return full paths if True, else paths relative to Root
#
#   @retval     list        All file paths found
#
def GetFiles(Root, SkipList=None, FullPath = True):
    BasePath = Root
    Result = []
    for DirPath, DirNames, FileNames in os.walk(Root):
        # prune skipped directory names in place so os.walk won't descend
        if SkipList:
            for Skipped in SkipList:
                if Skipped in DirNames:
                    DirNames.remove(Skipped)
        for Name in FileNames:
            FullName = os.path.normpath(os.path.join(DirPath, Name))
            Result.append(FullName if FullPath else FullName[len(BasePath) + 1:])
    return Result
## Check if a file exists and (optionally) has the expected extension
#
#   @param      File    File name or path to be checked
#   @param      Ext     Expected file extension (e.g. ".inf"); None skips the check
#
#   @retval     True    if file exists (and matches Ext when given)
#   @retval     False   if file doesn't exist or has a different extension
#
def ValidFile(File, Ext=None):
    if Ext is not None:
        Dummy, FileExt = os.path.splitext(File)
        # extension comparison is case-insensitive
        if FileExt.lower() != Ext.lower():
            return False
    if not os.path.exists(File):
        return False
    return True
## Resolve File against Dir (with OverrideDir as fallback) via the global file cache
#
#   @param      File            File name, possibly relative
#   @param      Dir             Primary directory joined with File for the lookup
#   @param      OverrideDir     Fallback directory tried when the primary lookup fails
#
#   @retval     The resolved path from GlobalData.gAllFiles, or the cache's
#               miss value (falsy) when neither lookup succeeds
#
def RealPath(File, Dir='', OverrideDir=''):
    NewFile = os.path.normpath(os.path.join(Dir, File))
    # gAllFiles performs the case-correcting file-system lookup
    NewFile = GlobalData.gAllFiles[NewFile]
    if not NewFile and OverrideDir:
        NewFile = os.path.normpath(os.path.join(OverrideDir, File))
        NewFile = GlobalData.gAllFiles[NewFile]
    return NewFile
## Resolve File against Dir/OverrideDir and split it into (relative path, root)
#
#   @param      File            File name, possibly with sub-directories
#   @param      Dir             Primary directory the file should live under
#   @param      OverrideDir     Directory checked first, when given
#
#   @retval     (str, str)      (path relative to the matched root, matched root)
#   @retval     (None, None)    when the file cannot be resolved
#
def RealPath2(File, Dir='', OverrideDir=''):
    if OverrideDir:
        NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
        if NewFile:
            # avoid a doubled separator when OverrideDir already ends with one
            if OverrideDir[-1] == os.path.sep:
                return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]
            else:
                return NewFile[len(OverrideDir)+1:], NewFile[0:len(OverrideDir)]
    if GlobalData.gAllFiles:
        NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
        # VBox hack begin - Required for RAW reset vectors and logo bmps files outside the workspace.
        if not NewFile and Dir == '' and os.path.isabs(File):
            NewFile = os.path.normpath(File);
        # VBox hack end.
    else:
        # no cache available yet: fall back to the plain joined path
        NewFile = os.path.normpath(os.path.join(Dir, File))
    if NewFile:
        if Dir:
            if Dir[-1] == os.path.sep:
                return NewFile[len(Dir):], NewFile[0:len(Dir)]
            else:
                return NewFile[len(Dir)+1:], NewFile[0:len(Dir)]
        else:
            return NewFile, ''
    return None, None
## Check if given file exists or not, resolving EDK macros and search order
#
#   @param      AllFiles        File cache (DirCache-like; returns None on miss)
#   @param      File            File name to resolve, may contain $(EFI_SOURCE)/$(EDK_SOURCE)
#   @param      Ext             Expected extension; None skips the check
#   @param      Workspace       Workspace root, stripped from the default Dir
#   @param      EfiSource       Replacement for $(EFI_SOURCE)
#   @param      EdkSource       Replacement for $(EDK_SOURCE)
#   @param      Dir             Directory the file is relative to ('.' means cwd)
#   @param      OverrideDir     Directory checked before the normal location
#
#   @retval     (True, resolved path) on success, (False, last tried path) otherwise
#
def ValidFile2(AllFiles, File, Ext=None, Workspace='', EfiSource='', EdkSource='', Dir='.', OverrideDir=''):
    NewFile = File
    if Ext != None:
        Dummy, FileExt = os.path.splitext(File)
        # extension check is case-insensitive
        if FileExt.lower() != Ext.lower():
            return False, File
    # Replace the Edk macros
    if OverrideDir != '' and OverrideDir != None:
        if OverrideDir.find('$(EFI_SOURCE)') > -1:
            OverrideDir = OverrideDir.replace('$(EFI_SOURCE)', EfiSource)
        if OverrideDir.find('$(EDK_SOURCE)') > -1:
            OverrideDir = OverrideDir.replace('$(EDK_SOURCE)', EdkSource)
    # Replace the default dir to current dir, made workspace-relative
    if Dir == '.':
        Dir = os.getcwd()
        Dir = Dir[len(Workspace)+1:]
    # First check if File has Edk definition itself
    if File.find('$(EFI_SOURCE)') > -1 or File.find('$(EDK_SOURCE)') > -1:
        NewFile = File.replace('$(EFI_SOURCE)', EfiSource)
        NewFile = NewFile.replace('$(EDK_SOURCE)', EdkSource)
        NewFile = AllFiles[os.path.normpath(NewFile)]
        if NewFile != None:
            return True, NewFile
    # Second check the path with override value
    if OverrideDir != '' and OverrideDir != None:
        NewFile = AllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
        if NewFile != None:
            return True, NewFile
    # Last check the path with normal definitions
    File = os.path.join(Dir, File)
    NewFile = AllFiles[os.path.normpath(File)]
    if NewFile != None:
        return True, NewFile
    return False, File
## Check if given file exists or not, returning the matched root/relative split
#
#   @param      AllFiles        File cache (DirCache-like; returns None on miss)
#   @param      File            File name to resolve, may contain EDK macros
#   @param      Workspace       Workspace root, stripped from the default Dir
#   @param      EfiSource       Replacement for $(EFI_SOURCE)
#   @param      EdkSource       Replacement for $(EDK_SOURCE)
#   @param      Dir             Module dir relative to workspace ('.' means cwd)
#   @param      OverrideDir     Directory checked before the normal location
#
#   @retval     (NewRelaPath, RelaPath, File) - the root the file was found
#               under, the module's own root, and the (possibly rewritten)
#               file name
#
def ValidFile3(AllFiles, File, Workspace='', EfiSource='', EdkSource='', Dir='.', OverrideDir=''):
    # Replace the Edk macros
    if OverrideDir != '' and OverrideDir != None:
        if OverrideDir.find('$(EFI_SOURCE)') > -1:
            OverrideDir = OverrideDir.replace('$(EFI_SOURCE)', EfiSource)
        if OverrideDir.find('$(EDK_SOURCE)') > -1:
            OverrideDir = OverrideDir.replace('$(EDK_SOURCE)', EdkSource)
    # Replace the default dir to current dir
    # Dir is current module dir related to workspace
    if Dir == '.':
        Dir = os.getcwd()
        Dir = Dir[len(Workspace)+1:]
    NewFile = File
    RelaPath = AllFiles[os.path.normpath(Dir)]
    NewRelaPath = RelaPath
    # single-pass 'while True' used only so each successful lookup can 'break'
    while(True):
        # First check if File has Edk definition itself
        if File.find('$(EFI_SOURCE)') > -1 or File.find('$(EDK_SOURCE)') > -1:
            File = File.replace('$(EFI_SOURCE)', EfiSource)
            File = File.replace('$(EDK_SOURCE)', EdkSource)
            NewFile = AllFiles[os.path.normpath(File)]
            if NewFile != None:
                NewRelaPath = os.path.dirname(NewFile)
                File = os.path.basename(NewFile)
                #NewRelaPath = NewFile[:len(NewFile) - len(File.replace("..\\", '').replace("../", '')) - 1]
            break
        # Second check the path with override value
        if OverrideDir != '' and OverrideDir != None:
            NewFile = AllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
            if NewFile != None:
                #NewRelaPath = os.path.dirname(NewFile)
                # strip the '..'-less tail of File off the resolved path to get the root
                NewRelaPath = NewFile[:len(NewFile) - len(File.replace("..\\", '').replace("../", '')) - 1]
                break
        # Last check the path with normal definitions
        NewFile = AllFiles[os.path.normpath(os.path.join(Dir, File))]
        if NewFile != None:
            break
        # No file found
        break
    return NewRelaPath, RelaPath, File
## Compute a relative path from directory Path1 to Path2
#
#   @param      Path1   The directory the result should be relative to
#   @param      Path2   The target path
#
#   @retval     str     '../' hops out of Path1 followed by the tail of Path2
#
# NOTE(review): the loop indexes L2 with positions taken from L1, so this
# assumes Path2 has at least as many components as Path1 whenever their
# shared prefix covers all of Path2 -- otherwise L2[Index] raises
# IndexError; confirm callers guarantee this.
def GetRelPath(Path1, Path2):
    FileName = os.path.basename(Path2)
    # normpath('/') yields the platform separator used for splitting
    L1 = os.path.normpath(Path1).split(os.path.normpath('/'))
    L2 = os.path.normpath(Path2).split(os.path.normpath('/'))
    for Index in range(0, len(L1)):
        if L1[Index] != L2[Index]:
            # one '../' per remaining component of Path1, then the rest of Path2
            FileName = '../' * (len(L1) - Index)
            for Index2 in range(Index, len(L2)):
                FileName = os.path.join(FileName, L2[Index2])
            break
    return os.path.normpath(FileName)
## Look up a GUID value by its C name in the given packages
#
#   @param      CName           The CName of the GUID
#   @param      PackageList     Packages to search, in order
#
#   @retval     GuidValue       from the first package declaring CName
#   @retval     None            if no given package declares CName
#
def GuidValue(CName, PackageList):
    for Package in PackageList:
        Declarations = Package.Guids
        if CName in Declarations:
            return Declarations[CName]
    return None
## Look up a Protocol GUID value by its C name in the given packages
#
#   @param      CName           The CName of the Protocol
#   @param      PackageList     Packages to search, in order
#
#   @retval     Protocol value  from the first package declaring CName
#   @retval     None            if no given package declares CName
#
def ProtocolValue(CName, PackageList):
    for Package in PackageList:
        Declarations = Package.Protocols
        if CName in Declarations:
            return Declarations[CName]
    return None
## Look up a PPI GUID value by its C name in the given packages
#
#   @param      CName           The CName of the PPI
#   @param      PackageList     Packages to search, in order
#
#   @retval     PPI value       from the first package declaring CName
#   @retval     None            if no given package declares CName
#
def PpiValue(CName, PackageList):
    for Package in PackageList:
        Declarations = Package.Ppis
        if CName in Declarations:
            return Declarations[CName]
    return None
## A string template class
#
# This class implements a template for string replacement. A string template
# looks like following
#
#       ${BEGIN} other_string ${placeholder_name} other_string ${END}
#
# The string between ${BEGIN} and ${END} will be repeated as many times as the
# length of "placeholder_name", which is a list passed through a dict. The
# "placeholder_name" is the key name of the dict. The ${BEGIN} and ${END} can
# be not used and, in this case, the "placeholder_name" must not a list and it
# will just be replaced once.
#
class TemplateString(object):
    _REPEAT_START_FLAG = "BEGIN"
    _REPEAT_END_FLAG = "END"

    ## One template fragment with its placeholders
    class Section(object):
        # value types that trigger repeated (per-element) substitution
        _LIST_TYPES = [type([]), type(set()), type((0,))]

        ## Constructor
        #
        #   @param  TemplateSection     Raw text of this section
        #   @param  PlaceHolderList     (name, start, end) tuples, positions
        #                               relative to TemplateSection
        #
        def __init__(self, TemplateSection, PlaceHolderList):
            self._Template = TemplateSection
            self._PlaceHolderList = []
            # Split the section into sub-sections according to the position of placeholders
            if PlaceHolderList:
                self._SubSectionList = []
                SubSectionStart = 0
                #
                # The placeholders passed in must be in the format of
                #
                #   PlaceHolderName, PlaceHolderStartPoint, PlaceHolderEndPoint
                #
                for PlaceHolder,Start,End in PlaceHolderList:
                    self._SubSectionList.append(TemplateSection[SubSectionStart:Start])
                    self._SubSectionList.append(TemplateSection[Start:End])
                    self._PlaceHolderList.append(PlaceHolder)
                    SubSectionStart = End
                if SubSectionStart < len(TemplateSection):
                    self._SubSectionList.append(TemplateSection[SubSectionStart:])
            else:
                self._SubSectionList = [TemplateSection]

        def __str__(self):
            return self._Template + " : " + str(self._PlaceHolderList)

        ## Substitute placeholder values into this section
        #
        #   @param  PlaceHolderValues   dict of placeholder name -> value;
        #                               list-like values are repeated per element
        #
        def Instantiate(self, PlaceHolderValues):
            RepeatTime = -1
            RepeatPlaceHolders = {}
            NonRepeatPlaceHolders = {}
            for PlaceHolder in self._PlaceHolderList:
                if PlaceHolder not in PlaceHolderValues:
                    continue
                Value = PlaceHolderValues[PlaceHolder]
                if type(Value) in self._LIST_TYPES:
                    # all list-valued placeholders in one section must repeat
                    # the same number of times
                    if RepeatTime < 0:
                        RepeatTime = len(Value)
                    elif RepeatTime != len(Value):
                        EdkLogger.error(
                                    "TemplateString",
                                    PARAMETER_INVALID,
                                    "${%s} has different repeat time from others!" % PlaceHolder,
                                    ExtraData=str(self._Template)
                                    )
                    RepeatPlaceHolders["${%s}" % PlaceHolder] = Value
                else:
                    NonRepeatPlaceHolders["${%s}" % PlaceHolder] = Value
            # first substitute single-valued placeholders
            if NonRepeatPlaceHolders:
                StringList = []
                for S in self._SubSectionList:
                    if S not in NonRepeatPlaceHolders:
                        StringList.append(S)
                    else:
                        StringList.append(str(NonRepeatPlaceHolders[S]))
            else:
                StringList = self._SubSectionList
            # then expand the repeated placeholders once per list element
            if RepeatPlaceHolders:
                TempStringList = []
                for Index in range(RepeatTime):
                    for S in StringList:
                        if S not in RepeatPlaceHolders:
                            TempStringList.append(S)
                        else:
                            TempStringList.append(str(RepeatPlaceHolders[S][Index]))
                StringList = TempStringList
            return "".join(StringList)

    ## Constructor
    #
    #   @param  Template    Optional initial template text
    #
    def __init__(self, Template=None):
        self.String = ''
        self.IsBinary = False
        self._Template = Template
        self._TemplateSectionList = self._Parse(Template)

    ## str() operator
    #
    #   @retval     string  The string replaced
    #
    def __str__(self):
        return self.String

    ## Split the template string into fragments per the ${BEGIN} and ${END} flags
    #
    # gPlaceholderPattern is a module-level compiled regex (defined elsewhere
    # in this file) -- presumably matching '${name}' tokens; confirm there.
    #
    #   @retval     list    A list of TemplateString.Section objects
    #
    def _Parse(self, Template):
        SectionStart = 0
        SearchFrom = 0
        MatchEnd = 0
        PlaceHolderList = []
        TemplateSectionList = []
        while Template:
            MatchObj = gPlaceholderPattern.search(Template, SearchFrom)
            if not MatchObj:
                # no more placeholders: the rest is one final section
                if MatchEnd <= len(Template):
                    TemplateSection = TemplateString.Section(Template[SectionStart:], PlaceHolderList)
                    TemplateSectionList.append(TemplateSection)
                break
            MatchString = MatchObj.group(1)
            MatchStart = MatchObj.start()
            MatchEnd = MatchObj.end()
            if MatchString == self._REPEAT_START_FLAG:
                # ${BEGIN}: close the current section and start a repeated one
                if MatchStart > SectionStart:
                    TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
                    TemplateSectionList.append(TemplateSection)
                SectionStart = MatchEnd
                PlaceHolderList = []
            elif MatchString == self._REPEAT_END_FLAG:
                # ${END}: close the repeated section
                TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
                TemplateSectionList.append(TemplateSection)
                SectionStart = MatchEnd
                PlaceHolderList = []
            else:
                # ordinary placeholder: positions stored relative to the section
                PlaceHolderList.append((MatchString, MatchStart - SectionStart, MatchEnd - SectionStart))
            SearchFrom = MatchEnd
        return TemplateSectionList

    ## Replace the string template with dictionary of placeholders and append it to previous one
    #
    #   @param      AppendString    The string template to append
    #   @param      Dictionary      The placeholder dictionaries
    #
    def Append(self, AppendString, Dictionary=None):
        if Dictionary:
            SectionList = self._Parse(AppendString)
            self.String += "".join([S.Instantiate(Dictionary) for S in SectionList])
        else:
            self.String += AppendString

    ## Replace the string template with dictionary of placeholders
    #
    #   @param      Dictionary      The placeholder dictionaries
    #
    #   @retval     str             The string replaced with placeholder values
    #
    def Replace(self, Dictionary=None):
        return "".join([S.Instantiate(Dictionary) for S in self._TemplateSectionList])
## Progress indicator class
#
# This class makes use of thread to print progress on console.
#
class Progressor:
    # for avoiding deadloop; shared class-level state -- only one progress
    # thread is ever active for the whole process
    _StopFlag = None
    _ProgressThread = None
    _CheckInterval = 0.25

    ## Constructor
    #
    #   @param      OpenMessage     The string printed before progress characters
    #   @param      CloseMessage    The string printed after progress characters
    #   @param      ProgressChar    The character used to indicate the progress
    #   @param      Interval        The interval in seconds between two progress characters
    #
    def __init__(self, OpenMessage="", CloseMessage="", ProgressChar='.', Interval=1.0):
        self.PromptMessage = OpenMessage
        self.CodaMessage = CloseMessage
        self.ProgressChar = ProgressChar
        self.Interval = Interval
        if Progressor._StopFlag == None:
            Progressor._StopFlag = threading.Event()

    ## Start to print progress characters
    #
    #   @param      OpenMessage     The string printed before progress characters
    #
    def Start(self, OpenMessage=None):
        if OpenMessage != None:
            self.PromptMessage = OpenMessage
        Progressor._StopFlag.clear()
        if Progressor._ProgressThread == None:
            Progressor._ProgressThread = threading.Thread(target=self._ProgressThreadEntry)
            # non-daemon: the process waits for the printer thread to finish
            Progressor._ProgressThread.setDaemon(False)
            Progressor._ProgressThread.start()

    ## Stop printing progress characters
    #
    #   @param      CloseMessage    The string printed after progress characters
    #
    def Stop(self, CloseMessage=None):
        OriginalCodaMessage = self.CodaMessage
        if CloseMessage != None:
            self.CodaMessage = CloseMessage
        self.Abort()
        self.CodaMessage = OriginalCodaMessage

    ## Thread entry method: emits one ProgressChar every Interval seconds
    def _ProgressThreadEntry(self):
        sys.stdout.write(self.PromptMessage + " ")
        sys.stdout.flush()
        TimeUp = 0.0
        while not Progressor._StopFlag.isSet():
            if TimeUp <= 0.0:
                sys.stdout.write(self.ProgressChar)
                sys.stdout.flush()
                TimeUp = self.Interval
            # poll in small _CheckInterval steps so a Stop() is noticed quickly
            time.sleep(self._CheckInterval)
            TimeUp -= self._CheckInterval
        sys.stdout.write(" " + self.CodaMessage + "\n")
        sys.stdout.flush()

    ## Abort the progress display
    @staticmethod
    def Abort():
        if Progressor._StopFlag != None:
            Progressor._StopFlag.set()
        if Progressor._ProgressThread != None:
            Progressor._ProgressThread.join()
            Progressor._ProgressThread = None
## A dict which can access its keys and/or values orderly
#
# The class implements a new kind of dict which its keys or values can be
# accessed in the order they are added into the dict. It guarantees the order
# by making use of an internal list to keep a copy of keys.
#
class sdict(IterableUserDict):
    ## Constructor
    def __init__(self):
        IterableUserDict.__init__(self)
        self._key_list = []     # keys in insertion order

    ## [] operator
    def __setitem__(self, key, value):
        if key not in self._key_list:
            self._key_list.append(key)
        IterableUserDict.__setitem__(self, key, value)

    ## del operator
    def __delitem__(self, key):
        self._key_list.remove(key)
        IterableUserDict.__delitem__(self, key)

    ## used in "for k in dict" loop to ensure the correct order
    def __iter__(self):
        return self.iterkeys()

    ## len() support
    def __len__(self):
        return len(self._key_list)

    ## "in" test support
    def __contains__(self, key):
        return key in self._key_list

    ## indexof support
    def index(self, key):
        return self._key_list.index(key)

    ## insert a new (key, value) immediately BEFORE or AFTER an existing key
    def insert(self, key, newkey, newvalue, order):
        index = self._key_list.index(key)
        if order == 'BEFORE':
            self._key_list.insert(index, newkey)
            IterableUserDict.__setitem__(self, newkey, newvalue)
        elif order == 'AFTER':
            self._key_list.insert(index + 1, newkey)
            IterableUserDict.__setitem__(self, newkey, newvalue)

    ## append another sdict's entries, preserving their order
    def append(self, sdict):
        for key in sdict:
            if key not in self._key_list:
                self._key_list.append(key)
            IterableUserDict.__setitem__(self, key, sdict[key])

    def has_key(self, key):
        return key in self._key_list

    ## Empty the dict
    def clear(self):
        self._key_list = []
        IterableUserDict.clear(self)

    ## Return a copy of keys
    def keys(self):
        keys = []
        for key in self._key_list:
            keys.append(key)
        return keys

    ## Return a copy of values
    def values(self):
        values = []
        for key in self._key_list:
            values.append(self[key])
        return values

    ## Return a copy of (key, value) list
    def items(self):
        items = []
        for key in self._key_list:
            items.append((key, self[key]))
        return items

    ## Iteration support
    def iteritems(self):
        return iter(self.items())

    ## Keys iteration support
    def iterkeys(self):
        return iter(self.keys())

    ## Values iteration support
    def itervalues(self):
        return iter(self.values())

    ## Return value related to a key, and remove the (key, value) from the dict
    def pop(self, key, *dv):
        value = None
        if key in self._key_list:
            value = self[key]
            self.__delitem__(key)
        elif len(dv) != 0 :
            # Fix: the default must come from the *dv argument tuple; the
            # original referenced an undefined name ('kv[0]') and raised
            # NameError whenever the default was actually needed.
            value = dv[0]
        return value

    ## Return (key, value) pair, and remove the (key, value) from the dict
    def popitem(self):
        key = self._key_list[-1]
        value = self[key]
        self.__delitem__(key)
        return key, value

    ## dict.update-compatible bulk assignment (keeps insertion order)
    def update(self, dict=None, **kwargs):
        if dict != None:
            for k, v in dict.items():
                self[k] = v
        if len(kwargs):
            for k, v in kwargs.items():
                self[k] = v
## Dictionary with restricted keys
#
# The set of keys is fixed at construction time: assigning an unknown key or
# removing any entry is reported through EdkLogger; entries may only be reset
# to an empty string.
#
class rdict(dict):
    ## Constructor: every allowed key starts out mapped to an empty string
    def __init__(self, KeyList):
        dict.__init__(self, [(Key, "") for Key in KeyList])

    ## []= operator: only keys fixed at construction time are accepted
    def __setitem__(self, key, value):
        if key not in self:
            EdkLogger.error("RestrictedDict", ATTRIBUTE_SET_FAILURE, "Key [%s] is not allowed" % key,
                            ExtraData=", ".join(dict.keys(self)))
        dict.__setitem__(self, key, value)

    ## =[] operator: unknown keys read as an empty string
    def __getitem__(self, key):
        return dict.__getitem__(self, key) if key in self else ""

    ## del operator is not permitted
    def __delitem__(self, key):
        EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="del")

    ## Reset every entry to an empty string (keys are kept)
    def clear(self):
        for Key in self:
            self[Key] = ""

    ## pop is not permitted on a restricted dictionary
    def pop(self, key, *dv):
        EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="pop")

    ## popitem is not permitted on a restricted dictionary
    def popitem(self):
        EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="popitem")
## Dictionary using prioritized list as key
#
# A fixed-depth (_Level_) nested dictionary. Lookup keys are lists/tuples of
# per-level keys; 'COMMON' (or any alias in _ValidWildcardList) acts as a
# wildcard at that level. In single mode the first match is returned, in
# greedy mode all matches are collected.
#
class tdict:
    _ListType = type([])
    _TupleType = type(())
    _Wildcard = 'COMMON'
    _ValidWildcardList = ['COMMON', 'DEFAULT', 'ALL', '*', 'PLATFORM']

    def __init__(self, _Single_=False, _Level_=2):
        self._Level_ = _Level_      # number of nested key levels
        self.data = {}
        self._Single_ = _Single_    # True: first match; False: all matches

    # =[] operator
    def __getitem__(self, key):
        KeyType = type(key)
        RestKeys = None
        if KeyType == self._ListType or KeyType == self._TupleType:
            FirstKey = key[0]
            if len(key) > 1:
                RestKeys = key[1:]
            elif self._Level_ > 1:
                # too few keys given: pad the remaining levels with wildcards
                RestKeys = [self._Wildcard for i in range(0, self._Level_-1)]
        else:
            FirstKey = key
            if self._Level_ > 1:
                RestKeys = [self._Wildcard for i in range(0, self._Level_-1)]
        # None or any wildcard alias is canonicalized to _Wildcard
        if FirstKey == None or str(FirstKey).upper() in self._ValidWildcardList:
            FirstKey = self._Wildcard
        if self._Single_:
            return self._GetSingleValue(FirstKey, RestKeys)
        else:
            return self._GetAllValues(FirstKey, RestKeys)

    ## Return the first value matching (FirstKey, RestKeys), or None
    def _GetSingleValue(self, FirstKey, RestKeys):
        Value = None
        #print "%s-%s" % (FirstKey, self._Level_) ,
        if self._Level_ > 1:
            if FirstKey == self._Wildcard:
                # wildcard: try the wildcard slot first, then any other key
                if FirstKey in self.data:
                    Value = self.data[FirstKey][RestKeys]
                if Value == None:
                    for Key in self.data:
                        Value = self.data[Key][RestKeys]
                        if Value != None: break
            else:
                # exact key first, wildcard slot as fallback
                if FirstKey in self.data:
                    Value = self.data[FirstKey][RestKeys]
                if Value == None and self._Wildcard in self.data:
                    #print "Value=None"
                    Value = self.data[self._Wildcard][RestKeys]
        else:
            if FirstKey == self._Wildcard:
                if FirstKey in self.data:
                    Value = self.data[FirstKey]
                if Value == None:
                    for Key in self.data:
                        Value = self.data[Key]
                        if Value != None: break
            else:
                if FirstKey in self.data:
                    Value = self.data[FirstKey]
                elif self._Wildcard in self.data:
                    Value = self.data[self._Wildcard]
        return Value

    ## Return a list of all values matching (FirstKey, RestKeys)
    def _GetAllValues(self, FirstKey, RestKeys):
        Value = []
        if self._Level_ > 1:
            if FirstKey == self._Wildcard:
                for Key in self.data:
                    Value += self.data[Key][RestKeys]
            else:
                if FirstKey in self.data:
                    Value += self.data[FirstKey][RestKeys]
                if self._Wildcard in self.data:
                    Value += self.data[self._Wildcard][RestKeys]
        else:
            if FirstKey == self._Wildcard:
                for Key in self.data:
                    Value.append(self.data[Key])
            else:
                if FirstKey in self.data:
                    Value.append(self.data[FirstKey])
                if self._Wildcard in self.data:
                    Value.append(self.data[self._Wildcard])
        return Value

    ## []= operator
    def __setitem__(self, key, value):
        KeyType = type(key)
        RestKeys = None
        if KeyType == self._ListType or KeyType == self._TupleType:
            FirstKey = key[0]
            if len(key) > 1:
                RestKeys = key[1:]
            else:
                RestKeys = [self._Wildcard for i in range(0, self._Level_-1)]
        else:
            FirstKey = key
            if self._Level_ > 1:
                RestKeys = [self._Wildcard for i in range(0, self._Level_-1)]
        if FirstKey in self._ValidWildcardList:
            FirstKey = self._Wildcard
        # lazily create the nested tdict for the next level
        if FirstKey not in self.data and self._Level_ > 0:
            self.data[FirstKey] = tdict(self._Single_, self._Level_ - 1)
        if self._Level_ > 1:
            self.data[FirstKey][RestKeys] = value
        else:
            self.data[FirstKey] = value

    ## Switch this dict (and all nested levels) to collect-all lookups
    def SetGreedyMode(self):
        self._Single_ = False
        if self._Level_ > 1:
            for Key in self.data:
                self.data[Key].SetGreedyMode()

    ## Switch this dict (and all nested levels) to first-match lookups
    def SetSingleMode(self):
        self._Single_ = True
        if self._Level_ > 1:
            for Key in self.data:
                self.data[Key].SetSingleMode()

    ## Collect the set of keys used at nesting depth KeyIndex
    def GetKeys(self, KeyIndex=0):
        assert KeyIndex >= 0
        if KeyIndex == 0:
            return set(self.data.keys())
        else:
            keys = set()
            for Key in self.data:
                keys |= self.data[Key].GetKeys(KeyIndex - 1)
            return keys
## Boolean chain list
#
# A list whose assigned items are coerced to True/False and whose Result
# property ANDs every element together.
#
class Blist(UserList):
    ## Constructor
    def __init__(self, initlist=None):
        UserList.__init__(self, initlist)

    ## []= operator: coerce assigned values to True/False
    def __setitem__(self, i, item):
        if item not in (True, False):
            item = True if item else False
        self.data[i] = item

    ## AND all elements of the list together
    def _GetResult(self):
        Outcome = True
        for Flag in self.data:
            Outcome &= Flag
        return Outcome
    Result = property(_GetResult)
## Filter an EFI build console log down to the module names it mentions
#
# Reads Filename and writes Filename + '.New' containing, for every input
# line that mentions '.efi', the text between the last space and the '.efi'
# suffix (i.e. the module base name, extension stripped).
#
#   @param      Filename    Path of the console log to filter
#
def ParseConsoleLog(Filename):
    # context managers close both handles even on error (original leaked them)
    with open(os.path.normpath(Filename), 'r') as Opr, \
         open(os.path.normpath(Filename + '.New'), 'w+') as Opw:
        for Line in Opr:
            if Line.find('.efi') > -1:
                # keep only the token between the last space and '.efi'
                Line = Line[Line.rfind(' ') : Line.rfind('.efi')].strip()
                Opw.write('%s\n' % Line)
## AnalyzePcdData
#
# Split a PCD setting string of the form "value|datum type|token number"
# while protecting a quoted value that itself contains the '|' character.
#
#   @param[in]  Setting:    String with value/datum type/token number fields
#
#   @retval     ValueList:  A list [value, datum type, token number]
#
def AnalyzePcdData(Setting):
    Fields = ['', '', '']
    QuotedRe = re.compile(r'^\s*L?\".*\|.*\"')
    Quoted = QuotedRe.findall(Setting)
    # remove a quoted value containing '|' before splitting, restore it after
    if len(Quoted) >= 1:
        Setting = re.sub(QuotedRe, '', Setting)
    Parts = Setting.split(TAB_VALUE_SPLIT)
    Fields[0:len(Parts)] = Parts
    if len(Quoted) >= 1:
        Fields[0] = Quoted[0]
    return Fields
## AnalyzeHiiPcdData
#
# Split a HII PCD setting string into variable name, variable GUID, variable
# offset and default value, protecting a quoted value containing '|'.
#
#   @param[in]  Setting:    String with VariableName, VariableGuid,
#                           VariableOffset, DefaultValue information
#
#   @retval     ValueList:  A list [VariableName, VariableGuid,
#                           VariableOffset, DefaultValue]
#
def AnalyzeHiiPcdData(Setting):
    Fields = ['', '', '', '']
    QuotedRe = re.compile(r'^\s*L?\".*\|.*\"')
    Quoted = QuotedRe.findall(Setting)
    # remove a quoted value containing '|' before splitting, restore it after
    if len(Quoted) >= 1:
        Setting = re.sub(QuotedRe, '', Setting)
    Parts = Setting.split(TAB_VALUE_SPLIT)
    Fields[0:len(Parts)] = Parts
    if len(Quoted) >= 1:
        Fields[0] = Quoted[0]
    return Fields
## AnalyzeVpdPcdData
#
# Split a VPD PCD setting string of the form "offset|size|value" while
# protecting a trailing quoted value that itself contains '|'.
#
#   @param[in]  Setting:    String with offset/size/value fields
#
#   @retval     ValueList:  A list [offset, size, value]
#
def AnalyzeVpdPcdData(Setting):
    Fields = ['', '', '']
    # VPD values sit at the END of the setting, hence the '$'-anchored pattern
    QuotedRe = re.compile(r'\s*L?\".*\|.*\"\s*$')
    Quoted = QuotedRe.findall(Setting)
    if len(Quoted) >= 1:
        Setting = re.sub(QuotedRe, '', Setting)
    Parts = Setting.split(TAB_VALUE_SPLIT)
    Fields[0:len(Parts)] = Parts
    if len(Quoted) >= 1:
        # the protected quoted string is the value (third) field
        Fields[2] = Quoted[0]
    return Fields
## Check format of a PCD value against its datum type
#
#   @param      Type    Datum type string ("VOID*", "BOOLEAN", or an integer type)
#   @param      Value   The PCD value string to validate
#
#   @retval     (True, "") when the value is well-formed
#   @retval     (False, message) describing the expected format otherwise
#
def CheckPcdDatum(Type, Value):
    if Type == "VOID*":
        # byte array {...}, ASCII string "...", or unicode string L"..."
        if not (((Value.startswith('L"') or Value.startswith('"')) and Value.endswith('"'))
                or (Value.startswith('{') and Value.endswith('}'))
               ):
            return False, "Invalid value [%s] of type [%s]; must be in the form of {...} for array"\
                          ", or \"...\" for string, or L\"...\" for unicode string" % (Value, Type)
    elif Type == 'BOOLEAN':
        if Value not in ['TRUE', 'True', 'true', '0x1', '0x01', '1', 'FALSE', 'False', 'false', '0x0', '0x00', '0']:
            return False, "Invalid value [%s] of type [%s]; must be one of TRUE, True, true, 0x1, 0x01, 1"\
                          ", FALSE, False, false, 0x0, 0x00, 0" % (Value, Type)
    elif type(Value) == type(""):
        try:
            # int(x, 0) accepts hex/octal/decimal; Python promotes big values
            # automatically, so the Python2-only long() is not needed
            Value = int(Value, 0)
        except ValueError:
            return False, "Invalid value [%s] of type [%s];"\
                          " must be a hexadecimal, decimal or octal in C language format."\
                          % (Value, Type)
    return True, ""
## Split a command line option string to a list
#
# subprocess.Popen needs the args to be a sequence. Otherwise there's problem
# in non-windows platform to launch command. Options are recognized as '/'
# or '-' immediately following whitespace; quoted regions are never split.
#
#   @param      OptionString    The raw option string
#
#   @retval     list            The option fragments
#
def SplitOption(OptionString):
    Pieces = []
    Previous = " "
    Start = 0
    OpenQuote = ""
    for Position, Char in enumerate(OptionString):
        if Char in ('"', "'"):
            # track the active quote; quote characters never update Previous
            if OpenQuote == Char:
                OpenQuote = ""
            elif OpenQuote == "":
                OpenQuote = Char
            continue
        if OpenQuote:
            continue
        if Char in ("/", "-") and Previous in (" ", "\t", "\r", "\n"):
            if Position > Start:
                # NB: the slice deliberately stops at Position-1, dropping the
                # single whitespace character before the new option
                Pieces.append(OptionString[Start:Position-1])
            Start = Position
        Previous = Char
    Pieces.append(OptionString[Start:])
    return Pieces
## Return the longest common leading path of a list of paths
#
#   @param      PathList    Non-empty list of path strings
#
#   @retval     str         Shared leading components joined by os.path.sep
#
def CommonPath(PathList):
    # min()/max() give the lexicographically smallest/largest paths; any
    # leading component shared by those two is shared by every path between
    P1 = min(PathList).split(os.path.sep)
    P2 = max(PathList).split(os.path.sep)
    # range() replaces the Python2-only xrange(); component counts are tiny
    for Index in range(min(len(P1), len(P2))):
        if P1[Index] != P2[Index]:
            return os.path.sep.join(P1[:Index])
    return os.path.sep.join(P1)
class PathClass(object):
def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
Arch='COMMON', ToolChainFamily='', Target='', TagName='', ToolCode=''):
self.Arch = Arch
self.File = str(File)
if os.path.isabs(self.File):
self.Root = ''
self.AlterRoot = ''
else:
self.Root = str(Root)
self.AlterRoot = str(AlterRoot)
# Remove any '.' and '..' in path
if self.Root:
self.Path = os.path.normpath(os.path.join(self.Root, self.File))
self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
# eliminate the side-effect of 'C:'
if self.Root[-1] == ':':
self.Root += os.path.sep
# file path should not start with path separator
if self.Root[-1] == os.path.sep:
self.File = self.Path[len(self.Root):]
else:
self.File = self.Path[len(self.Root)+1:]
else:
self.Path = os.path.normpath(self.File)
self.SubDir, self.Name = os.path.split(self.File)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.Root:
if self.SubDir:
self.Dir = os.path.join(self.Root, self.SubDir)
else:
self.Dir = self.Root
else:
self.Dir = self.SubDir
if IsBinary:
self.Type = Type
else:
self.Type = self.Ext.lower()
self.IsBinary = IsBinary
self.Target = Target
self.TagName = TagName
self.ToolCode = ToolCode
self.ToolChainFamily = ToolChainFamily
self._Key = None
## Convert the object of this class to a string
#
# Convert member Path of the class to a string
#
# @retval string Formatted String
#
def __str__(self):
return self.Path
## Override __eq__ function
#
# Check whether PathClass are the same
#
# @retval False The two PathClass are different
# @retval True The two PathClass are the same
#
def __eq__(self, Other):
if type(Other) == type(self):
return self.Path == Other.Path
else:
return self.Path == str(Other)
## Override __cmp__ function
#
# Customize the comparsion operation of two PathClass
#
# @retval 0 The two PathClass are different
# @retval -1 The first PathClass is less than the second PathClass
# @retval 1 The first PathClass is Bigger than the second PathClass
def __cmp__(self, Other):
if type(Other) == type(self):
OtherKey = Other.Path
else:
OtherKey = str(Other)
SelfKey = self.Path
if SelfKey == OtherKey:
return 0
elif SelfKey > OtherKey:
return 1
else:
return -1
## Override __hash__ function
#
# Use Path as key in hash table
#
# @retval string Key for hash table
#
def __hash__(self):
return hash(self.Path)
def _GetFileKey(self):
if self._Key == None:
self._Key = self.Path.upper() # + self.ToolChainFamily + self.TagName + self.ToolCode + self.Target
return self._Key
def _GetTimeStamp(self):
return os.stat(self.Path)[8]
    def Validate(self, Type='', CaseSensitive=True):
        """Check that the file exists on disk, optionally verifying type and case.

        @param Type           Expected file type/extension; '' skips the check
        @param CaseSensitive  Whether the on-disk spelling must match exactly
        @retval (ErrorCode, ErrorInfo) - ErrorCode is 0 on success
        """
        # A global switch can force case-insensitive handling for all paths.
        if GlobalData.gCaseInsensitive:
            CaseSensitive = False
        if Type and Type.lower() != self.Type:
            return FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % (self.File, Type, self.Type)
        # Resolve the real (on-disk) spelling of the root and the file.
        RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
        if not RealRoot and not RealFile:
            # File not found: rebuild the most descriptive path for the error.
            RealFile = self.File
            if self.AlterRoot:
                RealFile = os.path.join(self.AlterRoot, self.File)
            elif self.Root:
                RealFile = os.path.join(self.Root, self.File)
            # NOTE(review): joining self.AlterRoot again looks suspicious - it
            # relies on AlterRoot being '' when unset (join would raise on
            # None), and when AlterRoot is set it is already part of RealFile.
            # Confirm the intended error path before changing.
            return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
        ErrorCode = 0
        ErrorInfo = ''
        if RealRoot != self.Root or RealFile != self.File:
            # The on-disk spelling differs from the configured one.
            if CaseSensitive and (RealFile != self.File or (RealRoot != self.Root and RealRoot != self.AlterRoot)):
                ErrorCode = FILE_CASE_MISMATCH
                ErrorInfo = self.File + '\n\t' + RealFile + " [in file system]"
            # Re-derive all cached path components from the real spelling.
            self.SubDir, self.Name = os.path.split(RealFile)
            self.BaseName, self.Ext = os.path.splitext(self.Name)
            if self.SubDir:
                self.Dir = os.path.join(RealRoot, self.SubDir)
            else:
                self.Dir = RealRoot
            self.File = RealFile
            self.Root = RealRoot
            self.Path = os.path.join(RealRoot, RealFile)
        return ErrorCode, ErrorInfo
Key = property(_GetFileKey)
TimeStamp = property(_GetTimeStamp)
## Parse PE image to get the required PE information.
#
class PeImageClass():
    """Minimal PE/COFF image parser.

    Reads just enough of a PE file to expose its entry point, section
    alignment, image size and section headers.  On any parse failure the
    instance is left with IsValid == False and ErrorInfo describing why.
    """
    ## Constructor
    #
    #   @param  File FilePath of PeImage
    #
    def __init__(self, PeFile):
        self.FileName = PeFile
        self.IsValid = False
        self.Size = 0
        self.EntryPoint = 0
        self.SectionAlignment = 0
        self.SectionHeaderList = []
        self.ErrorInfo = ''
        try:
            PeObject = open(PeFile, 'rb')
        except:
            self.ErrorInfo = self.FileName + ' can not be found\n'
            return
        # Read DOS header
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, 0x3E)
        ByteList = ByteArray.tolist()
        # DOS signature should be 'MZ'
        if self._ByteListToStr (ByteList[0x0:0x2]) != 'MZ':
            self.ErrorInfo = self.FileName + ' has no valid DOS signature MZ'
            return
        # Read 4 byte PE Signature
        # NOTE(review): e_lfanew is a 4-byte field at offset 0x3C, but only the
        # two bytes read above (0x3C..0x3D) are used here; adequate for typical
        # small DOS stubs - confirm for images with large header offsets.
        PeOffset = self._ByteListToInt(ByteList[0x3C:0x3E])
        PeObject.seek(PeOffset)
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, 4)
        # PE signature should be 'PE\0\0'
        # NOTE(review): array.tostring() compared against a str implies this
        # module targets Python 2; under Python 3 tostring() is gone/returns
        # bytes and this comparison would never match - verify the runtime.
        if ByteArray.tostring() != 'PE\0\0':
            self.ErrorInfo = self.FileName + ' has no valid PE signature PE00'
            return
        # Read PE file header
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, 0x14)
        ByteList = ByteArray.tolist()
        SecNumber = self._ByteListToInt(ByteList[0x2:0x4])
        if SecNumber == 0:
            self.ErrorInfo = self.FileName + ' has no section header'
            return
        # Read PE optional header
        OptionalHeaderSize = self._ByteListToInt(ByteArray[0x10:0x12])
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, OptionalHeaderSize)
        ByteList = ByteArray.tolist()
        self.EntryPoint = self._ByteListToInt(ByteList[0x10:0x14])
        self.SectionAlignment = self._ByteListToInt(ByteList[0x20:0x24])
        self.Size = self._ByteListToInt(ByteList[0x38:0x3C])
        # Read each Section Header (0x28 bytes apiece, SecNumber of them).
        for Index in range(SecNumber):
            ByteArray = array.array('B')
            ByteArray.fromfile(PeObject, 0x28)
            ByteList = ByteArray.tolist()
            SecName = self._ByteListToStr(ByteList[0:8])
            SecVirtualSize = self._ByteListToInt(ByteList[8:12])
            SecRawAddress = self._ByteListToInt(ByteList[20:24])
            SecVirtualAddress = self._ByteListToInt(ByteList[12:16])
            self.SectionHeaderList.append((SecName, SecVirtualAddress, SecRawAddress, SecVirtualSize))
        self.IsValid = True
        PeObject.close()
    def _ByteListToStr(self, ByteList):
        # Build an ASCII string from byte values, stopping at the first NUL.
        String = ''
        for index in range(len(ByteList)):
            if ByteList[index] == 0:
                break
            String += chr(ByteList[index])
        return String
    def _ByteListToInt(self, ByteList):
        # Interpret the byte list as a little-endian unsigned integer.
        Value = 0
        for index in range(len(ByteList) - 1, -1, -1):
            Value = (Value << 8) | int(ByteList[index])
        return Value
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
    # Nothing to do when run directly; this module only provides library helpers.
    pass
| 34.422661 | 116 | 0.583065 |
722bbdfbb03aa9447a2da5f3e0804a638b51a358 | 13,382 | py | Python | sdk/python/pulumi_azure_native/databoxedge/v20201201/bandwidth_schedule.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/databoxedge/v20201201/bandwidth_schedule.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/databoxedge/v20201201/bandwidth_schedule.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = ['BandwidthScheduleArgs', 'BandwidthSchedule']
# NOTE: generated by the Pulumi SDK Generator (see the file header) - prefer
# fixing the generator over hand-editing; @pulumi.input_type introspects the
# property pairs below to build the resource's input schema.
@pulumi.input_type
class BandwidthScheduleArgs:
    def __init__(__self__, *,
                 days: pulumi.Input[Sequence[pulumi.Input[Union[str, 'DayOfWeek']]]],
                 device_name: pulumi.Input[str],
                 rate_in_mbps: pulumi.Input[int],
                 resource_group_name: pulumi.Input[str],
                 start: pulumi.Input[str],
                 stop: pulumi.Input[str],
                 name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a BandwidthSchedule resource.
        :param pulumi.Input[Sequence[pulumi.Input[Union[str, 'DayOfWeek']]]] days: The days of the week when this schedule is applicable.
        :param pulumi.Input[str] device_name: The device name.
        :param pulumi.Input[int] rate_in_mbps: The bandwidth rate in Mbps.
        :param pulumi.Input[str] resource_group_name: The resource group name.
        :param pulumi.Input[str] start: The start time of the schedule in UTC.
        :param pulumi.Input[str] stop: The stop time of the schedule in UTC.
        :param pulumi.Input[str] name: The bandwidth schedule name which needs to be added/updated.
        """
        # Required inputs are always registered; the optional name only when given.
        pulumi.set(__self__, "days", days)
        pulumi.set(__self__, "device_name", device_name)
        pulumi.set(__self__, "rate_in_mbps", rate_in_mbps)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "start", start)
        pulumi.set(__self__, "stop", stop)
        if name is not None:
            pulumi.set(__self__, "name", name)
    # Each input is a getter/setter pair; pulumi.get/pulumi.set store values in
    # the runtime's input map rather than in plain instance attributes.
    @property
    @pulumi.getter
    def days(self) -> pulumi.Input[Sequence[pulumi.Input[Union[str, 'DayOfWeek']]]]:
        """
        The days of the week when this schedule is applicable.
        """
        return pulumi.get(self, "days")
    @days.setter
    def days(self, value: pulumi.Input[Sequence[pulumi.Input[Union[str, 'DayOfWeek']]]]):
        pulumi.set(self, "days", value)
    @property
    @pulumi.getter(name="deviceName")
    def device_name(self) -> pulumi.Input[str]:
        """
        The device name.
        """
        return pulumi.get(self, "device_name")
    @device_name.setter
    def device_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "device_name", value)
    @property
    @pulumi.getter(name="rateInMbps")
    def rate_in_mbps(self) -> pulumi.Input[int]:
        """
        The bandwidth rate in Mbps.
        """
        return pulumi.get(self, "rate_in_mbps")
    @rate_in_mbps.setter
    def rate_in_mbps(self, value: pulumi.Input[int]):
        pulumi.set(self, "rate_in_mbps", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The resource group name.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter
    def start(self) -> pulumi.Input[str]:
        """
        The start time of the schedule in UTC.
        """
        return pulumi.get(self, "start")
    @start.setter
    def start(self, value: pulumi.Input[str]):
        pulumi.set(self, "start", value)
    @property
    @pulumi.getter
    def stop(self) -> pulumi.Input[str]:
        """
        The stop time of the schedule in UTC.
        """
        return pulumi.get(self, "stop")
    @stop.setter
    def stop(self, value: pulumi.Input[str]):
        pulumi.set(self, "stop", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The bandwidth schedule name which needs to be added/updated.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
class BandwidthSchedule(pulumi.CustomResource):
    # NOTE: generated resource class.  The two typing overloads below document
    # the keyword form and the args-object form; the real __init__ dispatches
    # between them via _utilities.get_resource_args_opts.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 days: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'DayOfWeek']]]]] = None,
                 device_name: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 rate_in_mbps: Optional[pulumi.Input[int]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 start: Optional[pulumi.Input[str]] = None,
                 stop: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        The bandwidth schedule details.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Sequence[pulumi.Input[Union[str, 'DayOfWeek']]]] days: The days of the week when this schedule is applicable.
        :param pulumi.Input[str] device_name: The device name.
        :param pulumi.Input[str] name: The bandwidth schedule name which needs to be added/updated.
        :param pulumi.Input[int] rate_in_mbps: The bandwidth rate in Mbps.
        :param pulumi.Input[str] resource_group_name: The resource group name.
        :param pulumi.Input[str] start: The start time of the schedule in UTC.
        :param pulumi.Input[str] stop: The stop time of the schedule in UTC.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: BandwidthScheduleArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        The bandwidth schedule details.
        :param str resource_name: The name of the resource.
        :param BandwidthScheduleArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Decide which overload the caller used and forward accordingly.
        resource_args, opts = _utilities.get_resource_args_opts(BandwidthScheduleArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 days: Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'DayOfWeek']]]]] = None,
                 device_name: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 rate_in_mbps: Optional[pulumi.Input[int]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 start: Optional[pulumi.Input[str]] = None,
                 stop: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (not looking up an existing one):
            # required properties must be present unless rehydrating from a URN.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = BandwidthScheduleArgs.__new__(BandwidthScheduleArgs)
            if days is None and not opts.urn:
                raise TypeError("Missing required property 'days'")
            __props__.__dict__["days"] = days
            if device_name is None and not opts.urn:
                raise TypeError("Missing required property 'device_name'")
            __props__.__dict__["device_name"] = device_name
            __props__.__dict__["name"] = name
            if rate_in_mbps is None and not opts.urn:
                raise TypeError("Missing required property 'rate_in_mbps'")
            __props__.__dict__["rate_in_mbps"] = rate_in_mbps
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            if start is None and not opts.urn:
                raise TypeError("Missing required property 'start'")
            __props__.__dict__["start"] = start
            if stop is None and not opts.urn:
                raise TypeError("Missing required property 'stop'")
            __props__.__dict__["stop"] = stop
            # Output-only properties start unset; the provider fills them in.
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
        # Aliases let the engine match this resource under the other API-version type tokens.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:databoxedge/v20201201:BandwidthSchedule"), pulumi.Alias(type_="azure-native:databoxedge:BandwidthSchedule"), pulumi.Alias(type_="azure-nextgen:databoxedge:BandwidthSchedule"), pulumi.Alias(type_="azure-native:databoxedge/v20190301:BandwidthSchedule"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190301:BandwidthSchedule"), pulumi.Alias(type_="azure-native:databoxedge/v20190701:BandwidthSchedule"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190701:BandwidthSchedule"), pulumi.Alias(type_="azure-native:databoxedge/v20190801:BandwidthSchedule"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190801:BandwidthSchedule"), pulumi.Alias(type_="azure-native:databoxedge/v20200501preview:BandwidthSchedule"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200501preview:BandwidthSchedule"), pulumi.Alias(type_="azure-native:databoxedge/v20200901:BandwidthSchedule"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901:BandwidthSchedule"), pulumi.Alias(type_="azure-native:databoxedge/v20200901preview:BandwidthSchedule"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901preview:BandwidthSchedule"), pulumi.Alias(type_="azure-native:databoxedge/v20210201preview:BandwidthSchedule"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20210201preview:BandwidthSchedule")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(BandwidthSchedule, __self__).__init__(
            'azure-native:databoxedge/v20201201:BandwidthSchedule',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'BandwidthSchedule':
        """
        Get an existing BandwidthSchedule resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; they resolve from provider state.
        __props__ = BandwidthScheduleArgs.__new__(BandwidthScheduleArgs)
        __props__.__dict__["days"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["rate_in_mbps"] = None
        __props__.__dict__["start"] = None
        __props__.__dict__["stop"] = None
        __props__.__dict__["system_data"] = None
        __props__.__dict__["type"] = None
        return BandwidthSchedule(resource_name, opts=opts, __props__=__props__)
    # Read-only outputs of the provisioned resource.
    @property
    @pulumi.getter
    def days(self) -> pulumi.Output[Sequence[str]]:
        """
        The days of the week when this schedule is applicable.
        """
        return pulumi.get(self, "days")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The object name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="rateInMbps")
    def rate_in_mbps(self) -> pulumi.Output[int]:
        """
        The bandwidth rate in Mbps.
        """
        return pulumi.get(self, "rate_in_mbps")
    @property
    @pulumi.getter
    def start(self) -> pulumi.Output[str]:
        """
        The start time of the schedule in UTC.
        """
        return pulumi.get(self, "start")
    @property
    @pulumi.getter
    def stop(self) -> pulumi.Output[str]:
        """
        The stop time of the schedule in UTC.
        """
        return pulumi.get(self, "stop")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """
        Bandwidth object related to ASE resource
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The hierarchical type of the object.
        """
        return pulumi.get(self, "type")
| 43.448052 | 1,376 | 0.643252 |
845b4589b0e82129d05ff74eb00dadd9b7a18a60 | 5,981 | py | Python | mercury_engine_data_structures/formats/bmsad.py | Antidote/mercury-engine-data-structures | d8e8ba1eacaa37f4fc76b78bb208ffc2cde61f64 | [
"MIT"
] | null | null | null | mercury_engine_data_structures/formats/bmsad.py | Antidote/mercury-engine-data-structures | d8e8ba1eacaa37f4fc76b78bb208ffc2cde61f64 | [
"MIT"
] | null | null | null | mercury_engine_data_structures/formats/bmsad.py | Antidote/mercury-engine-data-structures | d8e8ba1eacaa37f4fc76b78bb208ffc2cde61f64 | [
"MIT"
] | 2 | 2021-11-07T13:42:13.000Z | 2022-01-08T06:00:40.000Z | import construct
from construct.core import (
Array, Byte, Const, Construct, Flag, Float32l, Hex, Int16ul, Int32ul, PrefixedArray, Struct, Switch,
)
from mercury_engine_data_structures import common_types, type_lib
from mercury_engine_data_structures.common_types import Float, StrId, make_dict, make_vector
from mercury_engine_data_structures.construct_extensions.alignment import PrefixedAllowZeroLen
from mercury_engine_data_structures.construct_extensions.misc import ErrorWithMessage
from mercury_engine_data_structures.formats import BaseResource, dread_types
from mercury_engine_data_structures.formats.property_enum import PropertyEnum
from mercury_engine_data_structures.game_check import Game
# Single ASCII character used as the type tag of a function argument.
Char = construct.PaddedString(1, 'ascii')
# One function argument: a one-character type tag followed by a value whose
# wire format depends on that tag (string / float / bool / uint32).
FunctionArgument = Struct(
    type=Char,
    value=Switch(
        construct.this.type,
        {
            's': StrId,
            'f': Float,
            'b': Flag,
            'i': Int32ul,
        },
        ErrorWithMessage(lambda ctx: f"Unknown argument type: {ctx.type}", construct.SwitchError)
    )
)
# Vector of named function calls, each with a dict of keyword arguments.
Functions = make_vector(Struct(
    name=StrId,
    unk=Int16ul,
    params=common_types.DictAdapter(common_types.make_vector(
        common_types.DictElement(FunctionArgument, key=PropertyEnum)
    )),
))
# Every construct type exported by dread_types, keyed by its type name.
fieldtypes = {k: v for k, v in vars(dread_types).items() if isinstance(v, construct.Construct)}
def find_charclass_for_type(type_name: str):
    """Resolve the field-type name used to parse a component's property block.

    "CActorComponent" maps directly to "CActorComponentDef"; any other name is
    tried as its "CCharClass..." variant, falling back to the parent type when
    that variant is not a known field type.
    """
    if type_name == "CActorComponent":
        return "CActorComponentDef"
    candidate = "CCharClass" + type_name[1:]
    if candidate in fieldtypes:
        return candidate
    # Unknown at this level: walk up the type hierarchy and retry.
    parent = type_lib.get_parent_for(type_name)
    return find_charclass_for_type(parent)
def Dependencies():
    """Build the Switch that parses per-component extra dependency payloads.

    Component types missing from the mapping contribute no dependency data:
    the Switch key function returns None for them.
    """
    component_dependencies = {
        "CFXComponent": make_vector(Struct(
            "file" / StrId,
            "unk1" / Int32ul,
            "unk2" / Int32ul,
            "unk3" / Byte
        )),
        "CCollisionComponent": Struct(
            "file" / StrId,
            "unk" / Int16ul
        ),
        "CGrabComponent": make_vector(Struct(
            "unk1" / StrId,
            "unk2" / StrId,
            "unk3" / StrId,
            "unk4" / Float32l,
            "unk5" / Byte,
            "unk6" / Byte,
            "unk7" / Int16ul,
            "unk8" / Array(2, Struct(
                "unk2" / Int16ul,
                "unk1" / Array(8, Float32l),
            )),
        )),
        "CBillboardComponent": Struct(
            "id1" / StrId,
            "unk1" / make_vector(Struct(
                "id" / StrId,
                "unk1" / Array(3, Int32ul),
                "unk2" / Byte,
                "unk3" / Array(2, Int32ul),
                "unk4" / Float32l
            )),
            "id2" / StrId,
            "unk2" / make_vector(Struct(
                "id" / StrId,
                "unk1" / Byte,
                "unk2" / Array(4, Int32ul)
            )),
        ),
        "CSwarmControllerComponent": Struct(
            "unk1" / make_vector(StrId),
            "unk2" / make_vector(StrId),
            "unk3" / make_vector(StrId)
        )
    }
    # Standalone FX shares the plain FX component's dependency layout.
    component_dependencies["CStandaloneFXComponent"] = component_dependencies["CFXComponent"]
    def component_type(this):
        # Switch key: first mapped type the parsed component derives from.
        for component_type in component_dependencies.keys():
            if type_lib.is_child_of(this.type, component_type):
                return component_type
        return None
    return Switch(component_type, component_dependencies)
# A single actor component: type name, a length-prefixed property block parsed
# with the char-class struct resolved from the type, optional extra fields,
# function calls, and type-specific dependency data.
Component = Struct(
    type=StrId,
    unk_1=Array(2, Hex(Int32ul)),
    fields=PrefixedAllowZeroLen(
        Int32ul,
        Struct(
            empty_string=PropertyEnum,
            root=PropertyEnum,
            fields=Switch(
                lambda ctx: find_charclass_for_type(ctx._._.type),
                fieldtypes,
                ErrorWithMessage(lambda ctx: f"Unknown component type: {ctx._._.type}", construct.SwitchError)
            )
        )
    ),
    # Only present for CComponent-derived types: a dict of typed extra values.
    extra_fields=construct.If(
        lambda this: type_lib.is_child_of(this.type, "CComponent"),
        common_types.DictAdapter(common_types.make_vector(
            common_types.DictElement(Struct(
                "type" / StrId,
                "value" / Switch(
                    construct.this.type,
                    {
                        "bool": Flag,
                        "string": StrId
                    },
                    ErrorWithMessage(lambda ctx: f"Unknown argument type: {ctx.type}", construct.SwitchError)
                )
            ))
        ))
    ),
    functions=Functions,
    dependencies=Dependencies()
)
# "CCharClass" payload variant: model header plus components and sources.
CCharClass = Struct(
    model_name=StrId,
    unk_1=Int16ul,
    unk_2=Int32ul,
    unk_3=Int16ul,
    sub_actors=PrefixedArray(Int32ul, StrId),
    unk_4=Array(9, Float32l),
    magic=Const(0xFFFFFFFF, Hex(Int32ul)),
    unk_5=Byte,
    unk_6=StrId,
    unk_7=Byte,
    components=make_dict(Component),
    binaries=make_vector(StrId),
    sources=make_vector(StrId >> Byte),
)
# "CActorDef" payload variant: like CCharClass but without the model fields.
CActorDef = Struct(
    unk_1=Int16ul,
    unk_2=Int32ul,
    unk_3=Int16ul,
    sub_actors=PrefixedArray(Int32ul, StrId),
    unk_4=StrId,
    components=make_dict(Component),
    binaries=make_vector(StrId),
    sources=make_vector(StrId >> Byte),
)
# Top-level payload variants, selected by the file's 'type' field below.
property_types = {
    "CCharClass": CCharClass,
    "CActorDef": CActorDef
}
#
# Whole-file format: magic + version, name/type header, then the typed payload.
BMSAD = Struct(
    _magic=Const(b"MSAD"),
    version=Const(0x0200000F, Hex(Int32ul)),
    # # gameeditor::CGameModelRoot
    # root_type=construct.Const('Root', PropertyEnum),
    # Root=gameeditor_CGameModelRoot,
    name=StrId,
    type=StrId,
    property=Switch(
        construct.this.type,
        property_types,
        ErrorWithMessage(lambda ctx: f"Unknown property type: {ctx.type}"),
    ),
    # rest=Peek(construct.GreedyBytes),
    # z=Probe(),
    _end=construct.Terminated,
)
# BMSAD = game_model_root.create('CActorDef', 0x02000031)
class Bmsad(BaseResource):
    """Actor-definition resource (.bmsad), parsed with the BMSAD construct."""
    @classmethod
    def construct_class(cls, target_game: Game) -> Construct:
        # NOTE(review): target_game is unused here - the same construct is
        # returned regardless of game; confirm this format is game-invariant.
        return BMSAD
| 28.212264 | 110 | 0.598562 |
a0b1cb54ada9b516ae7c1eef2d07132f1b65a8d4 | 9,942 | py | Python | sfa_api/tests/test_jobs.py | SolarArbiter/solarforecastarbiter-api | 280800c73eb7cfd49029462b352887e78f1ff91b | [
"MIT"
] | 7 | 2018-12-07T22:05:36.000Z | 2020-05-03T03:20:50.000Z | sfa_api/tests/test_jobs.py | SolarArbiter/solarforecastarbiter-api | 280800c73eb7cfd49029462b352887e78f1ff91b | [
"MIT"
] | 220 | 2018-11-01T23:33:19.000Z | 2021-12-02T21:06:38.000Z | sfa_api/tests/test_jobs.py | SolarArbiter/solarforecastarbiter-api | 280800c73eb7cfd49029462b352887e78f1ff91b | [
"MIT"
] | 3 | 2018-10-31T20:55:07.000Z | 2021-11-10T22:51:43.000Z | import datetime as dt
import tempfile
import time
import pytest
from rq import SimpleWorker
from rq.timeouts import JobTimeoutException
from rq_scheduler import Scheduler
from sfa_api import jobs
from sfa_api.utils.queuing import get_queue
from sfa_api.conftest import _make_sql_app, _make_nocommit_cursor
@pytest.fixture()
def app(mocker):
    """Flask app configured with job_executor MySQL credentials and a
    'scheduler' queue, with the DB wrapped in a no-commit cursor so nothing
    done by a test persists."""
    with _make_sql_app() as app:
        app.config.update(
            TOKEN_ENCRYPTION_KEY=b'eKfeo832hn8nQ_3K69YDniBbHqbqpIxUNRstrv225c8=',  # NOQA
            SCHEDULER_QUEUE='scheduler',
            MYSQL_USER='job_executor',
            MYSQL_PASSWORD='thisisaterribleandpublicpassword'
        )
        with _make_nocommit_cursor(mocker):
            yield app
@pytest.fixture()
def queue(app):
    """The RQ queue named by the app's SCHEDULER_QUEUE setting."""
    queue_name = app.config['SCHEDULER_QUEUE']
    return get_queue(queue_name)
def test_exchange_token(mocker, app, userid):
    """exchange_token exchanges the stored refresh token for an access token."""
    exchange = mocker.patch('sfa_api.jobs.exchange_refresh_token',
                            return_value='access')
    out = jobs.exchange_token(userid)
    assert out.token == 'access'
    # Bug fix: Mock has no ``called_with`` method - accessing it merely creates
    # a truthy child mock, so ``assert exchange.called_with('token')`` could
    # never fail.  Assert that the patched exchange was actually invoked.
    assert exchange.called
def test_exchange_token_dne(app):
    """A token exchange for an unknown user id must raise KeyError."""
    missing_user = '1190950a-7cca-11e9-a81f-54bf64606445'
    with pytest.raises(KeyError):
        jobs.exchange_token(missing_user)
def test_make_job_app(mocker):
    """make_job_app loads its configuration from the given file path and
    exposes the queue named by SCHEDULER_QUEUE."""
    with tempfile.NamedTemporaryFile(mode='w') as f:
        f.write('SCHEDULER_QUEUE = "scheduled_jobsq"')
        f.flush()
        with jobs.make_job_app(f.name) as (app, queue):
            assert queue.name == 'scheduled_jobsq'
def test_schedule_jobs(mocker, queue, jobid):
    """schedule_jobs registers the stored job once and is idempotent:
    a second sync adds nothing and cancels nothing."""
    sch = Scheduler(queue=queue, connection=queue.connection)
    sch.cancel = mocker.MagicMock()
    jobs.schedule_jobs(sch)
    assert jobid in sch
    assert len(list(sch.get_jobs())) == 1
    # running again should have no effect
    jobs.schedule_jobs(sch)
    assert jobid in sch
    assert len(list(sch.get_jobs())) == 1
    assert not sch.cancel.called
def noop():
    """Do nothing; a placeholder callable for scheduler registration."""
    return None
def test_schedule_jobs_bad_current(mocker, queue, jobid):
    """A pre-existing scheduled job that is not present in storage is removed
    when schedule_jobs syncs, leaving only the stored job."""
    sch = Scheduler(queue=queue, connection=queue.connection)
    id0 = 'jobid0'
    # Plant a stale cron entry that storage does not know about.
    sch.cron(
        '* * * * *',
        func=noop,
        id=id0,
        meta={}
    )
    jobs.schedule_jobs(sch)
    assert jobid in sch
    assert id0 not in sch
    assert len(list(sch.get_jobs())) == 1
@pytest.fixture()
def sql_job(userid, orgid, jobid):
    """A raw job row (as returned by storage) for a daily observation
    validation job scheduled by a midnight cron."""
    return {
        'id': jobid,
        'user_id': userid,
        'organization_id': orgid,
        'name': 'Test job',
        'job_type': 'daily_observation_validation',
        'parameters': {
            "start_td": "-1d",
            "end_td": "0h",
            "base_url": "http://localhost:5000"
        },
        'schedule': {"type": "cron", "cron_string": "0 0 * * *"},
        'version': 0,
        'created_at': dt.datetime(2019, 1, 1, 12, tzinfo=dt.timezone.utc),
        'modified_at': dt.datetime(2019, 1, 1, 12, tzinfo=dt.timezone.utc)
    }
def test_schedule_jobs_modified(mocker, queue, sql_job):
    """Re-running schedule_jobs picks up a newer modified_at timestamp from
    storage and records it in the scheduled job's metadata."""
    mocker.patch('sfa_api.jobs.storage._call_procedure',
                 return_value=[sql_job])
    sch = Scheduler(queue=queue, connection=queue.connection)
    jobs.schedule_jobs(sch)
    assert list(sch.get_jobs())[0].meta[
        'last_modified_in_sql'] == dt.datetime(2019, 1, 1, 12,
                                               tzinfo=dt.timezone.utc)
    # Bump the stored modification time and sync again.
    njob = sql_job.copy()
    njob['modified_at'] = dt.datetime(2019, 2, 1, tzinfo=dt.timezone.utc)
    mocker.patch('sfa_api.jobs.storage._call_procedure',
                 return_value=[njob])
    jobs.schedule_jobs(sch)
    assert list(sch.get_jobs())[0].meta[
        'last_modified_in_sql'] == dt.datetime(
            2019, 2, 1, tzinfo=dt.timezone.utc)
def test_schedule_jobs_err(mocker, queue, sql_job):
    """A job with an invalid (empty) schedule dict is logged as an error
    rather than raising out of schedule_jobs."""
    job = sql_job.copy()
    job['schedule'] = {}
    mocker.patch('sfa_api.jobs.storage._call_procedure',
                 return_value=[job])
    log = mocker.patch('sfa_api.jobs.logger')
    sch = Scheduler(queue=queue, connection=queue.connection)
    jobs.schedule_jobs(sch)
    assert log.error.called
def test_convert_sql_job_to_rq_job(sql_job, mocker):
    """A cron-type SQL job is registered via scheduler.cron with its
    cron string as the positional argument."""
    scheduler = mocker.MagicMock()
    jobs.convert_sql_to_rq_job(sql_job, scheduler)
    assert scheduler.cron.called
    assert scheduler.cron.call_args[0] == ('0 0 * * *',)
def test_convert_sql_job_to_rq_job_timeout(sql_job, mocker):
    """A schedule-level 'timeout' is forwarded to scheduler.cron as the
    timeout keyword argument."""
    sql_job['schedule'] = {
        "type": "cron", "cron_string": "0 0 * * *", "timeout": "10m"}
    scheduler = mocker.MagicMock()
    jobs.convert_sql_to_rq_job(sql_job, scheduler)
    assert scheduler.cron.called
    assert scheduler.cron.call_args[0] == ('0 0 * * *',)
    assert scheduler.cron.call_args[1]['timeout'] == '10m'
def test_convert_sql_job_to_rq_job_not_cron(sql_job, mocker):
    """Non-cron schedule types are rejected with ValueError."""
    job = sql_job.copy()
    job['schedule'] = {"type": "enqueue_at"}
    scheduler = mocker.MagicMock()
    with pytest.raises(ValueError):
        jobs.convert_sql_to_rq_job(job, scheduler)
@pytest.mark.parametrize('jtype,params,func', [
    ('daily_observation_validation',
     {'start_td': '-1h', 'end_td': '0h'},
     'sfa_api.jobs.fetch_and_validate_all_observations'),
    ('reference_nwp',
     {'issue_time_buffer': '10min',
      'nwp_directory': '.'},
     'sfa_api.jobs.make_latest_nwp_forecasts'),
    ('periodic_report',
     {'report_id': 'blah'},
     'sfa_api.jobs.compute_report'),
    # Unknown job types must raise ValueError.
    pytest.param(
        'other_job', {}, 'sfa_api.app',
        marks=pytest.mark.xfail(strict=True, raises=ValueError)),
    ('reference_persistence',
     {},
     'sfa_api.jobs.make_latest_persistence_forecasts'),
    ('reference_probabilistic_persistence',
     {},
     'sfa_api.jobs.make_latest_probabilistic_persistence_forecasts'),
    ('trial_data_copy',
     {'base_url': 'https://',
      'copy_from': 'id1',
      'copy_to': 'id2'},
     'sfa_api.jobs.copy_observation_data')
])
def test_execute_job(jtype, params, func, mocker, userid):
    """execute_job dispatches each job type to the expected worker function."""
    mocker.patch('sfa_api.jobs.exchange_token',
                 return_value='token')
    ret = mocker.patch(func, autospec=True)
    jobs.execute_job('test', jtype, userid, **params)
    assert ret.called
def test_full_run_through(app, queue, mocker):
    """End-to-end: schedule the stored job, enqueue it through the scheduler,
    and run it with a worker so the validation function is invoked."""
    mocker.patch('sfa_api.jobs.exchange_token', return_value='token')
    validate = mocker.patch('sfa_api.jobs.fetch_and_validate_all_observations')
    gjq = mocker.patch('rq_scheduler.Scheduler.get_jobs_to_queue')
    # Scheduler augmented with the job-update mixin under test.
    class US(jobs.UpdateMixin, Scheduler):
        pass
    sch = US(queue=queue, connection=queue.connection)
    jobs.schedule_jobs(sch)
    (job, exc_time) = list(sch.get_jobs(with_times=True))[0]
    # The midnight cron's next execution is the start of the next UTC day.
    assert exc_time == dt.datetime.utcnow().replace(
        hour=0, minute=0, second=0, microsecond=0) + dt.timedelta(days=1)
    gjq.return_value = [job]
    sch.run(burst=True)
    assert job in queue.jobs
    w = SimpleWorker([queue], connection=queue.connection)
    w.work(burst=True)
    assert validate.called
@pytest.fixture()
def adminapp(mocker):
    """App like the 'app' fixture but connected with the frameworkadmin
    MySQL credentials (needed for job administration)."""
    with _make_sql_app() as app:
        app.config.update(
            MYSQL_USER='frameworkadmin',
            MYSQL_PASSWORD='thisisaterribleandpublicpassword'
        )
        with _make_nocommit_cursor(mocker):
            yield app
def test_full_run_through_job_timeout(app, queue, mocker):
    """A job exceeding its timeout raises JobTimeoutException, which the
    worker routes to the installed exception handler."""
    # Stand-in job body that outlives the 1-second timeout set below.
    def dosleep(*args, **kwargs):
        time.sleep(5)
    mocker.patch('sfa_api.jobs.exchange_token', return_value='token')
    mocker.patch('sfa_api.jobs.fetch_and_validate_all_observations',
                 new=dosleep)
    fail = mocker.MagicMock()
    gjq = mocker.patch('rq_scheduler.Scheduler.get_jobs_to_queue')
    class US(jobs.UpdateMixin, Scheduler):
        pass
    sch = US(queue=queue, connection=queue.connection)
    jobs.schedule_jobs(sch)
    (job, exc_time) = list(sch.get_jobs(with_times=True))[0]
    job.timeout = 1
    assert exc_time == dt.datetime.utcnow().replace(
        hour=0, minute=0, second=0, microsecond=0) + dt.timedelta(days=1)
    gjq.return_value = [job]
    sch.run(burst=True)
    assert job in queue.jobs
    # Custom handler: record that the timeout exception reached us.
    def my_err(job, *exc_info):
        assert exc_info[0] == JobTimeoutException
        fail()
    w = SimpleWorker([queue], connection=queue.connection,
                     disable_default_exception_handler=True,
                     exception_handlers=[my_err])
    w.work(burst=True)
    assert fail.called
@pytest.mark.parametrize('jt,kwargs', [
    ('daily_observation_validation', {'start_td': '1h', 'end_td': '1h'}),
    ('reference_nwp', {'issue_time_buffer': '1h', 'base_url': 'hhtp'}),
    ('periodic_report', {'report_id': 'id'}),
    ('reference_persistence', {'base_url': 'https://'}),
    ('reference_probabilistic_persistence', {'base_url': 'https://'}),
    ('trial_data_copy', {
        'base_url': 'https://',
        'copy_from': 'id1',
        'copy_to': 'id2'
    }),
    # Unknown types raise ValueError; missing required params raise KeyError.
    pytest.param('badtype', {}, marks=pytest.mark.xfail(
        strict=True, raises=ValueError)),
    pytest.param('daily_observation_validation', {}, marks=pytest.mark.xfail(
        strict=True, raises=KeyError))
])
def test_create_job(adminapp, jt, kwargs, nocommit_cursor, user_id):
    """create_job stores a cron job row with the given type and parameters."""
    jobs.create_job(jt, 'testcreatejob', user_id, 'cronstr', **kwargs)
    jlist = jobs.storage._call_procedure('list_jobs', with_current_user=False)
    # One pre-existing job plus the one just created.
    assert len(jlist) == 2
    job = [j for j in jlist if j['name'] == 'testcreatejob'][0]
    assert job['schedule'] == {'type': 'cron', 'cron_string': 'cronstr'}
    assert job['job_type'] == jt
    assert job['parameters'] == kwargs
def test_create_job_timeout(adminapp, nocommit_cursor, user_id):
    """A timeout passed to create_job ends up inside the stored schedule."""
    timeout = 100
    jobs.create_job('periodic_report', 'testcreatejob', user_id, 'cronstr',
                    timeout, report_id='reportid')
    jlist = jobs.storage._call_procedure('list_jobs', with_current_user=False)
    assert len(jlist) == 2
    job = [j for j in jlist if j['name'] == 'testcreatejob'][0]
    assert job['schedule'] == {'type': 'cron', 'cron_string': 'cronstr',
                               'timeout': timeout}
| 33.362416 | 89 | 0.653188 |
2f6a06db3b937d09da02ca515c2472b21754b8a8 | 37,543 | py | Python | src/wallet_data_models.py | hlooman/polis-masternode-tool | 94fd2c7fa53db81ae8cfdb767808046958532869 | [
"MIT"
] | 3 | 2019-10-16T02:17:09.000Z | 2020-07-27T16:50:43.000Z | src/wallet_data_models.py | hlooman/polis-masternode-tool | 94fd2c7fa53db81ae8cfdb767808046958532869 | [
"MIT"
] | null | null | null | src/wallet_data_models.py | hlooman/polis-masternode-tool | 94fd2c7fa53db81ae8cfdb767808046958532869 | [
"MIT"
] | 1 | 2019-10-21T11:59:27.000Z | 2019-10-21T11:59:27.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Bertrand256
# Created on: 2018-09
import bisect
import datetime
import hashlib
import logging
from PyQt5.QtCore import Qt, QVariant, QModelIndex, QAbstractItemModel, QUrl
from PyQt5.QtGui import QColor, QFont, QDesktopServices
from PyQt5.QtWidgets import QTreeView, QTableView
from PyQt5 import QtGui
from more_itertools import consecutive_groups
from typing import Optional, List, Tuple, Dict
import app_utils
import thread_utils
import wnd_utils
from app_config import MasternodeConfig
from app_defs import DEBUG_MODE
from bip44_wallet import Bip44Wallet, UNCONFIRMED_TX_BLOCK_HEIGHT
from ext_item_model import TableModelColumn, ExtSortFilterTableModel
from wallet_common import Bip44AccountType, Bip44AddressType, UtxoType, TxType
log = logging.getLogger('pmt.wallet_dlg')
# Logical operators for combining filter criteria.
FILTER_OR = 0
FILTER_AND = 1
# Comparison operators used by filter criteria (>=, <=, ==).
FILTER_OPER_GTEQ = 1
FILTER_OPER_LTEQ = 2
FILTER_OPER_EQ = 3
class MnAddressItem(object):
    """Pairs a masternode configuration with its resolved wallet address."""
    def __init__(self):
        # Filled in by the owning model after construction.
        self.masternode: MasternodeConfig = None
        self.address: Bip44AddressType = None
class MnAddressTableModel(ExtSortFilterTableModel):
    def __init__(self, parent, masternode_list: List[MasternodeConfig], bip44_wallet: Bip44Wallet):
        """Build a one-column ('description') model of configured masternodes.

        Masternodes without a collateral address are skipped; the remaining
        entries get their wallet addresses resolved immediately.
        """
        ExtSortFilterTableModel.__init__(self, parent, [
            TableModelColumn('description', 'Description', True, 100)
        ], False, False)
        self.mn_items: List[MnAddressItem] = []
        for mn in masternode_list:
            mni = MnAddressItem()
            mni.masternode = mn
            if mni.masternode.collateralAddress:
                self.mn_items.append(mni)
        self.load_mn_addresses_in_bip44_wallet(bip44_wallet)
def load_mn_addresses_in_bip44_wallet(self, bip44_wallet: Bip44Wallet):
addr_ids = []
for mni in self.mn_items:
if mni.masternode.collateralAddress:
a = bip44_wallet.get_address_item(mni.masternode.collateralAddress, True)
address_loc = Bip44AddressType(tree_id=None)
address_loc.copy_from(a)
if not address_loc.bip32_path:
address_loc.bip32_path = mni.masternode.collateralBip32Path
a.bip32_path = mni.masternode.collateralBip32Path
mni.address = address_loc
if mni.masternode.collateralAddress not in addr_ids:
addr_ids.append(mni.address.id)
if addr_ids:
bip44_wallet.subscribe_addresses_for_chbalance(addr_ids, True)
def flags(self, index):
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
def rowCount(self, parent=None, *args, **kwargs):
return len(self.mn_items)
def data_by_row_index(self, row_index):
return self.mn_items[row_index]
def data(self, index, role=None):
if index.isValid():
col_idx = index.column()
row_idx = index.row()
if row_idx < len(self.mn_items):
if role in (Qt.DisplayRole, Qt.EditRole):
col = self.col_by_index(col_idx)
if col:
field_name = col.name
if field_name == 'description':
return self.mn_items[row_idx]
return QVariant()
def get_mn_by_addr_hash(self, addr_hash) -> Optional[MnAddressItem]:
for idx, mni in enumerate(self.mn_items):
if mni.address.address:
h = hashlib.sha256(bytes(mni.address.address, 'utf-8')).hexdigest()
if h == addr_hash:
return mni
return None
def get_mn_index(self, mn_item: MnAddressItem) -> Optional[int]:
if mn_item in self.mn_items:
return self.mn_items.index(mn_item)
return None
def get_mn_index_by_addr(self, address: Bip44AddressType) -> Optional[int]:
for idx, mni in enumerate(self.mn_items):
if mni.address.id == address.id:
return idx
return None
def get_mn_by_addr(self, address: Bip44AddressType) -> Optional[MasternodeConfig]:
for idx, mni in enumerate(self.mn_items):
if mni.address.id == address.id:
return mni.masternode
return None
def address_data_changed(self, address: Bip44AddressType):
idx = self.get_mn_index_by_addr(address)
if idx is not None:
self.mn_items[idx].address.update_from(address)
index = self.index(idx, 0)
self.dataChanged.emit(index, index)
class AccountListModel(ExtSortFilterTableModel):
def __init__(self, parent):
ExtSortFilterTableModel.__init__(self, parent, [
TableModelColumn('address', 'Address', True, 100)
], False, True)
self.accounts: List[Bip44AccountType] = []
self.__data_modified = False
self.show_zero_balance_addresses = False
self.show_not_used_addresses = False
self.set_attr_protection()
def reset_modified(self):
self.__data_modified = False
@property
def data_modified(self):
return self.__data_modified
def flags(self, index):
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable
def parent(self, index=None):
try:
if not index or not index.isValid():
return QModelIndex()
node = index.internalPointer()
if isinstance(node, Bip44AccountType):
return QModelIndex()
else:
acc_idx = self.accounts.index(node.bip44_account)
return self.createIndex(acc_idx, 0, node.bip44_account)
except Exception as e:
log.exception('Exception while getting parent of index')
raise
def index(self, row, column, parent=None, *args, **kwargs):
try:
if not parent or not parent.isValid():
if 0 <= row < len(self.accounts):
return self.createIndex(row, column, self.accounts[row])
else:
return QModelIndex()
parentNode = parent.internalPointer()
if isinstance(parentNode, Bip44AccountType):
addr = parentNode.address_by_index(row)
if addr:
return self.createIndex(row, column, addr)
return QModelIndex()
except Exception as e:
log.exception('Exception while creating index')
raise
def rowCount(self, parent=None, *args, **kwargs):
if not parent or not parent.isValid():
ret = len(self.accounts)
else:
node = parent.internalPointer()
if isinstance(node, Bip44AccountType):
ret = len(node.addresses)
else:
ret = 0
return ret
def data(self, index, role=None):
if index.isValid():
data = index.internalPointer()
col = index.column()
if data:
if role in (Qt.DisplayRole, Qt.EditRole):
if col == 0:
# if isinstance(data, Bip44AccountType):
# return data.get_account_name()
# else:
# return f'/{data.address_index}: {data.address}'
return data
elif col == 1:
b = data.balance
if b:
b = b/1e8
return b
elif col == 2:
b = data.received
if b:
b = b/1e8
return b
return QVariant()
def removeRows(self, row, count, parent=None, *args, **kwargs):
if parent is None or not parent.isValid():
if row >=0 and row < len(self.accounts):
self.beginRemoveRows(parent, row, row + count)
for row_offs in range(count):
del self.accounts[row - row_offs]
self.endRemoveRows()
return True
else:
acc = parent.internalPointer()
removed = False
if acc:
self.beginRemoveRows(parent, row, row + count)
for row_offs in range(count):
removed = max(removed, acc.remove_address_by_index(row - row_offs))
self.endRemoveRows()
return removed
def filterAcceptsRow(self, source_row, source_parent):
def count_prev_zero_received(acc: Bip44AccountType, start_index: int):
cnt = 0
index = start_index
while index >= 0:
a = acc.address_by_index(index)
if not a.received:
cnt += 1
else:
break
index -= 1
return cnt
try:
will_show = True
if source_parent.isValid():
acc = source_parent.internalPointer()
if isinstance(acc, Bip44AccountType):
addr = acc.address_by_index(source_row)
if addr:
if addr.received == 0:
will_show = False
if self.show_not_used_addresses:
will_show = True
else:
if not addr.is_change:
prev_cnt = count_prev_zero_received(acc, source_row - 1)
if prev_cnt < acc.view_fresh_addresses_count:
will_show = True
elif addr.balance == 0:
will_show = self.show_zero_balance_addresses
else:
if source_row < len(self.accounts):
acc = self.accounts[source_row]
will_show = self.is_account_visible(acc)
else:
will_show = False
except Exception as e:
log.exception('Exception occurred while filtering account/address')
raise
return will_show
def is_account_visible(self, account: Bip44AccountType):
if account.status_force_hide:
return False
if account.status_force_show or account.address_index == 0x80000000:
return True
if account.received > 0:
return True
else:
return False
def increase_account_fresh_addr_count(self, acc: Bip44AccountType, increase_count=1):
acc.view_fresh_addresses_count += increase_count
self.invalidateFilter()
def account_by_id(self, id: int) -> Optional[Bip44AccountType]:
for a in self.accounts:
if a.id == id:
return a
return None
def account_index_by_id(self, id: int) -> Optional[int]:
for idx, a in enumerate(self.accounts):
if a.id == id:
return idx
return None
def account_by_bip44_index(self, bip44_index: int) -> Optional[Bip44AccountType]:
for a in self.accounts:
if a.address_index == bip44_index:
return a
return None
def add_account(self, account: Bip44AccountType):
existing_account = self.account_by_id(account.id)
self.__data_modified = True
if not existing_account:
account_loc = Bip44AccountType(None, None, None, None, None)
account_loc.copy_from(account)
idxs = [a.address_index for a in self.accounts]
insert_idx = bisect.bisect_right(idxs, account.address_index)
self.beginInsertRows(QModelIndex(), insert_idx, insert_idx)
self.accounts.insert(insert_idx, account_loc)
self.endInsertRows()
else:
existing_account.copy_from(account)
def add_account_address(self, account: Bip44AccountType, address: Bip44AddressType):
account_idx = self.account_index_by_id(account.id)
if account_idx is not None:
account_loc = self.accounts[account_idx]
acc_index = self.index(account_idx, 0)
addr_idx = account_loc.address_index_by_id(address.id)
if addr_idx is None:
self.__data_modified = True
addr_loc = Bip44AddressType(None)
addr_loc.copy_from(address)
addr_idx = account_loc.get_address_insert_index(addr_loc)
self.beginInsertRows(acc_index, addr_idx, addr_idx)
account_loc.add_address(addr_loc, addr_idx)
self.endInsertRows()
def account_data_changed(self, account: Bip44AccountType):
account_idx = self.account_index_by_id(account.id)
if account_idx is not None:
account_loc = self.accounts[account_idx]
if account != account_loc:
account_loc.update_from(account)
self.__data_modified = True
index = self.index(account_idx, 0)
self.dataChanged.emit(index, index)
def address_data_changed(self, account: Bip44AccountType, address: Bip44AddressType):
account_idx = self.account_index_by_id(account.id)
if account_idx is not None:
account = self.accounts[account_idx]
acc_index = self.index(account_idx, 0)
addr_idx = account.address_index_by_id(address.id)
if addr_idx is not None:
addr_loc = account.address_by_index(addr_idx)
if addr_loc != address:
addr_loc.update_from(address)
addr_index = self.index(addr_idx, 0, parent=acc_index)
self.__data_modified = True
self.dataChanged.emit(addr_index, addr_index)
def remove_account(self, index):
if 0 <= index < len(self.accounts):
self.__data_modified = True
self.beginRemoveRows(QModelIndex(), index, index)
del self.accounts[index]
self.endRemoveRows()
def clear_accounts(self):
log.debug('Clearing accounts')
self.__data_modified = True
self.accounts.clear()
def get_first_unused_bip44_account_index(self):
""" Get first unused not yet visible account index. """
cur_index = 0x80000000
for a in self.accounts:
if a.address_index >= cur_index and not self.is_account_visible(a) and a.received == 0:
return a.address_index
else:
cur_index = a.address_index
return cur_index + 1
class UtxoTableModel(ExtSortFilterTableModel):
def __init__(self, parent, masternode_list: List[MasternodeConfig], tx_explorer_url: str):
ExtSortFilterTableModel.__init__(self, parent, [
TableModelColumn('satoshis', 'Amount (Polis)', True, 100),
TableModelColumn('confirmations', 'Confirmations', True, 100),
TableModelColumn('bip32_path', 'Path', True, 100),
TableModelColumn('time_str', 'TX Date/Time', True, 140),
TableModelColumn('address', 'Address', True, 140),
TableModelColumn('masternode', 'Masternode', False, 40),
TableModelColumn('txid', 'TX Hash', True, 220),
TableModelColumn('output_index', 'TX Idx', True, 40)
], True, True)
if DEBUG_MODE:
self.insert_column(len(self._columns), TableModelColumn('id', 'DB id', True, 40))
self.tx_explorer_url = tx_explorer_url
self.hide_collateral_utxos = True
self.utxos: List[UtxoType] = []
self.utxo_by_id: Dict[int, UtxoType] = {}
self.block_height = None
self.mn_by_collateral_tx: Dict[str, MasternodeConfig] = {}
self.mn_by_collateral_address: Dict[str, MasternodeConfig] = {}
for mn in masternode_list:
ident = mn.collateralTx + '-' + str(mn.collateralTxIndex)
self.mn_by_collateral_tx[ident] = mn
self.mn_by_collateral_address[mn.collateralAddress] = mn
self.set_attr_protection()
def rowCount(self, parent=None, *args, **kwargs):
return len(self.utxos)
def flags(self, index):
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable
def set_view(self, table_view: QTableView):
super().set_view(table_view)
link_delagate = wnd_utils.HyperlinkItemDelegate(table_view)
link_delagate.linkActivated.connect(self.hyperlink_activated)
table_view.setItemDelegateForColumn(self.col_index_by_name('txid'), link_delagate)
def hyperlink_activated(self, link):
QDesktopServices.openUrl(QUrl(link))
def data(self, index, role=None):
if index.isValid():
col_idx = index.column()
row_idx = index.row()
if row_idx < len(self.utxos):
utxo = self.utxos[row_idx]
if utxo:
if role in (Qt.DisplayRole, Qt.EditRole):
col = self.col_by_index(col_idx)
if col:
field_name = col.name
if field_name == 'satoshis':
return app_utils.to_string(round(utxo.satoshis / 1e8, 8))
elif field_name == 'masternode':
if utxo.masternode:
return utxo.masternode.name
elif field_name == 'confirmations':
if utxo.block_height >= UNCONFIRMED_TX_BLOCK_HEIGHT:
return 'Unconfirmed'
else:
return app_utils.to_string(utxo.__getattribute__(field_name))
elif field_name == 'address':
if utxo.address_obj and utxo.address_obj.label:
return utxo.address_obj.label
else:
return utxo.address
elif col.name == 'txid':
if self.tx_explorer_url:
url = self.tx_explorer_url.replace('%TXID%', utxo.txid)
url = f'<a href="{url}">{utxo.txid}</a>'
return url
else:
return utxo.txid
else:
return app_utils.to_string(utxo.__getattribute__(field_name))
elif role == Qt.ForegroundRole:
if utxo.is_collateral:
return QColor(Qt.white)
elif utxo.coinbase_locked or utxo.block_height >= UNCONFIRMED_TX_BLOCK_HEIGHT:
return QColor('red')
elif role == Qt.BackgroundRole:
if utxo.is_collateral:
return QColor(Qt.red)
elif role == Qt.TextAlignmentRole:
col = self.col_by_index(col_idx)
if col:
if col.name in ('satoshis', 'confirmations', 'output_index'):
return Qt.AlignRight
return QVariant()
def add_utxo(self, utxo: UtxoType, insert_pos = None):
if not utxo.id in self.utxo_by_id:
if insert_pos is None:
self.utxos.append(utxo)
else:
self.utxos.insert(insert_pos, utxo)
self.utxo_by_id[utxo.id] = utxo
ident = utxo.txid + '-' + str(utxo.output_index)
if ident in self.mn_by_collateral_tx:
utxo.is_collateral = True
mn = self.mn_by_collateral_address.get(utxo.address, None)
if mn:
utxo.masternode = mn
def clear_utxos(self):
self.utxos.clear()
self.utxo_by_id.clear()
def update_utxos(self, utxos_to_add: List[UtxoType], utxos_to_update: List[UtxoType], utxos_to_delete: List[Tuple[int, int]]):
if utxos_to_delete:
row_indexes_to_remove = []
for utxo_id in utxos_to_delete:
utxo = self.utxo_by_id.get(utxo_id)
if utxo:
utxo_index = self.utxos.index(utxo)
if utxo_index not in row_indexes_to_remove:
row_indexes_to_remove.append(utxo_index)
del self.utxo_by_id[utxo_id]
row_indexes_to_remove.sort(reverse=True)
for group in consecutive_groups(row_indexes_to_remove, ordering=lambda x: -x):
l = list(group)
self.beginRemoveRows(QModelIndex(), l[-1], l[0]) # items are sorted in reversed order
del self.utxos[l[-1]: l[0]+1]
self.endRemoveRows()
if utxos_to_add:
# in the model, the rows are sorted by the number of confirmations in the descending order, so put
# the new ones in the right place
# filter out the already existing utxos
utxos_to_add_verified = []
for utxo in utxos_to_add:
if utxo.id not in self.utxo_by_id:
utxos_to_add_verified.append(utxo)
utxos_to_add_verified.sort(key=lambda x: x.block_height, reverse=True)
row_idx = 0
self.beginInsertRows(QModelIndex(), row_idx, row_idx + len(utxos_to_add_verified) - 1)
try:
for index, utxo in enumerate(utxos_to_add_verified):
if utxo.id not in self.utxo_by_id:
self.add_utxo(utxo, index)
finally:
self.endInsertRows()
if utxos_to_update:
for utxo_new in utxos_to_update:
utxo = self.utxo_by_id.get(utxo_new.id)
if utxo:
utxo.block_height = utxo_new.block_height # block_height is the only field that can be updated
utxo_index = self.utxos.index(utxo)
ui_index = self.index(utxo_index, 0)
self.dataChanged.emit(ui_index, ui_index)
def lessThan(self, col_index, left_row_index, right_row_index):
col = self.col_by_index(col_index)
if col:
col_name = col.name
reverse = False
if col_name == 'time_str':
col_name = 'confirmations'
reverse = True
if 0 <= left_row_index < len(self.utxos) and \
0 <= right_row_index < len(self.utxos):
left_utxo = self.utxos[left_row_index]
right_utxo = self.utxos[right_row_index]
left_value = left_utxo.__getattribute__(col_name)
right_value = right_utxo.__getattribute__(col_name)
if isinstance(left_value, (int, float)) and isinstance(right_value, (int, float)):
if not reverse:
return left_value < right_value
else:
return right_value < left_value
elif isinstance(left_value, str) and isinstance(right_value, str):
left_value = left_value.lower()
right_value = right_value.lower()
if not reverse:
return left_value < right_value
else:
return right_value < left_value
return False
def filterAcceptsRow(self, source_row, source_parent):
will_show = True
if 0 <= source_row < len(self.utxos):
if self.hide_collateral_utxos:
utxo = self.utxos[source_row]
if utxo.is_collateral:
will_show = False
return will_show
def set_hide_collateral_utxos(self, hide):
self.hide_collateral_utxos = hide
self.proxy_model.invalidateFilter()
def set_block_height(self, block_height: int):
if block_height != self.block_height:
log.debug('Block height updated to %s', block_height)
self.block_height = block_height
# if self.utxos:
# tl_index = self.index(0, self.col_index_by_name('confirmations'))
# br_index = self.index(len(self.utxos) - 1, self.col_index_by_name('confirmations'))
# self.view.dataChanged(tl_index, br_index, [Qt.DisplayRole, Qt.ForegroundRole, Qt.BackgroundColorRole])
class TransactionTableModel(ExtSortFilterTableModel):
def __init__(self, parent, tx_explorer_url: str):
ExtSortFilterTableModel.__init__(self, parent, [
TableModelColumn('direction', 'Direction', True, 50),
TableModelColumn('satoshis', 'Amount', True, 100),
TableModelColumn('block_time_str', 'Date', True, 100),
TableModelColumn('block_height', 'Height', True, 100),
TableModelColumn('confirmations', 'Confirmations', True, 100),
TableModelColumn('senders', 'Sender', True, 100),
TableModelColumn('recipient', 'Recipient', True, 100),
TableModelColumn('tx_hash', 'TX Hash', False, 100),
TableModelColumn('is_coinbase', 'Coinbase TX', True, 100),
TableModelColumn('label', 'Comment', True, 100)
], True, True)
if DEBUG_MODE:
self.insert_column(len(self._columns), TableModelColumn('id', 'DB id', True, 40))
self.txes: List[TxType] = []
self.txes_by_id: Dict[int, TxType] = {}
self.tx_explorer_url = tx_explorer_url
self.__current_block_height = None
self.__data_modified = False
# filter:
self.filter_type = FILTER_OR
self.filter_incoming = False
self.filter_outgoing = False
self.filter_coinbase = False
self.filter_recipient = None
self.filter_sender = None
self.filter_amount_oper = None
self.filter_amount_value = None # in satoshis
self.filter_date_oper = None
self.filter_date_value = None
def set_view(self, table_view: QTableView):
super().set_view(table_view)
link_delagate = wnd_utils.HyperlinkItemDelegate(table_view)
link_delagate.linkActivated.connect(self.hyperlink_activated)
table_view.setItemDelegateForColumn(self.col_index_by_name('tx_hash'), link_delagate)
def hyperlink_activated(self, link):
QDesktopServices.openUrl(QUrl(link))
def rowCount(self, parent=None, *args, **kwargs):
return len(self.txes)
def flags(self, index):
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable
def data(self, index, role=None):
if index.isValid():
col_idx = index.column()
row_idx = index.row()
col = self.col_by_index(col_idx)
if row_idx < len(self.txes):
tx = self.txes[row_idx]
if role in (Qt.DisplayRole, Qt.EditRole):
if col.name == 'direction':
if tx.direction == 1:
if tx.is_coinbase:
return 'In - New coins'
else:
return 'In'
else:
return 'Out'
elif col.name == 'satoshis':
return app_utils.to_string(round(tx.satoshis / 1e8, 8))
elif col.name == 'senders':
return tx
elif col.name == 'recipient':
return tx
elif col.name == 'block_height':
if tx.block_height == UNCONFIRMED_TX_BLOCK_HEIGHT:
return 0
else:
return tx.block_height
elif col.name == 'tx_hash':
if self.tx_explorer_url:
url = self.tx_explorer_url.replace('%TXID%', tx.tx_hash)
url = f'<a href="{url}">{tx.tx_hash}</a>'
return url
else:
return tx.tx_hash
elif col.name == 'confirmations':
if self.__current_block_height is None:
return ''
else:
if tx.block_height == UNCONFIRMED_TX_BLOCK_HEIGHT:
return 'Unconfirmed'
else:
return app_utils.to_string(self.__current_block_height - tx.block_height + 1)
else:
return app_utils.to_string(tx.__getattribute__(col.name))
elif role == Qt.ForegroundRole:
if col.name == 'direction':
if tx.direction == 1:
if tx.is_coinbase:
return QtGui.QColor(Qt.darkBlue)
else:
return QtGui.QColor(Qt.darkGreen)
else:
return QtGui.QColor(Qt.red)
elif col.name == 'satoshis':
if tx.satoshis < 0:
return QtGui.QColor(Qt.red)
elif role == Qt.BackgroundRole:
pass
elif role == Qt.TextAlignmentRole:
col = self.col_by_index(col_idx)
if col:
if col.name in ('satoshis', 'block_height', 'confirmations'):
return Qt.AlignRight
else:
return Qt.AlignLeft
return QVariant()
def setData(self, index, value, role=None):
if index.isValid():
col_idx = index.column()
row_idx = index.row()
col = self.col_by_index(col_idx)
if row_idx < len(self.txes):
tx = self.txes[row_idx]
if role == Qt.EditRole:
if col.name == 'label':
tx.label = str(value)
return True
return False
def headerData(self, column, orientation, role=Qt.DisplayRole):
if role == Qt.DisplayRole and orientation == Qt.Vertical:
idx = self.index(column, 0)
if idx.isValid():
idx = self.mapFromSource(idx)
return str(idx.row() + 1)
else:
return ExtSortFilterTableModel.headerData(self, column, orientation, role)
def set_blockheight(self, cur_blockheight):
if self.__current_block_height != cur_blockheight:
self.__current_block_height = cur_blockheight
def add_tx(self, tx: TxType, insert_pos = None):
if not tx.id in self.txes_by_id:
if insert_pos is None:
self.txes.append(tx)
else:
self.txes.insert(insert_pos, tx)
self.txes_by_id[tx.id] = tx
def clear_txes(self):
self.txes_by_id.clear()
self.txes.clear()
def lessThan(self, col_index, left_row_index, right_row_index):
col = self.col_by_index(col_index)
if col:
col_name = col.name
reverse = False
if 0 <= left_row_index < len(self.txes) and \
0 <= right_row_index < len(self.txes):
left_tx = self.txes[left_row_index]
right_tx = self.txes[right_row_index]
if col_name == 'block_time_str':
col_name = 'block_timestamp'
left_value = left_tx.__getattribute__(col_name)
right_value = right_tx.__getattribute__(col_name)
elif col_name in ('senders', 'recipient'):
return False
elif col_name == 'confirmations':
if self.__current_block_height is not None:
left_value = self.__current_block_height - left_tx.block_height + 1
right_value = self.__current_block_height - right_tx.block_height + 1
else:
return False
else:
left_value = left_tx.__getattribute__(col_name)
right_value = right_tx.__getattribute__(col_name)
if isinstance(left_value, (int, float)) and isinstance(right_value, (int, float)):
if not reverse:
return left_value < right_value
else:
return right_value < left_value
elif isinstance(left_value, str) and isinstance(right_value, str):
left_value = left_value.lower()
right_value = right_value.lower()
if not reverse:
return left_value < right_value
else:
return right_value < left_value
return False
def filterAcceptsRow(self, source_row, source_parent):
any_cond_met = False
any_cond_not_met = False
was_any_condition = False
def check_cond(cond) -> Optional[bool]:
""":return True if the item should be shown without checking other conditions
False if the item will not be shown without checking other conditions
None check next conditions
"""
nonlocal any_cond_met, any_cond_not_met, was_any_condition
if cond is False:
any_cond_not_met = False
was_any_condition = True
if self.filter_type == FILTER_AND:
return False
elif cond is True:
any_cond_met = True
was_any_condition = True
if self.filter_type == FILTER_OR:
return True
return None
will_show = True
if 0 <= source_row < len(self.txes):
tx = self.txes[source_row]
if self.filter_incoming or self.filter_outgoing or self.filter_coinbase:
cond_met = (self.filter_incoming and tx.direction == 1 and tx.is_coinbase == 0) or \
(self.filter_coinbase and tx.direction == 1 and tx.is_coinbase == 1) or \
(self.filter_outgoing and tx.direction == -1)
r = check_cond(cond_met)
if r is False:
return False
elif r is True:
return True
if self.filter_amount_oper:
sat_val = abs(tx.satoshis)
cond_met = (self.filter_amount_oper == FILTER_OPER_EQ and sat_val == self.filter_amount_value) or \
(self.filter_amount_oper == FILTER_OPER_GTEQ and sat_val >= self.filter_amount_value) or \
(self.filter_amount_oper == FILTER_OPER_LTEQ and sat_val <= self.filter_amount_value)
r = check_cond(cond_met)
if r is False:
return False
elif r is True:
return True
if self.filter_date_oper:
dt = datetime.datetime.fromtimestamp(tx.block_timestamp)
dt = dt.replace(hour=0, minute=0, second=0)
ts = int(dt.timestamp())
cond_met = (self.filter_date_oper == FILTER_OPER_EQ and ts == self.filter_date_value) or \
(self.filter_date_oper == FILTER_OPER_GTEQ and ts >= self.filter_date_value) or \
(self.filter_date_oper == FILTER_OPER_LTEQ and ts <= self.filter_date_value)
r = check_cond(cond_met)
if r is False:
return False
elif r is True:
return True
if self.filter_recipient:
cond_met = False
for addr in tx.recipient_addrs:
if (isinstance(addr, Bip44AddressType) and addr.address == self.filter_recipient) or \
(addr == self.filter_recipient):
cond_met = True
break
r = check_cond(cond_met)
if r is False:
return False
elif r is True:
return True
if self.filter_sender:
cond_met = False
for addr in tx.sender_addrs:
if (isinstance(addr, Bip44AddressType) and addr.address == self.filter_sender) or \
(addr == self.filter_sender):
cond_met = True
break
r = check_cond(cond_met)
if r is False:
return False
elif r is True:
return True
if was_any_condition:
if (self.filter_type == FILTER_OR and not any_cond_met) or \
(self.filter_type == FILTER_AND and any_cond_not_met):
will_show = False
return will_show
| 42.421469 | 130 | 0.54929 |
b3ed3a9ecfe988764fc3330b33c73f45c9e6b0d2 | 1,616 | py | Python | ats/players_from_different_games_in_same_room_test.py | gomyar/rooms | ba20cb77380f439d60d452d2bc69bd94c9c21c24 | [
"MIT"
] | null | null | null | ats/players_from_different_games_in_same_room_test.py | gomyar/rooms | ba20cb77380f439d60d452d2bc69bd94c9c21c24 | [
"MIT"
] | null | null | null | ats/players_from_different_games_in_same_room_test.py | gomyar/rooms | ba20cb77380f439d60d452d2bc69bd94c9c21c24 | [
"MIT"
] | null | null | null |
import unittest
from rooms.testutils import *
class PlayersFromDifferentGamesInSameRoom(unittest.TestCase):
def setUp(self):
self.game = RoomsTestRunner(__file__, './test_game_1')
#self.game.start_game()
self.conn_bob = open_connection()
self.conn_ray = open_connection()
def tearDown(self):
self.game.stop_game()
def testTwoNodes(self):
bob_game_id = self.conn_bob.create_game(owner_username="bob")
ray_game_id = self.conn_ray.create_game(owner_username="ray")
info = self.conn_bob.player_info("bob", bob_game_id)
if not info:
self.conn_bob.join_game("bob", bob_game_id, "area1",
"room1", start_state="some value")
else:
self.conn_bob.connect_to_game("bob", bob_game_id)
info = self.conn_ray.player_info("ray", ray_game_id)
if not info:
self.conn_ray.join_game("ray", ray_game_id, "area1",
"room1", start_state="some value")
else:
self.conn_ray.connect_to_game("ray", ray_game_id)
wait_for_sync(self.conn_bob)
wait_for_sync(self.conn_ray)
wait_for_position(self.conn_ray.player_actor, (250, 250))
wait_for_position(self.conn_bob.player_actor, (250, 250))
self.assertEquals(1, len(self.conn_ray.actors))
self.assertEquals("misteractor", self.conn_ray.actors.values()[0].name)
self.assertEquals(1, len(self.conn_bob.actors))
self.assertEquals("misteractor", self.conn_bob.actors.values()[0].name)
if __name__ == "__main__":
unittest.main()
| 32.979592 | 79 | 0.652847 |
d4634f2234f3acac354e63852f9f245196c0c209 | 882 | py | Python | 2020_05_p2.py | Dementophobia/advent-of-code-2020 | ee1fb67d4ec55ed082aa7723c79759310925a85a | [
"MIT"
] | null | null | null | 2020_05_p2.py | Dementophobia/advent-of-code-2020 | ee1fb67d4ec55ed082aa7723c79759310925a85a | [
"MIT"
] | null | null | null | 2020_05_p2.py | Dementophobia/advent-of-code-2020 | ee1fb67d4ec55ed082aa7723c79759310925a85a | [
"MIT"
] | null | null | null | from aoc import read_file, timer
def calc_seat_id(seat):
row_low, row_high = 0, 127
col_low, col_high = 0, 7
for char in seat[:7]:
if char == "B":
row_low += (row_high - row_low + 1) // 2
else:
row_high -= (row_high - row_low + 1) // 2
for char in seat[7:]:
if char == "R":
col_low += (col_high - col_low + 1) // 2
else:
col_high -= (col_high - col_low + 1) // 2
return row_low * 8 + col_low
@timer
def solve():
seats = sorted(read_file("05"), key = lambda s: s[7:])
seats = sorted(seats, key = lambda s: s[:7], reverse = True)
for i in range(len(seats)):
current_id = calc_seat_id(seats[i])
if i and prev_id + 2 == current_id:
return prev_id + 1
prev_id = current_id
result = solve()
print(f"Solution: {result}") | 26.727273 | 64 | 0.53288 |
7cbf5cd3e37ad65c2f0652f41a469105f9b07254 | 1,490 | py | Python | reconhecimento-facial/treinar_modelo.py | lopes-leonardo/visao-computacional-fatec | a2d4e804b3b3b650797393effa1fd6412515a83b | [
"MIT"
] | null | null | null | reconhecimento-facial/treinar_modelo.py | lopes-leonardo/visao-computacional-fatec | a2d4e804b3b3b650797393effa1fd6412515a83b | [
"MIT"
] | null | null | null | reconhecimento-facial/treinar_modelo.py | lopes-leonardo/visao-computacional-fatec | a2d4e804b3b3b650797393effa1fd6412515a83b | [
"MIT"
] | null | null | null | # COMO USAR
# python treinar_modelo.py
# Ele espera que você já tenha rodado o arquivo extrai_features.py
# Você pode modificar os caminhos de output e input
# por meio dos parâmetros opcionais abaixo
# Importa o pacotes necessários
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
import argparse
import pickle
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("--embeddings", required=False,
default="output/embeddings.pickle",
help="Caminho para os embeddings serializados")
ap.add_argument("--recognizer", required=False,
default="output/recognizer.pickle",
help="Caminho para o output do treinamento")
ap.add_argument("--le", required=False,
default="output/le.pickle",
help="Caminho para o output das labels")
args = vars(ap.parse_args())
# Carrega os embeddings das faces
print("Carregando embedding das faces...")
data = pickle.loads(open(args["embeddings"], "rb").read())
# Codificando labels
print("Codificando labels...")
le = LabelEncoder()
labels = le.fit_transform(data["nomes"])
# Treina o SVM com os embeddings e produz o identificador
print("Treinando modelo...")
recognizer = SVC(C=1.0, kernel="linear", probability=True)
recognizer.fit(data["embeddings"], labels)
# Grava o modelo treinado no disco
f = open(args["recognizer"], "wb")
f.write(pickle.dumps(recognizer))
f.close()
# Grava as labels codificadas no disco
f = open(args["le"], "wb")
f.write(pickle.dumps(le))
f.close() | 31.041667 | 66 | 0.756376 |
09936856fbe5b0aa96e01b3b2f3281e9c9d33afb | 919 | py | Python | bites/bite120.py | ChidinmaKO/Chobe-bitesofpy | 2f933e6c8877a37d1ce7ef54ea22169fc67417d3 | [
"MIT"
] | null | null | null | bites/bite120.py | ChidinmaKO/Chobe-bitesofpy | 2f933e6c8877a37d1ce7ef54ea22169fc67417d3 | [
"MIT"
] | null | null | null | bites/bite120.py | ChidinmaKO/Chobe-bitesofpy | 2f933e6c8877a37d1ce7ef54ea22169fc67417d3 | [
"MIT"
] | 1 | 2019-07-16T19:12:52.000Z | 2019-07-16T19:12:52.000Z | from functools import wraps
def int_args(func):
@wraps(func)
# complete this decorator
def inner(*args):
is_ints = [isinstance (i, int) for i in args]
if not all(is_ints):
raise TypeError("Not an integer")
is_greater_than_zero = [i > 0 for i in args]
if not all(is_greater_than_zero):
raise ValueError("Less than 0")
return func(*args)
return inner
# tests
import pytest
from validate import int_args
@int_args
def sum_numbers(*numbers):
return sum(numbers)
def test_valid_args():
assert sum_numbers(1, 2, 3) == 6
def test_invalid_type_str():
with pytest.raises(TypeError):
sum_numbers(1, 'string', 3)
def test_invalid_type_float():
with pytest.raises(TypeError):
sum_numbers(1, 2.1, 3)
def test_negative_number():
with pytest.raises(ValueError):
sum_numbers(1, 2, -3) | 20.422222 | 53 | 0.63765 |
a7d58a7b6382c85e0941c8139d7de65a8e95bafe | 7,654 | py | Python | src/m3_graphical_accumulating.py | johnsom6/TheAccumulatorPattern | 0f9865707ffee6bc2601d3c62272a0d53e4bc56e | [
"MIT"
] | null | null | null | src/m3_graphical_accumulating.py | johnsom6/TheAccumulatorPattern | 0f9865707ffee6bc2601d3c62272a0d53e4bc56e | [
"MIT"
] | null | null | null | src/m3_graphical_accumulating.py | johnsom6/TheAccumulatorPattern | 0f9865707ffee6bc2601d3c62272a0d53e4bc56e | [
"MIT"
] | null | null | null | """
This module lets you practice one form of the ACCUMULATOR pattern,
namely, the "IN GRAPHICS" form which features:
-- DRAWING OBJECTS via ACCUMULATING positions and/or sizes,
as in: x = x + pixels
Additionally, it emphasizes that you must
** DO A CONCRETE EXAMPLE BY HAND **
before you can implement a solution to the problem in Python.
Authors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,
their colleagues and Madi Johnson.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
# ----------------------------------------------------------------------
# Students: As you work each of these problems, ask yourself:
# 1. Do I need a loop?
# If so, HOW MANY LOOPS?
#
# 2. Where I need a loop, what needs to happen:
# -- BEFORE the loop?
# -- IN the loop?
# -- AFTER the loop?
# ----------------------------------------------------------------------
def main():
    """ Calls the TEST functions in this module. """
    # Each run_test_* opens RoseWindow(s) and blocks until the user clicks
    # (close_on_mouse_click), so the two suites run strictly one after the other.
    run_test_draw_parallel_lines()
    run_test_draw_lines()
def run_test_draw_parallel_lines():
    """ Tests the draw_parallel_lines function. """
    print()
    print('--------------------------------------------------')
    print('Testing the draw_parallel_lines function:')
    print('  See the graphics windows that pop up.')
    print('--------------------------------------------------')

    # Tests 1 and 2 share a single window.
    title = ('Tests 1 and 2 of DRAW_PARALLEL_LINES:'
             ' 4 long lines, 7 short lines')
    shared_window = rg.RoseWindow(600, 350, title)

    # Test 1: 7 short lines starting at (400, 50).
    draw_parallel_lines(7, rg.Point(400, 50), 100, shared_window)

    # Test 2: 4 long lines starting at (50, 200).
    draw_parallel_lines(4, rg.Point(50, 200), 300, shared_window)

    shared_window.close_on_mouse_click()

    # Test 3 runs on its own window.
    solo_window = rg.RoseWindow(
        500, 400, 'Test 3 of DRAW_PARALLEL_LINES: 12 very long lines!')
    draw_parallel_lines(12, rg.Point(20, 20), 470, solo_window)
    solo_window.close_on_mouse_click()
def draw_parallel_lines(n, point, length, window):
    """
    What comes in:  The four arguments are:
      -- A positive integer n.
      -- An rg.Point.
      -- A positive integer length.
      -- An rg.RoseWindow.
    What goes out:  Nothing (i.e., None).
    Side effects:
      Draws n rg.Lines parallel to each other,
      all on the given rg.RoseWindow, such that:
        -- The first rg.Line has its left-most end at the given rg.Point.
        -- Each rg.Line is a horizontal line
             (i.e., parallel to the x-axis).
        -- Each rg.Line has the given length.
        -- Each rg.Line is 30 pixels below the previous rg.Line.
      Must ** render ** but ** NOT close ** the window.

    Type hints:
      :type n: int
      :type point: rg.Point
      :type length: int
      :type window: rg.RoseWindow
    """
    x = point.x                  # left-end x, constant for every line
    y = point.y                  # y of the current line; accumulates +30 each pass
    right_x = point.x + length   # right-end x, constant for every line
    # BUG FIX: the original loop used range(n + 1) and therefore drew
    # n + 1 lines instead of the n lines that the specification requires.
    for _ in range(n):
        line = rg.Line(rg.Point(x, y), rg.Point(right_x, y))
        line.attach_to(window)
        y = y + 30
    window.render()
def run_test_draw_lines():
    """ Tests the draw_lines function. """
    print()
    print('--------------------------------------------------')
    print('Testing the draw_lines function:')
    print('  See the graphics windows that pop up.')
    print('--------------------------------------------------')

    # Tests 1 and 2 share one window.
    first_window = rg.RoseWindow(
        350, 400, 'Tests 1 & 2 of DRAW_LINES: 4 lines, 12 lines!')
    draw_lines(4, rg.Point(20, 120), first_window)
    draw_lines(12, rg.Point(150, 230), first_window)
    first_window.close_on_mouse_click()

    # Test 3 gets its own window.
    second_window = rg.RoseWindow(350, 300, 'Test 3 of DRAW_LINES: 7 lines!')
    draw_lines(7, rg.Point(50, 120), second_window)
    second_window.close_on_mouse_click()
def draw_lines(n, point, window):
    """
    Draws n rg.Lines on the given rg.RoseWindow such that:
      -- The leftmost point of each rg.Line is the given rg.Point.
      -- The rightmost x-coordinate of each line is (pX + 100),
           where pX is the x-coordinate of the given rg.Point.
      -- The rightmost y-coordinates vary evenly from (pY - 100)
           to (pY + 100), where pY is the given rg.Point's y-coordinate.
    Must ** render ** but ** NOT close ** the window.

    Type hints:
      :type n: int        (at least 2)
      :type point: rg.Point
      :type window: rg.RoseWindow
    """
    left_x = point.x
    left_y = point.y
    right_y = point.y - 100          # rightmost y of the first line
    step = 200.00 / (n - 1)          # even spacing between successive right ends
    for _ in range(n):
        segment = rg.Line(rg.Point(left_x, left_y),
                          rg.Point(left_x + 100, right_y))
        segment.attach_to(window)
        right_y = right_y + step     # accumulate toward (pY + 100)
    window.render()
# ----------------------------------------------------------------------
# Calls  main  to start the ball rolling.
# ----------------------------------------------------------------------
# NOTE: this runs at import time; the module is a script, not a library.
main()
| 36.975845 | 74 | 0.510191 |
b8a5dc061b981a1e797f249b44ef9e821cdf468f | 12,368 | py | Python | jtyoui/tools/times.py | vanton/Jtyoui | c44d66b038ac5f4e2d75b68b3493d02f7b7b385e | [
"MIT"
] | 1 | 2019-12-24T00:57:47.000Z | 2019-12-24T00:57:47.000Z | jtyoui/tools/times.py | liangxioa/Jtyoui | 5a584cbf12d644b6c4fb13167d8841a383afbbac | [
"MIT"
] | null | null | null | jtyoui/tools/times.py | liangxioa/Jtyoui | 5a584cbf12d644b6c4fb13167d8841a383afbbac | [
"MIT"
] | null | null | null | #!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2019/4/24 17:29
# @Author: Jtyoui@qq.com
from jtyoui.data import chinese_mon_number, add_time
from jtyoui.decorators import warns
import re
import datetime
import time
import itertools
import copy
import calendar
class StringTime:
    """Parse Chinese natural-language time expressions into timestamps.

    >>> st = StringTime('二零零七年十月三十一号下午2点半')
    >>> print(st.find_times())
    """

    def __init__(self, sentence, date_str=None, date_format='%Y-%m-%d %H:%M:%S'):
        """Accept a time-expression string and a reference "now" time.

        :param sentence: the time-expression string to parse
        :param date_str: the reference current time; defaults to now if omitted
        :param date_format: the strftime/strptime format of ``date_str``
        """
        self._sentence = sentence
        self._localtime = date_str if date_str else time.strftime(date_format)
        self.format = date_format
        self.local = time.strptime(self._localtime, self.format)
        # Regexes for year / month / day / week / hour / minute / second tokens.
        self.re_year = r'(今年)|(明年)|(后年)|(昨年)|(前年)|(去年)|(\d+年)'
        self.re_mon = r'(上个?月)|(这个?月)|(下个?月)|(\d{0,2}本?月底?)|(\d*个?月以?后)'
        self.re_day = r'(今天)|(明天)|(后天)|(昨天)|(前天)|(\d+日)|(\d+号)|(\d*天\w?[后前])'
        self.re_week = r'(上个?周)|(下个?周)|(星期日)|(星期天)|(星期\d+)|(周\d+)'
        self.re_hour = r'(早上)|(下午)|(晚上)|(\d+点)'
        self.re_min = r'(\d+分)|(\d+点半)'
        self.re_sec = r'(\d+秒)'
        # "Now" components derived from the reference time.
        self.now_year = self.local.tm_year
        self.now_mon = self.local.tm_mon
        self.now_day = self.local.tm_mday
        self.now_week = self.local.tm_wday + 1  # ISO-style: Monday == 1
        # Copy so popping '十' does not mutate the shared mapping.
        self.chinese_numerals = copy.deepcopy(chinese_mon_number)
        self.chinese_numerals.pop('十')  # '十' (ten) needs context-sensitive handling
        self.add_time = add_time  # word -> day-offset table (e.g. 明天 -> +1)
        self.times = set()  # accumulates formatted results across clauses

    @property
    def sentence(self):
        return self._sentence

    @sentence.setter
    def sentence(self, sentence):
        self._sentence = sentence

    def adds(self, x, fmt):
        # Shift the reference time by x days, update the cached "now"
        # components, and return the shifted time formatted with fmt.
        add = datetime.datetime.strptime(self._localtime, self.format) + datetime.timedelta(days=x)
        self.now_year = add.year
        self.now_mon = add.month
        self.now_day = add.day
        self.now_week = add.isoweekday()
        return add.strftime(fmt)

    def find(self, name):
        """Find date components by kind.

        :param name: one of 年 (year), 月 (month), 日/号 (day), 周 (week)
        """
        if name == '年':
            flag = '%Y'
            re_ = self.re_year
        elif name == '月':
            flag = '%m'
            re_ = self.re_mon
        elif name == '日' or name == '号':
            flag = '%d'
            re_ = self.re_day
        elif name == '周':
            flag = '%d'
            re_ = self.re_week
        else:
            flag = None
            re_ = ''
        date_time, day, add = [], 0, 0
        for d in re.findall(re_, self.sentence):
            for i in d:
                if i:
                    if i in ['星期日', '星期天']:
                        # Sunday: offset to the end of this week.
                        day = 7 - self.now_week
                    elif '星期' in i and i[-1].isdigit():
                        # 星期N: offset from today's weekday to weekday N.
                        week = int(i[-1])
                        day = week - self.now_week
                    elif '周' in i and len(i) < 3:  # e.g. 周三, 周四 (weekday shorthand)
                        if i[-1].isdigit():
                            week = int(i[-1])
                            day = week - self.now_week
                        else:
                            add = self.add_time[i]
                    else:
                        if i in self.add_time:
                            date_time.append(self.adds(self.add_time[i], flag))
                        elif re.search(r'\d{1,2}个?月以?后', i):
                            # "N months later": bump the cached month.
                            ds = int(i[0]) if i[0].isdigit() else int(i[0:2])
                            self.now_mon = self.now_mon + ds
                        elif name in i and '底' not in i:  # a plain unit, not 'xx月底'
                            if i[:-1].isdigit():
                                date_time.append(i[:-1])
                        elif '月底' in i:  # handle 'xx月底' (end of month)
                            if i[0] == '本':
                                days = calendar.monthrange(self.now_year, self.now_mon)[1]
                                date_time.append(self.now_mon)
                                self._sentence += f'{days}日'
                            elif i[0].isdigit():
                                days = calendar.monthrange(self.now_year, int(i[0]))[1]
                                date_time.append(int(i[0]))
                                self._sentence += f'{days}日'
                            else:  # neither a numbered month nor 本月; not handled yet
                                pass
                        elif add_time.get(i[1]):
                            if i[0].isdigit():
                                date_time.append(self.adds(int(i[0]), flag))
        if day != 0 or add != 0:
            # A weekday/relative-week token was seen; fold the offset in.
            if add == 0 and date_time:
                days = int(date_time[0]) + day
                date_time = [days]
            else:
                days = self.adds(day + add, flag)
                # NOTE(review): both branches append identically; the
                # comparison against now_day looks vestigial -- confirm.
                if int(days) >= self.now_day:
                    date_time.append(days)
                else:
                    date_time.append(days)
            return date_time, 1  # tuple flags "week-derived" to the caller
        return date_time if date_time else []

    def find_hour(self):
        """Find the matching hour tokens (早上/下午/晚上 adjust AM/PM)."""
        hours = []
        flag = 0
        for d in re.findall(self.re_hour, self.sentence):
            for i in d:
                if i:
                    if i == '早上':
                        flag = 0
                    elif i == '下午':
                        flag = 12
                    elif i == '晚上':
                        flag = 12
                    else:
                        if i[:-1].isdigit():
                            if int(i[:-1]) >= 12:
                                hours.append(int(i[:-1]))
                            else:
                                hours.append(int(i[:-1]) + flag)
                        else:
                            hours.append(0)
        return hours if hours else []

    def find_min(self):
        """Find the matching minute tokens ('半' means half past, 30)."""
        minute = []
        for d in re.findall(self.re_min, self.sentence):
            for i in d:
                if i:
                    if i[:-1].isdigit():
                        minute.append(int(i[:-1]))
                    elif '半' in i:
                        minute.append(30)
        return minute if minute else []

    def find_sec(self):
        """Find the matching second tokens."""
        second = []
        for d in re.findall(self.re_sec, self.sentence):
            if d:
                if d[:-1].isdigit():
                    second.append(d[:-1])
        return second if second else []

    @warns('该类已经废除、废除时间2019年11月1日(19.10.28版本),请将StringTime类换成ParseTime类使用', DeprecationWarning)
    def find_times(self):
        """Find all times expressed in the sentence.

        Deprecated (see the @warns message): use ParseTime instead.
        Returns a sorted list of formatted date/time strings.
        """
        # Split into clauses on punctuation, then each clause into range
        # endpoints on 到/至/- ("to"/"until").
        words = re.split(r'[,.,。、?!?!]', self.sentence)
        for sentences_ in words:
            if not sentences_:
                continue
            sentences = re.split(r'[到至-]', sentences_)
            # If only one endpoint carries an AM/PM marker, copy it to the rest.
            t = re.findall('早上|下午|晚上', sentences[0])
            if t and len(t) == 1:
                sentences = [_ if re.findall('早上|下午|晚上', _) else t[0] + _ for _ in sentences]
            flag_y, flag_m, flag_d = [], [], []  # carry y/m/d across range endpoints
            for sentence in sentences:
                # Translate Chinese numerals; trailing ' ' guards index+1 below.
                str_ = [self.chinese_numerals.get(s, s) for s in sentence] + [' ']
                string = ''
                if '十' in str_:
                    for index, c in enumerate(str_):  # disambiguate '十' (ten) by neighbors
                        if c == '十':
                            if str_[index - 1].isdigit() and str_[index + 1].isdigit():  # e.g. 二十一 -> 21
                                c = ''
                            elif str_[index - 1].isdigit() and (not str_[index + 1].isdigit()):  # e.g. 二十 -> 20
                                c = '0'
                            elif not str_[index - 1].isdigit() and str_[index + 1].isdigit():  # e.g. 十三 -> 13
                                c = '1'
                            else:
                                c = '10'  # stand-alone 十 -> 10
                        string += c
                else:
                    string = ''.join(str_)
                if re.search('[上下]个?周[1-6日]', string):
                    string = string.replace('周', '周星期')
                self._sentence = string
                y = self.find('年')   # year tokens in this clause
                m = self.find('月')   # month tokens
                d = self.find('号')   # day tokens ('号')
                d = d + self.find('日')  # day tokens ('日')
                w = self.find('周')   # weekday tokens
                if isinstance(w, tuple):
                    # Week-derived result: w[1] may carry a month adjustment.
                    if m:
                        m[0] = int(m[0]) + w[1]
                    else:
                        m = [self.now_mon + w[1]]
                    d += d + w[0]
                else:
                    d += d + w
                h = self.find_hour()   # hours in this clause
                mi = self.find_min()   # minutes
                sec = self.find_sec()  # seconds
                if not (y or m or d or h or mi or sec):
                    continue
                # Inherit missing parts from the previous range endpoint.
                if not y:
                    y = flag_y
                if not m:
                    m = flag_m
                if not d:
                    d = flag_d
                if h and not d:
                    d = [self.now_day]  # bare time -> today
                flag_y, flag_m, flag_d = y, m, d
                for y_, m_, d_, h_, mi_, sec_ in itertools.zip_longest(y, m, d, h, mi, sec):
                    if not y_ and m_:
                        y_ = self.now_year
                    if not m_ and d_:
                        if not y_:
                            y_ = self.now_year
                        m_ = self.now_mon
                        # Normalize month overflow (e.g. month 13 -> +1 year).
                        # NOTE(review): divmod(12, 12) yields month 0 -- confirm
                        # December handling upstream.
                        add_y, add_m = divmod(m_, 12)
                        y_ += add_y
                        m_ = add_m
                    if not mi_:
                        mi_ = '00'
                    if not sec_:
                        sec_ = '00'
                    # Emit at the finest granularity available.
                    if not m_:
                        self.times.add(f'{y_}')
                    elif not d_:
                        self.times.add(f'{y_}-{m_:0>2}')
                    elif not h_:
                        self.times.add(f'{y_}-{m_:0>2}-{d_:0>2}')
                    else:
                        self.times.add(f'{y_}-{m_:0>2}-{d_:0>2} {h_:0>2}:{mi_:0>2}:{sec_:0>2}')
                    break  # only the first zipped tuple per clause is used
        times = sorted(self.times)
        self.times.clear()
        return times
if __name__ == '__main__':
    # Demo / manual smoke tests. The expected outputs in the inline comments
    # were apparently captured with "now" around 2019-07-02 -- they will
    # differ when run on another date.
    print('-----------------默认是当日期------------------')
    st = StringTime('二零零七年十月三十一号下午2点半')
    print(st.find_times())  # ['2007-10-31 14:30:00']
    st.sentence = '下周星期一下午15点半开会'
    print(st.find_times())  # ['2019-07-08 15:30:00']
    print('-----------------切换日期------------------')
    st = StringTime('下周星期一下午2点半开会', '2019-4-17 00:00:00')
    print(st.find_times())  # ['2019-04-22 14:30:00']
    print('----------------多个时间-------------------')
    st = StringTime('今天下午3点开会到4点整到12楼大会议室开会。')
    print(st.find_times())  # ['2019-07-02 15:00:00', '2019-07-02 16:00:00']
    print('----------------没有时间-------------------')
    st = StringTime('我要是不传时间呢?')
    print(st.find_times())  # []
    print('---------------只有天数--------------------')
    st = StringTime('今天去北京,明天去哪里?')
    print(st.find_times())  # ['2019-07-02', '2019-07-03']
    print('---------------跳断日期--------------------')
    st = StringTime('下周星期一下午2点半到4点开会')
    print(st.find_times())  # ['2019-07-08 14:30:00', '2019-07-08 16:00:00']
    print('---------------非常间断日期--------------------')
    st = StringTime('明天下午2点半一直到下周星期五下午4点开会')
    print(st.find_times())  # ['2019-07-03 14:30:00', '2019-07-12 16:00:00']
    print('---------------没有日期或者天数--------------------')
    st = StringTime('下午2点半开会')
    print(st.find_times())  # ['2019-07-03 14:30:00']
    print('---------------*几个月以后--------------------')
    st = StringTime('请王鹏宇下个月1号下午3点上交财务报表')
    print(st.find_times())  # ['2019-08-01 15:00:00']
    print('--------------几天之后--------------')
    st = StringTime('三天之后下午3点开会')
    print(st.find_times())  # ['2019-07-08 15:00:00']
    print('--------------几月底--------------')
    st = StringTime('明年的2月底之前必须交报告,本月底吃饭')
    print(st.find_times())  # ['2019-07-31', '2020-02-28']
    print('--------晚上-----------')
    st = StringTime('晚上11点20分')
    print(st.find_times())
    print('--------下个周几-----------')
    st = StringTime('下个周2')
    print(st.find_times())
    print('--------几个月以后的日期--------')
    st = StringTime('5个月后的明天')
    print(st.find_times())
| 37.93865 | 116 | 0.420925 |
f033c648c4fbc044e886a6be4d140ca0d3a738ce | 39 | py | Python | hello_universe/start.py | jayanthvarma134/hello-universe | ab5453731471c172f41ce63c99487cb05faab998 | [
"MIT"
] | null | null | null | hello_universe/start.py | jayanthvarma134/hello-universe | ab5453731471c172f41ce63c99487cb05faab998 | [
"MIT"
] | null | null | null | hello_universe/start.py | jayanthvarma134/hello-universe | ab5453731471c172f41ce63c99487cb05faab998 | [
"MIT"
] | null | null | null | def call():
print("Hello Universe") | 19.5 | 27 | 0.641026 |
4d60cc4f28d12171f6bd467a5e3443020f407089 | 1,832 | py | Python | treasureHunt/models.py | team-den-treasure-island/BackEnd | 16c69cddc7863c6ae1d2b8c24d4186ba1f5759ce | [
"MIT"
] | null | null | null | treasureHunt/models.py | team-den-treasure-island/BackEnd | 16c69cddc7863c6ae1d2b8c24d4186ba1f5759ce | [
"MIT"
] | 10 | 2019-12-04T23:54:00.000Z | 2022-02-10T10:00:24.000Z | treasureHunt/models.py | team-den-treasure-island/BackEnd | 16c69cddc7863c6ae1d2b8c24d4186ba1f5759ce | [
"MIT"
] | null | null | null | from django.db import models
from uuid import uuid4
# from django.contrib.auth.models import User
# Create your models here.
class Player(models.Model):
    """A treasure-hunt participant and their current game state."""
    # Random UUID primary key: ids are non-sequential and not guessable.
    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    # Unique display name for the player.
    name = models.CharField(max_length=255, editable=True, unique=True)
    # Room identifier the player currently occupies (matches Room.room_id
    # presumably -- confirm against the game-server importer).
    current_room = models.IntegerField(default=0)
    # Time until the player may act again; units look like seconds with
    # two decimal places -- TODO confirm.
    cooldown = models.DecimalField(max_digits=10, decimal_places=2, default=0)
    explore_mode = models.BooleanField(default=False)
    encumbrance = models.IntegerField(default=0)
    speed = models.IntegerField(default=0)
    gold = models.IntegerField(default=0)
    # Bookkeeping timestamps maintained automatically by Django.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.name
class Room(models.Model):
    """A single room of the maze."""
    # Local random UUID primary key; room_id is the external room number.
    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    room_id = models.IntegerField()
    # Map data; nullable because it may be unknown until explored.
    coord_x = models.SmallIntegerField(blank=True, null=True, default=None)
    coord_y = models.SmallIntegerField(blank=True, null=True, default=None)
    elevation = models.IntegerField(blank=True, null=True, default=None)
    terrain = models.CharField(max_length=255, blank=True, null=True, default=None)
    # Neighbor in each compass direction; presumably the neighbor's room_id,
    # null when there is no exit -- confirm against the map importer.
    n_to = models.IntegerField(default=None, blank=True, null=True)
    s_to = models.IntegerField(default=None, blank=True, null=True)
    e_to = models.IntegerField(default=None, blank=True, null=True)
    w_to = models.IntegerField(default=None, blank=True, null=True)
    description = models.TextField(default=None, blank=True, null=True)
    title = models.CharField(max_length=255, default=None, blank=True, null=True)
    # Bookkeeping timestamps maintained automatically by Django.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return "Room: " + str(self.room_id)
| 42.604651 | 83 | 0.742904 |
b678c9ae7a4e02e402e1d89d3d8ae262dce2811d | 12,733 | py | Python | dask/dataframe/io/parquet/utils.py | xavi-ai/dask | 5f335e9c383d54bc8f376a8cb153171e1f905e65 | [
"BSD-3-Clause"
] | null | null | null | dask/dataframe/io/parquet/utils.py | xavi-ai/dask | 5f335e9c383d54bc8f376a8cb153171e1f905e65 | [
"BSD-3-Clause"
] | null | null | null | dask/dataframe/io/parquet/utils.py | xavi-ai/dask | 5f335e9c383d54bc8f376a8cb153171e1f905e65 | [
"BSD-3-Clause"
] | null | null | null | import re
from ....compatibility import string_types
class Engine:
    """ The API necessary to provide a new Parquet reader/writer """

    @staticmethod
    def read_metadata(
        fs,
        paths,
        categories=None,
        index=None,
        gather_statistics=None,
        filters=None,
        **kwargs
    ):
        """ Gather metadata about a Parquet Dataset to prepare for a read

        This function is called once in the user's Python session to gather
        important metadata about the parquet dataset.

        Parameters
        ----------
        fs: FileSystem
        paths: List[str]
            A list of paths to files (or their equivalents)
        categories: list, dict or None
            Column(s) containing categorical data.
        index: str, List[str], or False
            The column name(s) to be used as the index.
            If set to ``None``, pandas metadata (if available) can be used
            to reset the value in this function
        gather_statistics: bool
            Whether or not to gather statistics data. If ``None``, we only
            gather statistics data if there is a _metadata file available to
            query (cheaply)
        filters: list
            List of filters to apply, like ``[('x', '>', 0), ...]``.
        **kwargs: dict (of dicts)
            User-specified arguments to pass on to backend.
            Top level key can be used by engine to select appropriate dict.

        Returns
        -------
        meta: pandas.DataFrame
            An empty DataFrame object to use for metadata.
            Should have appropriate column names and dtypes but need not have
            any actual data
        statistics: Optional[List[Dict]]
            Either None, if no statistics were found, or a list of dictionaries
            of statistics data, one dict for every partition (see the next
            return value). The statistics should look like the following:

            [
                {'num-rows': 1000, 'columns': [
                    {'name': 'id', 'min': 0, 'max': 100, 'null-count': 0},
                    {'name': 'x', 'min': 0.0, 'max': 1.0, 'null-count': 5},
                ]},
                ...
            ]
        parts: List[object]
            A list of objects to be passed to ``Engine.read_partition``.
            Each object should represent a row group of data.
            The type of each object can be anything, as long as the
            engine's read_partition function knows how to interpret it.
        """
        raise NotImplementedError()

    @staticmethod
    def read_partition(fs, piece, columns, index, **kwargs):
        """ Read a single piece of a Parquet dataset into a Pandas DataFrame

        This function is called many times in individual tasks

        Parameters
        ----------
        fs: FileSystem
        piece: object
            This is some token that is returned by Engine.read_metadata.
            Typically it represents a row group in a Parquet dataset
        columns: List[str]
            List of column names to pull out of that row group
        index: str, List[str], or False
            The index name(s).
        **kwargs:
            Includes `"kwargs"` values stored within the `parts` output
            of `engine.read_metadata`. May also include arguments to be
            passed to the backend (if stored under a top-level `"read"` key).

        Returns
        -------
        A Pandas DataFrame
        """
        raise NotImplementedError()

    @staticmethod
    def initialize_write(
        df,
        fs,
        path,
        append=False,
        partition_on=None,
        ignore_divisions=False,
        division_info=None,
        **kwargs
    ):
        """Perform engine-specific initialization steps for this dataset

        Parameters
        ----------
        df: dask.dataframe.DataFrame
        fs: FileSystem
        path: str
            Destination directory for data. Prepend with protocol like ``s3://``
            or ``hdfs://`` for remote data.
        append: bool
            If True, may use existing metadata (if any) and perform checks
            against the new data being stored.
        partition_on: List(str)
            Column(s) to use for dataset partitioning in parquet.
        ignore_divisions: bool
            Whether or not to ignore old divisions when appending. Otherwise,
            overlapping divisions will lead to an error being raised.
        division_info: dict
            Dictionary containing the divisions and corresponding column name.
        **kwargs: dict
            Other keyword arguments (including `index_cols`)

        Returns
        -------
        tuple:
            engine-specific instance
            list of filenames, one per partition
        """
        # Consistency fix: raise the call form ``NotImplementedError()`` like
        # the other stubs (this one previously raised the bare class).
        raise NotImplementedError()

    @staticmethod
    def write_partition(
        df, path, fs, filename, partition_on, return_metadata, **kwargs
    ):
        """
        Output a partition of a dask.DataFrame. This will correspond to
        one output file, unless partition_on is set, in which case, it will
        correspond to up to one file in each sub-directory.

        Parameters
        ----------
        df: dask.dataframe.DataFrame
        path: str
            Destination directory for data. Prepend with protocol like ``s3://``
            or ``hdfs://`` for remote data.
        fs: FileSystem
        filename: str
        partition_on: List(str)
            Column(s) to use for dataset partitioning in parquet.
        return_metadata : bool
            Whether to return list of instances from this write, one for each
            output file. These will be passed to write_metadata if an output
            metadata file is requested.
        **kwargs: dict
            Other keyword arguments (including `fmd` and `index_cols`)

        Returns
        -------
        List of metadata-containing instances (if `return_metadata` is `True`)
        or empty list
        """
        # Consistency fix: raise the call form ``NotImplementedError()``.
        raise NotImplementedError()

    @staticmethod
    def write_metadata(parts, meta, fs, path, append=False, **kwargs):
        """
        Write the shared metadata file for a parquet dataset.

        Parameters
        ----------
        parts: List
            Contains metadata objects to write, of the type understood by the
            specific implementation
        meta: non-chunk metadata
            Details that do not depend on the specifics of each chunk write,
            typically the schema and pandas metadata, in a format the writer
            can use.
        fs: FileSystem
        path: str
            Output file to write to, usually ``"_metadata"`` in the root of
            the output dataset
        append: boolean
            Whether or not to consolidate new metadata with existing (True)
            or start from scratch (False)
        **kwargs: dict
            Other keyword arguments (including `compression`)
        """
        raise NotImplementedError()
def _parse_pandas_metadata(pandas_metadata):
"""Get the set of names from the pandas metadata section
Parameters
----------
pandas_metadata : dict
Should conform to the pandas parquet metadata spec
Returns
-------
index_names : list
List of strings indicating the actual index names
column_names : list
List of strings indicating the actual column names
storage_name_mapping : dict
Pairs of storage names (e.g. the field names for
PyArrow) and actual names. The storage and field names will
differ for index names for certain writers (pyarrow > 0.8).
column_indexes_names : list
The names for ``df.columns.name`` or ``df.columns.names`` for
a MultiIndex in the columns
Notes
-----
This should support metadata written by at least
* fastparquet>=0.1.3
* pyarrow>=0.7.0
"""
index_storage_names = [
n["name"] if isinstance(n, dict) else n
for n in pandas_metadata["index_columns"]
]
index_name_xpr = re.compile(r"__index_level_\d+__")
# older metadatas will not have a 'field_name' field so we fall back
# to the 'name' field
pairs = [
(x.get("field_name", x["name"]), x["name"]) for x in pandas_metadata["columns"]
]
# Need to reconcile storage and real names. These will differ for
# pyarrow, which uses __index_leveL_d__ for the storage name of indexes.
# The real name may be None (e.g. `df.index.name` is None).
pairs2 = []
for storage_name, real_name in pairs:
if real_name and index_name_xpr.match(real_name):
real_name = None
pairs2.append((storage_name, real_name))
index_names = [name for (storage_name, name) in pairs2 if name != storage_name]
# column_indexes represents df.columns.name
# It was added to the spec after pandas 0.21.0+, and implemented
# in PyArrow 0.8. It was added to fastparquet in 0.3.1.
column_index_names = pandas_metadata.get("column_indexes", [{"name": None}])
column_index_names = [x["name"] for x in column_index_names]
# Now we need to disambiguate between columns and index names. PyArrow
# 0.8.0+ allows for duplicates between df.index.names and df.columns
if not index_names:
# For PyArrow < 0.8, Any fastparquet. This relies on the facts that
# 1. Those versions used the real index name as the index storage name
# 2. Those versions did not allow for duplicate index / column names
# So we know that if a name is in index_storage_names, it must be an
# index name
if index_storage_names and isinstance(index_storage_names[0], dict):
# Cannot handle dictionary case
index_storage_names = []
index_names = list(index_storage_names) # make a copy
index_storage_names2 = set(index_storage_names)
column_names = [
name for (storage_name, name) in pairs if name not in index_storage_names2
]
else:
# For newer PyArrows the storage names differ from the index names
# iff it's an index level. Though this is a fragile assumption for
# other systems...
column_names = [name for (storage_name, name) in pairs2 if name == storage_name]
storage_name_mapping = dict(pairs2) # TODO: handle duplicates gracefully
return index_names, column_names, storage_name_mapping, column_index_names
def _normalize_index_columns(user_columns, data_columns, user_index, data_index):
"""Normalize user and file-provided column and index names
Parameters
----------
user_columns : None, str or list of str
data_columns : list of str
user_index : None, str, or list of str
data_index : list of str
Returns
-------
column_names : list of str
index_names : list of str
"""
specified_columns = user_columns is not None
specified_index = user_index is not None
if user_columns is None:
user_columns = list(data_columns)
elif isinstance(user_columns, string_types):
user_columns = [user_columns]
else:
user_columns = list(user_columns)
if user_index is None:
user_index = data_index
elif user_index is False:
# When index is False, use no index and all fields should be treated as
# columns (unless `columns` provided).
user_index = []
data_columns = data_index + data_columns
elif isinstance(user_index, string_types):
user_index = [user_index]
else:
user_index = list(user_index)
if specified_index and not specified_columns:
# Only `index` provided. Use specified index, and all column fields
# that weren't specified as indices
index_names = user_index
column_names = [x for x in data_columns if x not in index_names]
elif specified_columns and not specified_index:
# Only `columns` provided. Use specified columns, and all index fields
# that weren't specified as columns
column_names = user_columns
index_names = [x for x in data_index if x not in column_names]
elif specified_index and specified_columns:
# Both `index` and `columns` provided. Use as specified, but error if
# they intersect.
column_names = user_columns
index_names = user_index
if set(column_names).intersection(index_names):
raise ValueError("Specified index and column names must not intersect")
else:
# Use default columns and index from the metadata
column_names = data_columns
index_names = data_index
return column_names, index_names
| 37.230994 | 88 | 0.623419 |
3140f6e4cd43386ee15cc504947417aa9a73c63d | 6,031 | py | Python | hierarchical_foresight/env/environment.py | kiss2u/google-research | 2cd66234656f9e2f4218ed90a2d8aa9cf3139093 | [
"Apache-2.0"
] | 7 | 2020-03-15T12:14:07.000Z | 2021-12-01T07:01:09.000Z | hierarchical_foresight/env/environment.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | [
"Apache-2.0"
] | 25 | 2020-07-25T08:53:09.000Z | 2022-03-12T00:43:02.000Z | hierarchical_foresight/env/environment.py | Alfaxad/google-research | 2c0043ecd507e75e2df9973a3015daf9253e1467 | [
"Apache-2.0"
] | 4 | 2021-02-08T10:25:45.000Z | 2021-04-17T14:46:26.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment wrapper around the maze navigation environment.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from . import simple_maze
import cv2
import numpy as np
class Environment(object):
"""Wrapper around the Simple maze environment."""
  def __init__(self, difficulty=None):
    """Initialize the environment with the specified difficulty.

    Args:
      difficulty: forwarded to simple_maze.navigate; elsewhere in this class
        it is compared against 'e', 'm' and 'h' (easy/medium/hard,
        presumably -- confirm with simple_maze).
    """
    self.difficulty = difficulty
    self._sim_env = simple_maze.navigate(difficulty=difficulty)
    # Reset to 0 here and in reset(); presumably incremented by step(),
    # which is not visible in this chunk -- confirm.
    self.stepcount = 0
def reset(self):
"""Resets the environment."""
self.stepcount = 0
time_step = self._sim_env.reset()
return time_step
  def get_goal_im(self):
    """Computes and returns the goal image.

    Teleports the agent onto the target, renders an observation, then
    restores the original simulator state, so the call is side-effect
    free from the caller's point of view.
    """
    # Save the current generalized positions/velocities for restoration.
    currp = copy.deepcopy(self._sim_env.physics.data.qpos[:])
    currv = copy.deepcopy(self._sim_env.physics.data.qvel[:])
    # Keep the task from resetting while we move the agent around.
    self._sim_env.task.dontreset = True
    # Target's (x, y) world position.
    tg = copy.deepcopy(self._sim_env.physics.named.data.geom_xpos['target'][:2])
    # Place the agent exactly at the target with zero velocity. The
    # assign/step pair is done twice -- NOTE(review): presumably needed
    # for the physics/observation to settle; confirm both are required.
    self._sim_env.physics.data.qpos[:] = tg
    self._sim_env.physics.data.qvel[:] = 0
    self.step([0, 0])
    self._sim_env.physics.data.qpos[:] = tg
    self._sim_env.physics.data.qvel[:] = 0
    self.step([0, 0])
    _, gim = self.get_observation()
    # Restore the saved state and re-enable normal task resets.
    self._sim_env.physics.data.qpos[:] = currp
    self._sim_env.physics.data.qvel[:] = currv
    self.step([0, 0])
    self._sim_env.task.dontreset = False
    return gim
def get_subgoal_ims(self, numg):
"""Computes and returs the ground truth sub goal images."""
currp = copy.deepcopy(self._sim_env.physics.data.qpos[:])
currv = copy.deepcopy(self._sim_env.physics.data.qvel[:])
self._sim_env.task.dontreset = True
tg = copy.deepcopy(self._sim_env.physics.named.data.geom_xpos['target'][:2])
sg = []
if self.difficulty == 'e':
if numg == 1:
self._sim_env.physics.data.qpos[:] = currp + (tg - currp) / 2
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
elif numg == 2:
self._sim_env.physics.data.qpos[:] = currp + (tg - currp) / 3
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
self._sim_env.physics.data.qpos[:] = currp + 2 * (tg - currp) / 3
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
elif self.difficulty == 'm':
if numg == 1:
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall2A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall2A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
elif numg == 2:
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall2A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall2A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall2A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall2A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
elif self.difficulty == 'h':
if numg == 1:
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall1A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall1A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
elif numg == 2:
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall1A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall1A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
self._sim_env.physics.data.qpos[:] = [
self._sim_env.physics.named.model.geom_pos['wall2A', 'x'],
self._sim_env.physics.named.model.geom_pos['wall2A', 'y'] - 0.25]
self._sim_env.physics.data.qvel[:] = 0
self.step([0, 0])
_, gim = self.get_observation()
sg.append(gim)
sg = np.array(sg)
self._sim_env.physics.data.qpos[:] = currp
self._sim_env.physics.data.qvel[:] = currv
self.step([0, 0])
self._sim_env.task.dontreset = False
return sg
def is_goal(self):
"""Checks if the current state is a goal state."""
return self._sim_env.task.is_goal(self._sim_env.physics)
def step(self, action=None):
"""Steps the environment."""
time_step = self._sim_env.step(action)
self._sim_env.physics.data.qvel[:] = 0
return time_step
def get_observation(self):
"""Return image observation."""
obs = self._sim_env.task.get_observation(self._sim_env.physics)
im = self._sim_env.physics.render(256, 256, camera_id='fixed')
im = cv2.resize(im, (64, 64), interpolation=cv2.INTER_LANCZOS4)
return obs, im
| 37.228395 | 80 | 0.636213 |
4669f7f40058c12832d206cb76319f9fb59c3a8f | 159 | py | Python | python/mzcloud/models/schema_retrieve_format.py | benesch/cloud-sdks | 21e69b8eacc74d64131fc4d5d543ff0d889c87e4 | [
"Apache-2.0"
] | null | null | null | python/mzcloud/models/schema_retrieve_format.py | benesch/cloud-sdks | 21e69b8eacc74d64131fc4d5d543ff0d889c87e4 | [
"Apache-2.0"
] | null | null | null | python/mzcloud/models/schema_retrieve_format.py | benesch/cloud-sdks | 21e69b8eacc74d64131fc4d5d543ff0d889c87e4 | [
"Apache-2.0"
] | null | null | null | from enum import Enum
class SchemaRetrieveFormat(str, Enum):
    """Serialization formats accepted by the schema retrieve endpoint."""

    JSON = "json"
    YAML = "yaml"

    def __str__(self) -> str:
        # Members are str subclasses; render as the raw wire value.
        return self.value
8c5b90e088a5b4702cee7786e7bd897243d4d56c | 124,201 | py | Python | musicbot/bot.py | DeadParticles/fuchsupportbotsystem | 7880a3d52c74a4eebcac19edfe24db6115c9bf98 | [
"MIT"
] | null | null | null | musicbot/bot.py | DeadParticles/fuchsupportbotsystem | 7880a3d52c74a4eebcac19edfe24db6115c9bf98 | [
"MIT"
] | null | null | null | musicbot/bot.py | DeadParticles/fuchsupportbotsystem | 7880a3d52c74a4eebcac19edfe24db6115c9bf98 | [
"MIT"
] | null | null | null | import os
import sys
import time
import shlex
import shutil
import random
import inspect
import logging
import asyncio
import pathlib
import traceback
import math
import re
import aiohttp
import discord
import colorlog
from io import BytesIO, StringIO
from functools import wraps
from textwrap import dedent
from datetime import timedelta
from collections import defaultdict
from discord.enums import ChannelType
from . import exceptions
from . import downloader
from .playlist import Playlist
from .player import MusicPlayer
from .entry import StreamPlaylistEntry
from .opus_loader import load_opus_lib
from .config import Config, ConfigDefaults
from .permissions import Permissions, PermissionsDefaults
from .constructs import SkipState, Response
from .utils import load_file, write_file, fixg, ftimedelta, _func_, _get_variable
from .spotify import Spotify
from .json import Json
from .constants import VERSION as BOTVERSION
from .constants import DISCORD_MSG_CHAR_LIMIT, AUDIO_CACHE_PATH
load_opus_lib()
log = logging.getLogger(__name__)
class MusicBot(discord.Client):
    def __init__(self, config_file=None, perms_file=None):
        """Load config, permissions, blacklist and autoplaylist state; set up logging and the HTTP session."""
        try:
            # Set the console window title (best-effort; fails on some terminals).
            sys.stdout.write("\x1b]2;MusicBot {}\x07".format(BOTVERSION))
        except:
            pass

        print()

        if config_file is None:
            config_file = ConfigDefaults.options_file

        if perms_file is None:
            perms_file = PermissionsDefaults.perms_file

        self.players = {}                 # guild.id -> MusicPlayer
        self.exit_signal = None           # RestartSignal/TerminateSignal, re-raised by run()
        self.init_ok = False              # set True once on_ready completes sanity checks
        self.cached_app_info = None
        self.last_status = None           # last presence sent, to avoid redundant updates

        self.config = Config(config_file)
        self.permissions = Permissions(perms_file, grant_all=[self.config.owner_id])
        self.str = Json(self.config.i18n_file)

        self.blacklist = set(load_file(self.config.blacklist_file))
        self.autoplaylist = load_file(self.config.auto_playlist_file)

        self.aiolocks = defaultdict(asyncio.Lock)
        self.downloader = downloader.Downloader(download_folder='audio_cache')

        self._setup_logging()

        log.info('Starting MusicBot {}'.format(BOTVERSION))

        if not self.autoplaylist:
            log.warning("Autoplaylist is empty, disabling.")
            self.config.auto_playlist = False
        else:
            log.info("Loaded autoplaylist with {} entries".format(len(self.autoplaylist)))

        if self.blacklist:
            log.debug("Loaded blacklist with {} entries".format(len(self.blacklist)))

        # TODO: Do these properly
        ssd_defaults = {
            'last_np_msg': None,
            'auto_paused': False,
            'availability_paused': False
        }
        self.server_specific_data = defaultdict(ssd_defaults.copy)

        super().__init__()
        self.aiosession = aiohttp.ClientSession(loop=self.loop)
        self.http.user_agent += ' MusicBot/%s' % BOTVERSION

        self.spotify = None
        if self.config._spotify:
            try:
                self.spotify = Spotify(self.config.spotify_clientid, self.config.spotify_clientsecret, aiosession=self.aiosession, loop=self.loop)
                if not self.spotify.token:
                    log.warning('Spotify did not provide us with a token. Disabling.')
                    self.config._spotify = False
                else:
                    log.info('Authenticated with Spotify successfully using client ID and secret.')
            except exceptions.SpotifyError as e:
                log.warning('There was a problem initialising the connection to Spotify. Is your client ID and secret correct? Details: {0}. Continuing anyway in 5 seconds...'.format(e))
                self.config._spotify = False
                time.sleep(5)  # make sure they see the problem
    def __del__(self):
        """Best-effort close of the HTTP session on garbage collection."""
        # These functions return futures but it doesn't matter
        try:
            self.http.session.close()
        except:
            pass
    # TODO: Add some sort of `denied` argument for a message to send when someone else tries to use it
    def owner_only(func):
        """Decorator: restrict a command coroutine to the configured owner."""
        @wraps(func)
        async def wrapper(self, *args, **kwargs):
            # Only allow the owner to use these commands
            orig_msg = _get_variable('message')

            if not orig_msg or orig_msg.author.id == self.config.owner_id:
                # noinspection PyCallingNonCallable
                return await func(self, *args, **kwargs)
            else:
                raise exceptions.PermissionsError("Only the owner can use this command.", expire_in=30)

        return wrapper
    def dev_only(func):
        """Decorator: restrict a command coroutine to users listed in config.dev_ids."""
        @wraps(func)
        async def wrapper(self, *args, **kwargs):
            orig_msg = _get_variable('message')

            if str(orig_msg.author.id) in self.config.dev_ids:
                # noinspection PyCallingNonCallable
                return await func(self, *args, **kwargs)
            else:
                raise exceptions.PermissionsError("Only dev users can use this command.", expire_in=30)

        # Marker used elsewhere to identify dev-only commands.
        wrapper.dev_cmd = True
        return wrapper
    def ensure_appinfo(func):
        """Decorator: populate cached application info before running `func`."""
        @wraps(func)
        async def wrapper(self, *args, **kwargs):
            await self._cache_app_info()
            # noinspection PyCallingNonCallable
            return await func(self, *args, **kwargs)

        return wrapper
def _get_owner(self, *, server=None, voice=False):
return discord.utils.find(
lambda m: m.id == self.config.owner_id and (m.voice if voice else True),
server.members if server else self.get_all_members()
)
def _delete_old_audiocache(self, path=AUDIO_CACHE_PATH):
try:
shutil.rmtree(path)
return True
except:
try:
os.rename(path, path + '__')
except:
return False
try:
shutil.rmtree(path)
except:
os.rename(path + '__', path)
return False
return True
    def _setup_logging(self):
        """Attach the colorized stdout handler (once per process) and, in debug mode, a discord.py file log."""
        if len(logging.getLogger(__package__).handlers) > 1:
            log.debug("Skipping logger setup, already set up")
            return

        shandler = logging.StreamHandler(stream=sys.stdout)
        shandler.setFormatter(colorlog.LevelFormatter(
            fmt = {
                'DEBUG': '{log_color}[{levelname}:{module}] {message}',
                'INFO': '{log_color}{message}',
                'WARNING': '{log_color}{levelname}: {message}',
                'ERROR': '{log_color}[{levelname}:{module}] {message}',
                'CRITICAL': '{log_color}[{levelname}:{module}] {message}',

                'EVERYTHING': '{log_color}[{levelname}:{module}] {message}',
                'NOISY': '{log_color}[{levelname}:{module}] {message}',
                'VOICEDEBUG': '{log_color}[{levelname}:{module}][{relativeCreated:.9f}] {message}',
                'FFMPEG': '{log_color}[{levelname}:{module}][{relativeCreated:.9f}] {message}'
            },
            log_colors = {
                'DEBUG': 'cyan',
                'INFO': 'white',
                'WARNING': 'yellow',
                'ERROR': 'red',
                'CRITICAL': 'bold_red',

                'EVERYTHING': 'white',
                'NOISY': 'white',
                'FFMPEG': 'bold_purple',
                'VOICEDEBUG': 'purple',
            },
            style = '{',
            datefmt = ''
        ))
        shandler.setLevel(self.config.debug_level)
        logging.getLogger(__package__).addHandler(shandler)

        log.debug("Set logging level to {}".format(self.config.debug_level_str))

        if self.config.debug_mode:
            dlogger = logging.getLogger('discord')
            dlogger.setLevel(logging.DEBUG)
            dhandler = logging.FileHandler(filename='logs/discord.log', encoding='utf-8', mode='w')
            dhandler.setFormatter(logging.Formatter('{asctime}:{levelname}:{name}: {message}', style='{'))
            dlogger.addHandler(dhandler)
@staticmethod
def _check_if_empty(vchannel: discord.abc.GuildChannel, *, excluding_me=True, excluding_deaf=False):
def check(member):
if excluding_me and member == vchannel.guild.me:
return False
if excluding_deaf and any([member.deaf, member.self_deaf]):
return False
return True
return not sum(1 for m in vchannel.members if check(m))
    async def _join_startup_channels(self, channels, *, autosummon=True):
        """Join the given voice channels on startup; prefer a resumable channel or the owner's channel per guild."""
        joined_servers = set()
        channel_map = {c.guild: c for c in channels}

        def _autopause(player):
            if self._check_if_empty(player.voice_client.channel):
                log.info("Initial autopause in empty channel")

                player.pause()
                self.server_specific_data[player.voice_client.channel.guild]['auto_paused'] = True

        for guild in self.guilds:
            if guild.unavailable or guild in channel_map:
                continue

            if guild.me.voice:
                log.info("Found resumable voice channel {0.guild.name}/{0.name}".format(guild.me.voice.channel))
                channel_map[guild] = guild.me.voice.channel

            if autosummon:
                owner = self._get_owner(server=guild, voice=True)
                if owner:
                    log.info("Found owner in \"{}\"".format(owner.voice.channel.name))
                    channel_map[guild] = owner.voice.channel

        for guild, channel in channel_map.items():
            if guild in joined_servers:
                log.info("Already joined a channel in \"{}\", skipping".format(guild.name))
                continue

            if channel and isinstance(channel, discord.VoiceChannel):
                log.info("Attempting to join {0.guild.name}/{0.name}".format(channel))

                chperms = channel.permissions_for(guild.me)

                if not chperms.connect:
                    log.info("Cannot join channel \"{}\", no permission.".format(channel.name))
                    continue

                elif not chperms.speak:
                    log.info("Will not join channel \"{}\", no permission to speak.".format(channel.name))
                    continue

                try:
                    player = await self.get_player(channel, create=True, deserialize=self.config.persistent_queue)
                    joined_servers.add(guild)

                    log.info("Joined {0.guild.name}/{0.name}".format(channel))

                    if player.is_stopped:
                        player.play()

                    if self.config.auto_playlist:
                        if self.config.auto_pause:
                            player.once('play', lambda player, **_: _autopause(player))
                        if not player.playlist.entries:
                            # Kick off autoplaylist population for an empty queue.
                            await self.on_player_finished_playing(player)

                except Exception:
                    log.debug("Error joining {0.guild.name}/{0.name}".format(channel), exc_info=True)
                    log.error("Failed to join {0.guild.name}/{0.name}".format(channel))

            elif channel:
                log.warning("Not joining {0.guild.name}/{0.name}, that's a text channel.".format(channel))

            else:
                log.warning("Invalid channel thing: {}".format(channel))
    async def _wait_delete_msg(self, message, after):
        """Delete `message` after `after` seconds."""
        await asyncio.sleep(after)
        await self.safe_delete_message(message, quiet=True)
    # TODO: Check to see if I can just move this to on_message after the response check
    async def _manual_delete_check(self, message, *, quiet=False):
        """Delete the invoking message when `delete_invoking` is enabled."""
        if self.config.delete_invoking:
            await self.safe_delete_message(message, quiet=quiet)
    async def _check_ignore_non_voice(self, msg):
        """Allow a command only if the author shares the bot's voice channel (or the bot is in none)."""
        # NOTE(review): if the bot has no voice state, `msg.guild.me.voice` is None and this
        # line raises AttributeError before the `not vc` check can run — confirm callers guard this.
        vc = msg.guild.me.voice.channel

        # If we've connected to a voice chat and we're in the same voice channel
        if not vc or vc == msg.author.voice.channel:
            return True
        else:
            raise exceptions.PermissionsError(
                "you cannot use this command when not in the voice channel (%s)" % vc.name, expire_in=30)
    async def _cache_app_info(self, *, update=False):
        """Fetch and cache the application info once (bot accounts only); returns the cached value."""
        if not self.cached_app_info and not update and self.user.bot:
            log.debug("Caching app info")
            self.cached_app_info = await self.application_info()

        return self.cached_app_info
    async def remove_from_autoplaylist(self, song_url:str, *, ex:Exception=None, delete_from_ap=False):
        """Remove `song_url` from the session autoplaylist, recording the reason in the 'removed' file.

        When `delete_from_ap` is True the on-disk autoplaylist file is rewritten too.
        """
        if song_url not in self.autoplaylist:
            log.debug("URL \"{}\" not in autoplaylist, ignoring".format(song_url))
            return

        async with self.aiolocks[_func_()]:
            self.autoplaylist.remove(song_url)
            log.info("Removing unplayable song from session autoplaylist: %s" % song_url)

            with open(self.config.auto_playlist_removed_file, 'a', encoding='utf8') as f:
                f.write(
                    '# Entry removed {ctime}\n'
                    '# Reason: {ex}\n'
                    '{url}\n\n{sep}\n\n'.format(
                        ctime=time.ctime(),
                        ex=str(ex).replace('\n', '\n#' + ' ' * 10),  # 10 spaces to line up with # Reason:
                        url=song_url,
                        sep='#' * 32
                    ))

            if delete_from_ap:
                log.info("Updating autoplaylist")
                write_file(self.config.auto_playlist_file, self.autoplaylist)
    @ensure_appinfo
    async def generate_invite_link(self, *, permissions=discord.Permissions(70380544), guild=None):
        """Build an OAuth2 invite URL for this bot with the given permission set."""
        return discord.utils.oauth_url(self.cached_app_info.id, permissions=permissions, guild=guild)
    async def get_voice_client(self, channel: discord.abc.GuildChannel):
        """Return the guild's existing voice client, or connect to `channel` and return the new one."""
        if isinstance(channel, discord.Object):
            channel = self.get_channel(channel.id)

        if not isinstance(channel, discord.VoiceChannel):
            raise AttributeError('Channel passed must be a voice channel')

        if channel.guild.voice_client:
            return channel.guild.voice_client
        else:
            return await channel.connect(timeout=60, reconnect=True)
    async def disconnect_voice_client(self, guild):
        """Disconnect the guild's voice client, killing its MusicPlayer first if one exists."""
        vc = self.voice_client_in(guild)
        if not vc:
            return

        if guild.id in self.players:
            self.players.pop(guild.id).kill()

        await vc.disconnect()
async def disconnect_all_voice_clients(self):
for vc in list(self.voice_clients).copy():
await self.disconnect_voice_client(vc.channel.guild)
    async def set_voice_state(self, vchannel, *, mute=False, deaf=False):
        """Set the bot's mute/deaf voice state in `vchannel` directly via the gateway websocket."""
        if isinstance(vchannel, discord.Object):
            vchannel = self.get_channel(vchannel.id)

        if getattr(vchannel, 'type', ChannelType.text) != ChannelType.voice:
            raise AttributeError('Channel passed must be a voice channel')

        await self.ws.voice_state(vchannel.guild.id, vchannel.id, mute, deaf)
        # I hope I don't have to set the channel here
        # instead of waiting for the event to update it
    def get_player_in(self, guild:discord.Guild) -> MusicPlayer:
        """Return the guild's MusicPlayer, or None if the bot has none there."""
        return self.players.get(guild.id)
    async def get_player(self, channel, create=False, *, deserialize=False) -> MusicPlayer:
        """Return the MusicPlayer for the channel's guild, optionally creating or deserializing one."""
        guild = channel.guild

        async with self.aiolocks[_func_() + ':' + str(guild.id)]:
            if deserialize:
                voice_client = await self.get_voice_client(channel)
                player = await self.deserialize_queue(guild, voice_client)

                if player:
                    log.debug("Created player via deserialization for guild %s with %s entries", guild.id, len(player.playlist))
                    # Since deserializing only happens when the bot starts, I should never need to reconnect
                    return self._init_player(player, guild=guild)

            if guild.id not in self.players:
                if not create:
                    raise exceptions.CommandError(
                        'The bot is not in a voice channel. '
                        'Use %ssummon to summon it to your voice channel.' % self.config.command_prefix)

                voice_client = await self.get_voice_client(channel)

                playlist = Playlist(self)
                player = MusicPlayer(self, voice_client, playlist)
                self._init_player(player, guild=guild)

            return self.players[guild.id]
    def _init_player(self, player, *, guild=None):
        """Wire player events to bot handlers and register the player for the guild."""
        player = player.on('play', self.on_player_play) \
                       .on('resume', self.on_player_resume) \
                       .on('pause', self.on_player_pause) \
                       .on('stop', self.on_player_stop) \
                       .on('finished-playing', self.on_player_finished_playing) \
                       .on('entry-added', self.on_player_entry_added) \
                       .on('error', self.on_player_error)

        player.skip_state = SkipState()

        if guild:
            self.players[guild.id] = player

        return player
    async def on_player_play(self, player, entry):
        """Handle a new entry starting: update presence, persist the queue, and manage the 'now playing' message."""
        log.debug('Running on_player_play')
        await self.update_now_playing_status(entry)
        player.skip_state.reset()

        # This is the one event where its ok to serialize autoplaylist entries
        await self.serialize_queue(player.voice_client.channel.guild)

        if self.config.write_current_song:
            await self.write_current_song(player.voice_client.channel.guild, entry)

        channel = entry.meta.get('channel', None)
        author = entry.meta.get('author', None)

        if channel and author:
            last_np_msg = self.server_specific_data[channel.guild]['last_np_msg']
            if last_np_msg and last_np_msg.channel == channel:

                async for lmsg in channel.history(limit=1):
                    # If the last np message is no longer the newest in the channel,
                    # delete it so the fresh one is sent instead of edited.
                    if lmsg != last_np_msg and last_np_msg:
                        await self.safe_delete_message(last_np_msg)
                        self.server_specific_data[channel.guild]['last_np_msg'] = None
                    break  # This is probably redundant

            author_perms = self.permissions.for_user(author)

            if author not in player.voice_client.channel.members and author_perms.skip_when_absent:
                newmsg = 'Skipping next song in `%s`: `%s` added by `%s` as queuer not in voice' % (
                    player.voice_client.channel.name, entry.title, entry.meta['author'].name)
                player.skip()
            elif self.config.now_playing_mentions:
                newmsg = '%s - your song `%s` is now playing in `%s`!' % (
                    entry.meta['author'].mention, entry.title, player.voice_client.channel.name)
            else:
                newmsg = 'Now playing in `%s`: `%s` added by `%s`' % (
                    player.voice_client.channel.name, entry.title, entry.meta['author'].name)

            if self.server_specific_data[channel.guild]['last_np_msg']:
                self.server_specific_data[channel.guild]['last_np_msg'] = await self.safe_edit_message(last_np_msg, newmsg, send_if_fail=True)
            else:
                self.server_specific_data[channel.guild]['last_np_msg'] = await self.safe_send_message(channel, newmsg)
    # TODO: Check channel voice state?
    async def on_player_resume(self, player, entry, **_):
        """Refresh presence when playback resumes."""
        log.debug('Running on_player_resume')
        await self.update_now_playing_status(entry)
    async def on_player_pause(self, player, entry, **_):
        """Refresh presence (with a paused marker) when playback pauses."""
        log.debug('Running on_player_pause')
        await self.update_now_playing_status(entry, True)
        # await self.serialize_queue(player.voice_client.channel.guild)
    async def on_player_stop(self, player, **_):
        """Clear the playing presence when the player stops."""
        log.debug('Running on_player_stop')
        await self.update_now_playing_status()
    async def on_player_finished_playing(self, player, **_):
        """When the queue runs dry, pull the next playable song from the autoplaylist; otherwise persist the queue."""
        log.debug('Running on_player_finished_playing')

        def _autopause(player):
            if self._check_if_empty(player.voice_client.channel):
                log.info("Player finished playing, autopaused in empty channel")

                player.pause()
                self.server_specific_data[player.voice_client.channel.guild]['auto_paused'] = True

        if not player.playlist.entries and not player.current_entry and self.config.auto_playlist:
            if not player.autoplaylist:
                if not self.autoplaylist:
                    # TODO: When I add playlist expansion, make sure that's not happening during this check
                    log.warning("No playable songs in the autoplaylist, disabling.")
                    self.config.auto_playlist = False
                else:
                    log.debug("No content in current autoplaylist. Filling with new music...")
                    player.autoplaylist = list(self.autoplaylist)

            while player.autoplaylist:
                if self.config.auto_playlist_random:
                    random.shuffle(player.autoplaylist)
                    song_url = random.choice(player.autoplaylist)
                else:
                    song_url = player.autoplaylist[0]
                player.autoplaylist.remove(song_url)

                info = {}
                try:
                    info = await self.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
                except downloader.youtube_dl.utils.DownloadError as e:
                    if 'YouTube said:' in e.args[0]:
                        # url is bork, remove from list and put in removed list
                        log.error("Error processing youtube url:\n{}".format(e.args[0]))

                    else:
                        # Probably an error from a different extractor, but I've only seen youtube's
                        log.error("Error processing \"{url}\": {ex}".format(url=song_url, ex=e))

                    await self.remove_from_autoplaylist(song_url, ex=e, delete_from_ap=self.config.remove_ap)
                    continue

                except Exception as e:
                    log.error("Error processing \"{url}\": {ex}".format(url=song_url, ex=e))
                    log.exception()

                    self.autoplaylist.remove(song_url)
                    continue

                if info.get('entries', None):  # or .get('_type', '') == 'playlist'
                    log.debug("Playlist found but is unsupported at this time, skipping.")
                    # TODO: Playlist expansion

                # Do I check the initial conditions again?
                # not (not player.playlist.entries and not player.current_entry and self.config.auto_playlist)

                if self.config.auto_pause:
                    player.once('play', lambda player, **_: _autopause(player))

                try:
                    await player.playlist.add_entry(song_url, channel=None, author=None)
                except exceptions.ExtractionError as e:
                    log.error("Error adding song from autoplaylist: {}".format(e))
                    log.debug('', exc_info=True)
                    continue

                break

            if not self.autoplaylist:
                # TODO: When I add playlist expansion, make sure that's not happening during this check
                log.warning("No playable songs in the autoplaylist, disabling.")
                self.config.auto_playlist = False

        else:  # Don't serialize for autoplaylist events
            await self.serialize_queue(player.voice_client.channel.guild)
    async def on_player_entry_added(self, player, playlist, entry, **_):
        """Persist the queue when a user-requested entry is added (autoplaylist entries carry no meta)."""
        log.debug('Running on_player_entry_added')

        if entry.meta.get('author') and entry.meta.get('channel'):
            await self.serialize_queue(player.voice_client.channel.guild)
    async def on_player_error(self, player, entry, ex, **_):
        """Report FFmpeg/player errors to the entry's channel when known, otherwise log them."""
        if 'channel' in entry.meta:
            await self.safe_send_message(
                entry.meta['channel'],
                "```\nError from FFmpeg:\n{}\n```".format(ex)
            )
        else:
            log.exception("Player error", exc_info=ex)
    async def update_now_playing_status(self, entry=None, is_paused=False):
        """Update the bot's Discord presence to reflect the current entry; deduplicated via `last_status`."""
        game = None

        if not self.config.status_message:
            if self.user.bot:
                activeplayers = sum(1 for p in self.players.values() if p.is_playing)
                if activeplayers > 1:
                    game = discord.Game(type=0, name="music on %s guilds" % activeplayers)
                    entry = None

                elif activeplayers == 1:
                    player = discord.utils.get(self.players.values(), is_playing=True)
                    entry = player.current_entry

            if entry:
                # \u275A\u275A is a "paused" (double vertical bar) prefix.
                prefix = u'\u275A\u275A ' if is_paused else ''

                name = u'{}{}'.format(prefix, entry.title)[:128]
                game = discord.Game(type=0, name=name)
        else:
            # Fixed status message configured; truncated to Discord's 128-char limit.
            game = discord.Game(type=0, name=self.config.status_message.strip()[:128])

        async with self.aiolocks[_func_()]:
            if game != self.last_status:
                await self.change_presence(activity=game)
                self.last_status = game
    async def update_now_playing_message(self, guild, message, *, channel=None):
        """Update, resend, or delete the guild's 'now playing' message and record the new one."""
        lnp = self.server_specific_data[guild]['last_np_msg']
        m = None

        if message is None and lnp:
            await self.safe_delete_message(lnp, quiet=True)

        elif lnp:  # If there was a previous lp message
            oldchannel = lnp.channel

            if lnp.channel == oldchannel:  # If we have a channel to update it in
                # NOTE(review): this condition is always True (`oldchannel` was just set to
                # `lnp.channel`), and `self.logs_from` is not defined on this client —
                # other methods use `channel.history`. Confirm whether this path is dead code.
                async for lmsg in self.logs_from(channel, limit=1):
                    if lmsg != lnp and lnp:  # If we need to resend it
                        await self.safe_delete_message(lnp, quiet=True)
                        m = await self.safe_send_message(channel, message, quiet=True)
                    else:
                        m = await self.safe_edit_message(lnp, message, send_if_fail=True, quiet=False)

            elif channel:  # If we have a new channel to send it to
                await self.safe_delete_message(lnp, quiet=True)
                m = await self.safe_send_message(channel, message, quiet=True)

            else:  # we just resend it in the old channel
                await self.safe_delete_message(lnp, quiet=True)
                m = await self.safe_send_message(oldchannel, message, quiet=True)

        elif channel:  # No previous message
            m = await self.safe_send_message(channel, message, quiet=True)

        self.server_specific_data[guild]['last_np_msg'] = m
    async def serialize_queue(self, guild, *, dir=None):
        """
        Serialize the current queue for a server's player to json.

        No-op when the guild has no player. Writes under a per-guild lock.
        """
        player = self.get_player_in(guild)
        if not player:
            return

        if dir is None:
            dir = 'data/%s/queue.json' % guild.id

        async with self.aiolocks['queue_serialization' + ':' + str(guild.id)]:
            log.debug("Serializing queue for %s", guild.id)

            with open(dir, 'w', encoding='utf8') as f:
                f.write(player.serialize(sort_keys=True))
    async def serialize_all_queues(self, *, dir=None):
        """Serialize every guild's queue concurrently; individual failures are swallowed."""
        coros = [self.serialize_queue(s, dir=dir) for s in self.guilds]
        await asyncio.gather(*coros, return_exceptions=True)
    async def deserialize_queue(self, guild, voice_client, playlist=None, *, dir=None) -> MusicPlayer:
        """
        Deserialize a saved queue for a server into a MusicPlayer. If no queue is saved, returns None.
        """
        if playlist is None:
            playlist = Playlist(self)

        if dir is None:
            dir = 'data/%s/queue.json' % guild.id

        async with self.aiolocks['queue_serialization' + ':' + str(guild.id)]:
            if not os.path.isfile(dir):
                return None

            log.debug("Deserializing queue for %s", guild.id)

            with open(dir, 'r', encoding='utf8') as f:
                data = f.read()

        return MusicPlayer.from_json(data, self, voice_client, playlist)
    async def write_current_song(self, guild, entry, *, dir=None):
        """
        Writes the current song to file
        """
        player = self.get_player_in(guild)
        if not player:
            return

        if dir is None:
            dir = 'data/%s/current.txt' % guild.id

        async with self.aiolocks['current_song' + ':' + str(guild.id)]:
            log.debug("Writing current song for %s", guild.id)

            with open(dir, 'w', encoding='utf8') as f:
                f.write(entry.title)
    @ensure_appinfo
    async def _on_ready_sanity_checks(self):
        """Run startup checks: data folders, server permissions, autoplaylist audit, config validation."""
        # Ensure folders exist
        await self._scheck_ensure_env()

        # Server permissions check
        await self._scheck_server_permissions()

        # playlists in autoplaylist
        await self._scheck_autoplaylist()

        # config/permissions async validate?
        await self._scheck_configs()
async def _scheck_ensure_env(self):
log.debug("Ensuring data folders exist")
for guild in self.guilds:
pathlib.Path('data/%s/' % guild.id).mkdir(exist_ok=True)
with open('data/server_names.txt', 'w', encoding='utf8') as f:
for guilds in sorted(self.guilds, key=lambda s:int(s.id)):
f.write('{:<22} {}\n'.format(guild.id, guild.name))
if not self.config.save_videos and os.path.isdir(AUDIO_CACHE_PATH):
if self._delete_old_audiocache():
log.debug("Deleted old audio cache")
else:
log.debug("Could not delete old audio cache, moving on.")
    async def _scheck_server_permissions(self):
        """Startup check: verify per-server permissions (not yet implemented)."""
        log.debug("Checking server permissions")
        pass  # TODO
    async def _scheck_autoplaylist(self):
        """Startup check: audit autoplaylist entries (not yet implemented)."""
        log.debug("Auditing autoplaylist")
        pass  # TODO
    async def _scheck_configs(self):
        """Startup check: run async validation on config and permissions."""
        log.debug("Validating config")
        await self.config.async_validate(self)

        log.debug("Validating permissions config")
        await self.permissions.async_validate(self)
#######################################################################################################################
    async def safe_send_message(self, dest, content, **kwargs):
        """Send `content` to `dest`, logging (not raising) permission/HTTP errors.

        Keyword options: tts, quiet (log at debug), expire_in (self-delete delay),
        allow_none (send even when content is None), also_delete (companion message
        to delete on the same timer). Returns the sent Message or None.
        """
        tts = kwargs.pop('tts', False)
        quiet = kwargs.pop('quiet', False)
        expire_in = kwargs.pop('expire_in', 0)
        allow_none = kwargs.pop('allow_none', True)
        also_delete = kwargs.pop('also_delete', None)

        msg = None
        lfunc = log.debug if quiet else log.warning

        try:
            if content is not None or allow_none:
                if isinstance(content, discord.Embed):
                    msg = await dest.send(embed=content)
                else:
                    msg = await dest.send(content, tts=tts)

        except discord.Forbidden:
            lfunc("Cannot send message to \"%s\", no permission", dest.name)

        except discord.NotFound:
            lfunc("Cannot send message to \"%s\", invalid channel?", dest.name)

        except discord.HTTPException:
            if len(content) > DISCORD_MSG_CHAR_LIMIT:
                lfunc("Message is over the message size limit (%s)", DISCORD_MSG_CHAR_LIMIT)
            else:
                lfunc("Failed to send message")
                log.noise("Got HTTPException trying to send message to %s: %s", dest, content)

        finally:
            if msg and expire_in:
                asyncio.ensure_future(self._wait_delete_msg(msg, expire_in))

            if also_delete and isinstance(also_delete, discord.Message):
                asyncio.ensure_future(self._wait_delete_msg(also_delete, expire_in))

        return msg
    async def safe_delete_message(self, message, *, quiet=False):
        """Delete a message, logging (not raising) on Forbidden/NotFound."""
        lfunc = log.debug if quiet else log.warning

        try:
            return await message.delete()

        except discord.Forbidden:
            lfunc("Cannot delete message \"{}\", no permission".format(message.clean_content))

        except discord.NotFound:
            lfunc("Cannot delete message \"{}\", message not found".format(message.clean_content))
    async def safe_edit_message(self, message, new, *, send_if_fail=False, quiet=False):
        """Edit a message; if it no longer exists, optionally send `new` as a fresh message instead."""
        lfunc = log.debug if quiet else log.warning

        try:
            return await message.edit(content=new)

        except discord.NotFound:
            lfunc("Cannot edit message \"{}\", message not found".format(message.clean_content))
            if send_if_fail:
                lfunc("Sending message instead")
                return await self.safe_send_message(message.channel, new)
    async def send_typing(self, destination):
        """Trigger the typing indicator, ignoring missing permissions."""
        try:
            return await destination.trigger_typing()
        except discord.Forbidden:
            log.warning("Could not send typing to {}, no permission".format(destination))
    async def restart(self):
        """Set the restart signal (re-raised by run()) and log out."""
        self.exit_signal = exceptions.RestartSignal()
        await self.logout()
    def restart_threadsafe(self):
        """Schedule restart() onto the bot's event loop from another thread."""
        asyncio.run_coroutine_threadsafe(self.restart(), self.loop)
    def _cleanup(self):
        """Log out, close the aiohttp session, and cancel any outstanding asyncio tasks."""
        try:
            self.loop.run_until_complete(self.logout())
            self.loop.run_until_complete(self.aiosession.close())
        except:
            pass

        pending = asyncio.Task.all_tasks()
        gathered = asyncio.gather(*pending)
        try:
            gathered.cancel()
            self.loop.run_until_complete(gathered)
            # Retrieve the (expected CancelledError) result so it isn't logged as unhandled.
            gathered.exception()
        except:
            pass
    # noinspection PyMethodOverriding
    def run(self):
        """Run the bot until logout; always performs cleanup and re-raises any restart/terminate signal."""
        try:
            self.loop.run_until_complete(self.start(*self.config.auth))

        except discord.errors.LoginFailure:
            # Add if token, else
            raise exceptions.HelpfulError(
                "Bot cannot login, bad credentials.",
                "Fix your token in the options file. "
                "Remember that each field should be on their own line."
            )  # ^^^^ In theory self.config.auth should never have no items

        finally:
            try:
                self._cleanup()
            except Exception:
                log.error("Error in cleanup", exc_info=True)

            if self.exit_signal:
                raise self.exit_signal
    async def logout(self):
        """Disconnect every voice client before logging out of Discord."""
        await self.disconnect_all_voice_clients()
        return await super().logout()
    async def on_error(self, event, *args, **kwargs):
        """Global event error handler: shut down on HelpfulError/Signal, log anything else."""
        ex_type, ex, stack = sys.exc_info()

        if ex_type == exceptions.HelpfulError:
            log.error("Exception in {}:\n{}".format(event, ex.message))

            await asyncio.sleep(2)  # don't ask
            await self.logout()

        elif issubclass(ex_type, exceptions.Signal):
            # Remember the signal so run() can re-raise it after cleanup.
            self.exit_signal = ex_type
            await self.logout()

        else:
            log.error("Exception in {}".format(event), exc_info=True)
    async def on_resumed(self):
        """Log gateway session resumption."""
        log.info("\nReconnected to discord.\n")
async def on_ready(self):
    """One-time startup routine run when the gateway connection becomes ready.

    Logs identity/owner/guild information, validates the configured bound and
    autojoin channel lists, optionally prints the active options, then joins
    the startup voice channels.  Guarded by ``self.init_ok`` so a duplicate
    READY (e.g. after a failed resume) is a no-op.
    """
    # discord.py installs a progress-style handler (terminator '') for its
    # connect log line; remove it so our own output isn't glued onto it.
    dlogger = logging.getLogger('discord')
    for h in dlogger.handlers:
        if getattr(h, 'terminator', None) == '':
            dlogger.removeHandler(h)
            print()

    log.debug("Connection established, ready to go.")

    # NOTE(review): touches a private discord.py attribute purely to rename
    # the keepalive thread for nicer debugging — may break across versions.
    self.ws._keep_alive.name = 'Gateway Keepalive'

    if self.init_ok:
        # Already initialized once; this READY came from a reconnect.
        log.debug("Received additional READY event, may have failed to resume")
        return

    await self._on_ready_sanity_checks()

    self.init_ok = True

    ################################

    log.info("Connected: {0}/{1}#{2}".format(
        self.user.id,
        self.user.name,
        self.user.discriminator
    ))

    # Prefer an owner who is currently in a voice channel, falling back to
    # any resolvable owner.
    owner = self._get_owner(voice=True) or self._get_owner()
    if owner and self.guilds:
        log.info("Owner: {0}/{1}#{2}\n".format(
            owner.id,
            owner.name,
            owner.discriminator
        ))

        log.info('Guild List:')
        for s in self.guilds:
            ser = ('{} (unavailable)'.format(s.name) if s.unavailable else s.name)
            log.info(' - ' + ser)

    elif self.guilds:
        log.warning("Owner could not be found on any guild (id: %s)\n" % self.config.owner_id)

        log.info('Guild List:')
        for s in self.guilds:
            ser = ('{} (unavailable)'.format(s.name) if s.unavailable else s.name)
            log.info(' - ' + ser)

    else:
        log.warning("Owner unknown, bot is not on any guilds.")
        if self.user.bot:
            log.warning(
                "To make the bot join a guild, paste this link in your browser. \n"
                "Note: You should be logged into your main account and have \n"
                "manage server permissions on the guild you want the bot to join.\n"
                " " + await self.generate_invite_link()
            )

    print(flush=True)

    if self.config.bound_channels:
        # Resolve configured channel ids; drop any that no longer resolve.
        chlist = set(self.get_channel(i) for i in self.config.bound_channels if i)
        chlist.discard(None)

        # Binding only makes sense for text channels; weed out voice channels.
        invalids = set()
        invalids.update(c for c in chlist if isinstance(c, discord.VoiceChannel))

        chlist.difference_update(invalids)
        self.config.bound_channels.difference_update(invalids)

        if chlist:
            log.info("Bound to text channels:")
            [log.info(' - {}/{}'.format(ch.guild.name.strip(), ch.name.strip())) for ch in chlist if ch]
        else:
            print("Not bound to any text channels")

        if invalids and self.config.debug_mode:
            print(flush=True)
            log.info("Not binding to voice channels:")
            [log.info(' - {}/{}'.format(ch.guild.name.strip(), ch.name.strip())) for ch in invalids if ch]

        print(flush=True)

    else:
        log.info("Not bound to any text channels")

    if self.config.autojoin_channels:
        # Same resolution dance, but autojoin targets must be voice channels,
        # so here it's the *text* channels that get weeded out.
        chlist = set(self.get_channel(i) for i in self.config.autojoin_channels if i)
        chlist.discard(None)

        invalids = set()
        invalids.update(c for c in chlist if isinstance(c, discord.TextChannel))

        chlist.difference_update(invalids)
        self.config.autojoin_channels.difference_update(invalids)

        if chlist:
            log.info("Autojoining voice chanels:")
            [log.info(' - {}/{}'.format(ch.guild.name.strip(), ch.name.strip())) for ch in chlist if ch]
        else:
            log.info("Not autojoining any voice channels")

        if invalids and self.config.debug_mode:
            print(flush=True)
            log.info("Cannot autojoin text channels:")
            [log.info(' - {}/{}'.format(ch.guild.name.strip(), ch.name.strip())) for ch in invalids if ch]

        self.autojoin_channels = chlist

    else:
        log.info("Not autojoining any voice channels")
        self.autojoin_channels = set()

    if self.config.show_config_at_start:
        print(flush=True)
        log.info("Options:")

        log.info(" Command prefix: " + self.config.command_prefix)
        log.info(" Default volume: {}%".format(int(self.config.default_volume * 100)))
        log.info(" Skip threshold: {} votes or {}%".format(
            self.config.skips_required, fixg(self.config.skip_ratio_required * 100)))
        log.info(" Now Playing @mentions: " + ['Disabled', 'Enabled'][self.config.now_playing_mentions])
        log.info(" Auto-Summon: " + ['Disabled', 'Enabled'][self.config.auto_summon])
        log.info(" Auto-Playlist: " + ['Disabled', 'Enabled'][self.config.auto_playlist] + " (order: " + ['sequential', 'random'][self.config.auto_playlist_random] + ")")
        log.info(" Auto-Pause: " + ['Disabled', 'Enabled'][self.config.auto_pause])
        log.info(" Delete Messages: " + ['Disabled', 'Enabled'][self.config.delete_messages])
        if self.config.delete_messages:
            log.info(" Delete Invoking: " + ['Disabled', 'Enabled'][self.config.delete_invoking])
        log.info(" Debug Mode: " + ['Disabled', 'Enabled'][self.config.debug_mode])
        log.info(" Downloaded songs will be " + ['deleted', 'saved'][self.config.save_videos])
        if self.config.status_message:
            log.info(" Status message: " + self.config.status_message)
        log.info(" Write current songs to file: " + ['Disabled', 'Enabled'][self.config.write_current_song])
        log.info(" Author insta-skip: " + ['Disabled', 'Enabled'][self.config.allow_author_skip])
        log.info(" Embeds: " + ['Disabled', 'Enabled'][self.config.embeds])
        log.info(" Spotify integration: " + ['Disabled', 'Enabled'][self.config._spotify])
        log.info(" Legacy skip: " + ['Disabled', 'Enabled'][self.config.legacy_skip])
        print(flush=True)

    await self.update_now_playing_status()

    # maybe option to leave the ownerid blank and generate a random command for the owner to use
    # wait_for_message is pretty neato

    await self._join_startup_channels(self.autojoin_channels, autosummon=self.config.auto_summon)

    # we do this after the config stuff because it's a lot easier to notice here
    if self.config.missing_keys:
        log.warning('Your config file is missing some options. If you have recently updated, '
                    'check the example_options.ini file to see if there are new options available to you. '
                    'The options missing are: {0}'.format(self.config.missing_keys))
        print(flush=True)

    # t-t-th-th-that's all folks!
def _gen_embed(self):
    """Build the shared embed skeleton (colour, footer, author) used by responses."""
    embed = discord.Embed()
    embed.colour = 7506394
    embed.set_footer(
        text='Just-Some-Bots/MusicBot ({})'.format(BOTVERSION),
        icon_url='https://i.imgur.com/gFHBoZA.png'
    )
    embed.set_author(
        name=self.user.name,
        url='https://github.com/Just-Some-Bots/MusicBot',
        icon_url=self.user.avatar_url
    )
    return embed
async def cmd_resetplaylist(self, player, channel):
    """
    Usage:
        {command_prefix}resetplaylist

    Resets all songs in the server's autoplaylist
    """
    # Re-seed the player's autoplaylist from the global one, deduplicated.
    deduped = set(self.autoplaylist)
    player.autoplaylist = list(deduped)
    reply = self.str.get('cmd-resetplaylist-response', '\N{OK HAND SIGN}')
    return Response(reply, delete_after=15)
async def cmd_help(self, message, channel, command=None):
    """
    Usage:
        {command_prefix}help [command]

    Prints a help message.
    If a command is specified, it prints a help message for that command.
    Otherwise, it lists the available commands.
    """
    # gen_cmd_list() fills self.commands; is_all controls the footer hint.
    self.commands = []
    self.is_all = False
    prefix = self.config.command_prefix

    if command:
        if command.lower() == 'all':
            self.is_all = True
            await self.gen_cmd_list(message, list_all_cmds=True)
        else:
            cmd = getattr(self, 'cmd_' + command, None)
            # Guard: unknown or dev-only commands are reported as missing.
            if not cmd or hasattr(cmd, 'dev_cmd'):
                raise exceptions.CommandError(self.str.get('cmd-help-invalid', "No such command"), expire_in=10)
            doc_block = "```\n{}```".format(dedent(cmd.__doc__))
            return Response(
                doc_block.format(command_prefix=self.config.command_prefix),
                delete_after=60
            )
    elif message.author.id == self.config.owner_id:
        # The owner always sees the full command list.
        await self.gen_cmd_list(message, list_all_cmds=True)
    else:
        await self.gen_cmd_list(message)

    desc = '```\n' + ', '.join(self.commands) + '\n```\n' + self.str.get(
        'cmd-help-response', 'For information about a particular command, run `{}help [command]`\n'
        'For further help, see https://just-some-bots.github.io/MusicBot/').format(prefix)
    if not self.is_all:
        desc += self.str.get('cmd-help-all', '\nOnly showing commands you can use, for a list of all commands, run `{}help all`').format(prefix)

    return Response(desc, reply=True, delete_after=60)
async def cmd_blacklist(self, message, user_mentions, option, something):
    """
    Usage:
        {command_prefix}blacklist [ + | - | add | remove ] @UserName [@UserName2 ...]

    Add or remove users to the blacklist.
    Blacklisted users are forbidden from using bot commands.
    """
    if not user_mentions:
        raise exceptions.CommandError("No users listed.", expire_in=20)

    if option not in ['+', '-', 'add', 'remove']:
        raise exceptions.CommandError(
            self.str.get('cmd-blacklist-invalid', 'Invalid option "{0}" specified, use +, -, add, or remove').format(option), expire_in=20
        )

    # The owner can never be blacklisted; silently drop them from the list.
    for user in user_mentions.copy():
        if user.id == self.config.owner_id:
            # Fixed: was a bare print(); use the module logger like the rest of the file.
            log.warning("[Commands:Blacklist] The owner cannot be blacklisted.")
            user_mentions.remove(user)

    # Snapshot so the responses can report how many entries actually changed.
    old_len = len(self.blacklist)

    if option in ['+', 'add']:
        self.blacklist.update(user.id for user in user_mentions)
        write_file(self.config.blacklist_file, self.blacklist)
        return Response(
            self.str.get('cmd-blacklist-added', '{0} users have been added to the blacklist').format(len(self.blacklist) - old_len),
            reply=True, delete_after=10
        )
    else:
        if self.blacklist.isdisjoint(user.id for user in user_mentions):
            return Response(self.str.get('cmd-blacklist-none', 'None of those users are in the blacklist.'), reply=True, delete_after=10)
        else:
            self.blacklist.difference_update(user.id for user in user_mentions)
            write_file(self.config.blacklist_file, self.blacklist)
            return Response(
                self.str.get('cmd-blacklist-removed', '{0} users have been removed from the blacklist').format(old_len - len(self.blacklist)),
                reply=True, delete_after=10
            )
async def cmd_id(self, author, user_mentions):
    """
    Usage:
        {command_prefix}id [@user]

    Tells the user their id or the id of another user.
    """
    if user_mentions:
        usr = user_mentions[0]
        return Response(self.str.get('cmd-id-other', '**{0}**s ID is `{1}`').format(usr.name, usr.id), reply=True, delete_after=35)
    # No mention given: report the invoker's own id.
    return Response(self.str.get('cmd-id-self', 'Your ID is `{0}`').format(author.id), reply=True, delete_after=35)
async def cmd_save(self, player, url=None):
    """
    Usage:
        {command_prefix}save [url]

    Saves the specified song or current song if not specified to the autoplaylist.
    """
    # Without an explicit url we need a current, non-stream entry to save.
    has_saveable_entry = player.current_entry and not isinstance(player.current_entry, StreamPlaylistEntry)
    if not url and not has_saveable_entry:
        raise exceptions.CommandError(self.str.get('cmd-save-invalid', 'There is no valid song playing.'))
    if not url:
        url = player.current_entry.url

    if url in self.autoplaylist:
        raise exceptions.CommandError(self.str.get('cmd-save-exists', 'This song is already in the autoplaylist.'))

    self.autoplaylist.append(url)
    write_file(self.config.auto_playlist_file, self.autoplaylist)
    log.debug("Appended {} to autoplaylist".format(url))
    return Response(self.str.get('cmd-save-success', 'Added <{0}> to the autoplaylist.').format(url))
@owner_only
async def cmd_joinserver(self, message, server_link=None):
    """
    Usage:
        {command_prefix}joinserver invite_link

    Asks the bot to join a server. Note: Bot accounts cannot use invite links.
    """
    # server_link is kept for signature compatibility; bot accounts use OAuth links.
    invite_url = await self.generate_invite_link()
    reply = self.str.get('cmd-joinserver-response', "Click here to add me to a server: \n{}").format(invite_url)
    return Response(reply, reply=True, delete_after=30)
async def cmd_karaoke(self, player, channel, author):
    """
    Usage:
        {command_prefix}karaoke

    Activates karaoke mode. During karaoke mode, only groups with the BypassKaraokeMode
    permission in the config file can queue music.
    """
    player.karaoke_mode = not player.karaoke_mode
    state = 'enabled' if player.karaoke_mode else 'disabled'
    return Response("\N{OK HAND SIGN} Karaoke mode is now " + state, delete_after=15)
async def _do_playlist_checks(self, permissions, player, author, testobj):
    """Validate that *author* may enqueue the playlist-like iterable *testobj*.

    Raises PermissionsError when any limit is violated, returns True otherwise.
    (Extra checks are needed anyway since an arbitrary number of search
    results can be requested.)
    """
    entry_count = sum(1 for _ in testobj)

    # May this user queue playlists at all?
    if not permissions.allow_playlists and entry_count > 1:
        raise exceptions.PermissionsError(
            self.str.get('playlists-noperms', "You are not allowed to request playlists"), expire_in=30)

    # Per-playlist size cap.
    if permissions.max_playlist_length and entry_count > permissions.max_playlist_length:
        raise exceptions.PermissionsError(
            self.str.get('playlists-big', "Playlist has too many entries ({0} > {1})").format(entry_count, permissions.max_playlist_length),
            expire_in=30
        )

    # Overall queued-song cap for this user (playlist + already queued).
    if permissions.max_songs and player.playlist.count_for_user(author) + entry_count > permissions.max_songs:
        raise exceptions.PermissionsError(
            self.str.get('playlists-limit', "Playlist entries + your already queued songs reached limit ({0} + {1} > {2})").format(
                entry_count, player.playlist.count_for_user(author), permissions.max_songs),
            expire_in=30
        )

    return True
async def cmd_play(self, message, player, channel, author, permissions, leftover_args, song_url):
    """
    Usage:
        {command_prefix}play song_link
        {command_prefix}play text to search for
        {command_prefix}play spotify_uri

    Adds the song to the playlist. If a link is not provided, the first
    result from a youtube search is added to the queue.

    If enabled in the config, the bot will also support Spotify URIs, however
    it will use the metadata (e.g song name and artist) to find a YouTube
    equivalent of the song. Streaming from Spotify is not possible.
    """
    song_url = song_url.strip('<>')

    await self.send_typing(channel)

    if leftover_args:
        song_url = ' '.join([song_url, *leftover_args])
    leftover_args = None  # prevent some crazy shit happening down the line

    # Make sure forward slashes work properly in search queries
    linksRegex = '((http(s)*:[/][/]|www.)([a-z]|[A-Z]|[0-9]|[/.]|[~])*)'
    pattern = re.compile(linksRegex)
    matchUrl = pattern.match(song_url)
    song_url = song_url.replace('/', '%2F') if matchUrl is None else song_url

    # Rewrite YouTube playlist URLs if the wrong URL type is given
    playlistRegex = r'watch\?v=.+&(list=[^&]+)'
    matches = re.search(playlistRegex, song_url)
    groups = matches.groups() if matches is not None else []
    song_url = "https://www.youtube.com/playlist?" + groups[0] if len(groups) > 0 else song_url

    if self.config._spotify:
        # Normalize open.spotify.com links to spotify: URIs, then resolve the
        # metadata to a YouTube search; Spotify itself cannot be streamed.
        if 'open.spotify.com' in song_url:
            song_url = 'spotify:' + re.sub('(http[s]?:\/\/)?(open.spotify.com)\/', '', song_url).replace('/', ':')
        if song_url.startswith('spotify:'):
            parts = song_url.split(":")
            try:
                if 'track' in parts:
                    res = await self.spotify.get_track(parts[-1])
                    song_url = res['artists'][0]['name'] + ' ' + res['name']

                elif 'album' in parts:
                    res = await self.spotify.get_album(parts[-1])
                    await self._do_playlist_checks(permissions, player, author, res['tracks']['items'])
                    procmesg = await self.safe_send_message(channel, self.str.get('cmd-play-spotify-album-process', 'Processing album `{0}` (`{1}`)').format(res['name'], song_url))
                    for i in res['tracks']['items']:
                        song_url = i['name'] + ' ' + i['artists'][0]['name']
                        log.debug('Processing {0}'.format(song_url))
                        await self.cmd_play(message, player, channel, author, permissions, leftover_args, song_url)
                    await self.safe_delete_message(procmesg)
                    return Response(self.str.get('cmd-play-spotify-album-queued', "Enqueued `{0}` with **{1}** songs.").format(res['name'], len(res['tracks']['items'])))

                elif 'playlist' in parts:
                    res = []
                    r = await self.spotify.get_playlist_tracks(parts[-1])
                    # Page through the playlist until Spotify reports no next page.
                    while True:
                        res.extend(r['items'])
                        if r['next'] is not None:
                            r = await self.spotify.make_spotify_req(r['next'])
                            continue
                        else:
                            break
                    await self._do_playlist_checks(permissions, player, author, res)
                    procmesg = await self.safe_send_message(channel, self.str.get('cmd-play-spotify-playlist-process', 'Processing playlist `{0}` (`{1}`)').format(parts[-1], song_url))
                    for i in res:
                        song_url = i['track']['name'] + ' ' + i['track']['artists'][0]['name']
                        log.debug('Processing {0}'.format(song_url))
                        await self.cmd_play(message, player, channel, author, permissions, leftover_args, song_url)
                    await self.safe_delete_message(procmesg)
                    return Response(self.str.get('cmd-play-spotify-playlist-queued', "Enqueued `{0}` with **{1}** songs.").format(parts[-1], len(res)))

                else:
                    raise exceptions.CommandError(self.str.get('cmd-play-spotify-unsupported', 'That is not a supported Spotify URI.'), expire_in=30)
            except exceptions.SpotifyError:
                raise exceptions.CommandError(self.str.get('cmd-play-spotify-invalid', 'You either provided an invalid URI, or there was a problem.'))

    # Serialize queuing per user so concurrent !play calls can't race the limits.
    async with self.aiolocks[_func_() + ':' + str(author.id)]:
        if permissions.max_songs and player.playlist.count_for_user(author) >= permissions.max_songs:
            raise exceptions.PermissionsError(
                self.str.get('cmd-play-limit', "You have reached your enqueued song limit ({0})").format(permissions.max_songs), expire_in=30
            )

        if player.karaoke_mode and not permissions.bypass_karaoke_mode:
            raise exceptions.PermissionsError(
                self.str.get('karaoke-enabled', "Karaoke mode is enabled, please try again when its disabled!"), expire_in=30
            )

        try:
            info = await self.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
        except Exception as e:
            if 'unknown url type' in str(e):
                song_url = song_url.replace(':', '')  # it's probably not actually an extractor
                info = await self.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)
            else:
                raise exceptions.CommandError(e, expire_in=30)

        if not info:
            raise exceptions.CommandError(
                self.str.get('cmd-play-noinfo', "That video cannot be played. Try using the {0}stream command.").format(self.config.command_prefix),
                expire_in=30
            )

        log.debug(info)

        if info.get('extractor', '') not in permissions.extractors and permissions.extractors:
            raise exceptions.PermissionsError(
                self.str.get('cmd-play-badextractor', "You do not have permission to play media from this service."), expire_in=30
            )

        # abstract the search handling away from the user
        # our ytdl options allow us to use search strings as input urls
        if info.get('url', '').startswith('ytsearch'):
            info = await self.downloader.extract_info(
                player.playlist.loop,
                song_url,
                download=False,
                process=True,  # ASYNC LAMBDAS WHEN
                on_error=lambda e: asyncio.ensure_future(
                    self.safe_send_message(channel, "```\n%s\n```" % e, expire_in=120), loop=self.loop),
                retry_on_error=True
            )

            if not info:
                raise exceptions.CommandError(
                    self.str.get('cmd-play-nodata', "Error extracting info from search string, youtubedl returned no data. "
                                                    "You may need to restart the bot if this continues to happen."), expire_in=30
                )

            if not all(info.get('entries', [])):
                # empty list, no data
                log.debug("Got empty list, no data")
                return

            # TODO: handle 'webpage_url' being 'ytsearch:...' or extractor type
            song_url = info['entries'][0]['webpage_url']
            info = await self.downloader.extract_info(player.playlist.loop, song_url, download=False, process=False)

        # TODO: Possibly add another check here to see about things like the bandcamp issue
        # TODO: Where ytdl gets the generic extractor version with no processing, but finds two different urls

        if 'entries' in info:
            await self._do_playlist_checks(permissions, player, author, info['entries'])

            num_songs = sum(1 for _ in info['entries'])

            if info['extractor'].lower() in ['youtube:playlist', 'soundcloud:set', 'bandcamp:album']:
                try:
                    return await self._cmd_play_playlist_async(player, channel, author, permissions, song_url, info['extractor'])
                except exceptions.CommandError:
                    raise
                except Exception as e:
                    log.error("Error queuing playlist", exc_info=True)
                    raise exceptions.CommandError(self.str.get('cmd-play-playlist-error', "Error queuing playlist:\n`{0}`").format(e), expire_in=30)

            t0 = time.time()

            # Rough per-song processing estimate used only for the ETA message.
            wait_per_song = 1.2

            procmesg = await self.safe_send_message(
                channel,
                self.str.get('cmd-play-playlist-gathering-1', 'Gathering playlist information for {0} songs{1}').format(
                    num_songs,
                    self.str.get('cmd-play-playlist-gathering-2', ', ETA: {0} seconds').format(fixg(
                        num_songs * wait_per_song)) if num_songs >= 10 else '.'))

            # We don't have a pretty way of doing this yet. We need either a loop
            # that sends these every 10 seconds or a nice context manager.
            await self.send_typing(channel)

            entry_list, position = await player.playlist.import_from(song_url, channel=channel, author=author)

            tnow = time.time()
            ttime = tnow - t0
            listlen = len(entry_list)
            drop_count = 0

            if permissions.max_song_length:
                # Drop entries that exceed the length cap after import.
                for e in entry_list.copy():
                    if e.duration > permissions.max_song_length:
                        player.playlist.entries.remove(e)
                        entry_list.remove(e)
                        drop_count += 1

                if drop_count:
                    # Fixed: was print(); log.debug for consistency with the playlist handler.
                    log.debug("Dropped %s songs" % drop_count)

            log.info("Processed {} songs in {} seconds at {:.2f}s/song, {:+.2g}/song from expected ({}s)".format(
                listlen,
                fixg(ttime),
                ttime / listlen if listlen else 0,
                # Fixed guard: was `if listlen - wait_per_song`, which divides by
                # zero when listlen == 0 (the guard must test listlen itself).
                ttime / listlen - wait_per_song if listlen else 0,
                fixg(wait_per_song * num_songs))
            )

            await self.safe_delete_message(procmesg)

            if not listlen - drop_count:
                raise exceptions.CommandError(
                    self.str.get('cmd-play-playlist-maxduration', "No songs were added, all songs were over max duration (%ss)") % permissions.max_song_length,
                    expire_in=30
                )

            reply_text = self.str.get('cmd-play-playlist-reply', "Enqueued **%s** songs to be played. Position in queue: %s")
            btext = str(listlen - drop_count)

        else:
            if info.get('extractor', '').startswith('youtube:playlist'):
                try:
                    info = await self.downloader.extract_info(player.playlist.loop, 'https://www.youtube.com/watch?v=%s' % info.get('url', ''), download=False, process=False)
                except Exception as e:
                    raise exceptions.CommandError(e, expire_in=30)

            if permissions.max_song_length and info.get('duration', 0) > permissions.max_song_length:
                raise exceptions.PermissionsError(
                    self.str.get('cmd-play-song-limit', "Song duration exceeds limit ({0} > {1})").format(info['duration'], permissions.max_song_length),
                    expire_in=30
                )

            try:
                entry, position = await player.playlist.add_entry(song_url, channel=channel, author=author)

            except exceptions.WrongEntryTypeError as e:
                if e.use_url == song_url:
                    log.warning("Determined incorrect entry type, but suggested url is the same. Help.")

                log.debug("Assumed url \"%s\" was a single entry, was actually a playlist" % song_url)
                log.debug("Using \"%s\" instead" % e.use_url)

                # Fixed: the recursive call previously omitted `message`, shifting
                # every argument one position and raising TypeError on this path.
                return await self.cmd_play(message, player, channel, author, permissions, leftover_args, e.use_url)

            reply_text = self.str.get('cmd-play-song-reply', "Enqueued `%s` to be played. Position in queue: %s")
            btext = entry.title

        if position == 1 and player.is_stopped:
            position = self.str.get('cmd-play-next', 'Up next!')
            reply_text %= (btext, position)
        else:
            try:
                time_until = await player.playlist.estimate_time_until(position, player)
                reply_text += self.str.get('cmd-play-eta', ' - estimated time until playing: %s')
            except Exception:
                # Fixed: was a bare except; keep the best-effort ETA but don't
                # swallow KeyboardInterrupt/SystemExit.
                traceback.print_exc()
                time_until = ''

            reply_text %= (btext, position, ftimedelta(time_until))

    return Response(reply_text, delete_after=30)
async def _cmd_play_playlist_async(self, player, channel, author, permissions, playlist_url, extractor_type):
    """
    Secret handler to use the async wizardry to make playlist queuing non-"blocking"
    """
    await self.send_typing(channel)
    info = await self.downloader.extract_info(player.playlist.loop, playlist_url, download=False, process=False)

    if not info:
        raise exceptions.CommandError(self.str.get('cmd-play-playlist-invalid', "That playlist cannot be played."))

    num_songs = sum(1 for _ in info['entries'])
    t0 = time.time()

    busymsg = await self.safe_send_message(
        channel, self.str.get('cmd-play-playlist-process', "Processing {0} songs...").format(num_songs))  # TODO: From playlist_title
    await self.send_typing(channel)

    # Fixed: was initialized to 0; len()/.copy() below require a list even
    # when extractor_type matches neither branch.
    entries_added = []
    if extractor_type == 'youtube:playlist':
        try:
            entries_added = await player.playlist.async_process_youtube_playlist(
                playlist_url, channel=channel, author=author)
            # TODO: Add hook to be called after each song
            # TODO: Add permissions
        except Exception:
            log.error("Error processing playlist", exc_info=True)
            raise exceptions.CommandError(self.str.get('cmd-play-playlist-queueerror', 'Error handling playlist {0} queuing.').format(playlist_url), expire_in=30)

    elif extractor_type.lower() in ['soundcloud:set', 'bandcamp:album']:
        try:
            entries_added = await player.playlist.async_process_sc_bc_playlist(
                playlist_url, channel=channel, author=author)
            # TODO: Add hook to be called after each song
            # TODO: Add permissions
        except Exception:
            log.error("Error processing playlist", exc_info=True)
            raise exceptions.CommandError(self.str.get('cmd-play-playlist-queueerror', 'Error handling playlist {0} queuing.').format(playlist_url), expire_in=30)

    songs_processed = len(entries_added)
    drop_count = 0
    skipped = False

    if permissions.max_song_length:
        # Drop queued entries that exceed the per-song length cap.
        for e in entries_added.copy():
            if e.duration > permissions.max_song_length:
                try:
                    player.playlist.entries.remove(e)
                    entries_added.remove(e)
                    drop_count += 1
                except Exception:
                    # Fixed: was a bare except; removal is best-effort (the entry
                    # may already be playing or removed), but don't trap signals.
                    pass

        if drop_count:
            log.debug("Dropped %s songs" % drop_count)

        # If the song that already started playing is over the cap, skip it too.
        if player.current_entry and player.current_entry.duration > permissions.max_song_length:
            await self.safe_delete_message(self.server_specific_data[channel.guild]['last_np_msg'])
            self.server_specific_data[channel.guild]['last_np_msg'] = None
            skipped = True
            player.skip()
            entries_added.pop()

    await self.safe_delete_message(busymsg)

    songs_added = len(entries_added)
    tnow = time.time()
    ttime = tnow - t0
    wait_per_song = 1.2
    # TODO: actually calculate wait per song in the process function and return that too

    # This is technically inaccurate since bad songs are ignored but still take up time
    log.info("Processed {}/{} songs in {} seconds at {:.2f}s/song, {:+.2g}/song from expected ({}s)".format(
        songs_processed,
        num_songs,
        fixg(ttime),
        ttime / num_songs if num_songs else 0,
        # Fixed guard: was `if num_songs - wait_per_song`, which divides by zero
        # when num_songs == 0 (the guard must test num_songs itself).
        ttime / num_songs - wait_per_song if num_songs else 0,
        fixg(wait_per_song * num_songs))
    )

    if not songs_added:
        basetext = self.str.get('cmd-play-playlist-maxduration', "No songs were added, all songs were over max duration (%ss)") % permissions.max_song_length
        if skipped:
            basetext += self.str.get('cmd-play-playlist-skipped', "\nAdditionally, the current song was skipped for being too long.")

        raise exceptions.CommandError(basetext, expire_in=30)

    return Response(self.str.get('cmd-play-playlist-reply-secs', "Enqueued {0} songs to be played in {1} seconds").format(
        songs_added, fixg(ttime, 1)), delete_after=30)
async def cmd_stream(self, player, channel, author, permissions, song_url):
    """
    Usage:
        {command_prefix}stream song_link

    Enqueue a media stream.
    This could mean an actual stream like Twitch or shoutcast, or simply streaming
    media without predownloading it. Note: FFmpeg is notoriously bad at handling
    streams, especially on poor connections. You have been warned.
    """
    song_url = song_url.strip('<>')

    # Enforce the per-user queue limit before touching the playlist.
    if permissions.max_songs and player.playlist.count_for_user(author) >= permissions.max_songs:
        limit_msg = self.str.get('cmd-stream-limit', "You have reached your enqueued song limit ({0})")
        raise exceptions.PermissionsError(limit_msg.format(permissions.max_songs), expire_in=30)

    if player.karaoke_mode and not permissions.bypass_karaoke_mode:
        karaoke_msg = self.str.get('karaoke-enabled', "Karaoke mode is enabled, please try again when its disabled!")
        raise exceptions.PermissionsError(karaoke_msg, expire_in=30)

    await self.send_typing(channel)
    await player.playlist.add_stream_entry(song_url, channel=channel, author=author)

    return Response(self.str.get('cmd-stream-success', "Streaming."), delete_after=6)
async def cmd_search(self, message, player, channel, author, permissions, leftover_args):
    """
    Usage:
        {command_prefix}search [service] [number] query

    Searches a service for a video and adds it to the queue.
    - service: any one of the following services:
        - youtube (yt) (default if unspecified)
        - soundcloud (sc)
        - yahoo (yh)
    - number: return a number of video results and waits for user to choose one
      - defaults to 3 if unspecified
      - note: If your search query starts with a number,
              you must put your query in quotes
        - ex: {command_prefix}search 2 "I ran seagulls"
    The command issuer can use reactions to indicate their response to each result.
    """
    # Same pre-flight limit checks as cmd_play / cmd_stream.
    if permissions.max_songs and player.playlist.count_for_user(author) > permissions.max_songs:
        raise exceptions.PermissionsError(
            self.str.get('cmd-search-limit', "You have reached your playlist item limit ({0})").format(permissions.max_songs),
            expire_in=30
        )

    if player.karaoke_mode and not permissions.bypass_karaoke_mode:
        raise exceptions.PermissionsError(
            self.str.get('karaoke-enabled', "Karaoke mode is enabled, please try again when its disabled!"), expire_in=30
        )

    def argcheck():
        # Raises a usage error when no query terms remain; re-invoked after
        # each pop() below as tokens are consumed from leftover_args.
        if not leftover_args:
            # noinspection PyUnresolvedReferences
            raise exceptions.CommandError(
                self.str.get('cmd-search-noquery', "Please specify a search query.\n%s") % dedent(
                    self.cmd_search.__doc__.format(command_prefix=self.config.command_prefix)),
                expire_in=60
            )

    argcheck()

    try:
        leftover_args = shlex.split(' '.join(leftover_args))
    except ValueError:
        raise exceptions.CommandError(self.str.get('cmd-search-noquote', "Please quote your search query properly."), expire_in=30)

    service = 'youtube'
    items_requested = 3
    max_items = permissions.max_search_items
    # Maps user-facing service names/aliases to ytdl search prefixes.
    services = {
        'youtube': 'ytsearch',
        'soundcloud': 'scsearch',
        'yahoo': 'yvsearch',
        'yt': 'ytsearch',
        'sc': 'scsearch',
        'yh': 'yvsearch'
    }

    # Optional leading tokens: [service] then [number], each consumed if present.
    if leftover_args[0] in services:
        service = leftover_args.pop(0)
        argcheck()

    if leftover_args[0].isdigit():
        items_requested = int(leftover_args.pop(0))
        argcheck()

        if items_requested > max_items:
            raise exceptions.CommandError(self.str.get('cmd-search-searchlimit', "You cannot search for more than %s videos") % max_items)

    # Look jake, if you see this and go "what the fuck are you doing"
    # and have a better idea on how to do this, i'd be delighted to know.
    # I don't want to just do ' '.join(leftover_args).strip("\"'")
    # Because that eats both quotes if they're there
    # where I only want to eat the outermost ones
    if leftover_args[0][0] in '\'"':
        lchar = leftover_args[0][0]
        leftover_args[0] = leftover_args[0].lstrip(lchar)
        leftover_args[-1] = leftover_args[-1].rstrip(lchar)

    # e.g. 'ytsearch3:some query' — ytdl treats this as a search "url".
    search_query = '%s%s:%s' % (services[service], items_requested, ' '.join(leftover_args))

    search_msg = await self.safe_send_message(channel, self.str.get('cmd-search-searching', "Searching for videos..."))
    await self.send_typing(channel)

    try:
        info = await self.downloader.extract_info(player.playlist.loop, search_query, download=False, process=True)

    except Exception as e:
        await self.safe_edit_message(search_msg, str(e), send_if_fail=True)
        return
    else:
        await self.safe_delete_message(search_msg)

    if not info:
        return Response(self.str.get('cmd-search-none', "No videos found."), delete_after=30)

    # Present each result and wait for a reaction: accept, reject, or stop.
    for e in info['entries']:
        result_message = await self.safe_send_message(channel, self.str.get('cmd-search-result', "Result {0}/{1}: {2}").format(
            info['entries'].index(e) + 1, len(info['entries']), e['webpage_url']))

        def check(reaction, user):
            return user == message.author and reaction.message.id == result_message.id  # why can't these objs be compared directly?

        reactions = ['\u2705', '\U0001F6AB', '\U0001F3C1']
        for r in reactions:
            await result_message.add_reaction(r)

        try:
            reaction, user = await self.wait_for('reaction_add', timeout=30.0, check=check)
        except asyncio.TimeoutError:
            # No response within 30s: clean up and give up silently.
            await self.safe_delete_message(result_message)
            return

        if str(reaction.emoji) == '\u2705':  # check
            await self.safe_delete_message(result_message)
            await self.cmd_play(message, player, channel, author, permissions, [], e['webpage_url'])
            return Response(self.str.get('cmd-search-accept', "Alright, coming right up!"), delete_after=30)
        elif str(reaction.emoji) == '\U0001F6AB':  # cross
            # Rejected: show the next result.
            await self.safe_delete_message(result_message)
            continue
        else:
            # Flag reaction: stop offering results.
            await self.safe_delete_message(result_message)
            break

    return Response(self.str.get('cmd-search-decline', "Oh well :("), delete_after=30)
    async def cmd_np(self, player, channel, guild, message):
        """
        Usage:
            {command_prefix}np

        Displays the current song in chat.
        """

        if player.current_entry:
            # Replace any previous now-playing message so at most one exists per guild.
            if self.server_specific_data[guild]['last_np_msg']:
                await self.safe_delete_message(self.server_specific_data[guild]['last_np_msg'])
                self.server_specific_data[guild]['last_np_msg'] = None

            # TODO: Fix timedelta garbage with util function
            song_progress = ftimedelta(timedelta(seconds=player.progress))
            song_total = ftimedelta(timedelta(seconds=player.current_entry.duration))

            streaming = isinstance(player.current_entry, StreamPlaylistEntry)
            # Streams get elapsed-time only; regular entries show elapsed/total.
            prog_str = ('`[{progress}]`' if streaming else '`[{progress}/{total}]`').format(
                progress=song_progress, total=song_total
            )
            prog_bar_str = ''

            # percentage shows how much of the current song has already been played
            percentage = 0.0
            if player.current_entry.duration > 0:
                percentage = player.progress / player.current_entry.duration

            # create the actual bar: filled squares for the played fraction, empty for the rest
            progress_bar_length = 30
            for i in range(progress_bar_length):
                if (percentage < 1 / progress_bar_length * i):
                    prog_bar_str += '□'
                else:
                    prog_bar_str += '■'

            action_text = self.str.get('cmd-np-action-streaming', 'Streaming') if streaming else self.str.get('cmd-np-action-playing', 'Playing')

            # Include the requester's name when the entry carries author metadata.
            if player.current_entry.meta.get('channel', False) and player.current_entry.meta.get('author', False):
                np_text = self.str.get('cmd-np-reply-author', "Now {action}: **{title}** added by **{author}**\nProgress: {progress_bar} {progress}\n\N{WHITE RIGHT POINTING BACKHAND INDEX} <{url}>").format(
                    action=action_text,
                    title=player.current_entry.title,
                    author=player.current_entry.meta['author'].name,
                    progress_bar=prog_bar_str,
                    progress=prog_str,
                    url=player.current_entry.url
                )
            else:
                np_text = self.str.get('cmd-np-reply-noauthor', "Now {action}: **{title}**\nProgress: {progress_bar} {progress}\n\N{WHITE RIGHT POINTING BACKHAND INDEX} <{url}>").format(
                    action=action_text,
                    title=player.current_entry.title,
                    progress_bar=prog_bar_str,
                    progress=prog_str,
                    url=player.current_entry.url
                )

            # Remember the message so the next {command_prefix}np can delete it.
            self.server_specific_data[guild]['last_np_msg'] = await self.safe_send_message(channel, np_text)
            await self._manual_delete_check(message)
        else:
            return Response(
                self.str.get('cmd-np-none', 'There are no songs queued! Queue something with {0}play.') .format(self.config.command_prefix),
                delete_after=30
            )
    async def cmd_summon(self, channel, guild, author, voice_channel):
        """
        Usage:
            {command_prefix}summon

        Call the bot to the summoner's voice channel.
        """

        # The invoker must themselves be connected to voice.
        if not author.voice:
            raise exceptions.CommandError(self.str.get('cmd-summon-novc', 'You are not connected to voice. Try joining a voice channel!'))

        voice_client = self.voice_client_in(guild)
        if voice_client and guild == author.voice.channel.guild:
            # Already connected in this guild: just move to the invoker's channel.
            await voice_client.move_to(author.voice.channel)
        else:
            # move to _verify_vc_perms?
            chperms = author.voice.channel.permissions_for(guild.me)

            if not chperms.connect:
                log.warning("Cannot join channel '{0}', no permission.".format(author.voice.channel.name))
                raise exceptions.CommandError(
                    self.str.get('cmd-summon-noperms-connect', "Cannot join channel `{0}`, no permission to connect.").format(author.voice.channel.name),
                    expire_in=25
                )

            elif not chperms.speak:
                log.warning("Cannot join channel '{0}', no permission to speak.".format(author.voice.channel.name))
                raise exceptions.CommandError(
                    self.str.get('cmd-summon-noperms-speak', "Cannot join channel `{0}`, no permission to speak.").format(author.voice.channel.name),
                    expire_in=25
                )

            player = await self.get_player(author.voice.channel, create=True, deserialize=self.config.persistent_queue)

            if player.is_stopped:
                player.play()

            # NOTE(review): this appears to seed playback from the autoplaylist
            # when enabled — confirm against on_player_finished_playing.
            if self.config.auto_playlist:
                await self.on_player_finished_playing(player)

        log.info("Joining {0.guild.name}/{0.name}".format(author.voice.channel))

        return Response(self.str.get('cmd-summon-reply', 'Connected to `{0.name}`').format(author.voice.channel))
async def cmd_pause(self, player):
"""
Usage:
{command_prefix}pause
Pauses playback of the current song.
"""
if player.is_playing:
player.pause()
return Response(self.str.get('cmd-pause-reply', 'Paused music in `{0.name}`').format(player.voice_client.channel))
else:
raise exceptions.CommandError(self.str.get('cmd-pause-none', 'Player is not playing.'), expire_in=30)
async def cmd_resume(self, player):
"""
Usage:
{command_prefix}resume
Resumes playback of a paused song.
"""
if player.is_paused:
player.resume()
return Response(self.str.get('cmd-resume-reply', 'Resumed music in `{0.name}`').format(player.voice_client.channel), delete_after=15)
else:
raise exceptions.CommandError(self.str.get('cmd-resume-none', 'Player is not paused.'), expire_in=30)
async def cmd_shuffle(self, channel, player):
"""
Usage:
{command_prefix}shuffle
Shuffles the server's queue.
"""
player.playlist.shuffle()
cards = ['\N{BLACK SPADE SUIT}', '\N{BLACK CLUB SUIT}', '\N{BLACK HEART SUIT}', '\N{BLACK DIAMOND SUIT}']
random.shuffle(cards)
hand = await self.safe_send_message(channel, ' '.join(cards))
await asyncio.sleep(0.6)
for x in range(4):
random.shuffle(cards)
await self.safe_edit_message(hand, ' '.join(cards))
await asyncio.sleep(0.6)
await self.safe_delete_message(hand, quiet=True)
return Response(self.str.get('cmd-shuffle-reply', "Shuffled `{0}`'s queue.").format(player.voice_client.channel.guild), delete_after=15)
async def cmd_clear(self, player, author):
"""
Usage:
{command_prefix}clear
Clears the playlist.
"""
player.playlist.clear()
return Response(self.str.get('cmd-clear-reply', "Cleared `{0}`'s queue").format(player.voice_client.channel.guild), delete_after=20)
    async def cmd_remove(self, user_mentions, message, author, permissions, channel, player, index=None):
        """
        Usage:
            {command_prefix}remove [# in queue]

        Removes queued songs. If a number is specified, removes that song in the queue, otherwise removes the most recently queued song.
        """

        if not player.playlist.entries:
            raise exceptions.CommandError(self.str.get('cmd-remove-none', "There's nothing to remove!"), expire_in=20)

        # Mention form: remove every entry queued by the mentioned user(s).
        if user_mentions:
            for user in user_mentions:
                # Allowed for the owner, holders of the remove permission, or self-removal.
                if author.id == self.config.owner_id or permissions.remove or author == user:
                    try:
                        entry_indexes = [e for e in player.playlist.entries if e.meta.get('author', None) == user]
                        for entry in entry_indexes:
                            player.playlist.entries.remove(entry)
                        entry_text = '%s ' % len(entry_indexes) + 'item'
                        if len(entry_indexes) > 1:
                            entry_text += 's'
                        return Response(self.str.get('cmd-remove-reply', "Removed `{0}` added by `{1}`").format(entry_text, user.name).strip())

                    except ValueError:
                        raise exceptions.CommandError(self.str.get('cmd-remove-missing', "Nothing found in the queue from user `%s`") % user.name, expire_in=20)

                raise exceptions.PermissionsError(
                    self.str.get('cmd-remove-noperms', "You do not have the valid permissions to remove that entry from the queue, make sure you're the one who queued it or have instant skip permissions"), expire_in=20)

        # Index form: no index means the most recently queued entry (1-based positions).
        if not index:
            index = len(player.playlist.entries)

        try:
            index = int(index)
        except (TypeError, ValueError):
            raise exceptions.CommandError(self.str.get('cmd-remove-invalid', "Invalid number. Use {}queue to find queue positions.").format(self.config.command_prefix), expire_in=20)

        if index > len(player.playlist.entries):
            raise exceptions.CommandError(self.str.get('cmd-remove-invalid', "Invalid number. Use {}queue to find queue positions.").format(self.config.command_prefix), expire_in=20)

        # Same permission rule as above, applied to the single entry at that position.
        if author.id == self.config.owner_id or permissions.remove or author == player.playlist.get_entry_at_index(index - 1).meta.get('author', None):
            entry = player.playlist.delete_entry_at_index((index - 1))
            await self._manual_delete_check(message)
            if entry.meta.get('channel', False) and entry.meta.get('author', False):
                return Response(self.str.get('cmd-remove-reply-author', "Removed entry `{0}` added by `{1}`").format(entry.title, entry.meta['author'].name).strip())
            else:
                return Response(self.str.get('cmd-remove-reply-noauthor', "Removed entry `{0}`").format(entry.title).strip())
        else:
            raise exceptions.PermissionsError(
                self.str.get('cmd-remove-noperms', "You do not have the valid permissions to remove that entry from the queue, make sure you're the one who queued it or have instant skip permissions"), expire_in=20
            )
    async def cmd_skip(self, player, channel, author, message, permissions, voice_channel, param=''):
        """
        Usage:
            {command_prefix}skip [force/f]

        Skips the current song when enough votes are cast.
        Owners and those with the instaskip permission can add 'force' or 'f' after the command to force skip.
        """

        if player.is_stopped:
            raise exceptions.CommandError(self.str.get('cmd-skip-none', "Can't skip! The player is not playing!"), expire_in=20)

        if not player.current_entry:
            # Nothing is playing yet; report the state of the next queued entry instead.
            if player.playlist.peek():
                if player.playlist.peek()._is_downloading:
                    return Response(self.str.get('cmd-skip-dl', "The next song (`%s`) is downloading, please wait.") % player.playlist.peek().title)

                elif player.playlist.peek().is_downloaded:
                    print("The next song will be played shortly. Please wait.")
                else:
                    print("Something odd is happening. "
                          "You might want to restart the bot if it doesn't start working.")
            else:
                print("Something strange is happening. "
                      "You might want to restart the bot if it doesn't start working.")

        # Snapshot the entry so the title is correct even if playback advances mid-command.
        current_entry = player.current_entry

        # Force-skip path: legacy mode, or an explicit 'force'/'f' argument.
        if (param.lower() in ['force', 'f']) or self.config.legacy_skip:
            if author.id == self.config.owner_id \
                or permissions.instaskip \
                    or (self.config.allow_author_skip and author == player.current_entry.meta.get('author', None)):

                player.skip()  # TODO: check autopause stuff here
                await self._manual_delete_check(message)
                return Response(self.str.get('cmd-skip-force', 'Force skipped `{}`.').format(current_entry.title), reply=True, delete_after=30)
            else:
                raise exceptions.PermissionsError(self.str.get('cmd-skip-force-noperms', 'You do not have permission to force skip.'), expire_in=30)

        # TODO: ignore person if they're deaf or take them out of the list or something?
        # Currently is recounted if they vote, deafen, then vote

        # Eligible voters: channel members who are not deafened and not the bot itself.
        num_voice = sum(1 for m in voice_channel.members if not (
            m.voice.deaf or m.voice.self_deaf or m == self.user))
        if num_voice == 0: num_voice = 1  # in case all users are deafened, to avoid division by zero

        num_skips = player.skip_state.add_skipper(author.id, message)

        # Required votes = min(absolute threshold, ceil(ratio * voters)); subtract votes cast.
        skips_remaining = min(
            self.config.skips_required,
            math.ceil(self.config.skip_ratio_required / (1 / num_voice))  # Number of skips from config ratio
        ) - num_skips

        if skips_remaining <= 0:
            player.skip()  # check autopause stuff here
            return Response(
                self.str.get('cmd-skip-reply-skipped-1', 'Your skip for `{0}` was acknowledged.\nThe vote to skip has been passed.{1}').format(
                    current_entry.title,
                    self.str.get('cmd-skip-reply-skipped-2', ' Next song coming up!') if player.playlist.peek() else ''
                ),
                reply=True,
                delete_after=20
            )
        else:
            # TODO: When a song gets skipped, delete the old x needed to skip messages
            return Response(
                self.str.get('cmd-skip-reply-voted-1', 'Your skip for `{0}` was acknowledged.\n**{1}** more {2} required to vote to skip this song.').format(
                    current_entry.title,
                    skips_remaining,
                    self.str.get('cmd-skip-reply-voted-2', 'person is') if skips_remaining == 1 else self.str.get('cmd-skip-reply-voted-3', 'people are')
                ),
                reply=True,
                delete_after=20
            )
async def cmd_volume(self, message, player, new_volume=None):
"""
Usage:
{command_prefix}volume (+/-)[volume]
Sets the playback volume. Accepted values are from 1 to 100.
Putting + or - before the volume will make the volume change relative to the current volume.
"""
if not new_volume:
return Response(self.str.get('cmd-volume-current', 'Current volume: `%s%%`') % int(player.volume * 100), reply=True, delete_after=20)
relative = False
if new_volume[0] in '+-':
relative = True
try:
new_volume = int(new_volume)
except ValueError:
raise exceptions.CommandError(self.str.get('cmd-volume-invalid', '`{0}` is not a valid number').format(new_volume), expire_in=20)
vol_change = None
if relative:
vol_change = new_volume
new_volume += (player.volume * 100)
old_volume = int(player.volume * 100)
if 0 < new_volume <= 100:
player.volume = new_volume / 100.0
return Response(self.str.get('cmd-volume-reply', 'Updated volume from **%d** to **%d**') % (old_volume, new_volume), reply=True, delete_after=20)
else:
if relative:
raise exceptions.CommandError(
self.str.get('cmd-volume-unreasonable-relative', 'Unreasonable volume change provided: {}{:+} -> {}%. Provide a change between {} and {:+}.').format(
old_volume, vol_change, old_volume + vol_change, 1 - old_volume, 100 - old_volume), expire_in=20)
else:
raise exceptions.CommandError(
self.str.get('cmd-volume-unreasonable-absolute', 'Unreasonable volume provided: {}%. Provide a value between 1 and 100.').format(new_volume), expire_in=20)
    @owner_only
    async def cmd_option(self, player, option, value):
        """
        Usage:
            {command_prefix}option [option] [on/y/enabled/off/n/disabled]

        Changes a config option without restarting the bot. Changes aren't permanent and
        only last until the bot is restarted. To make permanent changes, edit the
        config file.

        Valid options:
            autoplaylist, save_videos, now_playing_mentions, auto_playlist_random, auto_pause,
            delete_messages, delete_invoking, write_current_song

        For information about these options, see the option's comment in the config file.
        """

        option = option.lower()
        value = value.lower()
        bool_y = ['on', 'y', 'enabled']
        bool_n = ['off', 'n', 'disabled']
        generic = ['save_videos', 'now_playing_mentions', 'auto_playlist_random',
                   'auto_pause', 'delete_messages', 'delete_invoking',
                   'write_current_song']  # these need to match attribute names in the Config class

        # The autoplaylist option gets special handling: enabling it validates the
        # playlist file and may trigger playback immediately.
        if option in ['autoplaylist', 'auto_playlist']:
            if value in bool_y:
                if self.config.auto_playlist:
                    raise exceptions.CommandError(self.str.get('cmd-option-autoplaylist-enabled', 'The autoplaylist is already enabled!'))
                else:
                    if not self.autoplaylist:
                        raise exceptions.CommandError(self.str.get('cmd-option-autoplaylist-none', 'There are no entries in the autoplaylist file.'))
                    self.config.auto_playlist = True
                    await self.on_player_finished_playing(player)
            elif value in bool_n:
                if not self.config.auto_playlist:
                    raise exceptions.CommandError(self.str.get('cmd-option-autoplaylist-disabled', 'The autoplaylist is already disabled!'))
                else:
                    self.config.auto_playlist = False
            else:
                raise exceptions.CommandError(self.str.get('cmd-option-invalid-value', 'The value provided was not valid.'))
            return Response("The autoplaylist is now " + ['disabled', 'enabled'][self.config.auto_playlist] + '.')
        else:
            # Any other option must be one of the generic booleans above.
            is_generic = [o for o in generic if o == option]  # check if it is a generic bool option
            if is_generic and (value in bool_y or value in bool_n):
                name = is_generic[0]
                log.debug('Setting attribute {0}'.format(name))
                setattr(self.config, name, True if value in bool_y else False)  # this is scary but should work
                attr = getattr(self.config, name)
                res = "The option {0} is now ".format(option) + ['disabled', 'enabled'][attr] + '.'
                log.warning('Option overriden for this session: {0}'.format(res))
                return Response(res)
            else:
                raise exceptions.CommandError(self.str.get('cmd-option-invalid-param' ,'The parameters provided were invalid.'))
    async def cmd_queue(self, channel, player):
        """
        Usage:
            {command_prefix}queue

        Prints the current song queue.
        """

        lines = []
        unlisted = 0
        # Worst-case length of the "... and x more" suffix, reserved up front so
        # the final message cannot exceed Discord's character limit.
        andmoretext = '* ... and %s more*' % ('x' * len(player.playlist.entries))

        if player.is_playing:
            # TODO: Fix timedelta garbage with util function
            song_progress = ftimedelta(timedelta(seconds=player.progress))
            song_total = ftimedelta(timedelta(seconds=player.current_entry.duration))
            prog_str = '`[%s/%s]`' % (song_progress, song_total)

            if player.current_entry.meta.get('channel', False) and player.current_entry.meta.get('author', False):
                lines.append(self.str.get('cmd-queue-playing-author', "Currently playing: `{0}` added by `{1}` {2}\n").format(
                    player.current_entry.title, player.current_entry.meta['author'].name, prog_str))
            else:
                lines.append(self.str.get('cmd-queue-playing-noauthor', "Currently playing: `{0}` {1}\n").format(player.current_entry.title, prog_str))

        for i, item in enumerate(player.playlist, 1):
            if item.meta.get('channel', False) and item.meta.get('author', False):
                nextline = self.str.get('cmd-queue-entry-author', '{0} -- `{1}` by `{2}`').format(i, item.title, item.meta['author'].name).strip()
            else:
                nextline = self.str.get('cmd-queue-entry-noauthor', '{0} -- `{1}`').format(i, item.title).strip()

            currentlinesum = sum(len(x) + 1 for x in lines)  # +1 is for newline char

            # Stop listing once the message would overflow or the configured queue length is hit.
            if (currentlinesum + len(nextline) + len(andmoretext) > DISCORD_MSG_CHAR_LIMIT) or (i > self.config.queue_length):
                # NOTE(review): this inner condition is always truthy (a nonzero
                # sum), so every remaining entry becomes "unlisted" — it looks
                # like a comparison against DISCORD_MSG_CHAR_LIMIT was intended.
                # Confirm before changing; behavior preserved here.
                if currentlinesum + len(andmoretext):
                    unlisted += 1
                    continue

            lines.append(nextline)

        if unlisted:
            lines.append(self.str.get('cmd-queue-more', '\n... and %s more') % unlisted)

        if not lines:
            lines.append(
                self.str.get('cmd-queue-none', 'There are no songs queued! Queue something with {}play.').format(self.config.command_prefix))

        message = '\n'.join(lines)
        return Response(message, delete_after=30)
async def cmd_clean(self, message, channel, guild, author, search_range=50):
"""
Usage:
{command_prefix}clean [range]
Removes up to [range] messages the bot has posted in chat. Default: 50, Max: 1000
"""
try:
float(search_range) # lazy check
search_range = min(int(search_range), 1000)
except:
return Response(self.str.get('cmd-clean-invalid', "Invalid parameter. Please provide a number of messages to search."), reply=True, delete_after=8)
await self.safe_delete_message(message, quiet=True)
def is_possible_command_invoke(entry):
valid_call = any(
entry.content.startswith(prefix) for prefix in [self.config.command_prefix]) # can be expanded
return valid_call and not entry.content[1:2].isspace()
delete_invokes = True
delete_all = channel.permissions_for(author).manage_messages or self.config.owner_id == author.id
def check(message):
if is_possible_command_invoke(message) and delete_invokes:
return delete_all or message.author == author
return message.author == self.user
if self.user.bot:
if channel.permissions_for(guild.me).manage_messages:
deleted = await channel.purge(check=check, limit=search_range, before=message)
return Response(self.str.get('cmd-clean-reply', 'Cleaned up {0} message{1}.').format(len(deleted), 's' * bool(deleted)), delete_after=15)
async def cmd_pldump(self, channel, author, song_url):
"""
Usage:
{command_prefix}pldump url
Dumps the individual urls of a playlist
"""
try:
info = await self.downloader.extract_info(self.loop, song_url.strip('<>'), download=False, process=False)
except Exception as e:
raise exceptions.CommandError("Could not extract info from input url\n%s\n" % e, expire_in=25)
if not info:
raise exceptions.CommandError("Could not extract info from input url, no data.", expire_in=25)
if not info.get('entries', None):
# TODO: Retarded playlist checking
# set(url, webpageurl).difference(set(url))
if info.get('url', None) != info.get('webpage_url', info.get('url', None)):
raise exceptions.CommandError("This does not seem to be a playlist.", expire_in=25)
else:
return await self.cmd_pldump(channel, info.get(''))
linegens = defaultdict(lambda: None, **{
"youtube": lambda d: 'https://www.youtube.com/watch?v=%s' % d['id'],
"soundcloud": lambda d: d['url'],
"bandcamp": lambda d: d['url']
})
exfunc = linegens[info['extractor'].split(':')[0]]
if not exfunc:
raise exceptions.CommandError("Could not extract info from input url, unsupported playlist type.", expire_in=25)
with BytesIO() as fcontent:
for item in info['entries']:
fcontent.write(exfunc(item).encode('utf8') + b'\n')
fcontent.seek(0)
await author.send("Here's the playlist dump for <%s>" % song_url, file=discord.File(fcontent, filename='playlist.txt'))
return Response("Sent a message with a playlist file.", delete_after=20)
async def cmd_listids(self, guild, author, leftover_args, cat='all'):
"""
Usage:
{command_prefix}listids [categories]
Lists the ids for various things. Categories are:
all, users, roles, channels
"""
cats = ['channels', 'roles', 'users']
if cat not in cats and cat != 'all':
return Response(
"Valid categories: " + ' '.join(['`%s`' % c for c in cats]),
reply=True,
delete_after=25
)
if cat == 'all':
requested_cats = cats
else:
requested_cats = [cat] + [c.strip(',') for c in leftover_args]
data = ['Your ID: %s' % author.id]
for cur_cat in requested_cats:
rawudata = None
if cur_cat == 'users':
data.append("\nUser IDs:")
rawudata = ['%s #%s: %s' % (m.name, m.discriminator, m.id) for m in guild.members]
elif cur_cat == 'roles':
data.append("\nRole IDs:")
rawudata = ['%s: %s' % (r.name, r.id) for r in guild.roles]
elif cur_cat == 'channels':
data.append("\nText Channel IDs:")
tchans = [c for c in guild.channels if isinstance(c, discord.TextChannel)]
rawudata = ['%s: %s' % (c.name, c.id) for c in tchans]
rawudata.append("\nVoice Channel IDs:")
vchans = [c for c in guild.channels if isinstance(c, discord.VoiceChannel)]
rawudata.extend('%s: %s' % (c.name, c.id) for c in vchans)
if rawudata:
data.extend(rawudata)
with BytesIO() as sdata:
sdata.writelines(d.encode('utf8') + b'\n' for d in data)
sdata.seek(0)
# TODO: Fix naming (Discord20API-ids.txt)
await author.send(file=discord.File(sdata, filename='%s-ids-%s.txt' % (guild.name.replace(' ', '_'), cat)))
return Response("Sent a message with a list of IDs.", delete_after=20)
async def cmd_perms(self, author, user_mentions, channel, guild, permissions):
"""
Usage:
{command_prefix}perms [@user]
Sends the user a list of their permissions, or the permissions of the user specified.
"""
lines = ['Command permissions in %s\n' % guild.name, '```', '```']
if user_mentions:
user = user_mentions[0]
permissions = self.permissions.for_user(user)
for perm in permissions.__dict__:
if perm in ['user_list'] or permissions.__dict__[perm] == set():
continue
lines.insert(len(lines) - 1, "%s: %s" % (perm, permissions.__dict__[perm]))
await self.safe_send_message(author, '\n'.join(lines))
return Response("\N{OPEN MAILBOX WITH RAISED FLAG}", delete_after=20)
@owner_only
async def cmd_setname(self, leftover_args, name):
"""
Usage:
{command_prefix}setname name
Changes the bot's username.
Note: This operation is limited by discord to twice per hour.
"""
name = ' '.join([name, *leftover_args])
try:
await self.user.edit(username=name)
except discord.HTTPException:
raise exceptions.CommandError(
"Failed to change name. Did you change names too many times? "
"Remember name changes are limited to twice per hour.")
except Exception as e:
raise exceptions.CommandError(e, expire_in=20)
return Response("Set the bot's username to **{0}**".format(name), delete_after=20)
async def cmd_setnick(self, guild, channel, leftover_args, nick):
"""
Usage:
{command_prefix}setnick nick
Changes the bot's nickname.
"""
if not channel.permissions_for(guild.me).change_nickname:
raise exceptions.CommandError("Unable to change nickname: no permission.")
nick = ' '.join([nick, *leftover_args])
try:
await guild.me.edit(nick=nick)
except Exception as e:
raise exceptions.CommandError(e, expire_in=20)
return Response("Set the bot's nickname to `{0}`".format(nick), delete_after=20)
@owner_only
async def cmd_setavatar(self, message, url=None):
"""
Usage:
{command_prefix}setavatar [url]
Changes the bot's avatar.
Attaching a file and leaving the url parameter blank also works.
"""
if message.attachments:
thing = message.attachments[0].url
elif url:
thing = url.strip('<>')
else:
raise exceptions.CommandError("You must provide a URL or attach a file.", expire_in=20)
try:
timeout = aiohttp.ClientTimeout(total=10)
async with self.aiosession.get(thing, timeout=timeout) as res:
await self.user.edit(avatar=await res.read())
except Exception as e:
raise exceptions.CommandError("Unable to change avatar: {}".format(e), expire_in=20)
return Response("Changed the bot's avatar.", delete_after=20)
async def cmd_disconnect(self, guild):
"""
Usage:
{command_prefix}disconnect
Forces the bot leave the current voice channel.
"""
await self.disconnect_voice_client(guild)
return Response("Disconnected from `{0.name}`".format(guild), delete_after=20)
async def cmd_restart(self, channel):
"""
Usage:
{command_prefix}restart
Restarts the bot.
Will not properly load new dependencies or file updates unless fully shutdown
and restarted.
"""
await self.safe_send_message(channel, "\N{WAVING HAND SIGN} Restarting. If you have updated your bot "
"or its dependencies, you need to restart the bot properly, rather than using this command.")
player = self.get_player_in(channel.guild)
if player and player.is_paused:
player.resume()
await self.disconnect_all_voice_clients()
raise exceptions.RestartSignal()
async def cmd_shutdown(self, channel):
"""
Usage:
{command_prefix}shutdown
Disconnects from voice channels and closes the bot process.
"""
await self.safe_send_message(channel, "\N{WAVING HAND SIGN}")
player = self.get_player_in(channel.guild)
if player and player.is_paused:
player.resume()
await self.disconnect_all_voice_clients()
raise exceptions.TerminateSignal()
async def cmd_leaveserver(self, val, leftover_args):
"""
Usage:
{command_prefix}leaveserver <name/ID>
Forces the bot to leave a server.
When providing names, names are case-sensitive.
"""
if leftover_args:
val = ' '.join([val, *leftover_args])
t = self.get_guild(val)
if t is None:
t = discord.utils.get(self.guilds, name=val)
if t is None:
raise exceptions.CommandError('No guild was found with the ID or name as `{0}`'.format(val))
await t.leave()
return Response('Left the guild: `{0.name}` (Owner: `{0.owner.name}`, ID: `{0.id}`)'.format(t))
@dev_only
async def cmd_breakpoint(self, message):
log.critical("Activating debug breakpoint")
return
    @dev_only
    async def cmd_objgraph(self, channel, func='most_common_types()'):
        """
        Dev-only: dumps objgraph statistics for memory-leak hunting.

        ``func`` selects the report: 'growth', 'leaks', 'leakstats', or any
        objgraph expression (evaluated via eval — dev/owner input only).
        """
        import objgraph

        await self.send_typing(channel)

        if func == 'growth':
            f = StringIO()
            objgraph.show_growth(limit=10, file=f)
            f.seek(0)
            data = f.read()
            f.close()

        elif func == 'leaks':
            f = StringIO()
            objgraph.show_most_common_types(objects=objgraph.get_leaking_objects(), file=f)
            f.seek(0)
            data = f.read()
            f.close()

        elif func == 'leakstats':
            data = objgraph.typestats(objects=objgraph.get_leaking_objects())

        else:
            # HACK: evaluates an arbitrary objgraph expression; acceptable only
            # because this command is gated behind @dev_only.
            data = eval('objgraph.' + func)

        return Response(data, codeblock='py')
    @dev_only
    async def cmd_debug(self, message, _player, *, data):
        """
        Dev-only: evaluates or executes arbitrary Python in this handler's scope.

        Tries eval() first (expression result); falls back to exec() for
        statements. Coroutine results are awaited. Intentionally unsafe —
        gated behind @dev_only.
        """
        codeblock = "```py\n{}\n```"
        result = None

        # Strip a surrounding markdown code fence, if present.
        if data.startswith('```') and data.endswith('```'):
            data = '\n'.join(data.rstrip('`\n').split('\n')[1:])

        code = data.strip('` \n')

        try:
            result = eval(code)
        except:
            # eval failed (could be a statement, not an expression) — try exec.
            try:
                exec(code)
            except Exception as e:
                traceback.print_exc(chain=False)
                return Response("{}: {}".format(type(e).__name__, e))

        if asyncio.iscoroutine(result):
            result = await result

        return Response(codeblock.format(result))
    async def on_message(self, message):
        """
        Dispatches chat messages to the matching ``cmd_*`` handler.

        Parses the command name and arguments, builds the handler's keyword
        arguments from its signature via introspection, enforces per-group
        permissions, invokes the handler, and renders its Response (or a
        CommandError) back to the channel.
        """
        await self.wait_until_ready()

        message_content = message.content.strip()
        # Ignore anything that doesn't start with the configured prefix.
        if not message_content.startswith(self.config.command_prefix):
            return

        if message.author == self.user:
            log.warning("Ignoring command from myself ({})".format(message.content))
            return

        if self.config.bound_channels and message.channel.id not in self.config.bound_channels:
            return  # if I want to log this I just move it under the prefix check

        # Commands are guild-channel only.
        if not isinstance(message.channel, discord.abc.GuildChannel):
            return

        command, *args = message_content.split(' ')  # Uh, doesn't this break prefixes with spaces in them (it doesn't, config parser already breaks them)
        command = command[len(self.config.command_prefix):].lower().strip()

        args = ' '.join(args).lstrip(' ').split(' ')

        # Resolve the handler by naming convention: cmd_<command>.
        handler = getattr(self, 'cmd_' + command, None)
        if not handler:
            return

        # NOTE(review): this branch looks unreachable — GuildChannel was already
        # required above, so a PrivateChannel cannot reach here. Confirm intent.
        if isinstance(message.channel, discord.abc.PrivateChannel):
            if not (message.author.id == self.config.owner_id and command == 'joinserver'):
                await self.send_message(message.channel, 'You cannot use this bot in private messages.')
                return

        # Blacklisted users are ignored (the owner is exempt).
        if message.author.id in self.blacklist and message.author.id != self.config.owner_id:
            log.warning("User blacklisted: {0.id}/{0!s} ({1})".format(message.author, command))
            return

        else:
            log.info("{0.id}/{0!s}: {1}".format(message.author, message_content.replace('\n', '\n... ')))

        user_permissions = self.permissions.for_user(message.author)

        # Inspect the handler's signature to know which context kwargs to inject.
        argspec = inspect.signature(handler)
        params = argspec.parameters.copy()

        sentmsg = response = None

        # noinspection PyBroadException
        try:
            if user_permissions.ignore_non_voice and command in user_permissions.ignore_non_voice:
                await self._check_ignore_non_voice(message)

            # Inject well-known context objects for every parameter the handler declares.
            handler_kwargs = {}
            if params.pop('message', None):
                handler_kwargs['message'] = message

            if params.pop('channel', None):
                handler_kwargs['channel'] = message.channel

            if params.pop('author', None):
                handler_kwargs['author'] = message.author

            if params.pop('guild', None):
                handler_kwargs['guild'] = message.guild

            if params.pop('player', None):
                handler_kwargs['player'] = await self.get_player(message.channel)

            if params.pop('_player', None):
                handler_kwargs['_player'] = self.get_player_in(message.guild)

            if params.pop('permissions', None):
                handler_kwargs['permissions'] = user_permissions

            if params.pop('user_mentions', None):
                handler_kwargs['user_mentions'] = list(map(message.guild.get_member, message.raw_mentions))

            if params.pop('channel_mentions', None):
                handler_kwargs['channel_mentions'] = list(map(message.guild.get_channel, message.raw_channel_mentions))

            if params.pop('voice_channel', None):
                handler_kwargs['voice_channel'] = message.guild.me.voice.channel if message.guild.me.voice else None

            if params.pop('leftover_args', None):
                handler_kwargs['leftover_args'] = args

            # Map the remaining (user-supplied) parameters from the split args.
            args_expected = []
            for key, param in list(params.items()):

                # parse (*args) as a list of args
                if param.kind == param.VAR_POSITIONAL:
                    handler_kwargs[key] = args
                    params.pop(key)
                    continue

                # parse (*, args) as args rejoined as a string
                # multiple of these arguments will have the same value
                if param.kind == param.KEYWORD_ONLY and param.default == param.empty:
                    handler_kwargs[key] = ' '.join(args)
                    params.pop(key)
                    continue

                doc_key = '[{}={}]'.format(key, param.default) if param.default is not param.empty else key
                args_expected.append(doc_key)

                # Ignore keyword args with default values when the command had no arguments
                if not args and param.default is not param.empty:
                    params.pop(key)
                    continue

                # Assign given values to positional arguments
                if args:
                    arg_value = args.pop(0)
                    handler_kwargs[key] = arg_value
                    params.pop(key)

            # Enforce per-group command whitelist/blacklist (the owner bypasses both).
            if message.author.id != self.config.owner_id:
                if user_permissions.command_whitelist and command not in user_permissions.command_whitelist:
                    raise exceptions.PermissionsError(
                        "This command is not enabled for your group ({}).".format(user_permissions.name),
                        expire_in=20)

                elif user_permissions.command_blacklist and command in user_permissions.command_blacklist:
                    raise exceptions.PermissionsError(
                        "This command is disabled for your group ({}).".format(user_permissions.name),
                        expire_in=20)

            # Invalid usage, return docstring
            if params:
                docs = getattr(handler, '__doc__', None)
                if not docs:
                    docs = 'Usage: {}{} {}'.format(
                        self.config.command_prefix,
                        command,
                        ' '.join(args_expected)
                    )

                docs = dedent(docs)
                await self.safe_send_message(
                    message.channel,
                    '```\n{}\n```'.format(docs.format(command_prefix=self.config.command_prefix)),
                    expire_in=60
                )
                return

            response = await handler(**handler_kwargs)
            if response and isinstance(response, Response):
                # Render as an embed when enabled, unless the handler already built one.
                if not isinstance(response.content, discord.Embed) and self.config.embeds:
                    content = self._gen_embed()
                    content.title = command
                    content.description = response.content
                else:
                    content = response.content

                # Prepend a mention when the handler asked for a reply.
                if response.reply:
                    if isinstance(content, discord.Embed):
                        content.description = '{} {}'.format(message.author.mention, content.description if content.description is not discord.Embed.Empty else '')
                    else:
                        content = '{}: {}'.format(message.author.mention, content)

                sentmsg = await self.safe_send_message(
                    message.channel, content,
                    expire_in=response.delete_after if self.config.delete_messages else 0,
                    also_delete=message if self.config.delete_invoking else None
                )

        except (exceptions.CommandError, exceptions.HelpfulError, exceptions.ExtractionError) as e:
            log.error("Error in {0}: {1.__class__.__name__}: {1.message}".format(command, e), exc_info=True)

            expirein = e.expire_in if self.config.delete_messages else None
            alsodelete = message if self.config.delete_invoking else None

            if self.config.embeds:
                content = self._gen_embed()
                content.add_field(name='Error', value=e.message, inline=False)
                content.colour = 13369344
            else:
                content = '```\n{}\n```'.format(e.message)

            await self.safe_send_message(
                message.channel,
                content,
                expire_in=expirein,
                also_delete=alsodelete
            )

        except exceptions.Signal:
            # Restart/Terminate signals must propagate to the runner.
            raise

        except Exception:
            log.error("Exception in on_message", exc_info=True)
            if self.config.debug_mode:
                await self.safe_send_message(message.channel, '```\n{}\n```'.format(traceback.format_exc()))

        finally:
            # If the command produced no output, optionally clean up the invoking message.
            if not sentmsg and not response and self.config.delete_invoking:
                await asyncio.sleep(5)
                await self.safe_delete_message(message, quiet=True)
async def gen_cmd_list(self, message, list_all_cmds=False):
    """Populate ``self.commands`` with the prefixed command names the message
    author is allowed to run (or every non-dev command when *list_all_cmds*).
    """
    for attr_name in dir(self):
        # Only cmd_* attributes count, and dev-only commands are never listed.
        # This will always return at least cmd_help, since they needed perms
        # to run this command.
        if not attr_name.startswith('cmd_') or hasattr(getattr(self, attr_name), 'dev_cmd'):
            continue
        perms = self.permissions.for_user(message.author)
        cmd_name = attr_name.replace('cmd_', '').lower()
        allowed = perms.command_whitelist
        denied = perms.command_blacklist
        if list_all_cmds:
            self.commands.append('{}{}'.format(self.config.command_prefix, cmd_name))
        elif denied and cmd_name in denied:
            continue
        elif allowed and cmd_name not in allowed:
            continue
        else:
            self.commands.append('{}{}'.format(self.config.command_prefix, cmd_name))
async def on_voice_state_update(self, member, before, after):
    """Auto-pause/resume the music player as users enter or leave the voice
    channel the bot is playing in.

    ``before``/``after`` are discord voice states; only the channel attribute
    is used here.
    """
    if not self.init_ok:
        return  # Ignore stuff before ready
    # Pick whichever side of the transition actually names a channel.
    if before.channel:
        channel = before.channel
    elif after.channel:
        channel = after.channel
    else:
        return
    if not self.config.auto_pause:
        return
    autopause_msg = "{state} in {channel.guild.name}/{channel.name} {reason}"
    auto_paused = self.server_specific_data[channel.guild]['auto_paused']
    player = await self.get_player(channel)
    if not player:
        return
    if not member == self.user:  # if the user is not the bot
        # Someone joined the bot's channel -> resume if we had auto-paused.
        if player.voice_client.channel != before.channel and player.voice_client.channel == after.channel:  # if the person joined
            if auto_paused and player.is_paused:
                log.info(autopause_msg.format(
                    state = "Unpausing",
                    channel = player.voice_client.channel,
                    reason = ""
                ).strip())
                self.server_specific_data[player.voice_client.guild]['auto_paused'] = False
                player.resume()
        # Someone left the bot's channel -> pause if it is now empty.
        elif player.voice_client.channel == before.channel and player.voice_client.channel != after.channel:
            if len(player.voice_client.channel.members) == 0:
                if not auto_paused and player.is_playing:
                    log.info(autopause_msg.format(
                        state = "Pausing",
                        channel = player.voice_client.channel,
                        reason = "(empty channel)"
                    ).strip())
                    self.server_specific_data[player.voice_client.guild]['auto_paused'] = True
                    player.pause()
        else:
            # The member moved within/around other channels; re-check occupancy.
            if len(player.voice_client.channel.members) > 0:  # channel is not empty
                if auto_paused and player.is_paused:
                    log.info(autopause_msg.format(
                        state = "Unpausing",
                        channel = player.voice_client.channel,
                        reason = ""
                    ).strip())
                    self.server_specific_data[player.voice_client.guild]['auto_paused'] = False
                    player.resume()
async def on_guild_update(self, before:discord.Guild, after:discord.Guild):
    """Log when a guild's voice region changes."""
    if before.region != after.region:
        # Pass the values as lazy %-args instead of pre-formatting with '%':
        # the message is only interpolated if WARNING is actually emitted.
        log.warning("Guild \"%s\" changed regions: %s -> %s", after.name, before.region, after.region)
async def on_guild_join(self, guild:discord.Guild):
    """Create the per-guild data directory when the bot joins a guild."""
    log.info("Bot has been joined guild: {}".format(guild.name))
    log.debug("Creating data folder for guild %s", guild.id)
    # parents=True so a missing top-level 'data' directory is created as well
    # instead of raising FileNotFoundError on a fresh install.
    pathlib.Path('data/%s/' % guild.id).mkdir(parents=True, exist_ok=True)
async def on_guild_remove(self, guild:discord.Guild):
    """Tear down state for a guild the bot was removed from."""
    log.info("Bot has been removed from guild: {}".format(guild.name))
    log.debug('Updated guild list:')
    # Plain loop instead of a list comprehension used only for its side
    # effect (the comprehension built and discarded a throwaway list).
    for s in self.guilds:
        log.debug(' - ' + s.name)
    # Kill the player for this guild, if one exists.
    if guild.id in self.players:
        self.players.pop(guild.id).kill()
async def on_guild_available(self, guild:discord.Guild):
    """Resume a player that was paused because its guild went unavailable."""
    if not self.init_ok:
        return  # Ignore pre-ready events
    log.debug("Guild \"{}\" has become available.".format(guild.name))
    player = self.get_player_in(guild)
    if not (player and player.is_paused):
        return
    # Only resume if the pause was caused by the earlier unavailability.
    was_availability_pause = self.server_specific_data[guild]['availability_paused']
    if was_availability_pause:
        log.debug("Resuming player in \"{}\" due to availability.".format(guild.name))
        self.server_specific_data[guild]['availability_paused'] = False
        player.resume()
async def on_server_unavailable(self, guild:discord.Guild):
    """Pause playback when a guild drops offline, remembering why we paused."""
    log.debug("Guild \"{}\" has become unavailable.".format(guild.name))
    player = self.get_player_in(guild)
    if not (player and player.is_playing):
        return
    log.debug("Pausing player in \"{}\" due to unavailability.".format(guild.name))
    # Flag the pause so on_guild_available knows it may auto-resume.
    self.server_specific_data[guild]['availability_paused'] = True
    player.pause()
def voice_client_in(self, guild):
    """Return this bot's voice client connected to *guild*, or None."""
    return next((vc for vc in self.voice_clients if vc.guild == guild), None)
| 42.916724 | 219 | 0.584963 |
12403f0e25dd71994cf42bc623a51a4b55ac182d | 2,395 | py | Python | Tree/postorder_traversal.py | AaronOS0/leetcode_solver | 9700d9c3ea3e7645a00c2c82bcca06c7fb423403 | [
"MIT"
] | null | null | null | Tree/postorder_traversal.py | AaronOS0/leetcode_solver | 9700d9c3ea3e7645a00c2c82bcca06c7fb423403 | [
"MIT"
] | null | null | null | Tree/postorder_traversal.py | AaronOS0/leetcode_solver | 9700d9c3ea3e7645a00c2c82bcca06c7fb423403 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from typing import List, Optional
from collections import Counter, deque
"""
Questions:
145. Binary Tree Postorder Traversal
590. N-ary Tree Postorder Traversal
"""
class TreeNode:
    """Node of a binary tree (LeetCode-style)."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val      # node payload
        self.left = left    # left child, TreeNode or None
        self.right = right  # right child, TreeNode or None
class Node:
    """Node of an n-ary tree (LeetCode-style)."""
    def __init__(self, val=None, children=None):
        self.val = val            # node payload
        self.children = children  # list of child Nodes (or None as passed in)
class Solution:
    """
    Questions:
        145. Binary Tree Postorder Traversal
        590. N-ary Tree Postorder Traversal
    """

    def postorderTraversal(self, root: Optional[TreeNode]) -> List[int]:
        """145 (recursive): left, right, root order.

        >>> root = [1,null,2,3]
        >>> [3,2,1]
        """
        out = []

        def walk(node):
            # Depth-first: children first, then the node itself.
            if node is None:
                return
            walk(node.left)
            walk(node.right)
            out.append(node.val)

        walk(root)
        return out

    def postorderTraversal1(self, root: Optional[TreeNode]) -> List[int]:
        """145 (iterative): collect root-right-left preorder, then reverse."""
        if root is None:
            return []
        seen, stack = [], [root]
        while stack:
            node = stack.pop()
            seen.append(node.val)
            # Push left first so right is popped (and visited) first.
            if node.left:
                stack.append(node.left)
            if node.right:
                stack.append(node.right)
        seen.reverse()
        return seen

    def postorder(self, root: 'Node') -> List[int]:
        """590 (recursive): children in order, then the node.

        >>> [1,null,3,2,4,null,5,6]
        >>> [5,6,3,2,4,1]
        """
        if not root:
            return []
        acc = []

        def visit(node):
            for child in node.children:
                visit(child)
            acc.append(node.val)

        visit(root)
        return acc

    def postorder1(self, root: 'Node') -> List[int]:
        """590 (iterative): reversed root-then-children traversal."""
        if not root:
            return []
        order, pending = [], [root]
        while pending:
            node = pending.pop()
            order.append(node.val)
            pending.extend(node.children)
        return order[::-1]
| 23.712871 | 89 | 0.552401 |
2c44af17f295f1f296ccca748d51cd5cd9eccf02 | 824 | py | Python | fest_app/forms.py | prkhrv/Ebullience-2k18 | 0799a81239d1c1b1b6f8d49eb733f44fc22ff237 | [
"MIT"
] | null | null | null | fest_app/forms.py | prkhrv/Ebullience-2k18 | 0799a81239d1c1b1b6f8d49eb733f44fc22ff237 | [
"MIT"
] | null | null | null | fest_app/forms.py | prkhrv/Ebullience-2k18 | 0799a81239d1c1b1b6f8d49eb733f44fc22ff237 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.forms import UserCreationForm,UserChangeForm
from .models import CustomUser
class CustomUserCreationForm(UserCreationForm):
    """Registration form for CustomUser, enforcing a unique email address."""

    class Meta(UserCreationForm.Meta):
        model = CustomUser
        fields = ('username','first_name','last_name','phone','roll','branch','email','section','year')

    def clean_email(self):
        """Reject an email that already belongs to an existing user."""
        email = self.cleaned_data.get('email')
        try:
            # EAFP: a successful lookup means the address is taken.
            CustomUser.objects.get(email=email)
        except CustomUser.DoesNotExist:
            return email
        raise forms.ValidationError('This Email is Already in Use')
class CustomUserChangeForm(UserChangeForm):
    """Edit form exposing the custom profile fields of CustomUser."""
    class Meta:
        model = CustomUser
        fields = ('username','first_name','last_name','phone','roll','branch','email','section','year')
dbb7c12df59415fe21c3c70b37a5917c017ab8bf | 1,040 | py | Python | cc1101/configuration.py | codeandbacon/radio-paella | 98bd6829299e528de9c69690206dee51b1372687 | [
"MIT"
] | 1 | 2020-05-03T11:37:40.000Z | 2020-05-03T11:37:40.000Z | cc1101/configuration.py | codeandbacon/radio-paella | 98bd6829299e528de9c69690206dee51b1372687 | [
"MIT"
] | null | null | null | cc1101/configuration.py | codeandbacon/radio-paella | 98bd6829299e528de9c69690206dee51b1372687 | [
"MIT"
] | null | null | null |
from micropython import const

# CC1101 configuration register addresses (0x00-0x2E), as used over SPI.
# Names match the TI CC1101 datasheet register map — confirm against the
# datasheet before relying on any per-register semantics.

# registers
# GDO pin configuration / FIFO threshold
IOCFG2 = const(0x00)
IOCFG1 = const(0x01)
IOCFG0 = const(0x02)
FIFOTHR = const(0x03)
# Sync word, packet length/control, device address, channel
SYNC1 = const(0x04)
SYNC0 = const(0x05)
PKTLEN = const(0x06)
PKTCTRL1 = const(0x07)
PKTCTRL0 = const(0x08)
ADDR = const(0x09)
CHANNR = const(0x0a)
# Frequency synthesizer control and carrier frequency
FSCTRL1 = const(0x0b)
FSCTRL0 = const(0x0c)
FREQ2 = const(0x0d)
FREQ1 = const(0x0e)
FREQ0 = const(0x0f)
# Modem configuration and deviation
MDMCFG4 = const(0x10)
MDMCFG3 = const(0x11)
MDMCFG2 = const(0x12)
MDMCFG1 = const(0x13)
MDMCFG0 = const(0x14)
DEVIATN = const(0x15)
# Main radio control state machine
MCSM2 = const(0x16)
MCSM1 = const(0x17)
MCSM0 = const(0x18)
# Frequency offset / bit sync compensation
FOCCFG = const(0x19)
BSCFG = const(0x1a)
# AGC control
AGCCTRL2 = const(0x1b)
AGCCTRL1 = const(0x1c)
AGCCTRL0 = const(0x1d)
# Wake-on-radio event timing and control
WOREVT1 = const(0x1e)
WOREVT0 = const(0x1f)
WORCTRL = const(0x20)
# Front end RX/TX configuration
FREND1 = const(0x21)
FREND0 = const(0x22)
# Frequency synthesizer calibration
FSCAL3 = const(0x23)
FSCAL2 = const(0x24)
FSCAL1 = const(0x25)
FSCAL0 = const(0x26)
# RC oscillator configuration
RCCTRL1 = const(0x27)
RCCTRL0 = const(0x28)
# Test registers
FSTEST = const(0x29)
PTEST = const(0x2a)
AGCTEST = const(0x2b)
TEST2 = const(0x2c)
TEST1 = const(0x2d)
TEST0 = const(0x2e)
502c872cabde610f0e004569e0397fc392a91203 | 5,551 | py | Python | utils/utils.py | mi-erasmusmc/Sard | d8228a7c49e2e6f98fbd16d4531cb3fc4b505590 | [
"MIT"
] | null | null | null | utils/utils.py | mi-erasmusmc/Sard | d8228a7c49e2e6f98fbd16d4531cb3fc4b505590 | [
"MIT"
] | null | null | null | utils/utils.py | mi-erasmusmc/Sard | d8228a7c49e2e6f98fbd16d4531cb3fc4b505590 | [
"MIT"
] | null | null | null | import json
import numpy as np
import seaborn as sns
import torch
from matplotlib import pyplot as plt
from sklearn.metrics import roc_curve, roc_auc_score, precision_recall_curve, auc
sns.set_theme()
def extract_best_model(directory, metric='val_loss'):
    """
    Extract best model from a directory with checkpoints.

    Checkpoint file names are expected to embed the metric as
    ``...<metric>:<value>_...`` (e.g. ``epoch3_auc:0.91_model.pt``).

    Parameters
    ----------
    directory : Pathlib Path, directory where model checkpoints have been stored
    metric : Which metric to use, either 'auc' or 'val_loss' (None means 'auc')

    Returns
    -------
    best_model_file : Pathlib path to file of best model

    Raises
    ------
    ValueError : if *metric* is unknown or no matching checkpoint is found
    """
    if metric is None:
        metric = 'auc'
    if metric == 'auc':
        direction = 'max'
    elif metric == 'val_loss':
        direction = 'min'
    else:
        # BUG FIX: the original built this ValueError but never raised it,
        # which later crashed with an unrelated NameError on `direction`.
        raise ValueError(f'Unknown metric supplied. Needs to be either "auc" or "val_loss" but {metric} was given')
    scores, fnames = [], []
    for f in directory.rglob('*' + metric + '*'):
        # Take the number between "<metric>:" and the next "_".
        value_part = f.name.split(metric + ':')[1]
        scores.append(float(value_part.split('_')[0]))
        fnames.append(f)
    if not fnames:
        # Fail with a clear message instead of np.argmax's opaque error on [].
        raise ValueError(f'No checkpoint files matching metric {metric!r} found in {directory}')
    best_index = np.argmax(scores) if direction == 'max' else np.argmin(scores)
    return fnames[best_index]
def create_sequence_data(covariates):
    """
    Takes in a covariate dataframe and creates sequences which are lists of
    patients of visits (lists) of concepts (lists).

    :param covariates: dataframe, covariate dataframe from plp package; must
        have columns 'rowIdPython', 'timeId' and 'covariateId' — confirm
        against caller
    :return: sequence_list : a nested list of patients of visits of concepts,
             concepts are integers
             visit_list : list of patient visits with timeId of the visit
    """
    # Inner groupby collects the concepts of each (patient, visit) into a
    # list; the outer groupby (level=0 == rowIdPython) collects those visit
    # lists per patient.
    sequence_list = list(
        covariates.groupby(['rowIdPython', 'timeId'])['covariateId'].agg(list).groupby(level=0).agg(list))
    # Sorted, de-duplicated visit timeIds per patient.
    visit_list = list(covariates.groupby(['rowIdPython'])['timeId'].agg(lambda x: sorted(list(x.unique()))).values)
    return sequence_list, visit_list
def load_data(data_folder, name='plp_output'):
    """
    Loads data saved from plp.

    Parameters
    ----------
    data_folder : pathlib Path, folder where PLP output was saved
    name : str, name of the data object file inside *data_folder*

    Returns
    -------
    plp_data : dict loaded via ``torch.load`` and augmented in place with
        'outcomes' (float Series, 1.0 for patients with the outcome) and
        'feature_names' (covariate names for the mapped covariate ids).
        The sparse 'data' tensor is coalesced.
    """
    # load output from plp data export
    plp_data = torch.load(data_folder.joinpath(name))
    population = plp_data['population']
    plp_data['outcomes'] = population.outcomeCount.astype(np.float32)
    # Coalesce merges duplicate indices in the sparse COO tensor.
    plp_data['data'] = plp_data['data'].coalesce()
    # Map retained covariate ids back to their human-readable names.
    old_covariate_ids = plp_data['map'].oldCovariateId
    covariate_ref = plp_data['covariateRef']
    feature_names = covariate_ref[covariate_ref.covariateId.isin(old_covariate_ids)].covariateName.values
    plp_data['feature_names'] = feature_names
    return plp_data
def plot_roc_curve(y_true, predictions, title='Dementia'):
    """
    Plots the ROC curve of many models together.

    Parameters
    ----------
    y_true : true labels
    predictions : dictionary with one (key, value) pair per model's predictions
    title : str appended to the plot title

    Returns
    -------
    None — shows the plot.
    """
    plt.figure(figsize=(8, 6))
    for key, value in predictions.items():
        fpr, tpr, _ = roc_curve(y_true, value)
        # Renamed from `auc`: the original local shadowed sklearn.metrics.auc
        # imported at module level (used by plot_pr).
        auc_score = roc_auc_score(y_true, value)
        plt.plot(fpr, tpr, label=f'{key} AUC: {auc_score:.3f}')
    plt.plot([0, 1], [0, 1], color='orange', linestyle='--')
    plt.xticks(np.arange(0.0, 1.1, step=0.1))
    plt.xlabel('False positive rate', fontsize=15)
    plt.yticks(np.arange(0.0, 1.1, step=0.1))
    plt.ylabel('True positive rate', fontsize=15)
    plt.title(f'ROC Curve {title}', fontweight='bold', fontsize=15)
    plt.legend(prop={'size': 13}, loc='lower right')
    plt.show()
def plot_pr(y_true, predictions, title='dementia'):
    """
    Plots the precision-recall curves for many models.

    Parameters
    ----------
    y_true : ground truth from test set
    predictions : dictionary with one (key, value) pair per model's predictions
    title : str appended to the plot title

    Returns
    -------
    None — draws onto the current matplotlib figure.
    """
    plt.figure(figsize=(8, 6))
    for model_name, scores in predictions.items():
        prec, rec, _ = precision_recall_curve(y_true, scores)
        auprc = auc(rec, prec)
        plt.plot(rec, prec, label=f'{model_name} AUPRC: {auprc:.3f}')
    tick_positions = np.arange(0.0, 1.1, step=0.1)
    plt.xticks(tick_positions)
    plt.xlabel('Recall', fontsize=15)
    plt.yticks(tick_positions)
    plt.ylabel('Precision', fontsize=15)
    plt.title(f'Precision-recall curve {title}', fontweight='bold', fontsize=15)
    plt.legend(prop={'size': 13}, loc='upper right')
class NpEncoder(json.JSONEncoder):
    """JSON encoder that converts numpy scalars and arrays to native Python
    types before serialization.
    """

    def default(self, obj):
        # Dispatch table: numpy type -> converter to a JSON-friendly value.
        for np_type, convert in ((np.integer, int),
                                 (np.floating, float),
                                 (np.ndarray, lambda arr: arr.tolist())):
            if isinstance(obj, np_type):
                return convert(obj)
        # Anything else: defer to the base class (raises TypeError).
        return super().default(obj)
d50c0348467071ff95038defa6df2252d2df3b32 | 41,502 | py | Python | tests/test_forms.py | azmeuk/webtest | ca58f4d1712d87397e84ed30fd87475c6a814d32 | [
"MIT"
] | 239 | 2015-01-23T06:19:06.000Z | 2022-03-08T10:40:10.000Z | tests/test_forms.py | azmeuk/webtest | ca58f4d1712d87397e84ed30fd87475c6a814d32 | [
"MIT"
] | 96 | 2015-01-05T17:16:52.000Z | 2022-02-04T17:21:41.000Z | tests/test_forms.py | azmeuk/webtest | ca58f4d1712d87397e84ed30fd87475c6a814d32 | [
"MIT"
] | 84 | 2015-01-21T14:07:59.000Z | 2022-03-06T08:52:47.000Z | import cgi
import os.path
import struct
import sys
import webtest
from webob import Request
from webtest.debugapp import DebugApp
from webtest.compat import to_bytes
from webtest.forms import NoValue, Submit, Upload
from tests.compat import unittest
from tests.compat import u
class TestForms(unittest.TestCase):
    """Exercise webtest.Form field access, value setting and submission
    against HTML fixtures served by DebugApp.
    """

    def callFUT(self, filename='form_inputs.html', formid='simple_form'):
        # Serve the fixture file and return the requested form object.
        dirname = os.path.join(os.path.dirname(__file__), 'html')
        app = DebugApp(form=os.path.join(dirname, filename), show_form=True)
        resp = webtest.TestApp(app).get('/form.html')
        return resp.forms[formid]

    def test_set_submit_field(self):
        # Submit controls are read-only; assigning must raise.
        form = self.callFUT()
        self.assertRaises(
            AttributeError,
            form['submit'].value__set,
            'foo'
        )

    def test_button(self):
        form = self.callFUT()
        button = form['button']
        self.assertTrue(isinstance(button, Submit),
                        "<button> without type is a submit button")

    def test_button_value_if_submitted(self):
        form = self.callFUT()
        submit = form['submit']
        self.assertEqual(
            submit.value_if_submitted(), '',
            "submit default value is ''")
        button = form['button']
        self.assertEqual(
            button.value_if_submitted(), '',
            "submit default value is ''")

    def test_force_select(self):
        # force_value allows an option not present in the markup; a later
        # normal assignment clears the forced state.
        form = self.callFUT()
        form['select'].force_value('notavalue')
        form['select'].value__set('value3')
        self.assertTrue(
            form['select']._forced_value is NoValue,
            "Setting a value after having forced a value should keep a forced"
            " state")
        self.assertEqual(
            form['select'].value, 'value3',
            "the value should the the one set by value__set")
        self.assertEqual(
            form['select'].selectedIndex, 2,
            "the value index should be the one set by value__set")

    def test_form_select(self):
        form = self.callFUT()
        form.select('select', 'value1')
        self.assertEqual(
            form['select'].value, 'value1',
            "when using form.select, the input selected value should be "
            "changed")

    def test_get_field_by_index(self):
        form = self.callFUT()
        self.assertEqual(form['select'],
                         form.get('select', index=0))

    def test_get_unknown_field(self):
        # Unknown field names yield an empty, settable field.
        form = self.callFUT()
        self.assertEqual(form['unknown'].value, '')
        form['unknown'].value = '1'
        self.assertEqual(form['unknown'].value, '1')

    def test_get_non_exist_fields(self):
        form = self.callFUT()
        self.assertRaises(AssertionError, form.get, 'nonfield')

    def test_get_non_exist_fields_with_default(self):
        form = self.callFUT()
        value = form.get('nonfield', default=1)
        self.assertEqual(value, 1)

    def test_upload_fields(self):
        form = self.callFUT()
        fu = webtest.Upload(__file__)
        form['file'] = fu
        self.assertEqual(form.upload_fields(),
                         [['file', __file__]])

    def test_repr(self):
        form = self.callFUT()
        self.assertTrue(repr(form).startswith('<Form id='))

    def test_the_bs_node_must_not_change(self):
        # Parsing must not mutate the underlying BeautifulSoup node.
        form = self.callFUT()
        self.assertEqual(form.text, str(form.html))

    def test_set_multiple_checkboxes(self):
        # Assigning a list checks exactly the matching checkbox values.
        form = self.callFUT(formid='multiple_checkbox_form')
        form['checkbox'] = [10, 30]
        self.assertEqual(form.get('checkbox', index=0).value, '10')
        self.assertEqual(form.get('checkbox', index=1).value, None)
        self.assertEqual(form.get('checkbox', index=2).value, '30')

    def test_button_submit(self):
        # Default submit uses the first matching button.
        form = self.callFUT(formid='multiple_buttons_form')
        display = form.submit('action')
        self.assertIn(u("action=deactivate"), display, display)

    def test_button_submit_by_index(self):
        form = self.callFUT(formid='multiple_buttons_form')
        display = form.submit('action', index=1)
        self.assertIn(u("action=activate"), display, display)

    def test_button_submit_by_value(self):
        form = self.callFUT(formid='multiple_buttons_form')
        display = form.submit('action', value='activate')
        self.assertIn(u("action=activate"), display, display)

    def test_button_submit_by_value_and_index(self):
        # value and index are mutually exclusive selectors.
        form = self.callFUT(formid='multiple_buttons_form')
        self.assertRaises(ValueError,
                          form.submit, "action", value="activate",
                          index=0)
class TestResponseFormAttribute(unittest.TestCase):
    """response.form requires exactly one <form> in the page."""

    def callFUT(self, body):
        # Serve the given HTML body and return a TestApp for it.
        app = DebugApp(form=to_bytes(body))
        return webtest.TestApp(app)

    def test_no_form(self):
        app = self.callFUT('<html><body></body></html>')
        res = app.get('/form.html')
        self.assertRaises(TypeError, lambda: res.form)

    def test_too_many_forms(self):
        app = self.callFUT(
            '<html><body><form></form><form></form></body></html>')
        res = app.get('/form.html')
        self.assertRaises(TypeError, lambda: res.form)
class TestInput(unittest.TestCase):
    """Default values and submission behaviour of the individual input
    types (text, radio, checkbox, password, textarea).
    """

    def callFUT(self, filename='form_inputs.html'):
        # Serve the fixture and return the TestApp (forms fetched per test).
        dirname = os.path.join(os.path.dirname(__file__), 'html')
        app = DebugApp(form=os.path.join(dirname, filename), show_form=True)
        return webtest.TestApp(app)

    def test_input(self):
        # Each input type picks up its markup default value.
        app = self.callFUT()
        res = app.get('/form.html')
        self.assertEqual(res.status_int, 200)
        self.assertTrue(res.content_type.startswith('text/html'))
        form = res.forms['text_input_form']
        self.assertEqual(form['foo'].value, 'bar')
        self.assertEqual(form.submit_fields(), [('foo', 'bar')])
        form = res.forms['radio_input_form']
        self.assertEqual(form['foo'].selectedIndex, 1)
        self.assertEqual(form['foo'].value, 'baz')
        self.assertEqual(form.submit_fields(), [('foo', 'baz')])
        form = res.forms['checkbox_input_form']
        self.assertEqual(form['foo'].value, 'bar')
        self.assertEqual(form.submit_fields(), [('foo', 'bar')])
        form = res.forms['password_input_form']
        self.assertEqual(form['foo'].value, 'bar')
        self.assertEqual(form.submit_fields(), [('foo', 'bar')])

    def test_force_radio_input(self):
        # force_value allows a radio value not present in the markup.
        app = self.callFUT()
        res = app.get('/form.html')
        form = res.forms['radio_input_form']
        form['foo'].force_value('fido')
        self.assertEqual(form['foo'].value, 'fido')
        self.assertEqual(form.submit_fields(), [('foo', 'fido')])

    def test_radio_input_order(self):
        # The submitted radio value must stay in document order relative to
        # the surrounding hidden __start__/__end__ marker fields.
        app = self.callFUT()
        res = app.get('/form.html')
        self.assertEqual(res.status_int, 200)
        self.assertTrue(res.content_type.startswith('text/html'))
        form = res.forms['complex_radio_input_form']
        form['foo'].value = 'true'
        self.assertEqual(form['foo'].value, 'true')
        self.assertEqual(form['foo'].selectedIndex, 0)
        self.assertEqual(form.submit_fields(), [
            ('__start__', 'item:mapping'),
            ('foo', 'true'),
            ('__end__', 'item:mapping'),
            ('__start__', 'item:mapping'),
            ('__end__', 'item:mapping')])
        res = app.get('/form.html')
        form = res.forms['complex_radio_input_form']
        self.assertEqual(form['foo'].value, 'true')
        self.assertEqual(form['foo'].selectedIndex, 1)
        self.assertEqual(form.submit_fields(), [
            ('__start__', 'item:mapping'),
            ('__end__', 'item:mapping'),
            ('__start__', 'item:mapping'),
            ('foo', 'true'),
            ('__end__', 'item:mapping')])

    def test_input_unicode(self):
        # Non-ASCII default values survive parsing and submission.
        app = self.callFUT('form_unicode_inputs.html')
        res = app.get('/form.html')
        self.assertEqual(res.status_int, 200)
        self.assertTrue(res.content_type.startswith('text/html'))
        self.assertEqual(res.charset.lower(), 'utf-8')
        form = res.forms['text_input_form']
        self.assertEqual(form['foo'].value, u('Хармс'))
        self.assertEqual(form.submit_fields(), [('foo', u('Хармс'))])
        form = res.forms['radio_input_form']
        self.assertEqual(form['foo'].selectedIndex, 1)
        self.assertEqual(form['foo'].value, u('Блок'))
        self.assertEqual(form.submit_fields(), [('foo', u('Блок'))])
        form = res.forms['checkbox_input_form']
        self.assertEqual(form['foo'].value, u('Хармс'))
        self.assertEqual(form.submit_fields(), [('foo', u('Хармс'))])
        form = res.forms['password_input_form']
        self.assertEqual(form['foo'].value, u('Хармс'))
        self.assertEqual(form.submit_fields(), [('foo', u('Хармс'))])

    def test_input_no_default(self):
        # Without markup defaults, text/password are '' and radio/checkbox
        # are unselected (None, not submitted).
        app = self.callFUT('form_inputs_with_defaults.html')
        res = app.get('/form.html')
        self.assertEqual(res.status_int, 200)
        self.assertTrue(res.content_type.startswith('text/html'))
        form = res.forms['text_input_form']
        self.assertEqual(form['foo'].value, '')
        self.assertEqual(form.submit_fields(), [('foo', '')])
        form = res.forms['radio_input_form']
        self.assertTrue(form['foo'].value is None)
        self.assertEqual(form.submit_fields(), [])
        form = res.forms['checkbox_input_form']
        self.assertTrue(form['foo'].value is None)
        self.assertEqual(form.submit_fields(), [])
        form = res.forms['password_input_form']
        self.assertEqual(form['foo'].value, '')
        self.assertEqual(form.submit_fields(), [('foo', '')])

    def test_textarea_entities(self):
        # HTML entities in a textarea are decoded.
        app = self.callFUT()
        res = app.get('/form.html')
        form = res.forms.get("textarea_input_form")
        self.assertEqual(form.get("textarea").value, "'foo&bar'")
        self.assertEqual(form.submit_fields(), [('textarea', "'foo&bar'")])

    def test_textarea_emptyfirstline(self):
        # The leading newline after <textarea> is stripped.
        app = self.callFUT()
        res = app.get('/form.html')
        form = res.forms.get("textarea_emptyline_form")
        self.assertEqual(form.get("textarea").value, "aaa")
        self.assertEqual(form.submit_fields(), [('textarea', "aaa")])
class TestFormLint(unittest.TestCase):
    """Form.lint() requires every input to have an id and a matching
    <label for=...>.
    """

    def test_form_lint(self):
        # No id at all -> lint fails.
        form = webtest.Form(None, '''<form>
<input type="text" name="field"/>
</form>''')
        self.assertRaises(AttributeError, form.lint)
        # id but no label -> lint still fails.
        form = webtest.Form(None, '''<form>
<input type="text" id="myfield" name="field"/>
</form>''')
        self.assertRaises(AttributeError, form.lint)
        # Matching label -> passes.
        form = webtest.Form(None, '''<form>
<label for="myfield">my field</label>
<input type="text" id="myfield" name="field"/>
</form>''')
        form.lint()
        # Extra label attributes are fine as long as `for` matches.
        form = webtest.Form(None, '''<form>
<label class="field" for="myfield" role="r">my field</label>
<input type="text" id="myfield" name="field"/>
</form>''')
        form.lint()
def select_app(environ, start_response):
    """WSGI fixture: GET serves two <select> forms (one single, one
    multiple, both with value attributes and defaults); POST echoes the
    submitted selection back in an HTML page.
    """
    req = Request(environ)
    status = b"200 OK"
    if req.method == "GET":
        body = to_bytes("""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="single_select_form">
<select id="single" name="single">
<option value="4">Four</option>
<option value="5" selected="selected">Five</option>
<option value="6">Six</option>
<option value="7">Seven</option>
</select>
<input name="button" type="submit" value="single">
</form>
<form method="POST" id="multiple_select_form">
<select id="multiple" name="multiple" multiple>
<option value="8" selected="selected">Eight</option>
<option value="9">Nine</option>
<option value="10">Ten</option>
<option value="11" selected="selected">Eleven</option>
</select>
<input name="button" type="submit" value="multiple">
</form>
</body>
</html>
""")
    else:
        # NOTE(review): if 'button' is neither value, `selection` is unbound
        # and this raises NameError — acceptable for a test fixture, but
        # worth confirming callers always submit via a button.
        select_type = req.POST.get("button")
        if select_type == "single":
            selection = req.POST.get("single")
        elif select_type == "multiple":
            selection = ", ".join(req.POST.getall("multiple"))
        body = to_bytes("""
<html>
<head><title>display page</title></head>
<body>
<p>You submitted the %(select_type)s </p>
<p>You selected %(selection)s</p>
</body>
</html>
""" % dict(selection=selection, select_type=select_type))
    headers = [
        ('Content-Type', 'text/html; charset=utf-8'),
        ('Content-Length', str(len(body)))]
    # PEP 3333 requires native strings:
    headers = [(str(k), str(v)) for k, v in headers]
    start_response(status, headers)
    return [body]
def select_app_without_values(environ, start_response):
    """WSGI fixture like select_app, but the <option> elements mostly have
    no value attribute (the option text doubles as the value).
    """
    req = Request(environ)
    status = b"200 OK"
    if req.method == "GET":
        body = to_bytes("""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="single_select_form">
<select id="single" name="single">
<option>Four</option>
<option>Five</option>
<option>Six</option>
<option>Seven</option>
</select>
<input name="button" type="submit" value="single">
</form>
<form method="POST" id="multiple_select_form">
<select id="multiple" name="multiple" multiple="multiple">
<option>Eight</option>
<option selected value="Nine">Nine</option>
<option>Ten</option>
<option selected>Eleven</option>
</select>
<input name="button" type="submit" value="multiple">
</form>
</body>
</html>
""")
    else:
        # NOTE(review): `selection` is unbound for an unknown button value —
        # same caveat as select_app.
        select_type = req.POST.get("button")
        if select_type == "single":
            selection = req.POST.get("single")
        elif select_type == "multiple":
            selection = ", ".join(req.POST.getall("multiple"))
        body = to_bytes("""
<html>
<head><title>display page</title></head>
<body>
<p>You submitted the %(select_type)s </p>
<p>You selected %(selection)s</p>
</body>
</html>
""" % dict(selection=selection, select_type=select_type))
    headers = [
        ('Content-Type', 'text/html; charset=utf-8'),
        ('Content-Length', str(len(body)))]
    # PEP 3333 requires native strings:
    headers = [(str(k), str(v)) for k, v in headers]
    start_response(status, headers)
    return [body]
def select_app_without_default(environ, start_response):
    """WSGI fixture like select_app, but no option is pre-selected, so the
    browser default (first option / nothing for multiple) applies.
    """
    req = Request(environ)
    status = b"200 OK"
    if req.method == "GET":
        body = to_bytes("""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="single_select_form">
<select id="single" name="single">
<option value="4">Four</option>
<option value="5">Five</option>
<option value="6">Six</option>
<option value="7">Seven</option>
</select>
<input name="button" type="submit" value="single">
</form>
<form method="POST" id="multiple_select_form">
<select id="multiple" name="multiple" multiple="multiple">
<option value="8">Eight</option>
<option value="9">Nine</option>
<option value="10">Ten</option>
<option value="11">Eleven</option>
</select>
<input name="button" type="submit" value="multiple">
</form>
</body>
</html>
""")
    else:
        # NOTE(review): `selection` is unbound for an unknown button value —
        # same caveat as select_app.
        select_type = req.POST.get("button")
        if select_type == "single":
            selection = req.POST.get("single")
        elif select_type == "multiple":
            selection = ", ".join(req.POST.getall("multiple"))
        body = to_bytes("""
<html>
<head><title>display page</title></head>
<body>
<p>You submitted the %(select_type)s </p>
<p>You selected %(selection)s</p>
</body>
</html>
""" % dict(selection=selection, select_type=select_type))
    headers = [
        ('Content-Type', 'text/html; charset=utf-8'),
        ('Content-Length', str(len(body)))]
    # PEP 3333 requires native strings:
    headers = [(str(k), str(v)) for k, v in headers]
    start_response(status, headers)
    return [body]
def select_app_unicode(environ, start_response):
    """WSGI fixture like select_app with non-ASCII (Cyrillic) option values
    and labels; bodies are explicitly UTF-8 encoded and asserted to be bytes.
    """
    req = Request(environ)
    status = b"200 OK"
    if req.method == "GET":
        body = u("""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="single_select_form">
<select id="single" name="single">
<option value="ЕКБ">Екатеринбург</option>
<option value="МСК" selected="selected">Москва</option>
<option value="СПБ">Санкт-Петербург</option>
<option value="САМ">Самара</option>
</select>
<input name="button" type="submit" value="single">
</form>
<form method="POST" id="multiple_select_form">
<select id="multiple" name="multiple" multiple="multiple">
<option value="8" selected="selected">Лондон</option>
<option value="9">Париж</option>
<option value="10">Пекин</option>
<option value="11" selected="selected">Бристоль</option>
</select>
<input name="button" type="submit" value="multiple">
</form>
</body>
</html>
""").encode('utf8')
    else:
        # NOTE(review): `selection` is unbound for an unknown button value —
        # same caveat as select_app.
        select_type = req.POST.get("button")
        if select_type == "single":
            selection = req.POST.get("single")
        elif select_type == "multiple":
            selection = ", ".join(req.POST.getall("multiple"))
        body = (u("""
<html>
<head><title>display page</title></head>
<body>
<p>You submitted the %(select_type)s </p>
<p>You selected %(selection)s</p>
</body>
</html>
""") % dict(selection=selection, select_type=select_type)).encode('utf8')
    headers = [
        ('Content-Type', 'text/html; charset=utf-8'),
        ('Content-Length', str(len(body)))]
    # PEP 3333 requires native strings:
    headers = [(str(k), str(v)) for k, v in headers]
    start_response(status, headers)
    # WSGI bodies must be bytes; guard against accidental str.
    if not isinstance(body, bytes):
        raise AssertionError('Body is not %s' % bytes)
    return [body]
class TestSelect(unittest.TestCase):
    """Round-trip tests for <select> handling in webtest forms.

    The fixture WSGI apps (``select_app`` and friends, defined earlier in
    this file) render single- and multiple-valued <select> controls and
    echo the submitted choice back into the response body, so each test
    submits a form and asserts on the echoed text.
    """

    def test_unicode_select(self):
        # Non-ASCII option values must survive a full submit round-trip.
        app = webtest.TestApp(select_app_unicode)
        res = app.get('/')
        single_form = res.forms["single_select_form"]
        self.assertEqual(single_form["single"].value, u("МСК"))
        display = single_form.submit("button")
        self.assertIn(u("<p>You selected МСК</p>"), display, display)

        res = app.get('/')
        single_form = res.forms["single_select_form"]
        self.assertEqual(single_form["single"].value, u("МСК"))
        single_form.set("single", u("СПБ"))
        self.assertEqual(single_form["single"].value, u("СПБ"))
        display = single_form.submit("button")
        self.assertIn(u("<p>You selected СПБ</p>"), display, display)

    def test_single_select(self):
        # Default value, explicit set(), and select() by option text.
        app = webtest.TestApp(select_app)
        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        single_form = res.forms["single_select_form"]
        self.assertEqual(single_form["single"].value, "5")
        display = single_form.submit("button")
        self.assertIn("<p>You selected 5</p>", display, display)

        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        single_form = res.forms["single_select_form"]
        self.assertEqual(single_form["single"].value, "5")
        single_form.set("single", "6")
        self.assertEqual(single_form["single"].value, "6")
        display = single_form.submit("button")
        self.assertIn("<p>You selected 6</p>", display, display)

        res = app.get('/')
        single_form = res.forms["single_select_form"]
        # select() must reject a value/text pair that does not match an
        # option, and an option text that does not exist at all.
        self.assertRaises(ValueError, single_form.select, "single", "5",
                          text="Five")
        self.assertRaises(ValueError, single_form.select, "single",
                          text="Three")
        single_form.select("single", text="Seven")
        self.assertEqual(single_form["single"].value, "7")
        display = single_form.submit("button")
        self.assertIn("<p>You selected 7</p>", display, display)

    def test_single_select_forced_value(self):
        # set() rejects values outside the option list; force_value()
        # bypasses that validation.
        app = webtest.TestApp(select_app)
        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        single_form = res.forms["single_select_form"]
        self.assertEqual(single_form["single"].value, "5")
        self.assertRaises(ValueError, single_form.set, "single", "984")
        single_form["single"].force_value("984")
        self.assertEqual(single_form["single"].value, "984")
        display = single_form.submit("button")
        self.assertIn("<p>You selected 984</p>", display, display)

    def test_single_select_no_default(self):
        # Without an explicitly selected option the first one is used.
        app = webtest.TestApp(select_app_without_default)
        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        single_form = res.forms["single_select_form"]
        self.assertEqual(single_form["single"].value, "4")
        display = single_form.submit("button")
        self.assertIn("<p>You selected 4</p>", display, display)

        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        single_form = res.forms["single_select_form"]
        self.assertEqual(single_form["single"].value, "4")
        # set() accepts a non-string value and coerces it to text.
        single_form.set("single", 6)
        self.assertEqual(single_form["single"].value, "6")
        display = single_form.submit("button")
        self.assertIn("<p>You selected 6</p>", display, display)

    def test_multiple_select(self):
        app = webtest.TestApp(select_app)
        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        multiple_form = res.forms["multiple_select_form"]
        self.assertEqual(multiple_form["multiple"].value, ['8', '11'],
                         multiple_form["multiple"].value)
        display = multiple_form.submit("button")
        self.assertIn("<p>You selected 8, 11</p>", display, display)

        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        multiple_form = res.forms["multiple_select_form"]
        self.assertEqual(multiple_form["multiple"].value, ["8", "11"],
                         multiple_form["multiple"].value)
        multiple_form.set("multiple", ["9"])
        self.assertEqual(multiple_form["multiple"].value, ["9"],
                         multiple_form["multiple"].value)
        display = multiple_form.submit("button")
        self.assertIn("<p>You selected 9</p>", display, display)

        res = app.get('/')
        multiple_form = res.forms["multiple_select_form"]
        # select_multiple() validates value/text pairs and option texts
        # just like select() does for single selects.
        self.assertRaises(ValueError, multiple_form.select_multiple,
                          "multiple",
                          ["8", "10"], texts=["Eight", "Ten"])
        self.assertRaises(ValueError, multiple_form.select_multiple,
                          "multiple", texts=["Twelve"])
        multiple_form.select_multiple("multiple",
                                      texts=["Eight", "Nine", "Ten"])
        display = multiple_form.submit("button")
        self.assertIn("<p>You selected 8, 9, 10</p>", display, display)

    def test_multiple_select_forced_values(self):
        app = webtest.TestApp(select_app)
        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        multiple_form = res.forms["multiple_select_form"]
        self.assertEqual(multiple_form["multiple"].value, ["8", "11"],
                         multiple_form["multiple"].value)
        self.assertRaises(ValueError, multiple_form.set,
                          "multiple", ["24", "88"])
        multiple_form["multiple"].force_value(["24", "88"])
        self.assertEqual(multiple_form["multiple"].value, ["24", "88"],
                         multiple_form["multiple"].value)
        display = multiple_form.submit("button")
        self.assertIn("<p>You selected 24, 88</p>", display, display)

    def test_multiple_select_no_default(self):
        # A multiple select with nothing selected reports value None.
        app = webtest.TestApp(select_app_without_default)
        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        multiple_form = res.forms["multiple_select_form"]
        self.assertTrue(multiple_form["multiple"].value is None,
                        repr(multiple_form["multiple"].value))
        display = multiple_form.submit("button")
        self.assertIn("<p>You selected </p>", display, display)

        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        multiple_form = res.forms["multiple_select_form"]
        self.assertTrue(multiple_form["multiple"].value is None,
                        multiple_form["multiple"].value)
        multiple_form.set("multiple", ["9"])
        self.assertEqual(multiple_form["multiple"].value, ["9"],
                         multiple_form["multiple"].value)
        display = multiple_form.submit("button")
        self.assertIn("<p>You selected 9</p>", display, display)

    def test_select_no_value(self):
        # Options without a value attribute use their text as the value.
        app = webtest.TestApp(select_app_without_values)
        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        single_form = res.forms["single_select_form"]
        self.assertEqual(single_form["single"].value, "Four")
        display = single_form.submit("button")
        self.assertIn("<p>You selected Four</p>", display, display)

        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        single_form = res.forms["single_select_form"]
        self.assertEqual(single_form["single"].value, "Four")
        single_form.set("single", "Six")
        self.assertEqual(single_form["single"].value, "Six")
        display = single_form.submit("button")
        self.assertIn("<p>You selected Six</p>", display, display)

    def test_multiple_select_no_value(self):
        app = webtest.TestApp(select_app_without_values)
        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        multiple_form = res.forms["multiple_select_form"]
        self.assertEqual(multiple_form["multiple"].value, ["Nine", "Eleven"])
        display = multiple_form.submit("button")
        self.assertIn("<p>You selected Nine, Eleven</p>", display, display)

        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        multiple_form = res.forms["multiple_select_form"]
        self.assertEqual(multiple_form["multiple"].value, ["Nine", "Eleven"])
        multiple_form.set("multiple", ["Nine", "Ten"])
        self.assertEqual(multiple_form["multiple"].value, ["Nine", "Ten"])
        display = multiple_form.submit("button")
        self.assertIn("<p>You selected Nine, Ten</p>", display, display)

    def test_multiple_select_reset_value(self):
        app = webtest.TestApp(select_app_without_values)
        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        multiple_form = res.forms["multiple_select_form"]
        self.assertEqual(multiple_form["multiple"].value, ["Nine", "Eleven"])
        # reset with value
        multiple_form["multiple"].value = []
        self.assertIsNone(multiple_form["multiple"].value)
        # re-set a value
        multiple_form["multiple"].value = ['Nine']
        assert multiple_form["multiple"].value == ['Nine']
        # reset with force_value
        multiple_form["multiple"].force_value(None)
        self.assertIsNone(multiple_form["multiple"].value)
        display = multiple_form.submit("button")
        self.assertIn("<p>You selected </p>", display, display)
class SingleUploadFileApp:
    """Minimal WSGI fixture app with one file-upload form.

    GET serves the form page; POST echoes back the uploaded file's name,
    contents and content type so tests can assert on them.
    """

    # Form page served on GET: one file field, one text field, a submit.
    body = b"""
<html>
    <head><title>form page</title></head>
    <body>
        <form method="POST" id="file_upload_form"
              enctype="multipart/form-data">
            <input name="file-field" type="file" value="some/path/file.txt" />
            <input name="int-field" type="text" value="" />
            <input name="button" type="submit" value="single">
        </form>
    </body>
</html>
"""

    def __call__(self, environ, start_response):
        req = Request(environ)
        # NOTE(review): PEP 3333 expects a native-str status line; the
        # bytes literal here appears to work under this test harness —
        # confirm before reusing this app elsewhere.
        status = b"200 OK"
        if req.method == "GET":
            body = self.body
        else:
            # POST: render a page that echoes every uploaded file.
            body = b"""
<html>
    <head><title>display page</title></head>
    <body>
""" + self.get_files_page(req) + b"""
    </body>
</html>
"""
        headers = [
            ('Content-Type', 'text/html; charset=utf-8'),
            ('Content-Length', str(len(body)))]
        # PEP 3333 requires native strings:
        headers = [(str(k), str(v)) for k, v in headers]
        start_response(status, headers)
        assert(isinstance(body, bytes))
        return [body]

    def get_files_page(self, req):
        # Render a <p> triple (name, contents, content type) for every
        # POSTed field whose name contains 'file', in sorted field order.
        file_parts = []
        uploaded_files = [(k, v) for k, v in req.POST.items() if 'file' in k]
        uploaded_files = sorted(uploaded_files)
        for name, uploaded_file in uploaded_files:
            if isinstance(uploaded_file, cgi.FieldStorage):
                filename = to_bytes(uploaded_file.filename)
                value = to_bytes(uploaded_file.value, 'ascii')
                content_type = to_bytes(uploaded_file.type, 'ascii')
            else:
                # Field submitted without an actual file.
                filename = value = content_type = b''
            file_parts.append(b"""
        <p>You selected '""" + filename + b"""'</p>
        <p>with contents: '""" + value + b"""'</p>
        <p>with content type: '""" + content_type + b"""'</p>
""")
        return b''.join(file_parts)
class UploadBinaryApp(SingleUploadFileApp):
    """Upload fixture that echoes a binary payload.

    The POSTed file is decoded as 255 16-bit integers and rendered as a
    comma-separated byte string instead of the usual HTML paragraphs.
    """

    def get_files_page(self, req):
        # Take the first POSTed field whose name mentions 'file'.
        first_upload = next(v for k, v in req.POST.items() if 'file' in k)
        numbers = struct.unpack(b'255h', first_upload.value[:510])
        return b','.join(to_bytes(str(number)) for number in numbers)
class MultipleUploadFileApp(SingleUploadFileApp):
    """Same echo behaviour as SingleUploadFileApp, but with two file fields."""

    body = b"""
<html>
    <head><title>form page</title></head>
    <body>
        <form method="POST" id="file_upload_form"
              enctype="multipart/form-data">
            <input name="file-field-1" type="file" />
            <input name="file-field-2" type="file" />
            <input name="button" type="submit" value="single">
        </form>
    </body>
</html>
"""
class TestFileUpload(unittest.TestCase):
    """Round-trip tests for file-upload form handling.

    Each test drives one of the upload fixture apps above and asserts
    that the echoed file name / contents / content type match what was
    submitted.
    """

    def assertFile(self, name, contents, display, content_type=None):
        """Assert that *display* echoes the given file name and contents."""
        if isinstance(name, bytes):
            text_name = name.decode('ascii')
        else:
            text_name = name
        self.assertIn("<p>You selected '" + text_name + "'</p>",
                      display, display)
        if isinstance(contents, bytes):
            text_contents = contents.decode('ascii')
        else:
            text_contents = contents
        self.assertIn("<p>with contents: '" + text_contents + "'</p>",
                      display, display)
        if content_type:
            self.assertIn("<p>with content type: '" + content_type + "'</p>",
                          display, display)

    def test_no_uploads_error(self):
        # upload_fields() on a form with no chosen file must not raise.
        app = webtest.TestApp(SingleUploadFileApp())
        app.get('/').forms["file_upload_form"].upload_fields()

    def test_upload_without_file(self):
        # Submitting with an empty file field must not raise.
        app = webtest.TestApp(SingleUploadFileApp())
        upload_form = app.get('/').forms["file_upload_form"]
        upload_form.submit()

    def test_file_upload_with_filename_only(self):
        uploaded_file_name = os.path.join(os.path.dirname(__file__),
                                          "__init__.py")
        # Use a context manager so the fixture file handle is not leaked.
        with open(uploaded_file_name) as f:
            uploaded_file_contents = to_bytes(f.read())
        app = webtest.TestApp(SingleUploadFileApp())
        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        self.assertEqual(res.charset, 'utf-8')
        single_form = res.forms["file_upload_form"]
        # A 1-tuple means "read the contents from this path".
        single_form.set("file-field", (uploaded_file_name,))
        display = single_form.submit("button")
        self.assertFile(uploaded_file_name, uploaded_file_contents, display)

    def test_file_upload_with_filename_and_contents(self):
        uploaded_file_name = os.path.join(os.path.dirname(__file__),
                                          "__init__.py")
        with open(uploaded_file_name) as f:
            uploaded_file_contents = to_bytes(f.read())
        app = webtest.TestApp(SingleUploadFileApp())
        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        single_form = res.forms["file_upload_form"]
        single_form.set("file-field",
                        (uploaded_file_name, uploaded_file_contents))
        display = single_form.submit("button")
        self.assertFile(uploaded_file_name, uploaded_file_contents, display)

    def test_file_upload_with_content_type(self):
        uploaded_file_name = os.path.join(os.path.dirname(__file__),
                                          "__init__.py")
        with open(uploaded_file_name, 'rb') as f:
            uploaded_file_contents = f.read()
        app = webtest.TestApp(SingleUploadFileApp())
        res = app.get('/')
        single_form = res.forms["file_upload_form"]
        single_form["file-field"].value = Upload(uploaded_file_name,
                                                 uploaded_file_contents,
                                                 'text/x-custom-type')
        display = single_form.submit("button")
        self.assertFile(uploaded_file_name, uploaded_file_contents, display,
                        content_type='text/x-custom-type')

    def test_file_upload_binary(self):
        binary_data = struct.pack('255h', *range(0, 255))
        app = webtest.TestApp(UploadBinaryApp())
        res = app.get('/')
        single_form = res.forms["file_upload_form"]
        single_form.set("file-field", ('my_file.dat', binary_data))
        display = single_form.submit("button")
        self.assertIn(','.join([str(n) for n in range(0, 255)]), display)

    def test_multiple_file_uploads_with_filename_and_contents(self):
        uploaded_file1_name = os.path.join(os.path.dirname(__file__),
                                           "__init__.py")
        with open(uploaded_file1_name) as f:
            uploaded_file1_contents = to_bytes(f.read())
        uploaded_file2_name = os.path.join(os.path.dirname(__file__), 'html',
                                           "404.html")
        with open(uploaded_file2_name) as f:
            uploaded_file2_contents = to_bytes(f.read())
        app = webtest.TestApp(MultipleUploadFileApp())
        res = app.get('/')
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.headers['content-type'],
                         'text/html; charset=utf-8')
        self.assertEqual(res.content_type, 'text/html')
        single_form = res.forms["file_upload_form"]
        single_form.set("file-field-1",
                        (uploaded_file1_name, uploaded_file1_contents))
        single_form.set("file-field-2",
                        (uploaded_file2_name, uploaded_file2_contents))
        display = single_form.submit("button")
        self.assertFile(uploaded_file1_name, uploaded_file1_contents, display)
        # Bug fix: this previously re-checked file 1, leaving the second
        # upload completely untested.
        self.assertFile(uploaded_file2_name, uploaded_file2_contents, display)

    def test_post_int(self):
        binary_data = struct.pack('255h', *range(0, 255))
        app = webtest.TestApp(SingleUploadFileApp())
        res = app.get('/')
        single_form = res.forms["file_upload_form"]
        single_form.set("file-field", ('my_file.dat', binary_data))
        single_form.set("int-field", 100)
        # just check it does not raise
        single_form.submit("button")

    def test_invalid_types(self):
        binary_data = struct.pack('255h', *range(0, 255))
        app = webtest.TestApp(SingleUploadFileApp())
        res = app.get('/')
        single_form = res.forms["file_upload_form"]
        single_form.set("file-field", ('my_file.dat', binary_data))
        # A non-primitive field value must be rejected at submit time.
        single_form.set("int-field", SingleUploadFileApp())
        self.assertRaises(ValueError, single_form.submit, "button")

    def test_upload_invalid_content(self):
        app = webtest.TestApp(SingleUploadFileApp())
        res = app.get('/')
        single_form = res.forms["file_upload_form"]
        single_form.set("file-field", ('my_file.dat', 1))
        try:
            single_form.submit("button")
        except ValueError:
            e = sys.exc_info()[1]
            # assertEquals is a deprecated alias; use assertEqual.
            self.assertEqual(
                str(e),
                u('File content must be %s not %s' % (bytes, int))
            )
        else:
            # Previously this test silently passed when no exception was
            # raised at all.
            self.fail('expected ValueError for non-bytes file content')

    def test_invalid_uploadfiles(self):
        # Malformed upload_files tuples are rejected up front.
        app = webtest.TestApp(SingleUploadFileApp())
        self.assertRaises(ValueError, app.post, '/', upload_files=[()])
        self.assertRaises(
            ValueError,
            app.post, '/',
            upload_files=[('name', 'filename', 'content', 'extra')]
        )

    def test_goto_upload_files(self):
        app = webtest.TestApp(SingleUploadFileApp())
        resp = app.get('/')
        resp = resp.goto(
            '/',
            method='post',
            upload_files=[('file', 'filename', b'content')]
        )
        resp.mustcontain("<p>You selected 'filename'</p>",
                         "<p>with contents: 'content'</p>")

    def test_post_upload_files(self):
        app = webtest.TestApp(SingleUploadFileApp())
        resp = app.post(
            '/',
            upload_files=[('file', 'filename', b'content')]
        )
        resp.mustcontain("<p>You selected 'filename'</p>",
                         "<p>with contents: 'content'</p>")

    def test_post_upload_empty_files(self):
        app = webtest.TestApp(SingleUploadFileApp())
        resp = app.post(
            '/',
            upload_files=[('file', 'filename', b'')]
        )
        resp.mustcontain("<p>You selected 'filename'</p>",
                         "<p>with contents: ''</p>")

        resp = app.get('/')
        form = resp.form
        form['file-field'] = Upload('filename', b'', 'text/plain')
        resp = form.submit()
        resp.mustcontain("<p>You selected 'filename'</p>",
                         "<p>with contents: ''</p>")
| 38.932458 | 78 | 0.589128 |
c77238267d66f023e1a89fea1a771831d78b914b | 161 | py | Python | app_polls/graphql/__types.py | Audiotuete/backend_kassel_api | 97bb1f38eea51147660dd2eda052b540293f27a7 | [
"MIT"
] | null | null | null | app_polls/graphql/__types.py | Audiotuete/backend_kassel_api | 97bb1f38eea51147660dd2eda052b540293f27a7 | [
"MIT"
] | null | null | null | app_polls/graphql/__types.py | Audiotuete/backend_kassel_api | 97bb1f38eea51147660dd2eda052b540293f27a7 | [
"MIT"
] | null | null | null | import graphene
from graphene_django import DjangoObjectType
#Models
from ..models import Poll
class PollType(DjangoObjectType):
    """GraphQL object type exposing the Django ``Poll`` model."""

    class Meta:
        # Defect fixed: extraction junk was fused onto this line; the
        # intended statement is the plain model binding.
        model = Poll
8f15354071038386535da464e4ccb514c56dc268 | 812 | py | Python | openpyxl/chart/tests/test_picture.py | sekcheong/openpyxl | e1ba037f171efa348f75431c35a50de5ca277b78 | [
"MIT"
] | null | null | null | openpyxl/chart/tests/test_picture.py | sekcheong/openpyxl | e1ba037f171efa348f75431c35a50de5ca277b78 | [
"MIT"
] | null | null | null | openpyxl/chart/tests/test_picture.py | sekcheong/openpyxl | e1ba037f171efa348f75431c35a50de5ca277b78 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
# Copyright (c) 2010-2017 openpyxl
import pytest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def PictureOptions():
    """Fixture returning the PictureOptions class under test."""
    # Import lazily so the module under test is only loaded when a test
    # actually requests this fixture.
    from ..picture import PictureOptions
    return PictureOptions
class TestPictureOptions:
    """XML round-trip tests for chart PictureOptions."""

    def test_ctor(self, PictureOptions):
        # A default object serializes to a bare, empty element.
        picture = PictureOptions()
        xml = tostring(picture.to_tree())
        expected = """
        <pictureOptions />
        """
        diff = compare_xml(xml, expected)
        assert diff is None, diff

    def test_from_xml(self, PictureOptions):
        # Parsing the bare element yields an object equal to the default.
        src = """
        <pictureOptions />
        """
        node = fromstring(src)
        picture = PictureOptions.from_tree(node)
        assert picture == PictureOptions()
fa8b49a021294e8555e979459615b1956d9b2b55 | 32,375 | py | Python | python/paddle/fluid/executor.py | hjchen2/Paddle | 6c596a2bb1b000171c8a9df6e5c4a6204670cbce | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/executor.py | hjchen2/Paddle | 6c596a2bb1b000171c8a9df6e5c4a6204670cbce | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/executor.py | hjchen2/Paddle | 6c596a2bb1b000171c8a9df6e5c4a6204670cbce | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import multiprocessing
import numpy as np
from .wrapped_decorator import signature_safe_contextmanager
import six
from .framework import Program, default_main_program, Variable
from . import core
from . import compiler
from .. import compat as cpt
from .trainer_factory import TrainerFactory
__all__ = ['Executor', 'global_scope', 'scope_guard']

# Process-wide default scope; managed via global_scope()/scope_guard().
g_scope = core.Scope()
# Convenience aliases for the C++ inference configuration classes.
InferNativeConfig = core.NativeConfig
InferAnalysisConfig = core.AnalysisConfig
def global_scope():
    """Return the process-wide default :class:`Scope`.

    Many APIs fall back to this scope when the caller does not supply
    one explicitly, e.g. :code:`Executor.run`.

    Returns:
        Scope: The global/default scope instance.
    """
    return g_scope
def _switch_scope(scope):
    """Install *scope* as the global default and return the previous one."""
    global g_scope
    previous = g_scope
    g_scope = scope
    return previous
@signature_safe_contextmanager
def scope_guard(scope):
    """
    Change the global/default scope instance by Python `with` statement. All
    variable in runtime will assigned to the new scope.

    Examples:
        >>> import paddle.fluid as fluid
        >>> new_scope = fluid.Scope()
        >>> with fluid.scope_guard(new_scope):
        >>>     ...

    Args:
        scope: The new global/default scope.
    """
    ex = _switch_scope(scope)
    # Restore the previous scope even when the `with` body raises;
    # without the try/finally an exception would permanently leak the
    # temporary scope as the global default.
    try:
        yield
    finally:
        _switch_scope(ex)
def as_numpy(tensor):
    """Convert a LoD-free Tensor (or container of them) to numpy.

    LoDTensorArray and list inputs are converted element-wise; a tensor
    that carries LoD information cannot be represented as a plain
    ndarray and raises instead.

    Args:
        tensor(Variable): an instance of Tensor.

    Returns:
        numpy.ndarray (or a list of them for container inputs).
    """
    if isinstance(tensor, (core.LoDTensorArray, list)):
        return [as_numpy(item) for item in tensor]
    assert isinstance(tensor, core.LoDTensor)
    if len(tensor.lod()) > 0:
        raise RuntimeError("Some of your fetched tensors hold LoD information. \
            They can not be completely cast to Python ndarray. \
            Please set the parameter 'return_numpy' as 'False' to \
            return LoDTensor itself directly.")
    return np.array(tensor)
def has_feed_operators(block, feed_targets, feed_holder_name):
    """Return True when *block* already starts with matching feed ops.

    Feed operators are prepended to the block, so scanning stops at the
    first non-feed op.  When feed operators exist, their holder variable
    and target names are validated against *feed_targets*; any mismatch
    raises an exception.

    Args:
        block: a block instance (typically global block of a program).
        feed_targets: a dictionary of {feed_target_name: feed_target_data}.
        feed_holder_name: the name of the FEED_MINIBATCH holder variable.

    Returns:
        bool: whether matching feed operators are already present.
    """
    num_feeds = 0
    for op in block.ops:
        # Feed ops sit at the front of the block; stop at the first
        # op of any other type.
        if op.desc.type() != 'feed':
            break
        num_feeds += 1
        assert op.desc.input('X')[0] == feed_holder_name
        feed_target_name = op.desc.output('Out')[0]
        if feed_target_name not in feed_targets:
            raise Exception("'feed_targets' does not have {} variable".
                            format(feed_target_name))
    if num_feeds > 0 and num_feeds != len(feed_targets):
        raise Exception(
            "Feed operators in program desc do not match 'feed_targets'")
    return num_feeds > 0
def has_fetch_operators(block, fetch_targets, fetch_holder_name):
    """Return True when *block* already contains matching fetch ops.

    Every fetch op in the block is validated: its holder variable must
    be *fetch_holder_name*, its input must appear in *fetch_targets*,
    and its 'col' attribute must index the matching target.  A mismatch
    raises an exception.

    Args:
        block: a block instance (typically global block of a program).
        fetch_targets: a list of variables to be fetched.
        fetch_holder_name: the name of the FETCH_LIST holder variable.

    Return:
        bool: whether matching fetch operators are already present.
    """
    num_fetches = 0
    for op in block.ops:
        if op.desc.type() != 'fetch':
            continue
        num_fetches += 1
        assert op.desc.output('Out')[0] == fetch_holder_name
        fetch_target_name = op.desc.input('X')[0]
        known_names = [var.desc.name() for var in fetch_targets]
        if fetch_target_name not in known_names:
            raise Exception("'fetch_targets' does not have {} variable".
                            format(fetch_target_name))
        idx = op.desc.attr('col')
        assert fetch_target_name == fetch_targets[idx].desc.name()
    if num_fetches > 0 and num_fetches != len(fetch_targets):
        raise Exception(
            "Fetch operators in program desc do not match 'fetch_targets'")
    return num_fetches > 0
def _fetch_var(name, scope=None, return_numpy=True):
    """Fetch the value of the variable *name* from *scope*.

    Args:
        name(str): name of the variable.  Typically only persistable
            variables can be found in the scope used to run a program.
        scope(core.Scope|None): the scope to search; defaults to
            global_scope() when None.
        return_numpy(bool): convert the tensor to numpy.ndarray.

    Returns:
        LoDTensor|numpy.ndarray
    """
    assert isinstance(name, str)
    if scope is None:
        scope = global_scope()
    assert isinstance(scope, core._Scope)

    var = scope.find_var(name)
    assert var is not None, (
        "Cannot find " + name + " in scope. Perhaps you need to make the"
        " variable persistable by using var.persistable = True in your"
        " program.")
    result = var.get_tensor()
    if return_numpy:
        result = as_numpy(result)
    return result
def _to_name_str(var):
    """Normalize a fetch-list entry (Variable or string) to its name."""
    if isinstance(var, Variable):
        return var.desc.name()
    if isinstance(var, str):
        return var
    if isinstance(var, six.string_types):
        # Python 2 unicode (or other string flavours) coerced to str.
        return str(var)
    raise TypeError(str(var) + " should be Variable or str")
def _get_program_cache_key(feed, fetch_list):
    """Build a cache key from the feed names plus the fetch variable names."""
    fetch_names = [_to_name_str(item) for item in fetch_list]
    return str(list(feed.keys()) + fetch_names)
def _as_lodtensor(data, place):
    """Wrap a numpy.ndarray in a LoDTensor placed on *place*.

    Only plain (LoD-free) tensors are supported; list inputs are
    rejected because they would require LoD information to convert
    faithfully.

    Args:
        data(numpy.ndarray): the array to wrap.

    Returns:
        LoDTensor
    """
    if isinstance(data, list):
        raise RuntimeError("Some of your feed data hold LoD information. \
                They can not be completely cast from a list of Python \
                ndarray to LoDTensor. Please convert data to LoDTensor \
                directly before feeding the data.\
                ")
    # single tensor case
    wrapped = core.LoDTensor()
    wrapped.set(data, place)
    return wrapped
class Executor(object):
"""
An Executor in Python, supports single/multiple-GPU running, and single/multiple-CPU running.
Python executor takes a program, adds feed operators and fetch operators to this program according
to feed map and fetch_list. Feed map provides input data for the program. fetch_list provides
the variables(or names) that user wants to get after program runs. Note: the executor will run all
operators in the program but not only the operators dependent by the fetch_list.
It stores the global variables into the global scope, and creates a local scope for the temporary
variables. The contents in local scope may be discarded after every minibatch forward/backward
finished. But the global scope variables will be persistent through different runs.
Example:
.. code-block:: python
# First create the Executor.
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
# Run the startup program once and only once.
# Not need to optimize/compile the startup program.
exe.run(fluid.default_startup_program())
# Run the main program directly without compile.
loss, = exe.run(fluid.default_main_program(),
feed=feed_dict,
fetch_list=[loss.name])
# Or, compiled the program and run. See `CompiledProgram` for more detail.
compiled_prog = compiler.CompiledProgram(
fluid.default_main_program()).with_data_parallel(
loss_name=loss.name)
loss, = exe.run(compiled_prog,
feed=feed_dict,
fetch_list=[loss.name])
Args:
place(core.CPUPlace|core.CUDAPlace(n)): indicate the executor run on which device
"""
def __init__(self, place):
self.place = place
self.program_caches = dict()
p = core.Place()
p.set_place(self.place)
self._default_executor = core.Executor(p)
self._closed = False
def _get_program_cache(self, program_cache_key):
return self.program_caches.get(program_cache_key, None)
def _add_program_cache(self, program_cache_key, program):
self.program_caches[program_cache_key] = program
    def _add_feed_fetch_ops(self, program, feed, fetch_list, feed_var_name,
                            fetch_var_name):
        """Return a clone of *program* with feed/fetch ops inserted.

        The clone gains (or reuses) a FEED_MINIBATCH variable named
        *feed_var_name* and a FETCH_LIST variable named *fetch_var_name*,
        prepends one `feed` op per entry in *feed* and appends one
        `fetch` op per entry in *fetch_list*.  The original program is
        left untouched.
        """
        tmp_program = program.clone()

        global_block = tmp_program.global_block()

        # Reuse the holder variables when a previous call created them.
        if feed_var_name in global_block.vars:
            feed_var = global_block.var(feed_var_name)
        else:
            feed_var = global_block.create_var(
                name=feed_var_name,
                type=core.VarDesc.VarType.FEED_MINIBATCH,
                persistable=True)

        if fetch_var_name in global_block.vars:
            fetch_var = global_block.var(fetch_var_name)
        else:
            fetch_var = global_block.create_var(
                name=fetch_var_name,
                type=core.VarDesc.VarType.FETCH_LIST,
                persistable=True)

        # prepend feed operators
        if not has_feed_operators(global_block, feed, feed_var_name):
            for i, name in enumerate(feed):
                out = global_block.var(name)
                # 'col' records which minibatch slot this feed fills.
                global_block._prepend_op(
                    type='feed',
                    inputs={'X': [feed_var]},
                    outputs={'Out': [out]},
                    attrs={'col': i})

        # append fetch_operators
        if not has_fetch_operators(global_block, fetch_list, fetch_var_name):
            for i, var in enumerate(fetch_list):
                assert isinstance(var, Variable) or isinstance(
                    var, six.string_types), (
                        "Wrong type for fetch_list[%s]: %s" % (i, type(var)))
                global_block.append_op(
                    type='fetch',
                    inputs={'X': [var]},
                    outputs={'Out': [fetch_var]},
                    attrs={'col': i})

        return tmp_program
def _feed_data(self, program, feed, feed_var_name, scope):
# feed var to framework
for op in program.global_block().ops:
if op.desc.type() == 'feed':
feed_target_name = op.desc.output('Out')[0]
cur_feed = feed[feed_target_name]
if not isinstance(cur_feed, core.LoDTensor):
cur_feed = _as_lodtensor(cur_feed, self.place)
idx = op.desc.attr('col')
core.set_feed_variable(scope, cur_feed, feed_var_name, idx)
else:
break
def _fetch_data(self, fetch_list, fetch_var_name, scope):
outs = [
core.get_fetch_variable(scope, fetch_var_name, i)
for i in six.moves.range(len(fetch_list))
]
return outs
'''
TODO(typhoonzero): Define "no longer use" meaning? Can user create
a new Executor for the same program and run?
TODO(panyx0718): Why ParallelExecutor doesn't have close?
'''
def close(self):
"""
Close this executor.
You can no longer use this executor after calling this method.
For the distributed training, this method would free the resource on PServers related to
the current Trainer.
Example:
>>> cpu = core.CPUPlace()
>>> exe = Executor(cpu)
>>> ...
>>> exe.close()
"""
if not self._closed:
self._default_executor.close()
self._closed = True
    def _run_parallel(self, program, scope, feed, fetch_list, fetch_var_name,
                      return_numpy):
        """Run a data-parallel CompiledProgram and return the fetched results.

        *feed* may be a dict (each tensor is split across the program's
        places) or a list/tuple of dicts, one per place.
        """
        exe = program._executor
        if isinstance(feed, dict):
            feed_tensor_dict = dict()
            for feed_name in feed:
                feed_tensor = feed[feed_name]
                if not isinstance(feed_tensor, core.LoDTensor):
                    feed_tensor = core.LoDTensor()
                    # always set to CPU place, since the tensor need to be splitted
                    # it is fast in CPU
                    feed_tensor.set(feed[feed_name], core.CPUPlace())
                feed_tensor_dict[feed_name] = feed_tensor

            exe.feed_and_split_tensor_into_local_scopes(feed_tensor_dict)
        elif isinstance(feed, list) or isinstance(feed, tuple):
            # Per-place feed: one dict per device, placed directly there.
            if len(feed) != len(program._places):
                raise ValueError(
                    "Feed a list of tensor, the list should be the same size as places"
                )

            res = list()
            for i, each in enumerate(feed):
                if not isinstance(each, dict):
                    raise TypeError(
                        "Each element of feed list should be a dict")
                res_dict = dict()
                for feed_name in each:
                    tensor = each[feed_name]
                    if not isinstance(tensor, core.LoDTensor):
                        tmp = core.LoDTensor()
                        tmp.set(tensor, program._places[i])
                        tensor = tmp
                    res_dict[feed_name] = tensor
                res.append(res_dict)
            exe.feed_tensors_into_local_scopes(res)

        fetch_var_names = list(map(_to_name_str, fetch_list))
        exe.run(fetch_var_names, fetch_var_name)
        # Results are gathered into a LoDTensorArray under fetch_var_name.
        arr = scope.find_var(fetch_var_name).get_lod_tensor_array()

        if return_numpy:
            return as_numpy(arr)

        return [arr[i] for i in range(len(arr))]
def run(self,
        program=None,
        feed=None,
        fetch_list=None,
        feed_var_name='feed',
        fetch_var_name='fetch',
        scope=None,
        return_numpy=True,
        use_program_cache=False):
    """
    Run program by this Executor. Feed data by feed map, fetch result by fetch_list.
    Python executor takes a program, add feed operators and fetch operators to this program according
    to feed map and fetch_list. Feed map provides input data for the program. fetch_list provides
    the variables(or names) that user want to get after program run.

    Note: the executor will run all
    operators in the program but not only the operators dependent by the fetch_list

    Args:
        program(Program|CompiledProgram): the program that need to run,
            if not provided, then default_main_program (not compiled) will be used.
        feed(dict): feed variable map, e.g. {"image": ImageData, "label": LabelData}
        fetch_list(list): a list of variable or variable names that user
            wants to get, this method will return them according to this list.
        feed_var_name(str): the name for the input variable of
            feed Operator.
        fetch_var_name(str): the name for the output variable of
            fetch Operator.
        scope(Scope): the scope used to run this program, you can switch
            it to different scope. default is global_scope
        return_numpy(bool): if convert the fetched tensor to numpy
        use_program_cache(bool): whether to use the cached program
            settings across batches. Setting it be true would be faster
            only when (1) the program is not compiled with data parallel,
            and (2) program, feed variable names and fetch_list variable
            names do not changed compared to the last step.

    Returns:
        list(numpy.array): fetch result according to fetch_list.

    Examples:

        >>> data = fluid.layers.data(name='X', shape=[1], dtype='float32')
        >>> out = fluid.layers.create_tensor(dtype='float32')
        >>> hidden = fluid.layers.fc(input=data, size=10)
        >>> fluid.layers.assign(hidden,out)
        >>> loss = fluid.layers.mean(out)
        >>> adam = fluid.optimizer.Adam()
        >>> adam.minimize(loss)

        >>> cpu = core.CPUPlace()
        >>> exe = fluid.Executor(cpu)
        >>> exe.run(fluid.default_startup_program())

        >>> x = numpy.random.random(size=(10, 1)).astype('float32')
        >>> outs = exe.run(
        >>>     feed={'X': x},
        >>>     fetch_list=[loss.name])
    """
    if self._closed:
        raise RuntimeError("Attempted to use a closed Executor")

    if scope is None:
        scope = global_scope()
    if fetch_list is None:
        fetch_list = []

    compiled = isinstance(program, compiler.CompiledProgram)

    # For backward compatibility, run directly.
    if not compiled:
        return self._run(
            program,
            self._default_executor,
            feed=feed,
            fetch_list=fetch_list,
            feed_var_name=feed_var_name,
            fetch_var_name=fetch_var_name,
            scope=scope,
            return_numpy=return_numpy,
            use_program_cache=use_program_cache)

    # Dispatch a CompiledProgram to the matching execution path.
    program._compile(scope, self.place)
    if program._is_data_parallel:
        return self._run_parallel(
            program,
            scope=scope,
            feed=feed,
            fetch_list=fetch_list,
            fetch_var_name=fetch_var_name,
            return_numpy=return_numpy)
    elif program._is_inference:
        return self._run_inference(program._executor, feed)
    else:
        # TODO(panyx0718): Can compile program to optimize executor
        # performance.
        # TODO(panyx0718): executor should be able to run graph.
        assert program._program, "CompiledProgram is compiled from graph, can only run with_data_parallel."
        return self._run(
            program._program,
            self._default_executor,
            feed=feed,
            fetch_list=fetch_list,
            feed_var_name=feed_var_name,
            fetch_var_name=fetch_var_name,
            scope=scope,
            return_numpy=return_numpy,
            use_program_cache=use_program_cache)
def _run(self, program, exe, feed, fetch_list, feed_var_name,
         fetch_var_name, scope, return_numpy, use_program_cache):
    # Single-device execution path: normalize the feed, append feed/fetch
    # operators to a clone of the program (optionally cached across steps
    # keyed by feed names + fetch list), then run and collect results.

    if feed is None:
        feed = {}
    elif isinstance(feed, (list, tuple)):
        # A 1-element list/tuple is accepted for API symmetry with the
        # data-parallel path, which takes one feed dict per device.
        assert len(feed) == 1, "Not compiled with data parallel"
        feed = feed[0]

    if not isinstance(feed, dict):
        raise TypeError(
            "feed requires dict as its Parameter. But you passed in %s" %
            (type(feed)))
    if program is None:
        program = default_main_program()

    if not isinstance(program, Program):
        raise TypeError(
            "Executor requires Program as its Parameter. But you passed in %s"
            % (type(program)))

    cache_key = _get_program_cache_key(feed, fetch_list)
    if use_program_cache:
        # Reuse the feed/fetch-augmented program built on a previous step
        # with the same feed variable names and fetch list.
        cached_program = self._get_program_cache(cache_key)
        if cached_program is None:
            cached_program = self._add_feed_fetch_ops(
                program=program,
                feed=feed,
                fetch_list=fetch_list,
                feed_var_name=feed_var_name,
                fetch_var_name=fetch_var_name)
            self._add_program_cache(cache_key, cached_program)
        program = cached_program
    else:
        # Caching disabled for this call: drop any stale cache entry.
        self.program_caches.pop(cache_key, None)
        program = self._add_feed_fetch_ops(
            program=program,
            feed=feed,
            fetch_list=fetch_list,
            feed_var_name=feed_var_name,
            fetch_var_name=fetch_var_name)

    self._feed_data(program, feed, feed_var_name, scope)
    exe.run(program.desc, scope, 0, True, True, fetch_var_name)
    outs = self._fetch_data(fetch_list, fetch_var_name, scope)
    if return_numpy:
        outs = as_numpy(outs)
    return outs
def _run_inference(self, exe, feed):
    # Inference-optimized CompiledProgram path: the native inference
    # executor handles feeding and fetching internally.
    return exe.run(feed)
def _dump_debug_info(self, program=None, trainer=None):
    # Debug helper: write the trainer description (and the fleet
    # description, when distributed fleet options are set) to prototxt
    # files in the current working directory for offline inspection.
    # The program's id() keeps dumps from different programs distinct.
    with open(str(id(program)) + "_train_desc.prototxt", "w") as fout:
        fout.write(trainer._desc())
    if program._fleet_opt:
        with open("fleet_desc.prototxt", "w") as fout:
            fout.write(str(program._fleet_opt["fleet_desc"]))
def _prepare_trainer(self,
                     program=None,
                     dataset=None,
                     scope=None,
                     thread=0,
                     debug=False,
                     fetch_list=None,
                     fetch_info=None,
                     print_period=100):
    # Shared setup for train_from_dataset / infer_from_dataset: build a
    # trainer from the program's fleet options, configure its thread
    # count, debug flag and fetch/print settings, and return it together
    # with the scope it should run in.
    if scope is None:
        scope = global_scope()
    if fetch_list is None:
        fetch_list = []
    if fetch_info is None:
        fetch_info = []
    # Each fetched variable needs a matching print label.
    assert len(fetch_list) == len(fetch_info)
    compiled = isinstance(program, compiler.CompiledProgram)
    if not compiled:
        trainer = TrainerFactory()._create_trainer(program._fleet_opt)
        trainer._set_program(program)
    else:
        # CompiledProgram wraps the raw Program in .program.
        trainer = TrainerFactory()._create_trainer(
            program.program._fleet_opt)
        trainer._set_program(program.program)
    if thread <= 0:
        # Fall back to the dataset's thread count when none is given.
        if dataset.thread_num <= 0:
            raise RuntimeError(
                "You should set thread num first, either in Dataset"
                "or in Executor.train_from_dataset")
        else:
            trainer._set_thread(dataset.thread_num)
    else:
        trainer._set_thread(thread)
    trainer._set_debug(debug)
    trainer._set_fetch_var_and_info(fetch_list, fetch_info, print_period)
    return scope, trainer
def infer_from_dataset(self,
                       program=None,
                       dataset=None,
                       scope=None,
                       thread=0,
                       debug=False,
                       fetch_list=None,
                       fetch_info=None,
                       print_period=100):
    """
    Run inference over every sample of a pre-defined Dataset.

    The document of infer_from_dataset is almost the same as
    train_from_dataset, except that in distributed training,
    push gradients will be disabled in infer_from_dataset.
    infer_from_dataset() can be used for evaluation in multi-thread
    very easily.

    Args:
        program(Program|CompiledProgram): the program that needs to be run,
            if not provided, then default_main_program (not compiled) will be used.
        dataset(paddle.fluid.Dataset): dataset created outside this function,
            a user should provide a well-defined dataset before calling this function.
            Please check the document of Dataset if needed. default is None
        scope(Scope): the scope used to run this program, you can switch it to different scope
            for each run. default is global_scope
        thread(int): number of thread a user wants to run in this function. The actual number
            of thread will be min(Dataset.thread_num, thread) if thread > 0, default is 0
        debug(bool): whether a user wants to run infer_from_dataset, default is False
        fetch_list(Variable List): fetch variable list, each variable
            will be printed during training, default is None
        fetch_info(String List): print information for each variable, default is None
        print_period(int): the number of mini-batches for each print, default is 100

    Returns:
        None

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            x = fluid.layers.data(name="x", type="int64")
            y = fluid.layers.data(name="y", type="int64")
            dataset = fluid.DatasetFactory().create_dataset()
            dataset.set_use_var([x, y])
            filelist = ["dataA.txt", "dataB.txt"]
            dataset.set_filelist(filelist)
            exe.run(fluid.default_startup_program())
            exe.infer_from_dataset(program=fluid.default_main_program(),
                                   dataset=dataset)

    """
    # PEP 8: compare against the None singleton with `is`, not `==`
    # (== would invoke the object's __eq__ and can misbehave).
    if dataset is None:
        raise RuntimeError("dataset is needed and should be initialized")
    scope, trainer = self._prepare_trainer(
        program=program,
        dataset=dataset,
        scope=scope,
        thread=thread,
        debug=debug,
        fetch_list=fetch_list,
        fetch_info=fetch_info,
        print_period=print_period)
    # Inference mode: disables gradient push in distributed training.
    trainer._set_infer(True)
    trainer._gen_trainer_desc()
    dataset._prepare_to_run()
    if debug:
        self._dump_debug_info(program=program, trainer=trainer)
    self._default_executor.run_from_dataset(program.desc, scope,
                                            dataset.dataset,
                                            trainer._desc())
    return None
def train_from_dataset(self,
                       program=None,
                       dataset=None,
                       scope=None,
                       thread=0,
                       debug=False,
                       fetch_list=None,
                       fetch_info=None,
                       print_period=100):
    """
    Train from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset.
    Given a program, either a program or compiled program, train_from_dataset will
    consume all data samples in dataset. Input scope can be given by users. By default,
    scope is global_scope(). The total number of thread run in training is `thread`.
    Thread number used in training will be minimum value of threadnum in Dataset and
    the value of thread in this interface. Debug can be set so that executor will display
    Run-Time for all operators and the throughputs of current training task.

    Note: train_from_dataset will destroy all resources created within executor for each run.

    Args:
        program(Program|CompiledProgram): the program that needs to be run,
            if not provided, then default_main_program (not compiled) will be used.
        dataset(paddle.fluid.Dataset): dataset created outside this function,
            a user should provide a well-defined dataset before calling this function.
            Please check the document of Dataset if needed.
        scope(Scope): the scope used to run this program, you can switch it to different scope
            for each run. default is global_scope
        thread(int): number of thread a user wants to run in this function. The actual number
            of thread will be min(Dataset.thread_num, thread)
        debug(bool): whether a user wants to run train_from_dataset
        fetch_list(Variable List): fetch variable list, each variable
            will be printed during training
        fetch_info(String List): print information for each variable
        print_period(int): the number of mini-batches for each print

    Returns:
        None

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            x = fluid.layers.data(name="x", type="int64")
            y = fluid.layers.data(name="y", type="int64")
            dataset = fluid.DatasetFactory().create_dataset()
            dataset.set_use_var([x, y])
            dataset.set_thread(2)
            filelist = ["dataA.txt", "dataB.txt"]
            dataset.set_filelist(filelist)
            exe.run(fluid.default_startup_program())
            exe.train_from_dataset(program=fluid.default_main_program(),
                                   dataset=dataset)

    """
    # PEP 8: compare against the None singleton with `is`, not `==`.
    # Message wording also fixed ("is need" -> "is needed") to match
    # the identical check in infer_from_dataset.
    if dataset is None:
        raise RuntimeError("dataset is needed and should be initialized")
    scope, trainer = self._prepare_trainer(
        program=program,
        dataset=dataset,
        scope=scope,
        thread=thread,
        debug=debug,
        fetch_list=fetch_list,
        fetch_info=fetch_info,
        print_period=print_period)
    trainer._gen_trainer_desc()
    dataset._prepare_to_run()
    if debug:
        self._dump_debug_info(program=program, trainer=trainer)
    self._default_executor.run_from_dataset(program.desc, scope,
                                            dataset.dataset,
                                            trainer._desc())
    return None
| 39.87069 | 111 | 0.595181 |
c2dfdca43153ad69c1438b2038009efcec56337f | 3,308 | py | Python | cloudnetpy/instruments/lufft.py | saveriogzz/cloudnetpy | baa3ed5f254425c5a9c787556ec652ea659b38ba | [
"MIT"
] | null | null | null | cloudnetpy/instruments/lufft.py | saveriogzz/cloudnetpy | baa3ed5f254425c5a9c787556ec652ea659b38ba | [
"MIT"
] | null | null | null | cloudnetpy/instruments/lufft.py | saveriogzz/cloudnetpy | baa3ed5f254425c5a9c787556ec652ea659b38ba | [
"MIT"
] | null | null | null | """Module with a class for Lufft chm15k ceilometer."""
from typing import Union, List, Optional
import logging
import netCDF4
import numpy as np
from cloudnetpy.instruments.ceilometer import Ceilometer
from cloudnetpy import utils
class LufftCeilo(Ceilometer):
    """Class for Lufft chm15k ceilometer.

    Reads raw backscatter and metadata from a CHM15k netCDF file and
    applies overlap correction plus calibration.
    """

    def __init__(self, file_name: str, date: Optional[str] = None):
        super().__init__(file_name)
        # Expected measurement date ('YYYY-M-D'); if given, profiles from
        # any other date are rejected in _fetch_time().
        self._expected_date = date
        self.model = 'Lufft CHM15k'
        self.dataset = netCDF4.Dataset(self.file_name)
        self.variables = self.dataset.variables
        # Noise-screening parameters used downstream — presumably
        # (gate index, noise min, ...); TODO confirm against Ceilometer.
        self.noise_params = (70, 2e-14, 0.3e-6, (1e-9, 4e-9))
        # Laser wavelength in nm.
        self.wavelength = 1064

    def read_ceilometer_file(self, calibration_factor: Optional[float] = None) -> None:
        """Reads data and metadata from Jenoptik netCDF file."""
        self.range = self._calc_range()
        self.backscatter = self._calibrate_backscatter(calibration_factor)
        self.time = self._fetch_time()
        self.date = self._read_date()
        self.metadata = self._read_metadata()

    def _calc_range(self) -> np.ndarray:
        """Assumes 'range' means the upper limit of range gate."""
        ceilo_range = self._getvar('range')
        # Shift to range-gate centers by subtracting half a gate length.
        return ceilo_range - utils.mdiff(ceilo_range)/2

    def _calibrate_backscatter(self, calibration_factor: Union[float, None]) -> np.ndarray:
        """Apply overlap correction and calibration to raw backscatter."""
        beta_raw = self._getvar('beta_raw')
        overlap_function = _get_overlap(self.range)
        beta_raw /= overlap_function
        if calibration_factor is None:
            logging.warning('Using default calibration factor for CHM15k')
            calibration_factor = 3e-12
        self.calibration_factor = calibration_factor
        beta_raw *= calibration_factor
        return beta_raw

    def _fetch_time(self) -> np.ndarray:
        """Return measurement times (hours), sorted and date-filtered.

        Also reorders/filters self.backscatter rows to stay aligned
        with the returned time axis.
        """
        time = self.variables['time'][:]
        ind = time.argsort()
        time = time[ind]
        self.backscatter = self.backscatter[ind, :]
        if self._expected_date is not None:
            epoch = utils.get_epoch(self.variables['time'].units)
            valid_ind = []
            for ind, timestamp in enumerate(time):
                date = '-'.join(utils.seconds2date(timestamp, epoch)[:3])
                if date == self._expected_date:
                    valid_ind.append(ind)
            if not valid_ind:
                raise ValueError('Error: CHM15k date differs from expected.')
            time = time[valid_ind]
            self.backscatter = self.backscatter[valid_ind, :]
        return utils.seconds2hours(time)

    def _read_date(self) -> List[str]:
        # Zero-padded ['YYYY', 'MM', 'DD'] from the file's global attributes.
        return [str(self.dataset.year),
                str(self.dataset.month).zfill(2),
                str(self.dataset.day).zfill(2)]

    def _getvar(self, *args) -> Union[np.ndarray, float, None]:
        # Return the first matching variable name, or None if none exist.
        for arg in args:
            if arg in self.variables:
                var = self.variables[arg]
                return var[0] if utils.isscalar(var) else var[:]
        return None

    def _read_metadata(self) -> dict:
        return {'tilt_angle': self._getvar('zenith')}
def _get_overlap(range_ceilo: np.ndarray,
                 params: Optional[tuple] = (0, 1)) -> np.ndarray:
    """Approximate the instrument overlap function.

    Maps the ceilometer range axis to an overlap correction curve using
    a probability-shaped function parameterized by *params*.
    """
    overlap = utils.array_to_probability(range_ceilo, *params)
    return overlap
| 39.855422 | 91 | 0.633615 |
e4133acf32e8b08cb79bcd8d609533dd760882b7 | 6,663 | py | Python | test/functional/feature_cltv.py | cryptoBLAST/Ravencoin | b277310f51b6f99d52a30eac5e79df29824765f3 | [
"MIT"
] | 3 | 2020-03-31T08:36:54.000Z | 2020-11-17T01:59:46.000Z | test/functional/feature_cltv.py | cryptoBLAST/Ravencoin | b277310f51b6f99d52a30eac5e79df29824765f3 | [
"MIT"
] | 1 | 2020-09-09T23:23:57.000Z | 2020-09-09T23:23:57.000Z | test/functional/feature_cltv.py | cryptoBLAST/Ravencoin | b277310f51b6f99d52a30eac5e79df29824765f3 | [
"MIT"
] | 2 | 2019-04-15T10:15:37.000Z | 2019-05-02T06:29:29.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017-2018 The Raven Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP65 (CHECKLOCKTIMEVERIFY).
Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block height
1351.
"""
from test_framework.test_framework import BlastTestFramework
from test_framework.util import *
from test_framework.mininode import *
from test_framework.blocktools import create_coinbase, create_block
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP, CScriptNum
from io import BytesIO
CLTV_HEIGHT = 1351
# Reject codes that we might receive in this test
REJECT_INVALID = 16
REJECT_OBSOLETE = 17
REJECT_NONSTANDARD = 64
def cltv_invalidate(tx):
    '''Modify the signature in vin 0 of the tx to fail CLTV

    Prepends -1 CLTV DROP in the scriptSig itself.

    TODO: test more ways that transactions using CLTV could be invalid (eg
    locktime requirements fail, sequence time requirements fail, etc).
    '''
    # -1 is always an invalid locktime operand, so CHECKLOCKTIMEVERIFY
    # fails immediately once the soft fork is active.
    tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP] +
                                  list(CScript(tx.vin[0].scriptSig)))
def cltv_validate(node, tx, height):
    '''Modify the signature in vin 0 of the tx to pass CLTV

    Prepends <height> CLTV DROP in the scriptSig, and sets
    the locktime to height'''
    tx.vin[0].nSequence = 0
    tx.nLockTime = height

    # Need to re-sign, since nSequence and nLockTime changed
    signed_result = node.signrawtransaction(ToHex(tx))
    new_tx = CTransaction()
    new_tx.deserialize(BytesIO(hex_str_to_bytes(signed_result['hex'])))

    # Prepend <height> CLTV DROP so the script passes when the tx
    # locktime (set above) satisfies the CLTV operand.
    new_tx.vin[0].scriptSig = CScript([CScriptNum(height), OP_CHECKLOCKTIMEVERIFY, OP_DROP] +
                                      list(CScript(new_tx.vin[0].scriptSig)))
    return new_tx
def create_transaction(node, coinbase, to_address, amount):
    """Build and sign a tx spending output 0 of *coinbase*'s coinbase
    transaction to *to_address*, returning a CTransaction object."""
    from_txid = node.getblock(coinbase)['tx'][0]
    inputs = [{ "txid" : from_txid, "vout" : 0}]
    outputs = { to_address : amount }
    rawtx = node.createrawtransaction(inputs, outputs)
    signresult = node.signrawtransaction(rawtx)
    tx = CTransaction()
    tx.deserialize(BytesIO(hex_str_to_bytes(signresult['hex'])))
    return tx
class BIP65Test(BlastTestFramework):
    """Functional test for BIP65 (CHECKLOCKTIMEVERIFY) soft-fork
    activation at regtest block height CLTV_HEIGHT."""

    def set_test_params(self):
        self.num_nodes = 1
        # Accept nonstandard txs into the mempool so CLTV-invalid txs can
        # be tested there before block validation.
        self.extra_args = [['-promiscuousmempoolflags=1', '-whitelist=127.0.0.1']]
        self.setup_clean_chain = True

    def run_test(self):
        node0 = NodeConnCB()
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])
        NetworkThread().start()  # Start up network handling in another thread

        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        # Mine up to one block below the activation height.
        self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
        self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2)
        self.nodeaddress = self.nodes[0].getnewaddress()

        self.log.info("Test that an invalid-according-to-CLTV transaction can still appear in a block")

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0],
                                     self.nodeaddress, 1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()

        tip = self.nodes[0].getbestblockhash()
        block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
        block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1), block_time)
        block.nVersion = 3
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        node0.send_and_ping(msg_block(block))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        self.log.info("Test that blocks must now be at least version 4")
        tip = block.sha256
        block_time += 1
        block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
        block.nVersion = 3
        block.solve()
        node0.send_and_ping(msg_block(block))
        # Old-version block at activation height must be rejected.
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock)
        with mininode_lock:
            assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE)
            assert_equal(node0.last_message["reject"].reason, b'bad-version(0x00000003)')
            assert_equal(node0.last_message["reject"].data, block.sha256)
            del node0.last_message["reject"]

        self.log.info("Test that invalid-according-to-cltv transactions cannot appear in a block")
        block.nVersion = 4

        spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1],
                                     self.nodeaddress, 1.0)
        cltv_invalidate(spendtx)
        spendtx.rehash()

        # First we show that this tx is valid except for CLTV by getting it
        # accepted to the mempool (which we can achieve with
        # -promiscuousmempoolflags).
        node0.send_and_ping(msg_tx(spendtx))
        assert spendtx.hash in self.nodes[0].getrawmempool()

        # Now we verify that a block with this transaction is invalid.
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        node0.send_and_ping(msg_block(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)

        wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock)
        with mininode_lock:
            assert node0.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
            assert_equal(node0.last_message["reject"].data, block.sha256)
            if node0.last_message["reject"].code == REJECT_INVALID:
                # Generic rejection when a block is invalid
                assert_equal(node0.last_message["reject"].reason, b'block-validation-failed')
            else:
                assert b'Negative locktime' in node0.last_message["reject"].reason

        self.log.info("Test that a version 4 block with a valid-according-to-CLTV transaction is accepted")
        spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1)
        spendtx.rehash()

        # Replace the invalid spend with the CLTV-valid version.
        block.vtx.pop(1)
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        node0.send_and_ping(msg_block(block))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
BIP65Test().main()
| 40.381818 | 107 | 0.680774 |
a76a95360b6309f6b55eebaf920cf2bd7e1475d5 | 745 | py | Python | yacos/model/__init__.py | ComputerSystemsLaboratory/YaCoS | abd5d3c6e227e5c7a563493f7855ebf58ba3de05 | [
"Apache-2.0"
] | 8 | 2022-02-03T16:41:01.000Z | 2022-02-09T11:29:20.000Z | yacos/model/__init__.py | ComputerSystemsLaboratory/YaCoS | abd5d3c6e227e5c7a563493f7855ebf58ba3de05 | [
"Apache-2.0"
] | null | null | null | yacos/model/__init__.py | ComputerSystemsLaboratory/YaCoS | abd5d3c6e227e5c7a563493f7855ebf58ba3de05 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2021 Anderson Faustino da Silva.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .net_model import NetModel
from .representation_extractor import RepresentationExtractor
from .graph_from_sequences import GraphFromSequences
__version__ = '2.1.0'
| 32.391304 | 72 | 0.802685 |
73b8952b5c049648a4673e723793b1bb15e9bdee | 8,276 | py | Python | myuw/views/api/base_schedule.py | timtim17/myuw | d59702a8095daf049d7e57cbb1f7f2a5bebc69af | [
"Apache-2.0"
] | null | null | null | myuw/views/api/base_schedule.py | timtim17/myuw | d59702a8095daf049d7e57cbb1f7f2a5bebc69af | [
"Apache-2.0"
] | null | null | null | myuw/views/api/base_schedule.py | timtim17/myuw | d59702a8095daf049d7e57cbb1f7f2a5bebc69af | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
import traceback
from operator import itemgetter
from restclients_core.exceptions import InvalidNetID
from myuw.dao.campus_building import get_building_by_code
from myuw.dao.canvas import (
get_canvas_active_enrollments, set_section_canvas_course_urls)
from myuw.dao.enrollment import get_enrollment_for_term, is_ended
from myuw.dao.library import get_subject_guide_by_section
from myuw.dao.pws import get_person_of_current_user
from myuw.dao.registration import get_schedule_by_term
# from myuw.dao.schedule import filter_schedule_sections_by_summer_term
# from myuw.dao.registered_term import get_current_summer_term_in_schedule
from myuw.logger.timer import Timer
from myuw.logger.logresp import (
log_data_not_found_response, log_api_call, log_exception)
from myuw.views.api import ProtectedAPI
from myuw.views.error import data_not_found, unknown_uwnetid, handle_exception
from myuw.views import prefetch_resources
logger = logging.getLogger(__name__)
class StudClasSche(ProtectedAPI):
    """API view returning the current user's class schedule as JSON."""

    def dispatch(self, request, *args, **kwargs):
        timer = Timer()
        try:
            # Called to validate the user's netid; the returned person
            # object itself is unused (InvalidNetID aborts the request).
            person = get_person_of_current_user(request)
        except InvalidNetID:
            return unknown_uwnetid()
        try:
            # Warm the per-request caches before handling the request.
            prefetch_resources(request,
                               prefetch_enrollment=True,
                               prefetch_library=True,
                               prefetch_canvas=True)
            return super(StudClasSche, self).dispatch(request, *args, **kwargs)
        except Exception:
            handle_exception(logger, timer, traceback)

    def make_http_resp(self, timer, term, request, summer_term=None):
        """
        @return class schedule data in json format
        status 404: no schedule found (not registered)
        """
        schedule = get_schedule_by_term(
            request, term=term, summer_term=summer_term)
        if len(schedule.sections) == 0:
            log_data_not_found_response(logger, timer)
            return data_not_found()
        resp_data = load_schedule(request, schedule)
        log_api_call(timer, request,
                     "Get Student Schedule {},{}".format(term.year,
                                                         term.quarter))
        return self.json_response(resp_data)
def load_schedule(request, schedule):
    """Build the JSON payload for a student's class schedule.

    Starts from schedule.json_data() and enriches each section in place:
    Canvas URLs, library subject guides, building coordinates, early
    fall start / irregular-date flags, EOS meeting dates and standby
    status. Finally sorts the sections and stamps each with its index.
    """
    json_data = schedule.json_data()

    if schedule.term.is_summer_quarter():
        json_data["summer_term"] = schedule.summer_term

    if len(schedule.sections):
        # Best effort: attach Canvas course URLs; failures are logged
        # but must not break the schedule response.
        try:
            set_section_canvas_course_urls(
                get_canvas_active_enrollments(request), schedule, request)
        except Exception:
            log_exception(logger, 'get_canvas_active_enrollments', traceback)
            pass

    section_index = 0
    json_data["has_eos_dates"] = False
    for section in schedule.sections:
        # json_data["sections"] is parallel to schedule.sections here
        # (sorted only after this loop finishes).
        section_data = json_data["sections"][section_index]
        section_index += 1

        section_data["color_id"] = section.color_id
        section_data['course_abbr_slug'] = section.curriculum_abbr.replace(
            " ", "-")

        if not section_data["section_type"]:
            if len(section.meetings) > 0:
                section_data["section_type"] = section.meetings[0].meeting_type

        if section.is_early_fall_start():
            section_data["cc_display_dates"] = True
            section_data["early_fall_start"] = True
            json_data["has_early_fall_start"] = True
            section_data["is_ended"] = is_ended(request, section.end_date)
        else:
            # Show dates for sections whose start/end differ from the
            # standard term dates.
            if irregular_start_end(schedule.term, section):
                section_data["cc_display_dates"] = True
                section_data["is_ended"] = is_ended(request, section.end_date)

        section_data["on_standby"] = (
            section.registration.is_standby_status())

        try:
            section_data["canvas_url"] = section.canvas_course_url
        except Exception:
            # Section has no Canvas course; leave the key unset.
            pass

        # if section.is_primary_section:
        if section.sln:
            try:
                section_data["lib_subj_guide"] =\
                    get_subject_guide_by_section(section)
            except Exception:
                log_exception(logger,
                              'get_subject_guide_by_section', traceback)
                pass

        if section.final_exam:
            final = section_data["final_exam"]
            # MUWM-4728
            final["is_remote"] = section.is_remote
            # MUWM-596 we don't display
            # if section.final_exam.building:
            #     building = get_building_by_code(section.final_exam.building)
            #     if building:
            #         final["longitude"] = building.longitude
            #         final["latitude"] = building.latitude
            #         final["building_name"] = building.name

        # Also backfill the meeting building data
        section_data["has_eos_dates"] = False
        meeting_index = 0
        for meeting in section.meetings:
            mdata = section_data["meetings"][meeting_index]
            # MUWM-4728
            mdata["is_remote"] = section.is_remote
            if meeting.eos_start_date is not None:
                if not section_data["has_eos_dates"]:
                    section_data["has_eos_dates"] = True
                mdata["start_end_same"] = False
                if mdata["eos_start_date"] == mdata["eos_end_date"]:
                    mdata["start_end_same"] = True
            try:
                if not mdata["building_tbd"] and len(mdata["building"]):
                    building = get_building_by_code(mdata["building"])
                    if building is not None:
                        mdata["latitude"] = building.latitude
                        mdata["longitude"] = building.longitude
                        mdata["building_name"] = building.name
                # Hide contact info for instructors with nothing published.
                for instructor in mdata["instructors"]:
                    if (len(instructor["email_addresses"]) == 0 and
                            len(instructor["phones"]) == 0 and
                            len(instructor["voice_mails"]) == 0 and
                            len(instructor["faxes"]) == 0 and
                            len(instructor["touch_dials"]) == 0 and
                            len(instructor["addresses"]) == 0):
                        instructor["whitepages_publish"] = False
                meeting_index += 1
            except IndexError as ex:
                pass

        if section_data["has_eos_dates"]:
            if not json_data["has_eos_dates"]:
                json_data["has_eos_dates"] = True
            section_data["meetings"] = sort_pce_section_meetings(
                section_data["meetings"])

    # MUWM-443
    json_data["sections"] = sorted(json_data["sections"],
                                   key=itemgetter('curriculum_abbr',
                                                  'course_number',
                                                  'section_id',
                                                  ))
    # add section index
    index = 0
    for section in json_data["sections"]:
        section["index"] = index
        index = index + 1
    return json_data
def irregular_start_end(term, section):
    """Return True when the section's start/end dates differ from the
    standard dates of the (sub-)term it belongs to.

    Sections missing either date are never considered irregular.
    """
    if section.start_date is None or section.end_date is None:
        return False
    if section.is_summer_a_term():
        expected = (term.first_day_quarter, term.aterm_last_date)
    elif section.is_summer_b_term():
        expected = (term.bterm_first_date, term.last_day_instruction)
    else:
        # MUWM-4863: regular sections run through the last final exam date.
        expected = (term.first_day_quarter, term.last_final_exam_date)
    return (section.start_date, section.end_date) != expected
def sort_pce_section_meetings(section_meetings_json_data):
    """Return the meeting dicts ordered by their 'eos_start_date'.

    Each meeting dict is also stamped in place with its zero-based
    position in the sorted order under the "index" key.
    """
    ordered = sorted(section_meetings_json_data,
                     key=lambda meeting: meeting['eos_start_date'])
    for position, meeting in enumerate(ordered):
        meeting["index"] = position
    return ordered
046ae1fbe9ae09acf4d4b5f8c780577d26fe70a6 | 8,175 | py | Python | pipeline/feature-classification/exp-3/selection-extraction/rf/pipeline_classifier_adc.py | DoraSzasz/mp-mri-prostate | bd420534b4b5c464e5bbb4a07eabdc8724831f8a | [
"MIT"
] | 12 | 2017-07-31T07:19:36.000Z | 2019-12-15T11:54:57.000Z | pipeline/feature-classification/exp-3/selection-extraction/rf/pipeline_classifier_adc.py | DoraSzasz/mp-mri-prostate | bd420534b4b5c464e5bbb4a07eabdc8724831f8a | [
"MIT"
] | 2 | 2019-04-27T12:07:07.000Z | 2020-09-25T15:00:19.000Z | pipeline/feature-classification/exp-3/selection-extraction/rf/pipeline_classifier_adc.py | I2Cvb/mp-mri-prostate | bd420534b4b5c464e5bbb4a07eabdc8724831f8a | [
"MIT"
] | 6 | 2017-07-28T04:46:45.000Z | 2020-10-19T06:56:52.000Z | """This pipeline is intended to make the classification of ADC modality
features."""
from __future__ import division
import os
import numpy as np
from sklearn.externals import joblib
from sklearn.preprocessing import label_binarize
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from protoclass.data_management import GTModality
# Define the path where the patients are stored
path_patients = '/data/prostate/experiments'
# Define the path where the features have been extracted
path_features = '/data/prostate/extraction/mp-mri-prostate'
# Define a list of the path where the feature are kept
adc_features = ['dct-adc', 'edge-adc/kirsch', 'edge-adc/laplacian',
'edge-adc/prewitt', 'edge-adc/scharr', 'edge-adc/sobel',
'gabor-adc', 'harlick-adc', 'ise-adc', 'lbp-adc', 'lbp-adc',
'phase-congruency-adc']
# Define the extension of each features
ext_features = ['_dct_adc.npy', '_edge_adc.npy', '_edge_adc.npy',
'_edge_adc.npy', '_edge_adc.npy', '_edge_adc.npy',
'_gabor_adc.npy', '_haralick_adc.npy', '_ise_adc.npy',
'_lbp_8_1_adc.npy', '_lbp_16_2_adc.npy',
'_phase_congruency_adc.npy']
# Define the path of the balanced data
path_balanced = '/data/prostate/balanced/mp-mri-prostate/exp-3/iht'
ext_balanced = '_adc.npz'
# Define the path of the ground for the prostate
path_gt = ['GT_inv/prostate', 'GT_inv/pz', 'GT_inv/cg', 'GT_inv/cap']
# Define the label of the ground-truth which will be provided
label_gt = ['prostate', 'pz', 'cg', 'cap']
# Generate the different path to be later treated
path_patients_list_gt = []
# Create the generator
id_patient_list = [name for name in os.listdir(path_patients)
if os.path.isdir(os.path.join(path_patients, name))]
id_patient_list = sorted(id_patient_list)
for id_patient in id_patient_list:
# Append for the GT data - Note that we need a list of gt path
path_patients_list_gt.append([os.path.join(path_patients, id_patient, gt)
for gt in path_gt])
# Load all the data once. Splitting into training and testing will be done at
# the cross-validation time
data = []
data_bal = []
label = []
label_bal = []
for idx_pat in range(len(id_patient_list)):
print 'Read patient {}'.format(id_patient_list[idx_pat])
# For each patient we nee to load the different feature
patient_data = []
for idx_feat in range(len(adc_features)):
# Create the path to the patient file
filename_feature = (id_patient_list[idx_pat].lower().replace(' ', '_') +
ext_features[idx_feat])
path_data = os.path.join(path_features, adc_features[idx_feat],
filename_feature)
single_feature_data = np.load(path_data)
# Check if this is only one dimension data
if len(single_feature_data.shape) == 1:
single_feature_data = np.atleast_2d(single_feature_data).T
patient_data.append(single_feature_data)
# Concatenate the data in a single array
patient_data = np.concatenate(patient_data, axis=1)
print 'Imbalanced feature loaded ...'
# Load the dataset from each balancing method
pat_chg = (id_patient_list[idx_pat].lower().replace(' ', '_') +
ext_balanced)
filename = os.path.join(path_balanced, pat_chg)
npz_file = np.load(filename)
data_bal.append(npz_file['data_resampled'])
label_bal.append(npz_file['label_resampled'])
print 'Balanced data loaded ...'
# Create the corresponding ground-truth
gt_mod = GTModality()
gt_mod.read_data_from_path(label_gt,
path_patients_list_gt[idx_pat])
print 'Read the GT data for the current patient ...'
# Concatenate the training data
data.append(patient_data)
# Extract the corresponding ground-truth for the testing data
# Get the index corresponding to the ground-truth
roi_prostate = gt_mod.extract_gt_data('prostate', output_type='index')
# Get the label of the gt only for the prostate ROI
gt_cap = gt_mod.extract_gt_data('cap', output_type='data')
label.append(gt_cap[roi_prostate])
print 'Data and label extracted for the current patient ...'
# Create all the necessary model only once
crf_cv = []
# Go for LOPO cross-validation
for idx_lopo_cv in range(len(id_patient_list)):
# Display some information about the LOPO-CV
print 'Round #{} of the LOPO-CV'.format(idx_lopo_cv + 1)
# Get the testing data
testing_data = data[idx_lopo_cv]
testing_label = np.ravel(label_binarize(label[idx_lopo_cv], [0, 255]))
print 'Create the testing set ...'
# Create the training data and label
# We need to take the balanced data
training_data = [arr for idx_arr, arr in enumerate(data_bal)
if idx_arr != idx_lopo_cv]
training_label = [arr for idx_arr, arr in enumerate(label_bal)
if idx_arr != idx_lopo_cv]
# Concatenate the data
training_data = np.vstack(training_data)
training_label = np.ravel(label_binarize(
np.hstack(training_label).astype(int), [0, 255]))
print 'Create the training set ...'
# Perform the classification for the current cv and the
# given configuration
crf = RandomForestClassifier(n_estimators=100, n_jobs=-1)
crf_cv.append(crf.fit(training_data, training_label))
percentiles = [1., 2., 5., 10., 15., 20., 30.]
results_p = []
feat_imp_p = []
for p in percentiles:
print 'Computing for percentile: {}'.format(p)
results_cv = []
feat_imp_cv = []
# Go for LOPO cross-validation
for idx_lopo_cv in range(len(id_patient_list)):
# Display some information about the LOPO-CV
print 'Round #{} of the LOPO-CV'.format(idx_lopo_cv + 1)
# Get the testing data
testing_data = data[idx_lopo_cv]
testing_label = np.ravel(label_binarize(label[idx_lopo_cv], [0, 255]))
print 'Create the testing set ...'
# Create the training data and label
# We need to take the balanced data
training_data = [arr for idx_arr, arr in enumerate(data_bal)
if idx_arr != idx_lopo_cv]
training_label = [arr for idx_arr, arr in enumerate(label_bal)
if idx_arr != idx_lopo_cv]
# Concatenate the data
training_data = np.vstack(training_data)
training_label = np.ravel(label_binarize(
np.hstack(training_label).astype(int), [0, 255]))
print 'Create the training set ...'
# Compute the threshold that is needed
# Get the feature importance for this iteration
feat_imp = crf_cv[idx_lopo_cv].feature_importances_
# Sort the importance in decreasing order
feat_imp = np.sort(feat_imp)[::-1]
threshold = feat_imp[int(feat_imp.size * p / 100.)]
# Store which features have been selected
feat_imp_cv.append(np.flatnonzero(crf_cv[
idx_lopo_cv].feature_importances_ > threshold))
# Perform the classification for the current cv and the
# given configuration
# The random forest has been already fitted
sel = SelectFromModel(crf_cv[idx_lopo_cv], threshold=threshold,
prefit=True)
training_data = sel.transform(training_data)
testing_data = sel.transform(testing_data)
crf2 = RandomForestClassifier(n_estimators=100, n_jobs=-1)
pred_prob = crf2.fit(training_data,
training_label).predict_proba(testing_data)
results_cv.append([pred_prob, crf2.classes_])
results_p.append(results_cv)
feat_imp_p.append(feat_imp_cv)
# Save the information
path_store = '/data/prostate/results/mp-mri-prostate/exp-3/selection-extraction/rf/adc'
if not os.path.exists(path_store):
os.makedirs(path_store)
joblib.dump(results_p, os.path.join(path_store,
'results.pkl'))
joblib.dump(feat_imp_p, os.path.join(path_store,
'feat_sel.pkl'))
| 40.671642 | 87 | 0.672049 |
b15a546ba7f7aa0f11dc53339ffc30137176c644 | 16,676 | py | Python | tensorflow_probability/python/distributions/finite_discrete_test.py | timudk/probability | 8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/finite_discrete_test.py | timudk/probability | 8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/finite_discrete_test.py | timudk/probability | 8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for FiniteDiscrete distribution classs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.distributions import finite_discrete
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
@test_util.run_all_in_graph_and_eager_modes
class FiniteDiscreteTest(object):
  """Shared helpers for the FiniteDiscrete test suites.

  Concrete subclasses set `use_static_shape` to run every test with
  either statically known or dynamic tensor shapes.
  """

  def _build_tensor(self, ndarray):
    """Wraps `ndarray` in a placeholder, hiding its shape when dynamic."""
    # Enforce parameterized dtype and static/dynamic testing.
    values = np.asarray(ndarray)
    static_shape = values.shape if self.use_static_shape else None
    return tf.compat.v1.placeholder_with_default(
        input=values, shape=static_shape)

  def _get_shape(self, tensor):
    """Returns the static shape, or a dynamic shape tensor."""
    if self.use_static_shape:
      return tensor.shape
    return tf.shape(input=tensor)
class FiniteDiscreteValidateArgsTest(FiniteDiscreteTest):
  """Checks that invalid constructor arguments raise with validate_args=True."""

  def testInequalLastDimRaises(self):
    # outcomes and probs must agree on their last dimension.
    outcomes = self._build_tensor([1.0, 2.0])
    probs = self._build_tensor([0.25, 0.25, 0.5])
    with self.assertRaisesWithPredicateMatch(
        Exception, 'Last dimension of outcomes and probs must be equal size'):
      dist = finite_discrete.FiniteDiscrete(
          outcomes, probs=probs, validate_args=True)
      # Evaluation forces dynamic-shape assertions to run.
      self.evaluate(dist.outcomes)

  def testRankOfOutcomesLargerThanOneRaises(self):
    # outcomes must be a vector, not a matrix.
    outcomes = self._build_tensor([[1.0, 2.0], [3.0, 4.0]])
    probs = self._build_tensor([0.5, 0.5])
    with self.assertRaisesWithPredicateMatch(Exception,
                                             'Rank of outcomes must be 1.'):
      dist = finite_discrete.FiniteDiscrete(
          outcomes, probs=probs, validate_args=True)
      self.evaluate(dist.outcomes)

  def testSizeOfOutcomesIsZeroRaises(self):
    # An empty outcome set is rejected.
    outcomes = self._build_tensor([])
    probs = self._build_tensor([])
    with self.assertRaisesWithPredicateMatch(
        Exception, 'Size of outcomes must be greater than 0.'):
      dist = finite_discrete.FiniteDiscrete(
          outcomes, probs=probs, validate_args=True)
      self.evaluate(dist.outcomes)

  def testOutcomesNotStrictlyIncreasingRaises(self):
    # Duplicate outcome values violate the strictly-increasing requirement.
    outcomes = self._build_tensor([1.0, 1.0, 2.0, 2.0])
    probs = self._build_tensor([0.25, 0.25, 0.25, 0.25])
    with self.assertRaisesWithPredicateMatch(
        Exception, 'outcomes is not strictly increasing.'):
      dist = finite_discrete.FiniteDiscrete(
          outcomes, probs=probs, validate_args=True)
      self.evaluate(dist.outcomes)
class FiniteDiscreteScalarTest(FiniteDiscreteTest):
  """Tests FiniteDiscrete when `logits` or `probs` is a 1-D tensor.

  With a 1-D parameter vector the distribution has an empty batch shape,
  so statistics and probabilities are scalars (or shaped purely by the
  sample input).
  """

  def testShape(self):
    outcomes = self._build_tensor([0.0, 0.2, 0.3, 0.5])
    logits = self._build_tensor([-0.1, 0.0, 0.1, 0.2])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, logits=logits, validate_args=True)
    # Static batch shape is only known in the static-shape configuration.
    if self.use_static_shape:
      self.assertAllEqual([], dist.batch_shape)
    self.assertAllEqual([], dist.batch_shape_tensor())
    self.assertAllEqual([], dist.event_shape)
    self.assertAllEqual([], dist.event_shape_tensor())

  def testMean(self):
    # E[X] = 0.5*1 + 0.5*2 = 1.5
    outcomes = self._build_tensor([1.0, 2.0])
    probs = self._build_tensor([0.5, 0.5])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    mean = dist.mean()
    self.assertAllEqual((), self._get_shape(mean))
    self.assertAllClose(1.5, mean)

  def testStddevAndVariance(self):
    outcomes = self._build_tensor([1.0, 2.0])
    probs = self._build_tensor([0.5, 0.5])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    stddev = dist.stddev()
    self.assertAllEqual((), self._get_shape(stddev))
    self.assertAllClose(0.5, stddev)
    variance = dist.variance()
    self.assertAllEqual((), self._get_shape(variance))
    self.assertAllClose(0.25, variance)

  def testEntropy(self):
    outcomes = self._build_tensor([1, 2, 3, 4])
    probs = np.array([0.125, 0.125, 0.25, 0.5])
    outcome_probs = self._build_tensor(probs)
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=outcome_probs, validate_args=True)
    entropy = dist.entropy()
    self.assertAllEqual((), self._get_shape(entropy))
    # Shannon entropy: -sum(p * log p).
    self.assertAllClose(np.sum(-probs * np.log(probs)), entropy)

  def testMode(self):
    outcomes = self._build_tensor([1.0, 2.0, 3.0])
    probs = self._build_tensor([0.3, 0.1, 0.6])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    mode = dist.mode()
    self.assertAllEqual((), self._get_shape(mode))
    self.assertAllClose(3.0, mode)

  def testModeWithIntegerOutcomes(self):
    outcomes = self._build_tensor([1, 2, 3])
    probs = self._build_tensor([0.3, 0.1, 0.6])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    mode = dist.mode()
    self.assertAllEqual((), self._get_shape(mode))
    self.assertAllEqual(3, mode)

  def testSample(self):
    outcomes = self._build_tensor([1.0, 2.0])
    probs = self._build_tensor([0.2, 0.8])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    samples = self.evaluate(dist.sample(5000, seed=1234))
    self.assertAllEqual((5000,), self._get_shape(samples))
    # Statistical check: sample moments approximate analytic moments.
    self.assertAllClose(np.mean(samples), dist.mean(), atol=0.1)
    self.assertAllClose(np.std(samples), dist.stddev(), atol=0.1)

  def testSampleWithIntegerOutcomes(self):
    outcomes = self._build_tensor([1, 2])
    probs = self._build_tensor([0.2, 0.8])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    samples = self.evaluate(dist.sample(5000, seed=1234))
    self.assertAllClose(np.mean(samples), dist.mean(), atol=0.1)
    self.assertAllClose(np.std(samples), dist.stddev(), atol=0.1)

  def testPMF(self):
    outcomes = self._build_tensor([1.0, 2.0, 4.0, 8.0])
    probs = self._build_tensor([0.0, 0.1, 0.2, 0.7])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    prob = dist.prob(4.0)
    self.assertAllEqual((), self._get_shape(prob))
    self.assertAllClose(0.2, prob)
    # Outcome with zero probability.
    prob = dist.prob(1.0)
    self.assertAllEqual((), self._get_shape(prob))
    self.assertAllClose(0.0, prob)
    # Input that is not in the list of possible outcomes.
    prob = dist.prob(3.0)
    self.assertAllEqual((), self._get_shape(prob))
    self.assertAllClose(0.0, prob)

  def testPMFWithBatchSampleShape(self):
    outcomes = self._build_tensor([1.0, 2.0, 4.0, 8.0])
    probs = self._build_tensor([0.0, 0.1, 0.2, 0.7])
    # Sample shape [5, 1]; the PMF is evaluated element-wise.
    x = self._build_tensor([[1.0], [2.0], [3.0], [4.0], [8.0]])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    prob = dist.prob(x)
    self.assertAllEqual((5, 1), self._get_shape(prob))
    self.assertAllClose([[0.0], [0.1], [0.0], [0.2], [0.7]], prob)

  def testPMFWithIntegerOutcomes(self):
    outcomes = self._build_tensor([1, 2, 4, 8])
    probs = self._build_tensor([0.0, 0.1, 0.2, 0.7])
    x = self._build_tensor([[1], [2], [3], [4], [8]])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    prob = dist.prob(x)
    self.assertAllEqual((5, 1), self._get_shape(prob))
    self.assertAllClose([[0.0], [0.1], [0.0], [0.2], [0.7]], prob)

  def testCDF(self):
    outcomes = self._build_tensor([0.1, 0.2, 0.4, 0.8])
    probs = self._build_tensor([0.0, 0.1, 0.2, 0.7])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    # CDF at 0.4 accumulates probs of outcomes <= 0.4: 0.0 + 0.1 + 0.2.
    cdf = dist.cdf(0.4)
    self.assertAllEqual((), self._get_shape(cdf))
    self.assertAllClose(0.3, cdf)

  def testCDFWithBatchSampleShape(self):
    outcomes = self._build_tensor([0.1, 0.2, 0.4, 0.8])
    probs = self._build_tensor([0.0, 0.1, 0.2, 0.7])
    # Includes values just below the first outcome and just above the last.
    x = self._build_tensor([[0.0999, 0.1], [0.2, 0.4], [0.8, 0.8001]])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    cdf = dist.cdf(x)
    self.assertAllEqual((3, 2), self._get_shape(cdf))
    self.assertAllClose([[0.0, 0.0], [0.1, 0.3], [1.0, 1.0]], cdf)

  def testCDFWithIntegerOutcomes(self):
    outcomes = self._build_tensor([1, 2, 4, 8])
    probs = self._build_tensor([0.0, 0.1, 0.2, 0.7])
    x = self._build_tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    cdf = dist.cdf(x)
    self.assertAllEqual((10,), self._get_shape(cdf))
    self.assertAllClose([0.0, 0.0, 0.1, 0.1, 0.3, 0.3, 0.3, 0.3, 1.0, 1.0], cdf)

  def testCDFWithDifferentAtol(self):
    # `atol` controls how close a query must be to an outcome to match it.
    outcomes = self._build_tensor([0.1, 0.2, 0.4, 0.8])
    probs = self._build_tensor([0.0, 0.1, 0.2, 0.7])
    x = self._build_tensor([[0.095, 0.095], [0.395, 0.395]])
    dist1 = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, atol=0.001, validate_args=True)
    # Tight tolerance: 0.095 / 0.395 do not match 0.1 / 0.4.
    cdf = dist1.cdf(x)
    self.assertAllEqual((2, 2), self._get_shape(cdf))
    self.assertAllClose([[0.0, 0.0], [0.1, 0.1]], cdf)
    dist2 = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, atol=0.01, validate_args=True)
    # Loose tolerance: the same queries now match the nearby outcomes.
    cdf = dist2.cdf(x)
    self.assertAllEqual((2, 2), self._get_shape(cdf))
    self.assertAllClose([[0.0, 0.0], [0.3, 0.3]], cdf)
class FiniteDiscreteVectorTest(FiniteDiscreteTest):
  """Tests FiniteDiscrete when `logits` or `probs` is a tensor with rank >= 2.

  The leading dimensions of the parameter tensor form the batch shape;
  statistics are computed per batch member.
  """

  def testShapes(self):
    outcomes = [0.0, 0.2, 0.3, 0.5]
    outcomes_tensor = self._build_tensor(outcomes)
    for batch_shape in ([1], [2], [3, 4, 5]):
      logits = self._build_tensor(
          np.random.uniform(-1, 1, size=list(batch_shape) + [len(outcomes)]))
      dist = finite_discrete.FiniteDiscrete(
          outcomes_tensor, logits=logits, validate_args=True)
      # Static batch shape is only known in the static-shape configuration.
      if self.use_static_shape:
        self.assertAllEqual(batch_shape, dist.batch_shape)
      self.assertAllEqual(batch_shape, dist.batch_shape_tensor())
      self.assertAllEqual([], dist.event_shape)
      self.assertAllEqual([], dist.event_shape_tensor())

  def testMean(self):
    outcomes = self._build_tensor([1.0, 2.0])
    probs = self._build_tensor([[0.5, 0.5], [0.2, 0.8]])
    expected_means = [1.5, 1.8]
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    mean = dist.mean()
    self.assertAllEqual((2,), self._get_shape(mean))
    self.assertAllClose(expected_means, mean)

  def testStddevAndVariance(self):
    outcomes = self._build_tensor([1.0, 2.0])
    probs = self._build_tensor([[0.5, 0.5], [0.2, 0.8]])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    stddev = dist.stddev()
    self.assertAllEqual((2,), self._get_shape(stddev))
    self.assertAllClose([0.5, 0.4], stddev)
    variance = dist.variance()
    self.assertAllEqual((2,), self._get_shape(variance))
    self.assertAllClose([0.25, 0.16], variance)

  def testMode(self):
    outcomes = self._build_tensor([1.0, 2.0, 3.0])
    probs = self._build_tensor([[0.3, 0.1, 0.6], [0.5, 0.4, 0.1],
                                [0.3, 0.5, 0.2]])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    # One mode per batch member (argmax of each probability row).
    mode = dist.mode()
    self.assertAllEqual((3,), self._get_shape(mode))
    self.assertAllClose([3.0, 1.0, 2.0], mode)

  def testEntropy(self):
    outcomes = self._build_tensor([1, 2, 3, 4])
    probs = np.array([[0.125, 0.125, 0.25, 0.5], [0.25, 0.25, 0.25, 0.25]])
    outcome_probs = self._build_tensor(probs)
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=outcome_probs, validate_args=True)
    entropy = dist.entropy()
    self.assertAllEqual((2,), self._get_shape(entropy))
    # Per-batch Shannon entropy, reduced over the outcome axis.
    self.assertAllClose(np.sum(-probs * np.log(probs), axis=1), entropy)

  def testSample(self):
    outcomes = self._build_tensor([1.0, 2.0])
    probs = self._build_tensor([[0.2, 0.8], [0.8, 0.2]])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    samples = self.evaluate(dist.sample(5000, seed=1234))
    self.assertAllEqual((5000, 2), self._get_shape(samples))
    # Statistical check: per-batch sample moments approximate the analytic
    # moments.
    self.assertAllClose(np.mean(samples, axis=0), dist.mean(), atol=0.1)
    self.assertAllClose(np.std(samples, axis=0), dist.stddev(), atol=0.1)

  def testPMF(self):
    outcomes = self._build_tensor([1.0, 2.0, 4.0, 8.0])
    probs = self._build_tensor([[0.0, 0.1, 0.2, 0.7], [0.5, 0.3, 0.2, 0.0]])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    # A scalar query is broadcast across the batch.
    prob = dist.prob(8.0)
    self.assertAllEqual((2,), self._get_shape(prob))
    self.assertAllClose([0.7, 0.0], prob)

  def testPMFWithBatchSampleShape(self):
    outcomes = self._build_tensor([1.0, 2.0, 4.0, 8.0])
    probs = self._build_tensor([[0.0, 0.1, 0.2, 0.7], [0.5, 0.3, 0.2, 0.0]])
    # Sample shape [5, 1] broadcasts against batch shape [2] -> [5, 2].
    x = self._build_tensor([[1.0], [2.0], [3.0], [4.0], [8.0]])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    prob = dist.prob(x)
    self.assertAllEqual((5, 2), self._get_shape(prob))
    self.assertAllClose(
        [[0.0, 0.5], [0.1, 0.3], [0.0, 0.0], [0.2, 0.2], [0.7, 0.0]], prob)

  def testCDF(self):
    outcomes = self._build_tensor([0.1, 0.2, 0.4, 0.8])
    probs = self._build_tensor([[0.0, 0.1, 0.2, 0.7], [0.5, 0.3, 0.2, 0.0]])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    cdf = dist.cdf(0.4)
    self.assertAllEqual((2,), self._get_shape(cdf))
    self.assertAllClose([0.3, 1.0], cdf)

  def testCDFWithBatchSampleShape(self):
    outcomes = self._build_tensor([0.1, 0.2, 0.4, 0.8])
    probs = self._build_tensor([[0.0, 0.1, 0.2, 0.7], [0.5, 0.3, 0.2, 0.0]])
    # Includes values just outside the outcome range on both sides.
    x = self._build_tensor([[0.0999, 0.0999], [0.1, 0.1], [0.2, 0.2],
                            [0.4, 0.4], [0.8, 0.8], [0.8001, 0.8001]])
    dist = finite_discrete.FiniteDiscrete(
        outcomes, probs=probs, validate_args=True)
    cdf = dist.cdf(x)
    self.assertAllEqual((6, 2), self._get_shape(cdf))
    self.assertAllClose([[0.0, 0.0], [0.0, 0.5], [0.1, 0.8], [0.3, 1.0],
                         [1.0, 1.0], [1.0, 1.0]], cdf)

  def testParamTensorFromLogits(self):
    outcomes = self._build_tensor([0.1, 0.2, 0.4])
    x = tf.constant([-1., 0.5, 1.])
    d = finite_discrete.FiniteDiscrete(outcomes, logits=x, validate_args=True)
    # logits round-trip unchanged; probs are the softmax of the logits.
    self.assertAllClose(
        *self.evaluate([x, d.logits_parameter()]),
        atol=0, rtol=1e-4)
    self.assertAllClose(
        *self.evaluate([tf.nn.softmax(x), d.probs_parameter()]),
        atol=0, rtol=1e-4)

  def testParamTensorFromProbs(self):
    outcomes = self._build_tensor([0.1, 0.2, 0.4])
    x = tf.constant([0.1, 0.5, 0.4])
    d = finite_discrete.FiniteDiscrete(outcomes, probs=x, validate_args=True)
    # probs round-trip unchanged; logits are the log of the probs.
    self.assertAllClose(
        *self.evaluate([tf.math.log(x), d.logits_parameter()]),
        atol=0, rtol=1e-4)
    self.assertAllClose(
        *self.evaluate([x, d.probs_parameter()]),
        atol=0, rtol=1e-4)
class FiniteDiscreteValidateArgsStaticShapeTest(FiniteDiscreteValidateArgsTest,
                                                tf.test.TestCase):
  """Runs the validate_args tests with statically known tensor shapes."""
  use_static_shape = True
class FiniteDiscreteValidateArgsDynamicShapeTest(FiniteDiscreteValidateArgsTest,
                                                 tf.test.TestCase):
  """Runs the validate_args tests with dynamic (unknown) tensor shapes."""
  use_static_shape = False
class FiniteDiscreteScalarStaticShapeTest(FiniteDiscreteScalarTest,
                                          tf.test.TestCase):
  """Runs the scalar-batch tests with statically known tensor shapes."""
  use_static_shape = True
class FiniteDiscreteScalarDynamicShapeTest(FiniteDiscreteScalarTest,
                                           tf.test.TestCase):
  """Runs the scalar-batch tests with dynamic (unknown) tensor shapes."""
  use_static_shape = False
class FiniteDiscreteVectorStaticShapeTest(FiniteDiscreteVectorTest,
                                          tf.test.TestCase):
  """Runs the batched tests with statically known tensor shapes."""
  use_static_shape = True
class FiniteDiscreteVectorDynamicShapeTest(FiniteDiscreteVectorTest,
                                           tf.test.TestCase):
  """Runs the batched tests with dynamic (unknown) tensor shapes."""
  use_static_shape = False
# Run every test case in this module when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 40.872549 | 95 | 0.656332 |
8095b07a541d997e0bfd625379d33eb2a72bbe57 | 5,156 | py | Python | IRIS_data_download/IRIS_download_support/obspy/io/gse2/paz.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 2 | 2020-03-05T01:03:01.000Z | 2020-12-17T05:04:07.000Z | IRIS_data_download/IRIS_download_support/obspy/io/gse2/paz.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 4 | 2021-03-31T19:25:55.000Z | 2021-12-13T20:32:46.000Z | IRIS_data_download/IRIS_download_support/obspy/io/gse2/paz.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 2 | 2020-09-08T19:33:40.000Z | 2021-04-05T09:47:50.000Z | #!/usr/bin/env python
# ------------------------------------------------------------------
# Filename: paz.py
# Purpose: Python routines for reading GSE poles and zero files
# Author: Moritz Beyreuther
# Email: moritz.beyreuther@geophysik.uni-muenchen.de
#
# Copyright (C) 2008-2012 Moritz Beyreuther
# --------------------------------------------------------------------
"""
Python routines for reading GSE pole and zero (PAZ) files.
The read in PAZ information can be used with
:mod:`~obspy.signal` for instrument correction.
:copyright:
The ObsPy Development Team (devs@obspy.org)
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
from future.utils import native_str
import doctest
import numpy as np
from obspy.core import AttribDict
def read_paz(paz_file):
    '''
    Read GSE PAZ / Calibration file format and returns poles, zeros and the
    seismometer_gain.

    Do not use this function in connection with the ObsPy instrument
    simulation, the A0_normalization_factor might be set wrongly. Use
    :func:`~obspy.io.gse2.libgse2.attach_paz` instead.

    :param paz_file: path to a GSE PAZ file, or an open file-like object.
    :return: tuple ``(poles, zeros, seismometer_gain)``; poles and zeros
        are lists of complex numbers.
    :raises NameError: if the file is not a CAL1 header or not of PAZ type.

    >>> import io
    >>> f = io.StringIO(
    ... """CAL1 RJOB   LE-3D    Z  M24    PAZ 010824 0001
    ... 2
    ... -4.39823 4.48709
    ... -4.39823 -4.48709
    ... 3
    ... 0.0 0.0
    ... 0.0 0.0
    ... 0.0 0.0
    ... 0.4""")
    >>> p, z, k = read_paz(f)
    >>> print('%.4f %.4f %.4f' % (p[0].real, z[0].real, k))
    -4.3982 0.0000 0.4000
    '''
    # Accept either a filename/path or an already opened file-like object.
    # Duck typing replaces the previous str/native_str isinstance check and
    # also accepts pathlib.Path objects.
    if hasattr(paz_file, 'readlines'):
        paz = paz_file.readlines()
    else:
        with open(paz_file, 'rt') as fh:
            paz = fh.readlines()
    if paz[0][0:4] != 'CAL1':
        raise NameError("Unknown GSE PAZ format %s" % paz[0][0:4])
    if paz[0][31:34] != 'PAZ':
        raise NameError("%s type is not known" % paz[0][31:34])

    def _parse_complex(line):
        # Values are normally whitespace separated; fall back to the
        # fixed-width 8-character columns produced by some writers when the
        # real and imaginary parts run together.
        try:
            return complex(*[float(n) for n in line.split()])
        except ValueError:
            return complex(float(line[:8]), float(line[8:]))

    ind = 1
    npoles = int(paz[ind])
    poles = [_parse_complex(paz[ind + 1 + i]) for i in range(npoles)]
    # Advance past the count line plus the pole lines. Deriving the offset
    # from the count (instead of the loop variable, as before) also works
    # when npoles == 0, which previously raised a NameError.
    ind += npoles + 1
    nzeros = int(paz[ind])
    zeros = [_parse_complex(paz[ind + 1 + i]) for i in range(nzeros)]
    ind += nzeros + 1
    # in the observatory this is the seismometer gain [muVolt/nm/s]
    # the A0_normalization_factor is hardcoded to 1.0
    seismometer_gain = float(paz[ind])
    return poles, zeros, seismometer_gain
def attach_paz(tr, paz_file):
    '''
    Attach tr.stats.paz AttribDict to trace from GSE2 paz_file

    This is experimental code, nevertheless it might be useful. It
    makes several assumption on the gse2 paz format which are valid for the
    geophysical observatory in Fuerstenfeldbruck but might be wrong in
    other cases.

    Attaches to a trace a paz AttribDict containing poles zeros and gain.
    The A0_normalization_factor is set to 1.0.

    :param tr: An ObsPy trace object containing the calib and gse2 calper
        attributes
    :param paz_file: path to pazfile or file pointer

    >>> from obspy.core import Trace
    >>> import io
    >>> tr = Trace(header={'calib': .094856, 'gse2': {'calper': 1}})
    >>> f = io.StringIO(
    ... """CAL1 RJOB   LE-3D    Z  M24    PAZ 010824 0001
    ... 2
    ... -4.39823 4.48709
    ... -4.39823 -4.48709
    ... 3
    ... 0.0 0.0
    ... 0.0 0.0
    ... 0.0 0.0
    ... 0.4""")
    >>> attach_paz(tr, f)
    >>> print(round(tr.stats.paz.sensitivity / 10E3) * 10E3)
    671140000.0
    '''
    poles, zeros, seismometer_gain = read_paz(paz_file)
    # remove zero at 0,0j to undo integration in GSE PAZ
    # (only the first matching zero is removed; raise if none is found)
    for i, zero in enumerate(list(zeros)):
        if zero == complex(0, 0j):
            zeros.pop(i)
            break
    else:
        raise Exception("Could not remove (0,0j) zero to undo GSE integration")
    # ftp://www.orfeus-eu.org/pub/software/conversion/GSE_UTI/gse2001.pdf
    # page 3
    # calib is in [nm/count] at the calibration period calper [s].
    calibration = tr.stats.calib * 2 * np.pi / tr.stats.gse2.calper
    # fill up ObsPy Poles and Zeros AttribDict
    tr.stats.paz = AttribDict()
    # convert seismometer gain from [muVolt/nm/s] to [Volt/m/s]
    tr.stats.paz.seismometer_gain = seismometer_gain * 1e3
    # convert digitizer gain [count/muVolt] to [count/Volt]
    tr.stats.paz.digitizer_gain = 1e6 / calibration
    tr.stats.paz.poles = poles
    tr.stats.paz.zeros = zeros
    # overall sensitivity [count/(m/s)] = digitizer gain * seismometer gain
    tr.stats.paz.sensitivity = tr.stats.paz.digitizer_gain * \
        tr.stats.paz.seismometer_gain
    # A0_normalization_factor convention for gse2 paz in Observatory in FFB
    tr.stats.paz.gain = 1.0
# Run the embedded doctests when executed as a script.
if __name__ == '__main__':
    doctest.testmod(exclude_empty=True)
| 32.024845 | 79 | 0.592126 |
458d7a13f500718b6e96a4c61476bd6821606582 | 45,554 | py | Python | utils/ops.py | gumbernator/Mongolian-ALPR | e6753c6687e5974873135249e17627891a07c295 | [
"Apache-2.0"
] | 10 | 2020-01-12T01:05:32.000Z | 2021-03-04T07:25:48.000Z | utils/ops.py | gumbernator/Mongolian-ALPR | e6753c6687e5974873135249e17627891a07c295 | [
"Apache-2.0"
] | 1 | 2020-02-17T13:23:32.000Z | 2020-06-22T09:47:14.000Z | utils/ops.py | gumbernator/Mongolian-ALPR | e6753c6687e5974873135249e17627891a07c295 | [
"Apache-2.0"
] | 4 | 2019-12-25T21:07:25.000Z | 2022-01-06T08:13:58.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A module for helper tensorflow ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import six
from six.moves import range
from six.moves import zip
import tensorflow as tf
from core import standard_fields as fields
from utils import shape_utils
from utils import spatial_transform_ops as spatial_ops
from utils import static_shape
matmul_crop_and_resize = spatial_ops.matmul_crop_and_resize
multilevel_roi_align = spatial_ops.multilevel_roi_align
native_crop_and_resize = spatial_ops.native_crop_and_resize
def expanded_shape(orig_shape, start_dim, num_dims):
  """Inserts multiple ones into a shape vector.

  Inserts an all-1 vector of length num_dims at position start_dim into a
  shape. Can be combined with tf.reshape to generalize tf.expand_dims.

  Args:
    orig_shape: the shape into which the all-1 vector is added (int32 vector)
    start_dim: insertion position (int scalar)
    num_dims: length of the inserted all-1 vector (int scalar)

  Returns:
    An int32 vector of length tf.size(orig_shape) + num_dims.
  """
  with tf.name_scope('ExpandedShape'):
    # Promote the scalar insertion position to a rank-1 tensor so it can be
    # used as a slice boundary.
    split_point = tf.expand_dims(start_dim, 0)
    leading = tf.slice(orig_shape, [0], split_point)
    trailing = tf.slice(orig_shape, split_point, [-1])
    ones_block = tf.ones(tf.reshape(num_dims, [1]), dtype=tf.int32)
    return tf.concat([leading, ones_block, trailing], 0)
def normalized_to_image_coordinates(normalized_boxes, image_shape,
                                    parallel_iterations=32):
  """Converts a batch of boxes from normal to image coordinates.

  Args:
    normalized_boxes: a tensor of shape [None, num_boxes, 4] in
      normalized coordinates. The dtype of this tensor must support tf.mul.
    image_shape: a tensor of shape [4] containing the image shape, with same
      dtype as `normalized_boxes`.
    parallel_iterations: parallelism for the map_fn op.

  Returns:
    absolute_boxes: a tensor of shape [None, num_boxes, 4] containing
      the boxes in image coordinates, with same
      dtype as `normalized_boxes`.
  """
  # image_shape is assumed [batch, height, width, ...]: index 2 is width
  # (x scale) and index 1 is height (y scale).
  x_scale = tf.cast(image_shape[2], normalized_boxes.dtype)
  y_scale = tf.cast(image_shape[1], normalized_boxes.dtype)
  def _to_absolute_coordinates(normalized_boxes):
    # Boxes are in [ymin, xmin, ymax, xmax] order; scale each coordinate
    # by the matching image dimension.
    y_min, x_min, y_max, x_max = tf.split(
        value=normalized_boxes, num_or_size_splits=4, axis=1)
    y_min = y_scale * y_min
    y_max = y_scale * y_max
    x_min = x_scale * x_min
    x_max = x_scale * x_max
    scaled_boxes = tf.concat([y_min, x_min, y_max, x_max], 1)
    return scaled_boxes
  # Map over the batch dimension; the closure captures the scale factors.
  absolute_boxes = shape_utils.static_or_dynamic_map_fn(
      _to_absolute_coordinates,
      elems=(normalized_boxes),
      dtype=normalized_boxes.dtype,
      parallel_iterations=parallel_iterations,
      back_prop=True)
  return absolute_boxes
def meshgrid(x, y):
  """Tiles the contents of x and y into a pair of grids.

  Multidimensional analog of numpy.meshgrid, giving the same behavior if x and
  y are vectors. Generally, this will give:
    xgrid(i1, ..., i_m, j_1, ..., j_n) = x(j_1, ..., j_n)
    ygrid(i1, ..., i_m, j_1, ..., j_n) = y(i_1, ..., i_m)
  Keep in mind that the order of the arguments and outputs is reverse relative
  to the order of the indices they go into, done for compatibility with numpy.
  The output tensors have the same shapes. Specifically:
    xgrid.get_shape() = y.get_shape().concatenate(x.get_shape())
    ygrid.get_shape() = y.get_shape().concatenate(x.get_shape())

  Args:
    x: A tensor of arbitrary shape and rank. xgrid will contain these values
       varying in its last dimensions.
    y: A tensor of arbitrary shape and rank. ygrid will contain these values
       varying in its first dimensions.

  Returns:
    A tuple of tensors (xgrid, ygrid).
  """
  with tf.name_scope('Meshgrid'):
    x = tf.convert_to_tensor(x)
    y = tf.convert_to_tensor(y)
    # Insert all-1 dimensions so that each tensor can be tiled across the
    # other's dimensions: x gets leading 1s, y gets trailing 1s.
    shape_for_x = expanded_shape(tf.shape(x), 0, tf.rank(y))
    shape_for_y = expanded_shape(tf.shape(y), tf.rank(y), tf.rank(x))
    xgrid = tf.tile(tf.reshape(x, shape_for_x), shape_for_y)
    ygrid = tf.tile(tf.reshape(y, shape_for_y), shape_for_x)
    # Restore the statically known output shape lost through tile/reshape.
    static_shape = y.get_shape().concatenate(x.get_shape())
    xgrid.set_shape(static_shape)
    ygrid.set_shape(static_shape)
    return xgrid, ygrid
def fixed_padding(inputs, kernel_size, rate=1):
  """Pads the input along the spatial dimensions independently of input size.

  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
      Should be a positive integer.
    rate: An integer, rate for atrous convolution.

  Returns:
    output: A tensor of size [batch, height_out, width_out, channels] with the
      input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
  """
  # The effective kernel grows with the atrous rate; total padding is the
  # effective kernel size minus one, split (almost) evenly on both sides.
  effective_kernel = kernel_size + (kernel_size - 1) * (rate - 1)
  total_pad = effective_kernel - 1
  begin_pad = total_pad // 2
  end_pad = total_pad - begin_pad
  return tf.pad(inputs, [[0, 0], [begin_pad, end_pad],
                         [begin_pad, end_pad], [0, 0]])
def pad_to_multiple(tensor, multiple):
  """Returns the tensor zero padded to the specified multiple.
  Appends 0s to the end of the first and second dimension (height and width) of
  the tensor until both dimensions are a multiple of the input argument
  'multiple'. E.g. given an input tensor of shape [1, 3, 5, 1] and an input
  multiple of 4, PadToMultiple will append 0s so that the resulting tensor will
  be of shape [1, 4, 8, 1].
  Args:
    tensor: rank 4 float32 tensor, where
      tensor -> [batch_size, height, width, channels].
    multiple: the multiple to pad to.
  Returns:
    padded_tensor: the tensor zero padded to the specified multiple.
  """
  # Padding to a multiple of one is a no-op.
  if multiple == 1:
    return tensor
  tensor_shape = tensor.get_shape()
  batch_size = static_shape.get_batch_size(tensor_shape)
  tensor_height = static_shape.get_height(tensor_shape)
  tensor_width = static_shape.get_width(tensor_shape)
  tensor_depth = static_shape.get_depth(tensor_shape)
  # For each dimension, prefer the statically known size; fall back to the
  # dynamic shape (tf.shape) only when the static size is unknown (None).
  if batch_size is None:
    batch_size = tf.shape(tensor)[0]
  if tensor_height is None:
    tensor_height = tf.shape(tensor)[1]
    # Dynamic height: round up to the next multiple with tensor ops.
    padded_tensor_height = tf.cast(
        tf.ceil(
            tf.cast(tensor_height, dtype=tf.float32) /
            tf.cast(multiple, dtype=tf.float32)),
        dtype=tf.int32) * multiple
  else:
    # Static height: round up with Python math so the padded size stays a
    # Python int and the output keeps a static shape.
    padded_tensor_height = int(
        math.ceil(float(tensor_height) / multiple) * multiple)
  if tensor_width is None:
    tensor_width = tf.shape(tensor)[2]
    # Dynamic width: same round-up as the dynamic-height case above.
    padded_tensor_width = tf.cast(
        tf.ceil(
            tf.cast(tensor_width, dtype=tf.float32) /
            tf.cast(multiple, dtype=tf.float32)),
        dtype=tf.int32) * multiple
  else:
    padded_tensor_width = int(
        math.ceil(float(tensor_width) / multiple) * multiple)
  if tensor_depth is None:
    tensor_depth = tf.shape(tensor)[3]
  # Use tf.concat instead of tf.pad to preserve static shape
  if padded_tensor_height != tensor_height:
    height_pad = tf.zeros([
        batch_size, padded_tensor_height - tensor_height, tensor_width,
        tensor_depth
    ])
    tensor = tf.concat([tensor, height_pad], 1)
  if padded_tensor_width != tensor_width:
    # Height has already been padded, so the width pad uses the padded height.
    width_pad = tf.zeros([
        batch_size, padded_tensor_height, padded_tensor_width - tensor_width,
        tensor_depth
    ])
    tensor = tf.concat([tensor, width_pad], 2)
  return tensor
def padded_one_hot_encoding(indices, depth, left_pad):
  """Returns a zero padded one-hot tensor.

  This function converts a sparse representation of indices (e.g., [4]) to a
  zero padded one-hot representation (e.g., [0, 0, 0, 0, 1] with depth = 4 and
  left_pad = 1). If `indices` is empty, the result will simply be a tensor of
  shape (0, depth + left_pad). If depth = 0, then this function just returns
  `None`.

  Args:
    indices: an integer tensor of shape [num_indices].
    depth: depth for the one-hot tensor (integer).
    left_pad: number of zeros to left pad the one-hot tensor with (integer).

  Returns:
    padded_onehot: a tensor with shape (num_indices, depth + left_pad). Returns
      `None` if the depth is zero.

  Raises:
    ValueError: if `indices` does not have rank 1 or if `left_pad` or `depth are
      either negative or non-integers.

  TODO(rathodv): add runtime checks for depth and indices.
  """
  if depth < 0 or not isinstance(depth, six.integer_types):
    raise ValueError('`depth` must be a non-negative integer.')
  if left_pad < 0 or not isinstance(left_pad, six.integer_types):
    raise ValueError('`left_pad` must be a non-negative integer.')
  if depth == 0:
    return None

  rank = len(indices.get_shape().as_list())
  if rank != 1:
    raise ValueError('`indices` must have rank 1, but has rank=%s' % rank)

  def _encode():
    # One-hot encode, then prepend `left_pad` zero columns.
    encoding = tf.cast(
        tf.one_hot(tf.cast(indices, tf.int64), depth, on_value=1, off_value=0),
        tf.float32)
    return tf.pad(encoding, [[0, 0], [left_pad, 0]], mode='CONSTANT')

  # Guard against empty `indices`; the false branch yields an empty tensor
  # that reshapes to (0, depth + left_pad).
  result = tf.cond(tf.greater(tf.size(indices), 0), _encode,
                   lambda: tf.zeros((depth + left_pad, 0)))
  return tf.reshape(result, [-1, depth + left_pad])
def dense_to_sparse_boxes(dense_locations, dense_num_boxes, num_classes):
  """Converts bounding boxes from dense to sparse form.

  Args:
    dense_locations: a [max_num_boxes, 4] tensor in which only the first k rows
      are valid bounding box location coordinates, where k is the sum of
      elements in dense_num_boxes.
    dense_num_boxes: a [max_num_classes] tensor indicating the counts of
       various bounding box classes e.g. [1, 0, 0, 2] means that the first
       bounding box is of class 0 and the second and third bounding boxes are
       of class 3. The sum of elements in this tensor is the number of valid
       bounding boxes.
    num_classes: number of classes

  Returns:
    box_locations: a [num_boxes, 4] tensor containing only valid bounding
       boxes (i.e. the first num_boxes rows of dense_locations)
    box_classes: a [num_boxes] tensor containing the classes of each bounding
       box (e.g. dense_num_boxes = [1, 0, 0, 2] => box_classes = [0, 3, 3]
  """
  num_valid = tf.reduce_sum(dense_num_boxes)
  # Keep only the leading num_valid rows of the dense location tensor.
  box_locations = tf.slice(dense_locations, tf.constant([0, 0]),
                           tf.stack([num_valid, 4]))
  # Repeat each class id once per box of that class, then join into one vector.
  repeated_classes = [
      tf.tile([cls], tf.expand_dims(dense_num_boxes[cls], 0))
      for cls in range(num_classes)
  ]
  box_classes = tf.concat(repeated_classes, 0)
  box_locations.set_shape([None, 4])
  return box_locations, box_classes
def indices_to_dense_vector(indices,
                            size,
                            indices_value=1.,
                            default_value=0,
                            dtype=tf.float32):
  """Creates dense vector with indices set to specific value and rest to zeros.

  This function exists because it is unclear if it is safe to use
    tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
  with indices which are not ordered.
  This function accepts a dynamic size (e.g. tf.shape(tensor)[0])

  Args:
    indices: 1d Tensor with integer indices which are to be set to
        indices_values.
    size: scalar with size (integer) of output Tensor.
    indices_value: values of elements specified by indices in the output vector
    default_value: values of other elements in the output vector.
    dtype: data type.

  Returns:
    dense 1D Tensor of shape [size] with indices set to indices_values and the
        rest set to default_value.
  """
  size = tf.cast(size, dtype=tf.int32)
  filled = tf.ones([size], dtype=dtype) * default_value
  updates = tf.ones_like(indices, dtype=dtype) * indices_value
  # With dynamic_stitch, data from the later slot wins on overlapping indices,
  # so `updates` overwrites `filled` at the requested positions.
  return tf.dynamic_stitch(
      [tf.range(size), tf.cast(indices, dtype=tf.int32)], [filled, updates])
def reduce_sum_trailing_dimensions(tensor, ndims):
  """Computes sum across all dimensions following first `ndims` dimensions."""
  trailing_axes = tuple(range(ndims, tensor.shape.ndims))
  return tf.reduce_sum(tensor, axis=trailing_axes)
def retain_groundtruth(tensor_dict, valid_indices):
  """Retains groundtruth by valid indices.
  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_confidences
      fields.InputDataFields.groundtruth_keypoints
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types
      fields.InputDataFields.groundtruth_difficult
    valid_indices: a tensor with valid indices for the box-level groundtruth.
  Returns:
    a dictionary of tensors containing only the groundtruth for valid_indices.
  Raises:
    ValueError: If the shape of valid_indices is invalid.
    ValueError: field fields.InputDataFields.groundtruth_boxes is
      not present in tensor_dict.
  """
  # Accept indices shaped [N] or [N, 1] (e.g. the output of tf.where).
  input_shape = valid_indices.get_shape().as_list()
  if not (len(input_shape) == 1 or
          (len(input_shape) == 2 and input_shape[1] == 1)):
    raise ValueError('The shape of valid_indices is invalid.')
  valid_indices = tf.reshape(valid_indices, [-1])
  valid_dict = {}
  if fields.InputDataFields.groundtruth_boxes in tensor_dict:
    # Prevents reshape failure when num_boxes is 0.
    num_boxes = tf.maximum(tf.shape(
        tensor_dict[fields.InputDataFields.groundtruth_boxes])[0], 1)
    for key in tensor_dict:
      # Box-aligned fields: gather rows directly by valid index.
      if key in [fields.InputDataFields.groundtruth_boxes,
                 fields.InputDataFields.groundtruth_classes,
                 fields.InputDataFields.groundtruth_confidences,
                 fields.InputDataFields.groundtruth_keypoints,
                 fields.InputDataFields.groundtruth_keypoint_visibilities,
                 fields.InputDataFields.groundtruth_instance_masks]:
        valid_dict[key] = tf.gather(tensor_dict[key], valid_indices)
      # Input decoder returns empty tensor when these fields are not provided.
      # Needs to reshape into [num_boxes, -1] for tf.gather() to work.
      elif key in [fields.InputDataFields.groundtruth_is_crowd,
                   fields.InputDataFields.groundtruth_area,
                   fields.InputDataFields.groundtruth_difficult,
                   fields.InputDataFields.groundtruth_label_types]:
        valid_dict[key] = tf.reshape(
            tf.gather(tf.reshape(tensor_dict[key], [num_boxes, -1]),
                      valid_indices), [-1])
      # Fields that are not associated with boxes.
      else:
        valid_dict[key] = tensor_dict[key]
  else:
    raise ValueError('%s not present in input tensor dict.' % (
        fields.InputDataFields.groundtruth_boxes))
  return valid_dict
def retain_groundtruth_with_positive_classes(tensor_dict):
  """Retains only groundtruth with positive class ids.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_confidences
      fields.InputDataFields.groundtruth_keypoints
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types
      fields.InputDataFields.groundtruth_difficult

  Returns:
    a dictionary of tensors containing only the groundtruth with positive
    classes.

  Raises:
    ValueError: If groundtruth_classes tensor is not in tensor_dict.
  """
  if fields.InputDataFields.groundtruth_classes not in tensor_dict:
    raise ValueError('`groundtruth classes` not in tensor_dict.')
  # Keep only entries whose class id is strictly greater than zero.
  positive_indices = tf.where(tf.greater(
      tensor_dict[fields.InputDataFields.groundtruth_classes], 0))
  return retain_groundtruth(tensor_dict, positive_indices)
def replace_nan_groundtruth_label_scores_with_ones(label_scores):
  """Replaces nan label scores with 1.0.

  Args:
    label_scores: a tensor containing object annotation label scores.

  Returns:
    a tensor where NaN label scores have been replaced by ones.
  """
  ones = tf.ones(tf.shape(label_scores))
  return tf.where(tf.is_nan(label_scores), ones, label_scores)
def filter_groundtruth_with_crowd_boxes(tensor_dict):
  """Filters out groundtruth with boxes corresponding to crowd.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_confidences
      fields.InputDataFields.groundtruth_keypoints
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  # If the is_crowd field is absent there is nothing to filter.
  if fields.InputDataFields.groundtruth_is_crowd in tensor_dict:
    keep_indices = tf.where(tf.logical_not(
        tensor_dict[fields.InputDataFields.groundtruth_is_crowd]))
    tensor_dict = retain_groundtruth(tensor_dict, keep_indices)
  return tensor_dict
def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
  """Filters out groundtruth with no bounding boxes.

  Args:
    tensor_dict: a dictionary of following groundtruth tensors -
      fields.InputDataFields.groundtruth_boxes
      fields.InputDataFields.groundtruth_classes
      fields.InputDataFields.groundtruth_confidences
      fields.InputDataFields.groundtruth_keypoints
      fields.InputDataFields.groundtruth_instance_masks
      fields.InputDataFields.groundtruth_is_crowd
      fields.InputDataFields.groundtruth_area
      fields.InputDataFields.groundtruth_label_types

  Returns:
    a dictionary of tensors containing only the groundtruth that have bounding
    boxes.
  """
  boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  # A box is invalid when any of its four coordinates is NaN.
  has_nan = tf.greater(
      tf.reduce_sum(tf.cast(tf.is_nan(boxes), dtype=tf.int32),
                    reduction_indices=[1]), 0)
  valid_indices = tf.where(tf.logical_not(has_nan))
  return retain_groundtruth(tensor_dict, valid_indices)
def filter_unrecognized_classes(tensor_dict):
  """Filters out class labels that are unrecognized by the labelmap.

  The decoder parses classes that are not included in the labelmap to a
  label of value -1. Such targets are unnecessary for training, and cause
  issues for evaluation, due to the label mapping logic. This function
  filters those labels out for both training and evaluation.

  Args:
    tensor_dict: dictionary containing input tensors keyed by
      fields.InputDataFields.

  Returns:
    A dictionary keyed by fields.InputDataFields containing the tensors
    obtained after applying the filtering.

  Raises:
    ValueError: If groundtruth_classes tensor is not in tensor_dict.
  """
  if fields.InputDataFields.groundtruth_classes not in tensor_dict:
    raise ValueError('`groundtruth classes` not in tensor_dict.')
  # Refer to tf_example_decoder for how unrecognized labels are handled.
  unrecognized_label = -1
  # Keep every entry whose label is strictly greater than the sentinel.
  recognized_indices = tf.where(
      tf.greater(tensor_dict[fields.InputDataFields.groundtruth_classes],
                 unrecognized_label))
  return retain_groundtruth(tensor_dict, recognized_indices)
def normalize_to_target(inputs,
                        target_norm_value,
                        dim,
                        epsilon=1e-7,
                        trainable=True,
                        scope='NormalizeToTarget',
                        summarize=True):
  """L2 normalizes the inputs across the specified dimension to a target norm.
  This op implements the L2 Normalization layer introduced in
  Liu, Wei, et al. "SSD: Single Shot MultiBox Detector."
  and Liu, Wei, Andrew Rabinovich, and Alexander C. Berg.
  "Parsenet: Looking wider to see better." and is useful for bringing
  activations from multiple layers in a convnet to a standard scale.
  Note that the rank of `inputs` must be known and the dimension to which
  normalization is to be applied should be statically defined.
  TODO(jonathanhuang): Add option to scale by L2 norm of the entire input.
  Args:
    inputs: A `Tensor` of arbitrary size.
    target_norm_value: A float value that specifies an initial target norm or
      a list of floats (whose length must be equal to the depth along the
      dimension to be normalized) specifying a per-dimension multiplier
      after normalization.
    dim: The dimension along which the input is normalized.
    epsilon: A small value to add to the inputs to avoid dividing by zero.
    trainable: Whether the norm is trainable or not
    scope: Optional scope for variable_scope.
    summarize: Whether or not to add a tensorflow summary for the op.
  Returns:
    The input tensor normalized to the specified target norm.
  Raises:
    ValueError: If dim is smaller than the number of dimensions in 'inputs'.
    ValueError: If target_norm_value is not a float or a list of floats with
      length equal to the depth along the dimension to be normalized.
  """
  with tf.variable_scope(scope, 'NormalizeToTarget', [inputs]):
    if not inputs.get_shape():
      raise ValueError('The input rank must be known.')
    input_shape = inputs.get_shape().as_list()
    input_rank = len(input_shape)
    if dim < 0 or dim >= input_rank:
      raise ValueError(
          'dim must be non-negative but smaller than the input rank.')
    if not input_shape[dim]:
      raise ValueError('input shape should be statically defined along '
                       'the specified dimension.')
    depth = input_shape[dim]
    # target_norm_value must be either a single float (applied to every
    # channel) or a list of floats of length `depth` (per-channel scales).
    if not (isinstance(target_norm_value, float) or
            (isinstance(target_norm_value, list) and
             len(target_norm_value) == depth) and
            all([isinstance(val, float) for val in target_norm_value])):
      raise ValueError('target_norm_value must be a float or a list of floats '
                       'with length equal to the depth along the dimension to '
                       'be normalized.')
    if isinstance(target_norm_value, float):
      initial_norm = depth * [target_norm_value]
    else:
      initial_norm = target_norm_value
    # Per-channel scale variable applied after L2 normalization; trainable
    # unless the caller disables it.
    target_norm = tf.contrib.framework.model_variable(
        name='weights', dtype=tf.float32,
        initializer=tf.constant(initial_norm, dtype=tf.float32),
        trainable=trainable)
    if summarize:
      mean = tf.reduce_mean(target_norm)
      tf.summary.scalar(tf.get_variable_scope().name, mean)
    # L2 norm along `dim`, keeping the dimension so it broadcasts over inputs.
    lengths = epsilon + tf.sqrt(tf.reduce_sum(tf.square(inputs), dim, True))
    mult_shape = input_rank*[1]
    mult_shape[dim] = depth
    return tf.reshape(target_norm, mult_shape) * tf.truediv(inputs, lengths)
def batch_position_sensitive_crop_regions(images,
                                          boxes,
                                          crop_size,
                                          num_spatial_bins,
                                          global_pool,
                                          parallel_iterations=64):
  """Position sensitive crop with batches of images and boxes.

  This op is exactly like `position_sensitive_crop_regions` below but operates
  on batches of images and boxes. See `position_sensitive_crop_regions`
  function below for the operation applied per batch element.

  Args:
    images: A `Tensor`. Must be one of the following types: `uint8`, `int8`,
      `int16`, `int32`, `int64`, `half`, `float32`, `float64`.
      A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
      Both `image_height` and `image_width` need to be positive.
    boxes: A `Tensor` of type `float32`.
      A 3-D tensor of shape `[batch, num_boxes, 4]`. Each box is specified in
      normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value
      of `y` is mapped to the image coordinate at `y * (image_height - 1)`, so
      as the `[0, 1]` interval of normalized image height is mapped to
      `[0, image_height - 1] in image height coordinates. We do allow y1 > y2,
      in which case the sampled crop is an up-down flipped version of the
      original image. The width dimension is treated similarly.
    crop_size: See `position_sensitive_crop_regions` below.
    num_spatial_bins: See `position_sensitive_crop_regions` below.
    global_pool: See `position_sensitive_crop_regions` below.
    parallel_iterations: Number of batch items to process in parallel.

  Returns:
    A 5-D float32 tensor of shape `[batch, num_boxes, K, K, crop_channels]`,
    where `crop_channels = depth / (spatial_bins_y * spatial_bins_x)`,
    where K = 1 when global_pool is True (average-pooled cropped regions),
    and K = crop_size when global_pool is False (one entry per batch element
    from `position_sensitive_crop_regions`).
  """
  def _position_sensitive_crop_fn(inputs):
    # Applies the single-image op to one (image, boxes) pair.
    images, boxes = inputs
    return position_sensitive_crop_regions(
        images,
        boxes,
        crop_size=crop_size,
        num_spatial_bins=num_spatial_bins,
        global_pool=global_pool)

  return shape_utils.static_or_dynamic_map_fn(
      _position_sensitive_crop_fn,
      elems=[images, boxes],
      dtype=tf.float32,
      parallel_iterations=parallel_iterations)
def position_sensitive_crop_regions(image,
                                    boxes,
                                    crop_size,
                                    num_spatial_bins,
                                    global_pool):
  """Position-sensitive crop and pool rectangular regions from a feature grid.
  The output crops are split into `spatial_bins_y` vertical bins
  and `spatial_bins_x` horizontal bins. For each intersection of a vertical
  and a horizontal bin the output values are gathered by performing
  `tf.image.crop_and_resize` (bilinear resampling) on a a separate subset of
  channels of the image. This reduces `depth` by a factor of
  `(spatial_bins_y * spatial_bins_x)`.
  When global_pool is True, this function implements a differentiable version
  of position-sensitive RoI pooling used in
  [R-FCN detection system](https://arxiv.org/abs/1605.06409).
  When global_pool is False, this function implements a differentiable version
  of position-sensitive assembling operation used in
  [instance FCN](https://arxiv.org/abs/1603.08678).
  Args:
    image: A `Tensor`. Must be one of the following types: `uint8`, `int8`,
      `int16`, `int32`, `int64`, `half`, `float32`, `float64`.
      A 3-D tensor of shape `[image_height, image_width, depth]`.
      Both `image_height` and `image_width` need to be positive.
    boxes: A `Tensor` of type `float32`.
      A 2-D tensor of shape `[num_boxes, 4]`. Each box is specified in
      normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value
      of `y` is mapped to the image coordinate at `y * (image_height - 1)`, so
      as the `[0, 1]` interval of normalized image height is mapped to
      `[0, image_height - 1] in image height coordinates. We do allow y1 > y2,
      in which case the sampled crop is an up-down flipped version of the
      original image. The width dimension is treated similarly.
    crop_size: A list of two integers `[crop_height, crop_width]`. All
      cropped image patches are resized to this size. The aspect ratio of the
      image content is not preserved. Both `crop_height` and `crop_width` need
      to be positive.
    num_spatial_bins: A list of two integers `[spatial_bins_y, spatial_bins_x]`.
      Represents the number of position-sensitive bins in y and x directions.
      Both values should be >= 1. `crop_height` should be divisible by
      `spatial_bins_y`, and similarly for width.
      The number of image channels should be divisible by
      (spatial_bins_y * spatial_bins_x).
      Suggested value from R-FCN paper: [3, 3].
    global_pool: A boolean variable.
      If True, we perform average global pooling on the features assembled from
        the position-sensitive score maps.
      If False, we keep the position-pooled features without global pooling
        over the spatial coordinates.
      Note that using global_pool=True is equivalent to but more efficient than
        running the function with global_pool=False and then performing global
        average pooling.
  Returns:
    position_sensitive_features: A 4-D tensor of shape
      `[num_boxes, K, K, crop_channels]`,
      where `crop_channels = depth / (spatial_bins_y * spatial_bins_x)`,
      where K = 1 when global_pool is True (Average-pooled cropped regions),
      and K = crop_size when global_pool is False.
  Raises:
    ValueError: Raised in four situations:
      `num_spatial_bins` is not >= 1;
      `num_spatial_bins` does not divide `crop_size`;
      `(spatial_bins_y*spatial_bins_x)` does not divide `depth`;
      `bin_crop_size` is not square when global_pool=False due to the
        constraint in function space_to_depth.
  """
  # Validate bin configuration and derive the per-bin crop size.
  total_bins = 1
  bin_crop_size = []
  for (num_bins, crop_dim) in zip(num_spatial_bins, crop_size):
    if num_bins < 1:
      raise ValueError('num_spatial_bins should be >= 1')
    if crop_dim % num_bins != 0:
      raise ValueError('crop_size should be divisible by num_spatial_bins')
    total_bins *= num_bins
    bin_crop_size.append(crop_dim // num_bins)
  # The space_to_depth call below (global_pool=False path) requires square
  # blocks, hence this restriction.
  if not global_pool and bin_crop_size[0] != bin_crop_size[1]:
    raise ValueError('Only support square bin crop size for now.')
  ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=1)
  spatial_bins_y, spatial_bins_x = num_spatial_bins
  # Split each box into spatial_bins_y * spatial_bins_x bins.
  position_sensitive_boxes = []
  for bin_y in range(spatial_bins_y):
    step_y = (ymax - ymin) / spatial_bins_y
    for bin_x in range(spatial_bins_x):
      step_x = (xmax - xmin) / spatial_bins_x
      box_coordinates = [ymin + bin_y * step_y,
                         xmin + bin_x * step_x,
                         ymin + (bin_y + 1) * step_y,
                         xmin + (bin_x + 1) * step_x,
                        ]
      position_sensitive_boxes.append(tf.stack(box_coordinates, axis=1))
  # Each bin is cropped from its own contiguous slice of image channels.
  image_splits = tf.split(value=image, num_or_size_splits=total_bins, axis=2)
  image_crops = []
  for (split, box) in zip(image_splits, position_sensitive_boxes):
    if split.shape.is_fully_defined() and box.shape.is_fully_defined():
      # When shapes are fully static, use the project's matmul-based crop
      # helper instead of tf.image.crop_and_resize.
      crop = tf.squeeze(
          matmul_crop_and_resize(
              tf.expand_dims(split, axis=0), tf.expand_dims(box, axis=0),
              bin_crop_size),
          axis=0)
    else:
      crop = tf.image.crop_and_resize(
          tf.expand_dims(split, 0), box,
          tf.zeros(tf.shape(boxes)[0], dtype=tf.int32), bin_crop_size)
    image_crops.append(crop)
  if global_pool:
    # Average over all bins.
    position_sensitive_features = tf.add_n(image_crops) / len(image_crops)
    # Then average over spatial positions within the bins.
    position_sensitive_features = tf.reduce_mean(
        position_sensitive_features, [1, 2], keepdims=True)
  else:
    # Reorder height/width to depth channel.
    block_size = bin_crop_size[0]
    if block_size >= 2:
      image_crops = [tf.space_to_depth(
          crop, block_size=block_size) for crop in image_crops]
    # Pack image_crops so that first dimension is for position-senstive boxes.
    position_sensitive_features = tf.stack(image_crops, axis=0)
    # Unroll the position-sensitive boxes to spatial positions.
    position_sensitive_features = tf.squeeze(
        tf.batch_to_space_nd(position_sensitive_features,
                             block_shape=[1] + num_spatial_bins,
                             crops=tf.zeros((3, 2), dtype=tf.int32)),
        axis=[0])
    # Reorder back the depth channel.
    if block_size >= 2:
      position_sensitive_features = tf.depth_to_space(
          position_sensitive_features, block_size=block_size)
  return position_sensitive_features
def reframe_box_masks_to_image_masks(box_masks, boxes, image_height,
                                     image_width):
  """Transforms the box masks back to full image masks.

  Embeds masks in bounding boxes of larger masks whose shapes correspond to
  image shape.

  Args:
    box_masks: A tf.float32 tensor of size [num_masks, mask_height, mask_width].
    boxes: A tf.float32 tensor of size [num_masks, 4] containing the box
           corners. Row i contains [ymin, xmin, ymax, xmax] of the box
           corresponding to mask i. Note that the box corners are in
           normalized coordinates.
    image_height: Image height. The output mask will have the same height as
                  the image height.
    image_width: Image width. The output mask will have the same width as the
                 image width.

  Returns:
    A tf.float32 tensor of size [num_masks, image_height, image_width].
  """
  # TODO(rathodv): Make this a public function.
  def _reframe_nonempty():
    """Handles the case of one or more box masks."""

    def _relative_to_reference(inner_boxes, reference_boxes):
      # Express `inner_boxes` in the coordinate frame of `reference_boxes`.
      corners = tf.reshape(inner_boxes, [-1, 2, 2])
      lo = tf.expand_dims(reference_boxes[:, 0:2], 1)
      hi = tf.expand_dims(reference_boxes[:, 2:4], 1)
      return tf.reshape((corners - lo) / (hi - lo), [-1, 4])

    masks_4d = tf.expand_dims(box_masks, axis=3)
    num_boxes = tf.shape(masks_4d)[0]
    # The whole image, as a unit box per mask, re-expressed relative to each
    # mask's own box; cropping with these "reverse" boxes embeds each mask
    # back into its location on the full canvas.
    unit_boxes = tf.concat(
        [tf.zeros([num_boxes, 2]), tf.ones([num_boxes, 2])], axis=1)
    reverse_boxes = _relative_to_reference(unit_boxes, boxes)
    return tf.image.crop_and_resize(
        image=masks_4d,
        boxes=reverse_boxes,
        box_ind=tf.range(num_boxes),
        crop_size=[image_height, image_width],
        extrapolation_value=0.0)

  image_masks = tf.cond(
      tf.shape(box_masks)[0] > 0,
      _reframe_nonempty,
      lambda: tf.zeros([0, image_height, image_width, 1], dtype=tf.float32))
  return tf.squeeze(image_masks, axis=3)
def merge_boxes_with_multiple_labels(boxes,
                                     classes,
                                     confidences,
                                     num_classes,
                                     quantization_bins=10000):
  """Merges boxes with same coordinates and returns K-hot encoded classes.
  Args:
    boxes: A tf.float32 tensor with shape [N, 4] holding N boxes. Only
      normalized coordinates are allowed.
    classes: A tf.int32 tensor with shape [N] holding class indices.
      The class index starts at 0.
    confidences: A tf.float32 tensor with shape [N] holding class confidences.
    num_classes: total number of classes to use for K-hot encoding.
    quantization_bins: the number of bins used to quantize the box coordinate.
  Returns:
    merged_boxes: A tf.float32 tensor with shape [N', 4] holding boxes,
      where N' <= N.
    class_encodings: A tf.int32 tensor with shape [N', num_classes] holding
      K-hot encodings for the merged boxes.
    confidence_encodings: A tf.float32 tensor with shape [N', num_classes]
      holding encodings of confidences for the merged boxes.
    merged_box_indices: A tf.int32 tensor with shape [N'] holding original
      indices of the boxes.
  """
  boxes_shape = tf.shape(boxes)
  classes_shape = tf.shape(classes)
  confidences_shape = tf.shape(confidences)
  box_class_shape_assert = shape_utils.assert_shape_equal_along_first_dimension(
      boxes_shape, classes_shape)
  box_confidence_shape_assert = (
      shape_utils.assert_shape_equal_along_first_dimension(
          boxes_shape, confidences_shape))
  box_dimension_assert = tf.assert_equal(boxes_shape[1], 4)
  box_normalized_assert = shape_utils.assert_box_normalized(boxes)
  with tf.control_dependencies(
      [box_class_shape_assert, box_confidence_shape_assert,
       box_dimension_assert, box_normalized_assert]):
    # Quantize coordinates so boxes with identical (quantized) coordinates
    # collapse to the same int64 hash key.
    quantized_boxes = tf.to_int64(boxes * (quantization_bins - 1))
  ymin, xmin, ymax, xmax = tf.unstack(quantized_boxes, axis=1)
  hashcodes = (
      ymin +
      xmin * quantization_bins +
      ymax * quantization_bins * quantization_bins +
      xmax * quantization_bins * quantization_bins * quantization_bins)
  unique_hashcodes, unique_indices = tf.unique(hashcodes)
  num_boxes = tf.shape(boxes)[0]
  num_unique_boxes = tf.shape(unique_hashcodes)[0]
  # For each merged box, keep the smallest original index in its group.
  merged_box_indices = tf.unsorted_segment_min(
      tf.range(num_boxes), unique_indices, num_unique_boxes)
  merged_boxes = tf.gather(boxes, merged_box_indices)
  unique_indices = tf.to_int64(unique_indices)
  classes = tf.to_int64(classes)
  def map_box_encodings(i):
    """Produces box K-hot and score encodings for each class index."""
    # Select the original rows whose box hashed to merged box i.
    box_mask = tf.equal(
        unique_indices, i * tf.ones(num_boxes, dtype=tf.int64))
    box_mask = tf.reshape(box_mask, [-1])
    box_indices = tf.boolean_mask(classes, box_mask)
    box_confidences = tf.boolean_mask(confidences, box_mask)
    box_class_encodings = tf.sparse_to_dense(
        box_indices, [num_classes], tf.constant(1, dtype=tf.int64),
        validate_indices=False)
    box_confidence_encodings = tf.sparse_to_dense(
        box_indices, [num_classes], box_confidences, validate_indices=False)
    return box_class_encodings, box_confidence_encodings
  # Important to avoid int32 here since there is no GPU kernel for int32.
  # int64 and float32 are fine.
  class_encodings, confidence_encodings = tf.map_fn(
      map_box_encodings,
      tf.range(tf.to_int64(num_unique_boxes)),
      back_prop=False,
      dtype=(tf.int64, tf.float32))
  merged_boxes = tf.reshape(merged_boxes, [-1, 4])
  class_encodings = tf.cast(class_encodings, dtype=tf.int32)
  class_encodings = tf.reshape(class_encodings, [-1, num_classes])
  confidence_encodings = tf.reshape(confidence_encodings, [-1, num_classes])
  merged_box_indices = tf.reshape(merged_box_indices, [-1])
  return (merged_boxes, class_encodings, confidence_encodings,
          merged_box_indices)
def nearest_neighbor_upsampling(input_tensor, scale=None, height_scale=None,
                                width_scale=None):
  """Nearest neighbor upsampling implementation.

  Nearest neighbor upsampling function that maps input tensor with shape
  [batch_size, height, width, channels] to [batch_size, height * scale
  , width * scale, channels]. This implementation only uses reshape and
  broadcasting to make it TPU compatible.

  Args:
    input_tensor: A float32 tensor of size [batch, height_in, width_in,
      channels].
    scale: An integer multiple to scale resolution of input data in both height
      and width dimensions.
    height_scale: An integer multiple to scale the height of input image. This
      option when provided overrides `scale` option.
    width_scale: An integer multiple to scale the width of input image. This
      option when provided overrides `scale` option.

  Returns:
    data_up: A float32 tensor of size
      [batch, height_in*scale, width_in*scale, channels].

  Raises:
    ValueError: If both scale and height_scale or if both scale and width_scale
      are None.
  """
  if not scale and (height_scale is None or width_scale is None):
    raise ValueError('Provide either `scale` or `height_scale` and'
                     ' `width_scale`.')
  with tf.name_scope('nearest_neighbor_upsampling'):
    h_scale = height_scale if height_scale is not None else scale
    w_scale = width_scale if width_scale is not None else scale
    batch, height, width, channels = (
        shape_utils.combined_static_and_dynamic_shape(input_tensor))
    # Insert singleton axes after height and width, then broadcast against a
    # ones tensor so every pixel is replicated h_scale x w_scale times.
    expanded = tf.reshape(input_tensor, [batch, height, 1, width, 1, channels])
    replicated = expanded * tf.ones(
        [1, 1, h_scale, 1, w_scale, 1], dtype=input_tensor.dtype)
    return tf.reshape(
        replicated, [batch, height * h_scale, width * w_scale, channels])
def matmul_gather_on_zeroth_axis(params, indices, scope=None):
  """Matrix multiplication based implementation of tf.gather on zeroth axis.

  Builds a one-hot indicator matrix from `indices` and multiplies it with a
  flattened view of `params`, which is equivalent to gathering rows.

  TODO(rathodv, jonathanhuang): enable sparse matmul option.

  Args:
    params: A float32 Tensor of rank >= 1 to gather values from.
    indices: An int32/int64 Tensor with values in [0, params.shape[0]).
    scope: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `params`, holding the gathered rows with
    shape indices.shape + params.shape[1:].
  """
  with tf.name_scope(scope, 'MatMulGather'):
    params_dims = shape_utils.combined_static_and_dynamic_shape(params)
    indices_dims = shape_utils.combined_static_and_dynamic_shape(indices)
    # Flatten trailing dims so the gather becomes a 2-D matmul.
    flat_params = tf.reshape(params, [params_dims[0], -1])
    one_hot_rows = tf.one_hot(indices, params_dims[0])
    flat_gathered = tf.matmul(one_hot_rows, flat_params)
    return tf.reshape(flat_gathered,
                      tf.stack(indices_dims + params_dims[1:]))
def fpn_feature_levels(num_levels, unit_scale_index, image_ratio, boxes):
  """Returns fpn feature level for each box based on its area.

  See section 4.2 of https://arxiv.org/pdf/1612.03144.pdf for details.

  Args:
    num_levels: An integer > 0, the number of feature levels boxes can be
      assigned to.
    unit_scale_index: A 0-based integer index of the feature map whose
      resolution most closely matches that of the pretrained model.
    image_ratio: A float, the ratio of input image area to pretraining
      image area.
    boxes: A float tensor of shape [batch, num_boxes, 4] containing boxes of
      the form [ymin, xmin, ymax, xmax] in normalized coordinates.

  Returns:
    An int32 tensor of shape [batch_size, num_boxes] containing feature
    indices.
  """
  assert num_levels > 0, (
      '`num_levels` must be > 0. Found {}'.format(num_levels))
  assert unit_scale_index < num_levels and unit_scale_index >= 0, (
      '`unit_scale_index` must be in [0, {}). Found {}.'.format(
          num_levels, unit_scale_index))
  extents = boxes[:, :, 2:4] - boxes[:, :, 0:2]
  # Geometric mean of height and width, i.e. the side of an equal-area square.
  side_lengths = tf.sqrt(tf.reduce_prod(extents, axis=2))
  ln_two = tf.cast(tf.log(2.0), dtype=boxes.dtype)
  # floor(log2(side * image_ratio)) shifted so a unit-scale box lands on
  # unit_scale_index.
  raw_levels = tf.cast(
      tf.floordiv(tf.log(side_lengths * image_ratio), ln_two)
      + unit_scale_index,
      dtype=tf.int32)
  # Clamp into the valid [0, num_levels - 1] range.
  return tf.maximum(0, tf.minimum(num_levels - 1, raw_levels))
def bfloat16_to_float32_nested(tensor_nested):
  """Convert bfloat16 tensors in a nested structure to float32.

  (The previous docstring described the opposite direction; the code — and
  the function name — convert bfloat16 to float32.)

  Args:
    tensor_nested: A tf.Tensor, or a Python list/tuple/dict whose values are
      tf.Tensor or nested lists/tuples/dicts of them.

  Returns:
    A structure of the same shape in which every bfloat16 tensor has been
    cast to float32; all other leaves are returned unchanged.
  """
  if isinstance(tensor_nested, tf.Tensor):
    if tensor_nested.dtype == tf.bfloat16:
      return tf.cast(tensor_nested, dtype=tf.float32)
    else:
      return tensor_nested
  elif isinstance(tensor_nested, (list, tuple)):
    out_tensor_dict = [bfloat16_to_float32_nested(t) for t in tensor_nested]
  elif isinstance(tensor_nested, dict):
    out_tensor_dict = {
        k: bfloat16_to_float32_nested(v) for k, v in tensor_nested.items()
    }
  else:
    # Previously any other leaf type crashed with UnboundLocalError because
    # out_tensor_dict was never assigned; pass such leaves through unchanged
    # so mixed structures (e.g. dicts also holding ints/strings) work.
    return tensor_nested
  return out_tensor_dict
def gather_with_padding_values(input_tensor, indices, padding_value):
  """Gathers elements from tensor and pads `padding_value` for ignore indices.

  An ignore index is "-1": for those positions `padding_value` is returned
  instead of a row of `input_tensor`.

  Args:
    input_tensor: A N-D tensor of shape [M, d_1, d_2 .. d_(N-1)] to gather
      values from.
    indices: A 1-D tensor in which each element is either a valid index into
      the first dimension of `input_tensor` or -1.
    padding_value: A (N-1)-D tensor of shape [d_1, d_2 .. d_(N-1)] gathered
      for every -1 in `indices`.

  Returns:
    gathered_tensor: A tensor of shape [L, d_1, d_2 .. d_(N-1)] where L is
      the length of `indices`.
  """
  # Prepend the padding row as row 0, then shift all indices up by one so
  # that -1 maps onto the padding row.
  padded_input = tf.concat(
      [tf.expand_dims(padding_value, axis=0), input_tensor], axis=0)
  return tf.gather(padded_input, indices + 1)
# Configuration record for the equalization loss.
# NOTE(review): presumably `weight` is a scalar loss multiplier and
# `exclude_prefixes` lists name prefixes to skip -- confirm at call sites.
EqualizationLossConfig = collections.namedtuple('EqualizationLossConfig',
                                                ['weight', 'exclude_prefixes'])
| 41.52598 | 80 | 0.702573 |
b50bd96b58c92d17812b302e3c8ac32509153506 | 1,080 | py | Python | nipype/interfaces/dipy/tests/test_auto_APMQball.py | vferat/nipype | 536c57da150d157dcb5c121af43aaeab71cdbd5f | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/dipy/tests/test_auto_APMQball.py | vferat/nipype | 536c57da150d157dcb5c121af43aaeab71cdbd5f | [
"Apache-2.0"
] | 2 | 2018-04-17T19:18:16.000Z | 2020-03-04T22:05:02.000Z | nipype/interfaces/dipy/tests/test_auto_APMQball.py | oesteban/nipype | c14f24eba1da08711bbb894e049ee858ed740096 | [
"Apache-2.0"
] | null | null | null | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..anisotropic_power import APMQball
def test_APMQball_inputs():
    """Check APMQball's input spec traits against this recorded metadata map."""
    input_map = dict(
        b0_thres=dict(usedefault=True, ),
        in_bval=dict(
            extensions=None,
            mandatory=True,
        ),
        in_bvec=dict(
            extensions=None,
            mandatory=True,
        ),
        in_file=dict(
            extensions=None,
            mandatory=True,
        ),
        mask_file=dict(extensions=None, ),
        out_prefix=dict(),
    )
    inputs = APMQball.input_spec()
    # Every recorded (trait, metadata) pair must match the live spec.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
def test_APMQball_outputs():
    """Check APMQball's output spec traits against this recorded metadata map."""
    output_map = dict(out_file=dict(extensions=None, ), )
    outputs = APMQball.output_spec()
    # Every recorded (trait, metadata) pair must match the live spec.
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
| 30 | 67 | 0.612037 |
307735d0ffd35f1d33488d89c2e3b3806b778e0a | 1,062 | py | Python | inmuebles/urls.py | judhenaoma/c4-p27-g3 | 53feed982ef1954a0347e94487fec2f8cd20e971 | [
"CC0-1.0"
] | null | null | null | inmuebles/urls.py | judhenaoma/c4-p27-g3 | 53feed982ef1954a0347e94487fec2f8cd20e971 | [
"CC0-1.0"
] | null | null | null | inmuebles/urls.py | judhenaoma/c4-p27-g3 | 53feed982ef1954a0347e94487fec2f8cd20e971 | [
"CC0-1.0"
] | null | null | null | from django.urls import path
from .views.registroUserView import registroUserView
from .views.detalleUserView import detalleUserView
from .views.ListaInmueblesView import ListaInmueblesView
from .views.CrearInmuebleView import CrearInmuebleView
from .views.EliminarInmuebleView import EliminarInmuebleView
from .views.ListarInmublesHostView import ListarInmueblesHostView
from .views.DetalleInmuebleView import DetalleInmueble
from .views.ActualizarInmuebleView import ActualizarInmuebleView
# URL routes for the inmuebles app; each route delegates to a class-based
# view. <int:...> and <slug:...> converters capture object identifiers.
urlpatterns = [
    path('usuario/registro/', registroUserView.as_view()),
    path('usuario/detalle-usuario/', detalleUserView.as_view()),
    path('lista-inmuebles/', ListaInmueblesView.as_view()),
    path('crear-inmueble/', CrearInmuebleView.as_view()),
    path('eliminar-inmueble/<int:pk>/', EliminarInmuebleView.as_view()),
    path('lista-inmuebles-host/', ListarInmueblesHostView.as_view()),
    path('inmueble/<slug:url_id>/', DetalleInmueble.as_view()),
    path('lista-inmuebles-host/modificar/<int:inmueble_id>/', ActualizarInmuebleView.as_view())
]
| 39.333333 | 95 | 0.795669 |
9f1a866623e64db4903683df8266024e6dd88344 | 392 | py | Python | ordenacao/insertion_sort.py | italoaalves/projeto-ed-3 | 8f51792ae140018fabb454005f9995f5c6302d3f | [
"Apache-2.0"
] | null | null | null | ordenacao/insertion_sort.py | italoaalves/projeto-ed-3 | 8f51792ae140018fabb454005f9995f5c6302d3f | [
"Apache-2.0"
] | null | null | null | ordenacao/insertion_sort.py | italoaalves/projeto-ed-3 | 8f51792ae140018fabb454005f9995f5c6302d3f | [
"Apache-2.0"
def insertionsort(lista):
    """Sort ``lista`` in place in ascending order using insertion sort.

    Args:
        lista: list of mutually comparable items; modified in place,
            nothing is returned.
    """
    for i in range(1, len(lista)):
        chave = lista[i]
        j = i - 1
        # Test the bound *before* indexing: the original evaluated
        # `lista[atual]` first, silently reading lista[-1] via Python's
        # negative-index wrap-around once the index ran off the front.
        while j >= 0 and chave < lista[j]:
            lista[j + 1] = lista[j]
            j -= 1
        lista[j + 1] = chave
# Manual smoke test: sort a sample list and print the result.
if __name__ == "__main__":
    amostra = [2, 1, 3, 4, 6, 5]
    insertionsort(amostra)
    print(amostra)
| 19.6 | 52 | 0.522959 |
b6d2a952d070ba2ce7497c950c893fc3790bf8ac | 230 | py | Python | app/transaction/controller.py | mfurquim/finance-backend | 2ef172217a4cb5602d5b8c1ec5605994662e5155 | [
"MIT"
] | 1 | 2022-02-18T11:19:22.000Z | 2022-02-18T11:19:22.000Z | app/transaction/controller.py | mfurquim/finance-backend | 2ef172217a4cb5602d5b8c1ec5605994662e5155 | [
"MIT"
] | null | null | null | app/transaction/controller.py | mfurquim/finance-backend | 2ef172217a4cb5602d5b8c1ec5605994662e5155 | [
"MIT"
] | null | null | null | from app.log_manager import log
from datetime import date
from .model import TransactionInput
def make_transaction(transaction: TransactionInput):
    """Log receipt of *transaction* and return it unchanged."""
    message = f'calling make_transaction({transaction})'
    log.info(message)
    return transaction
| 23 | 56 | 0.804348 |
acc907547c88080875fac1578410647db53a8423 | 254 | py | Python | ips.py | FernandaMakiHirose/threads-ips | c9071df9b2700b60e7284502673b0b7e4f7fa4a9 | [
"MIT"
] | null | null | null | ips.py | FernandaMakiHirose/threads-ips | c9071df9b2700b60e7284502673b0b7e4f7fa4a9 | [
"MIT"
] | null | null | null | ips.py | FernandaMakiHirose/threads-ips | c9071df9b2700b60e7284502673b0b7e4f7fa4a9 | [
"MIT"
] | null | null | null | import ipaddress
# Parse and display a single host address.
ip = '192.168.0.1'
endereço = ipaddress.ip_address(ip)
print(endereço)

# Parse a one-host network (/32); strict=False tolerates set host bits.
ip = '192.168.0.100/32'
network = ipaddress.ip_network(ip, strict=False)
print(network)

# Print every address contained in the network.
for ip in network:
    print(ip)
1409711e95047b44d73c12c22314cb8ea3a7f32c | 2,235 | py | Python | pdb2pqr-1.9.0/scons/scons-local-2.3.0/SCons/Platform/os2.py | Acpharis/protein_prep | 8cc2f0caedefd5a3fdaa764ed013c2660a4df1b8 | [
"BSD-3-Clause"
] | 9 | 2016-08-17T06:52:10.000Z | 2020-04-28T04:20:07.000Z | pdb2pqr-1.9.0/scons/scons-local-2.3.0/SCons/Platform/os2.py | Acpharis/protein_prep | 8cc2f0caedefd5a3fdaa764ed013c2660a4df1b8 | [
"BSD-3-Clause"
] | null | null | null | pdb2pqr-1.9.0/scons/scons-local-2.3.0/SCons/Platform/os2.py | Acpharis/protein_prep | 8cc2f0caedefd5a3fdaa764ed013c2660a4df1b8 | [
"BSD-3-Clause"
] | 1 | 2021-03-03T23:20:25.000Z | 2021-03-03T23:20:25.000Z | """SCons.Platform.os2
Platform-specific initialization for OS/2 systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/os2.py 2013/03/03 09:48:35 garyo"
import win32
def generate(env):
    """Populate *env* with OS/2 platform defaults.

    Adds an empty 'ENV' dict when missing, installs the OS/2 file-name
    affixes for object files, programs and (shared) libraries, and records
    the host OS and architecture.
    """
    if 'ENV' not in env:
        env['ENV'] = {}

    # File-name prefixes/suffixes used by the OS/2 tool chain.
    affixes = {
        'OBJPREFIX': '',
        'OBJSUFFIX': '.obj',
        'SHOBJPREFIX': '$OBJPREFIX',
        'SHOBJSUFFIX': '$OBJSUFFIX',
        'PROGPREFIX': '',
        'PROGSUFFIX': '.exe',
        'LIBPREFIX': '',
        'LIBSUFFIX': '.lib',
        'SHLIBPREFIX': '',
        'SHLIBSUFFIX': '.dll',
        'LIBPREFIXES': '$LIBPREFIX',
    }
    for key, value in affixes.items():
        env[key] = value

    env['LIBSUFFIXES'] = [ '$LIBSUFFIX', '$SHLIBSUFFIX' ]
    env['HOST_OS'] = 'os2'
    env['HOST_ARCH'] = win32.get_architecture().arch
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 37.881356 | 113 | 0.69038 |
a06f4f18d93359062c82de5615040dd21479d387 | 748 | py | Python | django_modules/home/migrations/0002_contact.py | Mehdi6/djangoModules | b6e8fc578933675d0d087e87e1bdc99d12f440c1 | [
"MIT"
] | null | null | null | django_modules/home/migrations/0002_contact.py | Mehdi6/djangoModules | b6e8fc578933675d0d087e87e1bdc99d12f440c1 | [
"MIT"
] | null | null | null | django_modules/home/migrations/0002_contact.py | Mehdi6/djangoModules | b6e8fc578933675d0d087e87e1bdc99d12f440c1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-12-19 19:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11; creates the `Contact` model table.

    # Runs after the app's initial migration so the schema is built in order.
    dependencies = [
        ('home', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('email', models.EmailField(max_length=254)),
                # null=True: subject is optional at the database level.
                ('subject', models.CharField(max_length=140, null=True)),
                ('content', models.TextField()),
            ],
        ),
    ]
| 28.769231 | 114 | 0.573529 |
cdf8fa06763233c748d7ff73194094fd953fc5a5 | 2,422 | py | Python | robot_ws/src/hello_world_robot/nodes/rotate.py | hekmat-shrez/aws-robomaker-sample-application-helloworld | 4eb300dc0360b00f419810318c8d0b771c3a728c | [
"MIT-0"
] | null | null | null | robot_ws/src/hello_world_robot/nodes/rotate.py | hekmat-shrez/aws-robomaker-sample-application-helloworld | 4eb300dc0360b00f419810318c8d0b771c3a728c | [
"MIT-0"
] | null | null | null | robot_ws/src/hello_world_robot/nodes/rotate.py | hekmat-shrez/aws-robomaker-sample-application-helloworld | 4eb300dc0360b00f419810318c8d0b771c3a728c | [
"MIT-0"
] | null | null | null | #!/usr/bin/env python
# source: https://get-help.robotigniteacademy.com/t/how-to-stop-your-robot-when-ros-is-shutting-down/225
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import rospy
from geometry_msgs.msg import Twist
import time
class MoveRobotStopOnShutdown(object):
    """Drive the robot forward for a fixed time, then stop it on shutdown.

    Constructing the object runs the whole sequence: it publishes a forward
    velocity on /cmd_vel, sleeps, requests node shutdown, and the registered
    shutdown hook publishes a zero velocity so the robot halts.
    """
    def __init__(self):
        # create publisher and message as instance variables
        self.publisher = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
        self.msg = Twist()
        # do some cleanup on shutdown
        # NOTE(review): on_shutdown is registered before init_node --
        # confirm rospy tolerates this ordering.
        rospy.on_shutdown(self.clean_shutdown)
        # start by moving robot
        rospy.init_node('move_and_stop_robot')
        self.move_robot()
        rospy.spin()

    def publish(self, msg_type="move"):
        """Block until /cmd_vel has a subscriber, then publish self.msg.

        msg_type is used only in the log message ("move" or "stop").
        """
        while self.publisher.get_num_connections() < 1:
            # wait for a connection to publisher
            rospy.loginfo("Waiting for connection to publisher...")
            time.sleep(1)
        rospy.loginfo("Connected to publisher.")
        rospy.loginfo("Publishing %s message..." % msg_type)
        self.publisher.publish(self.msg)

    def move_robot(self):
        """Command 0.2 m/s forward, wait 55 s, then request node shutdown."""
        self.msg.linear.x = 0.2
        self.publish()
        time.sleep(55) # sleep and then stop
        rospy.signal_shutdown("We are done here!")

    def clean_shutdown(self):
        """Shutdown hook: publish a zero forward velocity to stop the robot."""
        rospy.loginfo("System is shutting down. Stopping robot...")
        self.msg.linear.x = 0
        self.publish("stop")
# Entry point: constructing the class starts the node and blocks until
# shutdown (rospy.spin() inside __init__).
if __name__ == '__main__':
    MoveRobotStopOnShutdown()
| 32.72973 | 104 | 0.687448 |
f85828093c2183651ed2702f4a656985ac3a79fe | 10,534 | py | Python | tdcosim/model/psse/psse_model.py | cuihantao/TDcoSim | bc8f26ccc9e32bd47af039f9efcaf0bdc67daddf | [
"BSD-3-Clause"
] | 3 | 2020-03-18T16:40:09.000Z | 2021-04-04T23:21:25.000Z | tdcosim/model/psse/psse_model.py | cuihantao/TDcoSim | bc8f26ccc9e32bd47af039f9efcaf0bdc67daddf | [
"BSD-3-Clause"
] | null | null | null | tdcosim/model/psse/psse_model.py | cuihantao/TDcoSim | bc8f26ccc9e32bd47af039f9efcaf0bdc67daddf | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import sys
import os
pssePath="C:\Program Files (x86)\PTI\PSSE33\PSSBIN"
sys.path.append(pssePath)
os.environ['PATH']+=';'+pssePath
import psspy
from tdcosim.global_data import GlobalData
class PSSEModel:
    """Transmission-network interface to Siemens PSS/E via the ``psspy`` API.

    Loads the case files referenced by ``GlobalData.config['psseConfig']``
    and exchanges bus voltages and load powers with the distribution-side
    models through ``GlobalData.data``.
    """
    def __init__(self):
        # psse
        self._psspy=psspy
        # Initialize PSS/E and route report/progress/alert/prompt output to
        # logical device 6.
        psspy.psseinit(0)
        psspy.report_output(6,'',[])
        psspy.progress_output(6,'',[])
        psspy.alert_output(6,'',[])
        psspy.prompt_output(6,'',[])
        return None
    def setup(self, adjustOpPoint=True):
        """Read the raw case and cache load-bus data in GlobalData.

        Args:
            adjustOpPoint: when True, also record the total and per-bus real
                power load so the operating point can be rescaled later.
        """
        # psspy info
        # Quantity name -> PSS/E channel-type id used with psspy.chsb below.
        self._monitorID={}
        self._monitorID['angle'] = 1
        self._monitorID['pelec'] = 2
        self._monitorID['qelec'] = 3
        self._monitorID['eterm'] = 4
        self._monitorID['efd'] = 5
        self._monitorID['pmech'] = 6
        self._monitorID['speed'] = 7
        self._monitorID['xadifd'] = 8
        self._monitorID['ecomp'] = 9
        self._monitorID['volt'] = 13
        self._monitorID['pload'] = 25
        self._monitorID['qload'] = 26
        # load psse case
        ierr = self._psspy.read(0,GlobalData.config['psseConfig']['rawFilePath'])
        assert ierr==0,"Reading raw file failed with error {}".format(ierr)
        ierr, nLoads = self._psspy.alodbuscount()
        assert ierr==0,"load bus count failed with error {}".format(ierr)
        GlobalData.data['TNet']['LoadBusCount'] = nLoads
        # default. Will connect dist syst feeder to all load buses
        ierr, loadBusNumber = self._psspy.alodbusint(string='NUMBER')
        assert ierr==0,"load bus number failed with error {}".format(ierr)
        GlobalData.data['TNet']['LoadBusNumber'] = loadBusNumber[0]
        if adjustOpPoint:# need to adjust operation point
            # find total load
            GlobalData.data['TNet']['TotalRealPowerLoad'] = 0
            GlobalData.data['TNet']['BusRealPowerLoad'] = {}
            ierr,S = self._psspy.alodbuscplx(string='MVAACT')
            assert ierr==0,"Reading bus complex load failed with error {}".format(ierr)
            for entry,val in zip(GlobalData.data['TNet']['LoadBusNumber'],S[0]):
                GlobalData.data['TNet']['TotalRealPowerLoad'] += val.real
                GlobalData.data['TNet']['BusRealPowerLoad'][entry]=val.real
    def dynamicInitialize(self, adjustOpPoint=True):
        """Set up output channels, compute initial conditions and return state.

        Returns:
            (targetS, Vpcc): targetS maps each distribution-interfaced bus to
            its [P, Q] load converted to kW/kvar; Vpcc maps bus number to
            per-unit voltage.
        """
        if adjustOpPoint:
            S = self._adjustSystemOperatingPoint()
        else:
            self._psspy.dyre_new([1,1,1,1],self.config['psseConfig']['dyrFilePath'])
        # NOTE(review): in the else-branch above S is never assigned but the
        # loop near the end of this method reads S -- confirm that the
        # adjustOpPoint=False path is actually supported.
        self._psspy.cong(1)
        GlobalData.data['dynamic']['channel'] = {}
        nMonVars=0
        nGenBus=self._psspy.agenbuscount(-1,1)[1]
        nBus=self._psspy.abuscount(-1,1)[1]
        nLoad=self._psspy.aloadcount(-1,1)[1]
        genBusNumber=self._psspy.agenbusint(-1,1,'NUMBER')[1][0]
        busNumber=self._psspy.abusint(string='NUMBER')[1][0]
        loadBusNumber=self._psspy.aloadint(-1,1,'NUMBER')[1][0]
        # Generator quantities: one channel per generator bus per quantity;
        # record which channel id maps to which bus.
        for item in ['angle','speed','pelec','qelec','pmech']:
            self._psspy.chsb(sid=0,all=1,status=[-1,-1,-1,1,self._monitorID[item],0])
            GlobalData.data['dynamic']['channel'][item]={}
            for channelID,node in zip(range(nMonVars+1,nMonVars+1+nGenBus),genBusNumber):# psse uses 1 ind
                GlobalData.data['dynamic']['channel'][item][channelID]=node
            nMonVars+=nGenBus
        # Bus voltage magnitude channels.
        self._psspy.chsb(sid=0,all=1,status=[-1,-1,-1,1,self._monitorID['volt'],0])
        GlobalData.data['dynamic']['channel']['volt']={}
        for channelID,node in zip(range(nMonVars+1,nMonVars+1+nBus),busNumber):# psse uses 1 ind
            GlobalData.data['dynamic']['channel']['volt'][channelID]=node
        nMonVars+=nBus
        # Load P/Q channels.
        for item in ['pload','qload']:
            self._psspy.chsb(sid=0,all=1,status=[-1,-1,-1,1,self._monitorID[item],0])
            GlobalData.data['dynamic']['channel'][item]={}
            for channelID,node in zip(range(nMonVars+1,nMonVars+1+nLoad),loadBusNumber):# psse uses 1 ind
                GlobalData.data['dynamic']['channel'][item][channelID]=node
            nMonVars+=nLoad
        self._psspy.strt(outfile=r'result.out')# compute initial conditions
        Vpcc=self.getVoltage()
        targetS={}
        for entry,val in zip(GlobalData.data['TNet']['LoadBusNumber'],S[0]):
            if entry in GlobalData.data['DNet']['Nodes']:
                targetS[entry]=[val.real*10**3,val.imag*10**3] # convert to kw and kvar from mw and mvar
        return targetS,Vpcc
    def _adjustSystemOperatingPoint(self):
        """Scale machine and load data down by the distribution penetration.

        Writes a *_temp.dyr file with selected machine parameters scaled by
        (1 - ReductionPercent), updates machine and load records through
        psspy, and returns the pre-adjustment complex bus loads S from
        psspy.alodbuscplx (or None if anything fails, see except below).
        """
        loadType = 0
        try:
            offset=3
            reductionPercent=GlobalData.data['DNet']['ReductionPercent']
            # Machine-model name -> index (relative to `offset`) of the dyr
            # record field that must be scaled for that model type.
            ind={}
            ind['GENCLS']=0
            ind['GENDCO']=4
            ind['GENROE']=4
            ind['GENROU']=4
            ind['GENSAE']=3
            ind['GENSAL']=3
            ind['GENTPJU1']=4
            ind['GENTRA']=1
            dyrPath=GlobalData.config['psseConfig']['dyrFilePath']
            f=open(dyrPath)
            dyrData=f.read().splitlines()
            f.close()
            dyrDataStr=''; Zr={}; Zx={}
            # Rewrite each dyr record with the selected parameter scaled.
            for line in dyrData:
                entry=line.split(',')
                for item in ind:
                    if entry[1]=="'{}'".format(item):
                        entry[offset+ind[item]]=\
                        str(float(entry[offset+ind[item]])*(1-reductionPercent))
                        break
                dyrDataStr+=','.join(entry+['\n'])
            tempDyrPath=dyrPath.split('.dyr')[0]+'_temp.dyr'
            f=open(tempDyrPath,'w')
            f.write(dyrDataStr)
            f.close()
            # now read raw file to get Zr and Zx
            f=open(GlobalData.config['psseConfig']['rawFilePath'])
            rawFileData=f.read().splitlines()
            f.close()
            readFlg=False
            # Collect per-generator source impedance (columns 9/10) from the
            # generator-data section of the raw file.
            for line in rawFileData:
                if "END OF GENERATOR DATA" in line:
                    readFlg=False
                if readFlg:
                    entry=line.split(',')
                    Zr[int(entry[0])]=float(entry[9])
                    Zx[int(entry[0])]=float(entry[10])
                if "BEGIN GENERATOR DATA" in line:
                    readFlg=True
            # modify config to point to the temp dyr file
            GlobalData.config['psseConfig']['dyrFilePath']=tempDyrPath
            # make changes in machine data through psse internal data structure
            m=macVarMap={}
            m['PGEN']=0
            m['QGEN']=1
            m['QMAX']=2
            m['QMIN']=3
            m['PMAX']=4
            m['PMIN']=5
            m['MBASE']=6
            # read dyr file
            self._psspy.dyre_new([1,1,1,1],GlobalData.config['psseConfig']['dyrFilePath'])
            # get machine data
            macVarData={}
            for entry in macVarMap:
                ierr,macVarData[entry]=self._psspy.amachreal(sid=-1, flag=1, string=entry)# get data
                assert ierr==0,"reading machine data failed with error {}".format(ierr)
            genBusNumber=self._psspy.agenbusint(-1,1,'NUMBER')[1][0] # get gen bus number
            # change machine data
            for n in range(len(genBusNumber)):# make changes at each gen
                macVarDataNew=[0.]*11+[1.]*6
                for entry in macVarData:# make changes for each variable
                    # passing double precision data results in long values
                    # and psspy.machine_chng_2 API fails to change data.
                    # Hence, use 3 digit precision.
                    macVarDataNew[macVarMap[entry]]=np.round(macVarData[entry][0][n]\
                    *(1-reductionPercent),5)
                macVarDataNew[7]=np.round(Zr[genBusNumber[n]],5)
                macVarDataNew[8]=np.round(Zx[genBusNumber[n]],5)
                self._psspy.machine_chng_2(i=genBusNumber[n], realar=macVarDataNew) # change machine data
            # adjust load data
            ierr,S=self._psspy.alodbuscplx(string='MVAACT')
            assert ierr==0,"reading complex load failed with error {}".format(ierr)
            for busID,val in zip(GlobalData.data['TNet']['LoadBusNumber'],S[0]):
                if busID in GlobalData.data['DNet']['Nodes']:
                    # constP,Q,IP,IQ,YP,YQ
                    loadVal=[0]*6
                    reductionPercent=GlobalData.data['DNet']['Nodes'][busID]['solarPenetration']
                    loadVal[loadType*2],loadVal[loadType*2+1]=\
                    val.real*(1-reductionPercent),val.imag*(1-reductionPercent)
                    ierr=psspy.load_chng_4(busID,'1',[1,1,1,1,1,0],loadVal)
                    assert ierr==0,"load change failed with error {}".format(ierr)
            return S
        except:
            # NOTE(review): bare except swallows every failure (including the
            # asserts above) and implicitly returns None, which later breaks
            # dynamicInitialize -- consider re-raising after logging.
            GlobalData.log('Failed to adjustSystemOperatingPoint from PSSEModel')
    def staticInitialize(self):
        """Return (targetS, Vpcc) for the static (power-flow) co-simulation.

        targetS maps each distribution-interfaced load bus to its [P, Q]
        load converted to kW/kvar; Vpcc maps bus number to per-unit voltage.
        Returns None on failure (logged, not raised).
        """
        try:
            Vpcc=self.getVoltage()
            # scale feeder
            targetS={}
            ierr,S=self._psspy.alodbuscplx(string='MVAACT')
            assert ierr==0,"Reading load bus complex power failed with error {}".format(ierr)
            for entry,val in zip(GlobalData.data['TNet']['LoadBusNumber'],S[0]):
                if entry in GlobalData.data['DNet']['Nodes']:
                    targetS[entry]=[val.real*10**3,val.imag*10**3] # convert to kw and kvar from mw and mvar
            return targetS, Vpcc
        except Exception as e:
            GlobalData.log('Failed to initialize from PSSEModel')
    #===================GET VOLTAGE FROM PSSSE========================
    def getVoltage(self):
        """Return {loadBusNumber: perUnitVoltage} for dist-interfaced buses."""
        try:
            """Get PCC voltage from psse."""
            Vpcc={}
            if GlobalData.data['TNet']['LoadBusCount']==len(GlobalData.data['TNet']['LoadBusNumber']): # dist syst interfaced at all load buses
                loadBusVPU=self._psspy.alodbusreal(string='PU')
                loadBusVPU = loadBusVPU[1][0]
                for entry,val in zip(GlobalData.data['TNet']['LoadBusNumber'],loadBusVPU):# efficient
                    if entry in GlobalData.data['DNet']['Nodes']:
                        Vpcc[entry]=val
            else:# subset of loadbuses interfaced as dist syst
                for entry in GlobalData.data['TNet']['LoadBusNumber']: # not as efficient for large cases
                    Vpcc[entry]=self._psspy.busdat(entry,'PU')[1]
            return Vpcc
        except Exception as e:
            GlobalData.log('Failed to getVoltage from PSSEModel')
    def setLoad(self, S,loadType=0):
        """set PCC Pinj,Qinj for psse.
        Input: S -- dictionary containing Pinj and Qinj.
        loadType -- 0- constant power, 1-constant current, 2-constant admittance."""
        for busID in GlobalData.data['TNet']['LoadBusNumber']:
            if busID in GlobalData.data['DNet']['Nodes']:
                # constP,Q,IP,IQ,YP,YQ
                loadVal=[0]*6
                loadVal[loadType*2],loadVal[loadType*2+1]=S[busID]['P'],S[busID]['Q']
                ierr=self._psspy.load_chng_4(busID,'1',[1,1,1,1,1,0],realar=loadVal)
                assert ierr==0,"load change failed with error {}".format(ierr)
    def shunt(self, targetS, Vpcc, power):
        """Add fixed compensating shunts for the residual P/Q mismatch.

        Args:
            targetS: bus -> [P, Q] target (kW/kvar, per dynamicInitialize).
            Vpcc: bus -> per-unit voltage.
            power: bus -> {'P':..., 'Q':...} actually served.
        """
        try:
            mismatchTolerance=0.1
            for node in power:
                # NOTE(review): this comparison appears to mix units --
                # targetS is in kW/kvar while power seems to be MW/Mvar
                # given the 1e-3 conversion below; verify.
                if abs(power[node]['P']-targetS[node][0])>mismatchTolerance or abs(power[node]['Q']-targetS[node][1])>mismatchTolerance:# add shunt if needed
                    Pshunt = targetS[node][0]*1e-3 - power[node]['P']
                    Qshunt = targetS[node][1]*1e-3 - power[node]['Q']
                    # The remaining power is incorporated as compensating shunt
                    # The compensating shunt power
                    # Pshunt + j*Qshunt = Vpcc^2*(YPshunt - YQshunt)
                    # which gives the below two equations for shunt.
                    # Note the negative sign for YQshunt is because we are
                    # considering admittances
                    YPshunt = Pshunt/(Vpcc[node]*Vpcc[node])
                    YQshunt = -Qshunt/(Vpcc[node]*Vpcc[node])
                    # Add the remaining as fixed compensating shunt
                    ierr = self._psspy.shunt_data(node,'1 ',1,[YPshunt,YQshunt])
                    assert ierr==0,"Adding shunt failed with error {}".format(ierr)
        except Exception as e:
            GlobalData.log('Failed to shunt from PSSEModel')
    def runPFLOW(self):
        """Solve the power flow with psspy.fnsl."""
        self._psspy.fnsl()
    def runDynamic(self, tpause):
        """Run the dynamic simulation up to time tpause."""
        self._psspy.run(tpause=tpause)
    def faultOn(self, faultBus, faultImpedance):
        """Apply a bus fault at faultBus with the given fault impedance."""
        self._psspy.dist_bus_fault(faultBus, 1,0.0,faultImpedance)
    def faultOff(self):
        """Clear the applied disturbance/fault."""
        self._psspy.dist_clear_fault()
| 36.199313 | 145 | 0.678944 |
bac07e0bbd97880bccd6745ad7bdb1e06f16141e | 1,701 | py | Python | ml-agents/mlagents/trainers/components/reward_signals/reward_signal_factory.py | russellcaughey/ml-agents | 493c75bf683d35d512ae6fb57d4a1a332116df15 | [
"Apache-2.0"
] | 3 | 2018-09-18T13:40:29.000Z | 2019-02-14T07:30:09.000Z | ml-agents/mlagents/trainers/components/reward_signals/reward_signal_factory.py | russellcaughey/ml-agents | 493c75bf683d35d512ae6fb57d4a1a332116df15 | [
"Apache-2.0"
] | 1 | 2019-09-04T23:13:55.000Z | 2019-09-04T23:13:55.000Z | ml-agents/mlagents/trainers/components/reward_signals/reward_signal_factory.py | russellcaughey/ml-agents | 493c75bf683d35d512ae6fb57d4a1a332116df15 | [
"Apache-2.0"
] | 2 | 2019-09-10T16:05:48.000Z | 2020-07-24T20:40:26.000Z | import logging
from typing import Any, Dict, Type
from mlagents.trainers.trainer import UnityTrainerException
from mlagents.trainers.components.reward_signals.reward_signal import RewardSignal
from mlagents.trainers.components.reward_signals.extrinsic.signal import (
ExtrinsicRewardSignal,
)
from mlagents.trainers.components.reward_signals.gail.signal import GAILRewardSignal
from mlagents.trainers.components.reward_signals.curiosity.signal import (
CuriosityRewardSignal,
)
from mlagents.trainers.tf_policy import TFPolicy
from mlagents.trainers.models import LearningModel
logger = logging.getLogger("mlagents.trainers")

# Maps the reward-signal name used in trainer configs to its implementation
# class; create_reward_signal looks signals up here.
NAME_TO_CLASS: Dict[str, Type[RewardSignal]] = {
    "extrinsic": ExtrinsicRewardSignal,
    "curiosity": CuriosityRewardSignal,
    "gail": GAILRewardSignal,
}
def create_reward_signal(
    policy: TFPolicy,
    policy_model: LearningModel,
    name: str,
    config_entry: Dict[str, Any],
) -> RewardSignal:
    """
    Creates a reward signal class based on the name and config entry provided as a dict.
    :param policy: The policy class which the reward will be applied to.
    :param policy_model: The LearningModel the reward signal attaches to.
    :param name: The name of the reward signal
    :param config_entry: The config entries for that reward signal
    :return: The reward signal class instantiated
    """
    rcls = NAME_TO_CLASS.get(name)
    if not rcls:
        raise UnityTrainerException("Unknown reward signal type {0}".format(name))
    rcls.check_config(config_entry)
    try:
        class_inst = rcls(policy, policy_model, **config_entry)
    except TypeError as e:
        # Chain the original TypeError so the offending keyword argument
        # still shows up in the traceback instead of being discarded.
        raise UnityTrainerException(
            "Unknown parameters given for reward signal {0}".format(name)
        ) from e
    return class_inst
| 34.02 | 88 | 0.752499 |
f0f4c79f393762a1301647b6bb693973d7151fd2 | 589 | py | Python | inspire_magpie/errors.py | jstypka/inspire-magpie | 7294b9f5347197f59bf7b3f9d164f2ff35a52cef | [
"MIT"
] | 1 | 2017-11-17T17:30:36.000Z | 2017-11-17T17:30:36.000Z | inspire_magpie/errors.py | jstypka/inspire-magpie | 7294b9f5347197f59bf7b3f9d164f2ff35a52cef | [
"MIT"
] | 6 | 2016-05-03T09:25:19.000Z | 2019-03-22T00:45:43.000Z | inspire_magpie/errors.py | jstypka/inspire-magpie | 7294b9f5347197f59bf7b3f9d164f2ff35a52cef | [
"MIT"
] | 2 | 2016-04-13T13:53:36.000Z | 2016-04-28T14:51:42.000Z | # -*- coding: utf-8 -*-
#
# This file is part of Inspire-Magpie.
# Copyright (c) 2016 CERN
#
# Inspire-Magpie is a free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for
# more details.
"""Custom exceptions.
.. codeauthor:: Jan Aage Lavik <jan.age.lavik@cern.ch>
"""
from __future__ import absolute_import, print_function
class InspireMagpieException(Exception):
    """Base exception for Inspire-Magpie.

    All package-specific errors derive from this class, so callers can catch
    any Inspire-Magpie failure with a single except clause.
    """
class WordDoesNotExist(InspireMagpieException):
    """Raised when a word's representation is not found in the corpus."""
| 24.541667 | 77 | 0.733447 |
6b8ba7c1beb7212e0e66263fd62cb8647e3becbc | 211 | py | Python | setup.py | amit-15/wafer_main | 183f7d0ed87f4ca3938900651b50982590bf89fd | [
"MIT"
] | null | null | null | setup.py | amit-15/wafer_main | 183f7d0ed87f4ca3938900651b50982590bf89fd | [
"MIT"
] | null | null | null | setup.py | amit-15/wafer_main | 183f7d0ed87f4ca3938900651b50982590bf89fd | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
# Minimal packaging configuration; find_packages() discovers every package
# (directories containing __init__.py) under the project root.
setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    description='its a wafer project using mlops',
    author='amit15',
    license='MIT',
)
82723c311d3d4df1c03c90856191a41298116377 | 3,491 | py | Python | basenji/stream.py | shtoneyan/basenji | b220dc72069c3d8c250f36cb09799b337daac2fe | [
"Apache-2.0"
] | null | null | null | basenji/stream.py | shtoneyan/basenji | b220dc72069c3d8c250f36cb09799b337daac2fe | [
"Apache-2.0"
] | null | null | null | basenji/stream.py | shtoneyan/basenji | b220dc72069c3d8c250f36cb09799b337daac2fe | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
import pdb
import numpy as np
import tensorflow as tf
from basenji import dna_io
class PredStreamGen:
  """ Interface to acquire predictions via a buffered stream mechanism
       rather than getting them all at once and using excessive memory.
       Accepts generator and constructs stream batches from it. """
  def __init__(self, model, seqs_gen, batch_size, stream_seqs=64, verbose=False):
    # model: object exposing predict(dataset) -> array-like predictions
    # seqs_gen: generator yielding one-hot encoded sequences
    # stream_seqs: number of sequences predicted per stream chunk
    self.model = model
    self.seqs_gen = seqs_gen
    self.stream_seqs = stream_seqs
    self.batch_size = batch_size
    self.verbose = verbose
    # [stream_start, stream_end) indexes the chunk currently buffered.
    self.stream_start = 0
    self.stream_end = 0

  def __getitem__(self, i):
    # Return predictions for sequence i, predicting the next chunk lazily.
    # NOTE(review): buffering only moves forward, so i is presumably
    # accessed in non-decreasing order -- confirm with callers.
    # acquire predictions, if needed
    if i >= self.stream_end:
      # update start
      self.stream_start = self.stream_end
      if self.verbose:
        print('Predicting from %d' % self.stream_start, flush=True)
      # predict
      self.stream_preds = self.model.predict(self.make_dataset())
      # update end
      self.stream_end = self.stream_start + self.stream_preds.shape[0]
    return self.stream_preds[i - self.stream_start]

  def make_dataset(self):
    """ Construct Dataset object for this stream chunk. """
    seqs_1hot = []
    stream_end = self.stream_start+self.stream_seqs
    # Pull up to stream_seqs sequences; the generator may run out early.
    for si in range(self.stream_start, stream_end):
      try:
        seqs_1hot.append(self.seqs_gen.__next__())
      except StopIteration:
        continue
    seqs_1hot = np.array(seqs_1hot)
    dataset = tf.data.Dataset.from_tensor_slices((seqs_1hot,))
    dataset = dataset.batch(self.batch_size)
    return dataset
class PredStreamIter:
  """Interface to acquire predictions via a buffered stream mechanism
  rather than getting them all at once and using excessive memory.

  Accepts an iterator and constructs stream batches from it.
  [Original author's note: "I don't recall whether I've ever gotten this one
  working."]
  """
  def __init__(self, model, dataset_iter, stream_seqs=128, verbose=False):
    self.model = model  # object exposing predict() (Keras-style)
    self.dataset_iter = dataset_iter  # iterator yielding input examples
    self.stream_seqs = stream_seqs  # max examples fetched per chunk
    self.verbose = verbose
    # Half-open interval [stream_start, stream_end) of indexes whose
    # predictions are currently buffered in self.stream_preds.
    self.stream_start = 0
    self.stream_end = 0
  def __getitem__(self, i):
    # acquire predictions, if needed
    if i >= self.stream_end:
      # update start
      self.stream_start = self.stream_end
      if self.verbose:
        print('Predicting from %d' % self.stream_start, flush=True)
      # predict
      self.stream_preds = self.model.predict(self.fetch_batch())
      # update end
      self.stream_end = self.stream_start + self.stream_preds.shape[0]
    return self.stream_preds[i - self.stream_start]
  def fetch_batch(self):
    """Fetch a batch of data from the dataset iterator.

    NOTE(review): the `while x[-1]` truthiness test is ambiguous if the
    iterator yields tensors/arrays, and StopIteration is not caught here —
    confirm whether this class is actually used anywhere.
    """
    x = [next(self.dataset_iter)]
    while x[-1] and len(x) < self.stream_seqs:
      x.append(next(self.dataset_iter))
    return x
| 31.45045 | 81 | 0.690347 |
7e60baebf2c5b357432324787fe855aad03450a3 | 25,502 | py | Python | robustness_metrics/common/ops.py | goncaloperes/robustness_metrics | 5ee77294432e1265e432b6e84e06e2a5ae2af387 | [
"Apache-2.0"
] | 383 | 2020-09-04T08:25:16.000Z | 2022-03-25T17:39:19.000Z | robustness_metrics/common/ops.py | goncaloperes/robustness_metrics | 5ee77294432e1265e432b6e84e06e2a5ae2af387 | [
"Apache-2.0"
] | 8 | 2020-12-09T16:44:10.000Z | 2022-02-01T10:08:24.000Z | robustness_metrics/common/ops.py | goncaloperes/robustness_metrics | 5ee77294432e1265e432b6e84e06e2a5ae2af387 | [
"Apache-2.0"
] | 23 | 2020-12-07T22:53:31.000Z | 2022-02-21T03:49:46.000Z | # coding=utf-8
# Copyright 2021 The Robustness Metrics Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Implementation of data preprocessing ops.
All preprocessing ops should return a data processing functors. A data
is represented as a dictionary of tensors, where field "image" is reserved
for 3D images (height x width x channels). The functors output dictionary with
field "image" being modified. Potentially, other fields can also be modified
or added.
"""
import abc
import collections
import collections.abc
from robustness_metrics.common import registry
import tensorflow as tf2
import tensorflow.compat.v1 as tf
class PreprocessingOp(metaclass=abc.ABCMeta):
  """The abstract class representing a preprocessing operation."""
# Module-level registry: concrete ops register themselves via
# @registry.register("name") and are looked up with get("name").
# Note this rebinds the name `registry` from the imported module to the
# Registry instance itself.
registry = registry.Registry(PreprocessingOp)
get = registry.get
def tf_apply_to_image_or_images(fn, image_or_images, **map_kw):
  """Applies a function to a single image or each image in a batch of them.
  Args:
    fn: the function to apply, receives an image, returns an image.
    image_or_images: Either a single image, or a batch of images.
    **map_kw: Arguments passed through to tf.map_fn if called.
  Returns:
    The result of applying the function to the image or batch of images.
  Raises:
    ValueError: if the input has rank below 3 (rank 3, 4 and higher are
      handled; for rank > 4 the leading dimensions are flattened into one
      batch dimension and restored afterwards).
  """
  static_rank = len(image_or_images.get_shape().as_list())
  if static_rank == 3:  # A single image: HWC
    return fn(image_or_images)
  elif static_rank == 4:  # A batch of images: BHWC
    return tf.map_fn(fn, image_or_images, **map_kw)
  elif static_rank > 4:  # A batch of images: ...HWC
    # Flatten all leading dims into one batch dim, map, then restore shape.
    input_shape = tf.shape(image_or_images)
    h, w, c = image_or_images.get_shape().as_list()[-3:]
    image_or_images = tf.reshape(image_or_images, [-1, h, w, c])
    image_or_images = tf.map_fn(fn, image_or_images, **map_kw)
    return tf.reshape(image_or_images, input_shape)
  else:
    raise ValueError("Unsupported image rank: %d" % static_rank)
class BatchedPreprocessing(object):
  """Decorator for preprocessing ops, which adds support for image batches.

  Wraps an op factory (a function returning a data->data preprocessing
  function) so the produced function transparently handles both single
  images and batches via tf_apply_to_image_or_images.
  """
  def __init__(self, output_dtype=None, data_key="image"):
    self.output_dtype = output_dtype  # dtype hint passed to tf.map_fn
    self.data_key = data_key  # which entry of the data dict holds the image
  def __call__(self, get_pp_fn):
    def get_batch_pp_fn(*args, **kwargs):
      """Preprocessing function that supports batched images."""
      def pp_fn(image):
        # Run the wrapped (single-image) op on a minimal data dict.
        return get_pp_fn(*args, **kwargs)({self.data_key: image})[self.data_key]
      def _batch_pp_fn(data):
        image = data[self.data_key]
        data[self.data_key] = tf_apply_to_image_or_images(
            pp_fn, image, dtype=self.output_dtype)
        return data
      return _batch_pp_fn
    return get_batch_pp_fn
def maybe_repeat(arg, n_reps):
  """Returns `arg` repeated `n_reps` times as a tuple, unless it is a sequence.

  Used so op parameters like crop sizes may be given either as a scalar
  (applied to every dimension) or as an explicit per-dimension sequence,
  which is returned unchanged.

  Args:
    arg: a scalar, or an existing sequence (list/tuple) of values.
    n_reps: number of repetitions when `arg` is a scalar.

  Returns:
    `arg` unchanged if it is already a sequence, else the tuple (arg,) * n_reps.
  """
  # collections.Sequence was a deprecated alias removed in Python 3.10;
  # collections.abc.Sequence is the supported spelling.
  if not isinstance(arg, collections.abc.Sequence):
    arg = (arg,) * n_reps
  return arg
@registry.register("color_distort")
class ColorDistort(PreprocessingOp):
"""Applies random brigthness/saturation/hue/contrast transformations."""
@staticmethod
@BatchedPreprocessing()
def apply():
"""Applies random brigthness/saturation/hue/contrast transformations."""
def _color_distortion(data):
image = data["image"]
image = tf.image.random_brightness(image, max_delta=128. / 255.)
image = tf.image.random_saturation(image, lower=0.1, upper=2.0)
image = tf.image.random_hue(image, max_delta=0.5)
image = tf.image.random_contrast(image, lower=0.1, upper=2.0)
data["image"] = image
return data
return _color_distortion
@registry.register("decode_unicode")
class DecodeUnicode(PreprocessingOp):
"""Converts unicode to int array."""
@staticmethod
def apply(key, fixed_length=256):
"""Converts unicode to int array.
This function is useful to pass unicode through TPUs, which supports
currently only int/float types.
Args:
key: key of the unicode field in the input data dict.
fixed_length: TPU requires fixed shape arrays. The int array will be
padded to fixed_length, with all zeros.
Returns:
A function that decodes the unicode value to a fixed length list.
"""
def _dynamic_padding(inp, min_size):
"""Padding an input vector to min_size."""
pad_size = min_size - tf.shape(inp)[0]
paddings = [[0, pad_size]]
return tf.pad(inp, paddings)
def _decode_unicode(data):
"""Decode unicode to int array."""
if key in data:
decode = tf.strings.unicode_decode(data[key], "UTF-8")
decode = _dynamic_padding(decode, fixed_length)
decode.set_shape(fixed_length)
data[key] = decode
else:
tf.logging.error(
"Key {} not found from {}.".format(key, data), exc_info=True)
return data
return _decode_unicode
@registry.register("random_brightness")
class RandomBrightness(PreprocessingOp):
"""Adds a random small value to all pixel intensities."""
@staticmethod
@BatchedPreprocessing()
def apply(max_delta=0.1):
"""Applies random brigthness transformations."""
# A random value in [-max_delta, +max_delta] is added to the image values.
# Small max_delta <1.0 assumes that the image values are within [0, 1].
def _random_brightness(data):
image = data["image"]
image = tf.image.random_brightness(image, max_delta)
data["image"] = image
return data
return _random_brightness
@registry.register("random_saturation")
class RandomSaturation(PreprocessingOp):
"""Applies random saturation transformations."""
@staticmethod
@BatchedPreprocessing()
def apply(lower=0.5, upper=2.0):
"""Applies random saturation transformations."""
def _random_saturation(data):
# Multiplies saturation channel in HSV (with converting from/to RGB) with
# a random float value in [lower, upper].
image = data["image"]
image = tf.image.random_saturation(image, lower=lower, upper=upper)
data["image"] = image
return data
return _random_saturation
@registry.register("random_hue")
class RandomHue(PreprocessingOp):
"""Adds a random offset to hue channel in HSV."""
@staticmethod
@BatchedPreprocessing()
def get_random_hue(max_delta=0.1):
"""Applies random hue transformations."""
def _random_hue(data):
# Adds to hue channel in HSV (with converting from/to RGB) a random offset
# in [-max_delta, +max_delta].
image = data["image"]
image = tf.image.random_hue(image, max_delta=max_delta)
data["image"] = image
return data
return _random_hue
@registry.register("random_contrast")
class RandomContrast(PreprocessingOp):
"""Applies a random contrast change."""
@staticmethod
@BatchedPreprocessing()
def apply(lower=0.5, upper=2.0):
"""Applies random contrast transformations."""
def _random_contrast(data):
# Stretches/shrinks value stddev (per channel) by multiplying with a
# random value in [lower, upper].
image = data["image"]
image = tf.image.random_contrast(image, lower=lower, upper=upper)
data["image"] = image
return data
return _random_contrast
@registry.register("drop_channels")
class DropChannels(PreprocessingOp):
"""Drops 2 out of 3 channels ."""
@staticmethod
@BatchedPreprocessing()
def apply(keep_original=0.25, noise_min=-1.0, noise_max=1.0):
"""Drops 2/3 channels and fills the remaining channels with noise."""
def _drop_channels(data):
image = data["image"]
def _drop(keep_i):
shape = image.get_shape().as_list()
size, num_channels = shape[:-1], shape[-1]
return tf.concat([
image[:, :, i:i + 1] if i == keep_i else tf.random_uniform(
size + [1], noise_min, noise_max) for i in range(num_channels)
],
axis=2)
def _drop_random_channel(coin_channel):
return tf.case({
tf.equal(coin_channel, 0): lambda: _drop(0),
tf.equal(coin_channel, 1): lambda: _drop(1),
tf.equal(coin_channel, 2): lambda: _drop(2),
})
coin_keep_original = tf.random.uniform([], 0.0, 1.0, dtype=tf.float32)
coin_channel = tf.random.uniform([], 0, 3, dtype=tf.int32)
image = tf.case({
tf.less(coin_keep_original, keep_original):
lambda: image,
tf.greater_equal(coin_keep_original, keep_original):
lambda: _drop_random_channel(coin_channel)
})
data["image"] = image
return data
return _drop_channels
@registry.register("decode")
class DecodeImage(PreprocessingOp):
"""Decode an encoded image string, see tf.io.decode_image."""
@staticmethod
def apply(key="image", channels=3):
"""Decode an encoded image string, see tf.io.decode_image."""
def _decode(data):
# tf.io.decode_image does not set the shape correctly, so we use
# tf.io.deocde_jpeg, which also works for png, see
# https://github.com/tensorflow/tensorflow/issues/8551
data[key] = tf.io.decode_jpeg(data[key], channels=channels)
return data
return _decode
@registry.register("pad")
class Pad(PreprocessingOp):
"""Pads an image."""
@staticmethod
@BatchedPreprocessing()
def apply(pad_size):
"""Pads an image.
Args:
pad_size: either an integer u giving verticle and horizontal pad sizes u,
or a list or tuple [u, v] of integers where u and v are vertical and
horizontal pad sizes.
Returns:
A function for padding an image.
"""
pad_size = maybe_repeat(pad_size, 2)
def _pad(data):
image = data["image"]
image = tf.pad(
image,
[[pad_size[0], pad_size[0]], [pad_size[1], pad_size[1]], [0, 0]])
data["image"] = image
return data
return _pad
@registry.register("resize")
class Resize(PreprocessingOp):
"""Resizes image to a given size."""
@staticmethod
@BatchedPreprocessing()
def apply(resize_size):
"""Resizes image to a given size.
Args:
resize_size: either an integer H, where H is both the new height and width
of the resized image, or a list or tuple [H, W] of integers, where H and
W are new image"s height and width respectively.
Returns:
A function for resizing an image.
"""
resize_size = maybe_repeat(resize_size, 2)
def _resize(data):
"""Resizes image to a given size."""
image = data["image"]
# Note: use TF-2 version of tf.image.resize as the version in TF-1 is
# buggy: https://github.com/tensorflow/tensorflow/issues/6720.
dtype = image.dtype
image = tf2.image.resize(image, resize_size)
image = tf.cast(image, dtype)
data["image"] = image
return data
return _resize
@registry.register("resize_small")
class ResizeSmall(PreprocessingOp):
"""Resizes the smaller side to a desired value keeping the aspect ratio."""
@staticmethod
@BatchedPreprocessing()
def apply(smaller_size):
"""Resizes the smaller side to `smaller_size` keeping aspect ratio.
Args:
smaller_size: an integer, that represents a new size of the smaller side
of an input image.
Returns:
A function, that resizes an image and preserves its aspect ratio.
"""
def _resize_small(data):
image = data["image"]
h, w = tf.shape(image)[0], tf.shape(image)[1]
ratio = (
tf.cast(smaller_size, tf.float32) /
tf.cast(tf.minimum(h, w), tf.float32))
h = tf.cast(tf.round(tf.cast(h, tf.float32) * ratio), tf.int32)
w = tf.cast(tf.round(tf.cast(w, tf.float32) * ratio), tf.int32)
data["image"] = tf.image.resize_area(image[None], [h, w])[0]
return data
return _resize_small
@registry.register("inception_crop")
class InceptionCrop(PreprocessingOp):
"""Applies an Inception-style image crop."""
@staticmethod
@BatchedPreprocessing()
def apply(resize_size=None, area_min=5, area_max=100):
"""Applies an Inception-style image crop.
Inception-style crop is a random image crop (its size and aspect ratio are
random) that was used for training Inception models, see
https://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf.
Args:
resize_size: Resize image to [resize_size, resize_size] after crop.
area_min: minimal crop area.
area_max: maximal crop area.
Returns:
A function, that applies inception crop.
"""
def _inception_crop(data): # pylint: disable=missing-docstring
image = data["image"]
begin, size, _ = tf.image.sample_distorted_bounding_box(
tf.shape(image),
tf.zeros([0, 0, 4], tf.float32),
area_range=(area_min / 100, area_max / 100),
min_object_covered=0, # Don't enforce a minimum area.
use_image_if_no_bounding_boxes=True)
data["image"] = tf.slice(image, begin, size)
# Unfortunately, the above operation loses the depth-dimension. So we need
# to restore it the manual way.
data["image"].set_shape([None, None, image.shape[-1]])
if resize_size:
data["image"] = Resize.apply([resize_size, resize_size])(data)["image"]
return data
return _inception_crop
@registry.register("decode_jpeg_and_inception_crop")
class DecodeAndInceptionCrop(PreprocessingOp):
"""Decode jpeg string and make inception-style image crop."""
@staticmethod
def apply(resize_size=None, area_min=5, area_max=100):
"""Decode jpeg string and make inception-style image crop.
Inception-style crop is a random image crop (its size and aspect ratio are
random) that was used for training Inception models, see
https://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf.
Args:
resize_size: Resize image to [resize_size, resize_size] after crop.
area_min: minimal crop area.
area_max: maximal crop area.
Returns:
A function, that applies inception crop.
"""
def _inception_crop(data): # pylint: disable=missing-docstring
image = data["image"]
shape = tf.image.extract_jpeg_shape(image)
begin, size, _ = tf.image.sample_distorted_bounding_box(
shape,
tf.zeros([0, 0, 4], tf.float32),
area_range=(area_min / 100, area_max / 100),
min_object_covered=0, # Don't enforce a minimum area.
use_image_if_no_bounding_boxes=True)
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(begin)
target_height, target_width, _ = tf.unstack(size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
image = tf.image.decode_and_crop_jpeg(image, crop_window, channels=3)
data["image"] = image
if resize_size:
data["image"] = Resize.apply([resize_size, resize_size])(data)["image"]
return data
return _inception_crop
@registry.register("random_crop")
class RandomCrop(PreprocessingOp):
"""Makes a random crop of a given size."""
@staticmethod
@BatchedPreprocessing()
def apply(crop_size):
"""Makes a random crop of a given size.
Args:
crop_size: either an integer H, where H is both the height and width of
the random crop, or a list or tuple [H, W] of integers, where H and W
are height and width of the random crop respectively.
Returns:
A function, that applies random crop.
"""
crop_size = maybe_repeat(crop_size, 2)
def _crop(data):
image = data["image"]
h, w, c = crop_size[0], crop_size[1], image.shape[-1]
image = tf.random_crop(image, [h, w, c])
data["image"] = image
return data
return _crop
@registry.register("central_crop")
class CentralCrop(PreprocessingOp):
"""Flips an image horizontally with probability 50%."""
@staticmethod
@BatchedPreprocessing()
def apply(crop_size):
"""Makes central crop of a given size.
Args:
crop_size: either an integer H, where H is both the height and width of
the central crop, or a list or tuple [H, W] of integers, where H and W
are height and width of the central crop respectively.
Returns:
A function, that applies central crop.
"""
crop_size = maybe_repeat(crop_size, 2)
def _crop(data):
image = data["image"]
h, w = crop_size[0], crop_size[1]
dy = (tf.shape(image)[0] - h) // 2
dx = (tf.shape(image)[1] - w) // 2
image = tf.image.crop_to_bounding_box(image, dy, dx, h, w)
data["image"] = image
return data
return _crop
@registry.register("flip_lr")
class FlipLeftRight(PreprocessingOp):
"""Flips an image horizontally with probability 50%."""
@staticmethod
@BatchedPreprocessing()
def apply():
"""Flips an image horizontally with probability 50%."""
def _random_flip_lr_pp(data):
image = data["image"]
image = tf.image.random_flip_left_right(image)
data["image"] = image
return data
return _random_flip_lr_pp
@registry.register("flip_ud")
class FlipUpDown(PreprocessingOp):
"""Flips an image vertically with probability 50%."""
@staticmethod
@BatchedPreprocessing()
def apply():
"""Flips an image vertically with probability 50%."""
def _random_flip_ud_pp(data):
image = data["image"]
image = tf.image.random_flip_up_down(image)
data["image"] = image
return data
return _random_flip_ud_pp
@registry.register("random_rotate90")
class RandomRotate90(PreprocessingOp):
"""Randomly rotate an image by multiples of 90 degrees."""
@staticmethod
@BatchedPreprocessing()
def apply():
"""Randomly rotate an image by multiples of 90 degrees."""
def _random_rotation90(data):
"""Rotation function."""
image = data["image"]
num_rotations = tf.random.uniform(shape=(), maxval=4, dtype=tf.int32)
image = tf.image.rot90(image, k=num_rotations)
data["image"] = image
return data
return _random_rotation90
@registry.register("value_range")
class ValueRange(PreprocessingOp):
"""Transforms a [in_min,in_max] image to [vmin,vmax] range."""
@staticmethod
@BatchedPreprocessing(output_dtype=tf.float32)
def apply(vmin=-1, vmax=1, in_min=0, in_max=255.0, clip_values=False):
"""Transforms a [in_min,in_max] image to [vmin,vmax] range.
Input ranges in_min/in_max can be equal-size lists to rescale the invidudal
channels independently.
Args:
vmin: A scalar. Output max value.
vmax: A scalar. Output min value.
in_min: A scalar or a list of input min values to scale. If a list, the
length should match to the number of channels in the image.
in_max: A scalar or a list of input max values to scale. If a list, the
length should match to the number of channels in the image.
clip_values: Whether to clip the output values to the provided ranges.
Returns:
A function to rescale the values.
"""
def _value_range(data):
"""Scales values in given range."""
in_min_t = tf.constant(in_min, tf.float32)
in_max_t = tf.constant(in_max, tf.float32)
image = tf.cast(data["image"], tf.float32)
image = (image - in_min_t) / (in_max_t - in_min_t)
image = vmin + image * (vmax - vmin)
if clip_values:
image = tf.clip_by_value(image, vmin, vmax)
data["image"] = image
return data
return _value_range
@registry.register("value_range_mc")
class ValueRangeMultichannel(PreprocessingOp):
"""Independent multi-channel rescaling."""
@staticmethod
def apply(vmin, vmax, *args):
"""Independent multi-channel rescaling."""
if len(args) % 2:
raise ValueError("Additional args must be list of even length giving "
"`in_max` and `in_min` concatenated")
num_channels = len(args) // 2
in_min = args[:num_channels]
in_max = args[-num_channels:]
return ValueRange.apply(vmin, vmax, in_min, in_max)
@registry.register("replicate")
class Replicate(PreprocessingOp):
"""Replicates an image along a new batch dimension."""
@staticmethod
def apply(num_replicas=2):
"""Replicates an image `num_replicas` times along a new batch dimension."""
def _replicate(data):
tiles = [num_replicas] + [1] * len(data["image"].shape)
data["image"] = tf.tile(data["image"][None], tiles)
return data
return _replicate
@registry.register("standardize")
class Standardize(PreprocessingOp):
"""Standardize an image."""
@staticmethod
@BatchedPreprocessing(output_dtype=tf.float32)
def apply(mean, std):
"""Standardize an image with the given mean and standard deviation."""
def _standardize(data):
image = tf.cast(data["image"], dtype=tf.float32)
data["image"] = (image - mean) / std
return data
return _standardize
@registry.register("select_channels")
class SelectChannels(PreprocessingOp):
"""Returns function to select specified channels."""
@staticmethod
@BatchedPreprocessing()
def apply(channels):
"""Returns function to select specified channels."""
def _select_channels(data):
"""Returns a subset of available channels."""
data["image"] = tf.gather(data["image"], channels, axis=-1)
return data
return _select_channels
@registry.register("onehot")
class OneHotEncoding(PreprocessingOp):
"""One-hot encoding of the input."""
@staticmethod
def apply(depth, key="labels", key_result=None, multi=True):
"""One-hot encodes the input.
Args:
depth: Length of the one-hot vector (how many classes).
key: Key of the data to be one-hot encoded.
key_result: Key under which to store the result (same as `key` if None).
multi: If there are multiple labels, whether to merge them into the same
"multi-hot" vector (True) or keep them as an extra dimension (False).
Returns:
Data dictionary.
"""
def _onehot(data):
onehot = tf.one_hot(data[key], depth)
if multi and len(onehot.shape) == 2:
onehot = tf.reduce_max(onehot, axis=0)
data[key_result or key] = onehot
return data
return _onehot
def fingerprint_int64(batch):
  """Returns a tf.int64 hash for each element of the input."""
  hash_bytes = tf.squeeze(tf.fingerprint([batch]))
  # Fingerprint op writes fingerprint values as byte arrays. For example, the
  # default method farmhash64 generates a 64-bit fingerprint value at a time.
  # This 8-byte value is written out as a tf.uint8 array of size 8,
  # in little-endian order. These are then combined in base 256 to get one
  # int64 (sum of byte_i * 256**i).
  hash_base = tf.constant([[256**i for i in range(8)]], dtype=tf.int64)
  hash_bytes = tf.cast(hash_bytes, dtype=tf.int64)
  element_hashes_int64 = tf.reduce_sum(hash_bytes * hash_base, axis=1)
  return element_hashes_int64
def combine_fingerprints(hashes1, hashes2):
  """Combines two tensors of fingerprints.
  The two tensors have to be compatible (broadcastable) for addition.
  The fingerprints are combined so that the output distribution is roughly
  uniform (assuming that the original hashes are also uniformly distributed).
  Args:
    hashes1: 1-D tensor, tf.int64.
    hashes2: 1-D tensor, tf.int64.
  Returns:
    A 1-D tensor with the hash values combined.
  """
  # Based on Boost combine_hash function, extended to 64 bits. Original code
  # (in 32 bits): hash1 ^ (hash2 + 0x9e3779b9 + (hash1 << 6) + (hash1 >> 2)).
  # The constant is the 64-bit golden-ratio value, written as a signed int64.
  magic_number = -7046029254386353131  # i.e. 0x9E3779B97F4A7C15
  return tf.bitwise.bitwise_xor(
      hashes1, hashes2 + magic_number + tf.bitwise.left_shift(hashes1, 6) +
      tf.bitwise.right_shift(hashes1, 2))
def to_hash_bucket_deterministic(batch, num_buckets, seed=None):
  """Buckets input examples, roughly uniformly and deterministically.
  Args:
    batch: a tensor of rank >= 1, containing the input examples
      (batch axis = 0).
    num_buckets: an integer, number of buckets.
    seed: (optional) this seed will be used in the hash computation so that one
      can obtain pseudo-random but deterministic bucket assignments.
  Returns:
    A tensor of rank 1, containing the bucket assigned to each input example.
  """
  # Note: In order to get deterministic bucketing, the hash function has to be
  # deterministic. That's why we use fingerprint_int64.
  hashes = fingerprint_int64(batch)
  if seed is not None:
    # Mix the seed's own fingerprint into every hash.
    hashes = combine_fingerprints(hashes, fingerprint_int64([seed]))
  return tf.math.mod(hashes, num_buckets)
def compose(*functions):
  """Composes an arbitrary number of functions, applied left-to-right.

  Falsy entries (e.g. None) are treated as the identity function and skipped.

  Args:
    *functions: Arbitrary number of callables (None entries allowed).

  Returns:
    A callable applying the given functions in order to its arguments.
  """
  def _composed_fn(*args):
    result = args
    for fn in functions:
      if not fn:
        # Treat None / falsy entries as identity.
        continue
      # A tuple/list result is spread across the next function's parameters;
      # any other value is passed as a single argument.
      if isinstance(result, (list, tuple)):
        result = fn(*result)
      else:
        result = fn(result)
    return result
  return _composed_fn
| 32.199495 | 80 | 0.679437 |
4393c5abed9947e11b7dd3306f6139678210c99a | 1,207 | py | Python | Django/View/custom_module/module_naver_API.py | navill/TIL | 7656c4fa5cbe271985088d16c91767b6243b4843 | [
"MIT"
] | null | null | null | Django/View/custom_module/module_naver_API.py | navill/TIL | 7656c4fa5cbe271985088d16c91767b6243b4843 | [
"MIT"
] | null | null | null | Django/View/custom_module/module_naver_API.py | navill/TIL | 7656c4fa5cbe271985088d16c91767b6243b4843 | [
"MIT"
] | null | null | null | import requests
"""
geo_coding(<str>address) -><json>coordinates: longitude, latitude
: 정확한 주소값을 입력할 경우 해당하는 좌표를 반환하는 함수 - naver api
road_address(<str>address) -> <json>address: 지번주소, 도로명주소
: 일부 주소를 이용해 상세한 지번 주소와 도로명 주소를 출력 - road address api
"""
def geo_coding(address):
    """Returns (longitude, latitude) for an exact address via the Naver geocoding API.

    Args:
        address: full street address string (Korean addresses expected).

    Returns:
        Tuple (x, y) = (longitude, latitude) strings from the first API match.

    Raises:
        requests.HTTPError: if the API responds with an error status.
        IndexError: if the address produces no geocoding match.
    """
    # naver geocoding API - setting
    naver_url = "https://naveropenapi.apigw.ntruss.com/map-geocode/v2/geocode"
    custom_headers = {
        "X-NCP-APIGW-API-KEY-ID": 'YOUR-KEY-ID',
        "X-NCP-APIGW-API-KEY": "YOUR-KEY"
    }
    # naver geocoding API - processing.  Pass the address through `params` so
    # requests percent-encodes it (the original concatenated it into the URL
    # unescaped, which breaks on spaces and non-ASCII characters).
    naver_req = requests.get(naver_url, headers=custom_headers,
                             params={"query": address})
    naver_req.raise_for_status()  # surface HTTP errors instead of a KeyError
    first = naver_req.json()["addresses"][0]  # parse the body once
    return (first["x"], first["y"])
def road_address(address):
    """Returns (jibun address, road-name address) for a partial address.

    Uses the Korean juso.go.kr road-address lookup API.

    Args:
        address: partial address / keyword to search for.

    Returns:
        Tuple (jibunAddr, roadAddr) strings from the first API match.

    Raises:
        requests.HTTPError: if the API responds with an error status.
        IndexError: if no address matches the keyword.
    """
    # road address API - setting
    confmkey = "YOUR-CONFIRM-KEY"
    road_url = "http://www.juso.go.kr/addrlink/addrLinkApi.do"
    # road address API - processing.  Build the query with `params` so the
    # keyword is percent-encoded (the original used raw string concatenation).
    road_req = requests.get(road_url, params={
        "keyword": address,
        "confmKey": confmkey,
        "resultType": "json",
    })
    road_req.raise_for_status()
    juso = road_req.json()["results"]["juso"][0]  # parse once, index once
    return (juso['jibunAddr'], juso['roadAddr'])
8088ab2c3dff664cdc6e0790d88b2a713eca531c | 218 | py | Python | ddtrace/contrib/sqlite3/connection.py | zhammer/dd-trace-py | 4c30f6e36bfa34a63cd9b6884677c977f76d2a01 | [
"Apache-2.0",
"BSD-3-Clause"
] | 5 | 2020-03-07T01:12:29.000Z | 2021-04-21T00:53:19.000Z | ddtrace/contrib/sqlite3/connection.py | zhammer/dd-trace-py | 4c30f6e36bfa34a63cd9b6884677c977f76d2a01 | [
"Apache-2.0",
"BSD-3-Clause"
] | 4 | 2019-11-22T20:58:01.000Z | 2020-08-17T21:16:13.000Z | ddtrace/contrib/sqlite3/connection.py | zhammer/dd-trace-py | 4c30f6e36bfa34a63cd9b6884677c977f76d2a01 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2020-03-18T16:29:20.000Z | 2020-07-20T16:05:10.000Z | from sqlite3 import Connection
from ...utils.deprecation import deprecated
@deprecated(message='Use patching instead (see the docs).', version='1.0.0')
def connection_factory(*args, **kwargs):
    """Deprecated factory; returns the plain sqlite3 Connection class.

    All arguments are ignored; the signature is kept only for backward
    compatibility with callers of the old API.
    """
    return Connection
| 24.222222 | 76 | 0.756881 |
86cfaa482bb793e38cb44487b6f482a8abeeb045 | 2,619 | py | Python | *_Lambda CS/Week 3 (Binary Trees)/531.py | andremichalowski/CSN1 | 97eaa66b324ef1850237dd6dcd6d8f71a1a2b64b | [
"MIT"
] | null | null | null | *_Lambda CS/Week 3 (Binary Trees)/531.py | andremichalowski/CSN1 | 97eaa66b324ef1850237dd6dcd6d8f71a1a2b64b | [
"MIT"
] | null | null | null | *_Lambda CS/Week 3 (Binary Trees)/531.py | andremichalowski/CSN1 | 97eaa66b324ef1850237dd6dcd6d8f71a1a2b64b | [
"MIT"
] | null | null | null |
1. PROPERTIES OF A BINARY TREE AND OF "PERFECT TREE":
A. WHAT A BINARY TREE MIGHT LOOK LIKE:
class BinaryTreeNode:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
B. NODES NUMBER?:
    - In a perfect binary tree, the number of nodes in the last level is equal to the number of all previous nodes + 1 (this holds for perfect trees only, not for arbitrary binary trees)
C. FORMULAS:
1. HEIGHT:
      log_2(n + 1) = h  # Where n is the total number of nodes in a perfect tree and h is its height
2. NODES:
n = 2^h - 1
2. TIME AND SPACE COMPLEXITY, STRENGTHS AND WEAKNESSES, COMMON USES:
A. TIME AND SPACE COMPLEXITY:
TC: Time Complexity:
+ Most time complexity depends on the balance of tree: # This is the same for insert and delete
- Balanced: O(log_n)
- Unbalanced: O(n)
SC: Space Complexity:
+ O(n) Linear #Each node in BST will take up space in memory
B. STRENGTHS AND WEAKNESSES
1. STRENGTHS:
1. Sorted by default # you can pull data in-order traversal
2. Efficient Searches # O(log n)
# Same efficiency as sorted Array
# faster with insertion and deletion though
# Slower than dictionaries in best case
# faster than dictionaries in worst case
2. WEAKNESSES:
1. Unbalanced trees are more inefficient
2. Not especially efficient in anything Specific #(Even though good at general purpose efficiency)
3. CONSTRUCT A BINARY SEARCH TREE THAT CAN PERFORM BASIC OPERATIONS WITH A LOGARITHMIC TIME COMPLEXITY:
1. Node class:
class BSTNode:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
def insert(self, value):
if value < self.value:
if self.left is None:
self.left = BSTNode(value)
else:
self.left.insert(value)
else:
if self.right is None:
self.right = BSTNode(value)
else:
self.right.insert(value)
def search(self, target):
if self.value == target:
return self
elif target < self.value:
if self.left is None:
return False
else:
return self.left.search(target)
else:
if self.right is None:
return False
else:
return self.right.search(target)
2. BST CLASS:
class BST:
def __init__(self, value):
self.root = BSTNode(value)
def insert(self, value):
self.root.insert(value)
def search(self, value):
self.root.search(value)
| 29.761364 | 104 | 0.588393 |
36cce59569885e03092fca79245e92aa06cf9d38 | 5,196 | py | Python | sanity.py | ifwe/digsby | f5fe00244744aa131e07f09348d10563f3d8fa99 | [
"Python-2.0"
] | 35 | 2015-08-15T14:32:38.000Z | 2021-12-09T16:21:26.000Z | sanity.py | niterain/digsby | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | [
"Python-2.0"
] | 4 | 2015-09-12T10:42:57.000Z | 2017-02-27T04:05:51.000Z | sanity.py | niterain/digsby | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | [
"Python-2.0"
] | 15 | 2015-07-10T23:58:07.000Z | 2022-01-23T22:16:33.000Z | from __future__ import print_function
import sys
class SanityException(Exception):
    """Raised when a sanity check for a component fails.

    Attributes:
        component_name: name of the component whose check failed.
    """
    def __init__(self, name, message):
        self.component_name = name
        # The original called super(Exception, self).__init__, which skips
        # Exception itself in the MRO; chain to the direct parent instead.
        super(SanityException, self).__init__(message)
def insane(name, message):
    """Signals failure of the sanity check for `name` by raising SanityException."""
    raise SanityException(name, message)
def module_check(name):
    """Imports and returns the module at dotted path `name`.

    Uses importlib.import_module, which resolves dotted submodule paths
    directly — equivalent to the original __import__ + getattr walk, since
    __import__('a.b') already fails when 'b' is not a real submodule.

    Args:
        name: dotted module path, e.g. 'lxml.etree'.

    Returns:
        The imported (sub)module.

    Raises:
        SanityException: via insane() when the import fails.
    """
    import importlib
    try:
        return importlib.import_module(name)
    except ImportError:
        insane(name, 'not found')
def sanity(name):
    """Runs the check `sanity_<name>`, printing a dotted header and OK/FAIL.

    Any failure is re-raised after printing FAIL.  Unknown names raise
    SanityException via insane().
    """
    # 'all' silences the per-check header (aggregate mode — the actual
    # sanity_all runner is presumably defined elsewhere; not visible here).
    if name == 'all':
        _print = lambda s, *a, **k: None
    else:
        _print = print
    _print("{name:.<20}".format(name = name), end = '')
    try:
        # Look up the check function by naming convention in module globals.
        globals().get('sanity_%s' % name, lambda: insane(name, "sanity check not found"))()
    except:
        _print("FAIL")
        raise
    else:
        _print("OK")
def sanity_path():
    """Checks that path.py is importable and carries the Digsby patch."""
    path = module_check('path')
    # The Digsby-patched path.py adds openfolder(); absence means a stock copy.
    if not hasattr(path.path, 'openfolder'):
        insane('path.py', 'not patched for Digsby')
def sanity_ZSI():
    """Check the bundled ZSI: Digsby's namespace patch plus no pydoc import under -O."""
    ZSI = module_check('ZSI')
    Namespaces = module_check('ZSI.wstools.Namespaces')
    # The patched ZSI adds the SOAP 1.2 encoding namespace; stock ZSI lacks it.
    if getattr(getattr(Namespaces, 'SOAP12'), 'ENC12', None) != 'http://www.w3.org/2003/05/soap-encoding':
        insane('ZSI', 'namespace modifications for Digsby not found')

    # Probe script: importing ZSI.generate.pyclass must not pull in pydoc.
    test_script = 'import ZSI.generate.pyclass as pyclass;\nif hasattr(pyclass, "pydoc"): raise Exception'

    try:
        if __debug__:
            # We are not running optimized ourselves, so run the probe in a
            # child interpreter started with -O.
            import subprocess

            if subprocess.call([sys.executable, '-O', '-c', test_script]) != 0:
                raise Exception
        else:
            # Already running optimized: evaluate the probe in-process.
            exec(test_script)
    except:
        insane('ZSI', 'pydoc is imported in non-debug mode')
def sanity_M2Crypto():
    """Verify M2Crypto's RC4 implementation round-trips a known payload."""
    M2Crypto = module_check('M2Crypto')
    RC4 = module_check('M2Crypto.RC4')
    try:
        # RC4 is symmetric: encrypting twice with the same key must yield the input.
        ciphertext = RC4.RC4('key').update('testdata')
        if RC4.RC4('key').update(ciphertext) != 'testdata':
            raise Exception
    except:
        insane('M2Crypto', 'crypto test failed')
def sanity_syck():
    """Verify syck can parse a trivial YAML document."""
    syck = module_check('syck')
    try:
        parsed = syck.load('---\ntest: works\n')
        if parsed.get('test') != 'works':
            raise Exception
    except:
        insane('syck', 'failed to parse sample document')
def sanity_libxml2():
    """Verify libxml2 parses a small document into the expected tree."""
    libxml2 = module_check('libxml2')
    doc = None
    try:
        doc = libxml2.parseDoc('<root><child/></root>')
        root = doc.children
        if (root.name, root.children.name) != ('root', 'child'):
            raise Exception
    except:
        insane('libxml2', 'failed to process sample document')
    finally:
        # libxml2 documents are manually managed; always release the tree.
        if doc is not None:
            doc.freeDoc()
def sanity_PIL():
    """Verify PIL can create an image and encode each format Digsby needs.

    The original repeated the same try/save block per format; collapsed into a
    loop, and fixed the typo 'suport' in the ppm error message.
    """
    from StringIO import StringIO

    Image = module_check('PIL.Image')
    image = None
    try:
        image = Image.new('RGB', (1, 1))
    except:
        insane('PIL', 'failed to create test image')

    # Each encoder is probed the same way; 'ppm' also exercises freetype.
    # Like the original, the first failing format aborts via insane().
    for fmt, what in (
        ('jpeg', 'jpeg'),
        ('png', 'png'),
        ('ppm', 'ppm (freetype)'),
    ):
        try:
            image.save(StringIO(), fmt)
        except:
            insane('PIL', 'does not have %s support' % what)
def sanity_lxml():
    """Verify lxml's html/etree/objectify submodules and a parse round-trip."""
    html = module_check('lxml.html')
    etree = module_check('lxml.etree')
    objectify = module_check('lxml.objectify')
    try:
        tree = etree.fromstring('<root><child/></root>')
        etree.tostring(tree)
    except:
        insane('lxml', 'failed to process sample document')
def sanity_simplejson():
    """Verify simplejson is present and patched to allow disabling C speedups."""
    json = module_check('simplejson')
    speedups = module_check('simplejson._speedups')
    try:
        json.dumps({}, use_speedups=False)
    except TypeError:
        # Stock simplejson rejects the Digsby-added keyword argument.
        insane('simplejson', 'does not allow disabling speedups')
def sanity_protocols():
    """Verify PyProtocols and that Adapter keeps the expected constructor."""
    import inspect

    protocols = module_check('protocols')
    speedups = module_check('protocols._speedups')
    # NOTE(review): inspect.getargspec is legacy (removed in Python 3.11);
    # fine for the Python 2 runtime this file targets.
    if inspect.getargspec(protocols.Adapter.__init__).args != ['self', 'ob']:
        insane('protocols', 'constructor for Adapter is incorrect')
# TODO: More verification for these modules
# These only need an import check. The module name is bound as a default
# argument so each generated lambda captures its own name (late-binding trap).
for simple_module in {'blist', 'cgui', 'babel', 'socks', 'tenjin', 'certifi',
                      'dns', 'rauth', 'ClientForm', 'peak', '_xmlextra',
                      'sip', 'wx', 'wx.py', 'wx.calendar', 'wx.webview',
                      'wx.lib', 'wx.stc', 'feedparser', 'pkg_resources'}:
    globals()['sanity_' + simple_module] = lambda _name=simple_module: module_check(_name)
def main(*args):
    """Run the requested sanity checks.

    Arguments starting with '-' exclude a check; 'all' (or no names at all)
    selects every registered sanity_* check. Failures are printed to stderr.
    """
    excluded = set(arg[1:] for arg in args if arg.startswith('-'))
    requested = set(arg for arg in args if arg != 'all' and not arg.startswith('-'))

    if not requested or 'all' in args:
        # Discover every sanity_* callable defined at module level.
        for func_name, sanity_check in globals().items():
            if func_name.startswith('sanity_') and callable(sanity_check):
                requested.add(func_name[len('sanity_'):])

    for name in sorted(requested - excluded):
        try:
            sanity(name)
        except SanityException as e:
            print("SanityException: %s: %s" % (e.component_name, e), file = sys.stderr)
# Allow running directly: `python sanity.py [check-names | -excluded | all]`
if __name__ == '__main__':
    main(*sys.argv[1:])
| 28.23913 | 106 | 0.601039 |
4790c303e902039cba9791b1a0ebf6febea1ff4d | 161,092 | py | Python | airflow/www/views.py | andrew-nascimento/airflow | a88115ea24a06f8706886a30e4f765aa4346ccc3 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-09-04T02:38:21.000Z | 2021-09-04T02:38:21.000Z | airflow/www/views.py | andrew-nascimento/airflow | a88115ea24a06f8706886a30e4f765aa4346ccc3 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | airflow/www/views.py | andrew-nascimento/airflow | a88115ea24a06f8706886a30e4f765aa4346ccc3 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import collections
import copy
import itertools
import json
import logging
import math
import re
import socket
import sys
import traceback
from collections import defaultdict
from datetime import timedelta
from json import JSONDecodeError
from operator import itemgetter
from typing import Iterable, List, Optional, Tuple
from urllib.parse import parse_qsl, unquote, urlencode, urlparse
import lazy_object_proxy
import markupsafe
import nvd3
import sqlalchemy as sqla
from flask import (
Markup,
Response,
abort,
before_render_template,
current_app,
escape,
flash,
g,
jsonify,
make_response,
redirect,
render_template,
request,
send_from_directory,
session as flask_session,
url_for,
)
from flask_appbuilder import BaseView, ModelView, expose
from flask_appbuilder.actions import action
from flask_appbuilder.fieldwidgets import Select2Widget
from flask_appbuilder.models.sqla.filters import BaseFilter
from flask_appbuilder.security.decorators import has_access
from flask_appbuilder.security.views import (
PermissionModelView,
PermissionViewModelView,
ResetMyPasswordView,
ResetPasswordView,
RoleModelView,
UserDBModelView,
UserInfoEditView,
UserLDAPModelView,
UserOAuthModelView,
UserOIDModelView,
UserRemoteUserModelView,
UserStatsChartView,
ViewMenuModelView,
)
from flask_appbuilder.widgets import FormWidget
from flask_babel import lazy_gettext
from jinja2.utils import htmlsafe_json_dumps, pformat # type: ignore
from pendulum.datetime import DateTime
from pendulum.parsing.exceptions import ParserError
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
from sqlalchemy import Date, and_, desc, func, or_, union_all
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import joinedload
from wtforms import SelectField, validators
from wtforms.validators import InputRequired
import airflow
from airflow import models, plugins_manager, settings
from airflow.api.common.experimental.mark_tasks import (
set_dag_run_state_to_failed,
set_dag_run_state_to_success,
)
from airflow.configuration import AIRFLOW_CONFIG, conf
from airflow.exceptions import AirflowException, SerializedDagNotFound
from airflow.executors.executor_loader import ExecutorLoader
from airflow.jobs.base_job import BaseJob
from airflow.jobs.scheduler_job import SchedulerJob
from airflow.jobs.triggerer_job import TriggererJob
from airflow.models import DAG, Connection, DagModel, DagTag, Log, SlaMiss, TaskFail, XCom, errors
from airflow.models.baseoperator import BaseOperator
from airflow.models.dagcode import DagCode
from airflow.models.dagrun import DagRun, DagRunType
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import TaskInstance
from airflow.providers_manager import ProvidersManager
from airflow.security import permissions
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_deps import RUNNING_DEPS, SCHEDULER_QUEUED_DEPS
from airflow.utils import json as utils_json, timezone, yaml
from airflow.utils.dates import infer_time_unit, scale_time_units
from airflow.utils.docs import get_doc_url_for_provider, get_docs_url
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils.log import secrets_masker
from airflow.utils.log.log_reader import TaskLogReader
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State
from airflow.utils.strings import to_boolean
from airflow.version import version
from airflow.www import auth, utils as wwwutils
from airflow.www.decorators import action_logging, gzipped
from airflow.www.forms import (
ConnectionForm,
DagRunEditForm,
DateTimeForm,
DateTimeWithNumRunsForm,
DateTimeWithNumRunsWithDagRunsForm,
TaskInstanceEditForm,
)
from airflow.www.widgets import AirflowModelListWidget
# Number of rows shown per page in paginated list views.
PAGE_SIZE = conf.getint('webserver', 'page_size')
# Flask-session cookie keys persisting the home page's tag / status filters.
FILTER_TAGS_COOKIE = 'tags_filter'
FILTER_STATUS_COOKIE = 'dag_status_filter'
def truncate_task_duration(task_duration):
    """Shrink a duration for display: int above 10s (cheaper to render for
    huge DAGs), otherwise a float rounded to 3 decimal places."""
    if task_duration > 10.0:
        return int(task_duration)
    return round(task_duration, 3)
def get_safe_url(url):
    """Given a user-supplied URL, ensure it points to our web server.

    Any URL that is empty, contains a semicolon, or targets a foreign
    scheme/host is replaced by the home page URL (open-redirect/XSS guard).
    """
    valid_schemes = ['http', 'https', '']
    valid_netlocs = [request.host, '']

    if not url:
        return url_for('Airflow.index')

    parsed = urlparse(url)

    # If the url contains semicolon, redirect it to homepage to avoid
    # potential XSS. (Similar to https://github.com/python/cpython/pull/24297/files (bpo-42967))
    if ';' in unquote(url):
        return url_for('Airflow.index')

    # Re-encode the query string so the returned URL is normalized.
    query = parse_qsl(parsed.query, keep_blank_values=True)

    url = parsed._replace(query=urlencode(query)).geturl()

    if parsed.scheme in valid_schemes and parsed.netloc in valid_netlocs:
        return url

    return url_for('Airflow.index')
def get_date_time_num_runs_dag_runs_form_data(www_request, session, dag):
    """Get Execution Data, Base Date & Number of runs from a Request.

    Returns a dict with the selected execution date, the base date, how many
    runs to show, the dag-run dropdown choices and the selected run's state.
    """
    date_time = www_request.args.get('execution_date')
    if date_time:
        date_time = timezone.parse(date_time)
    else:
        # No explicit date requested: default to the latest run (or "now").
        date_time = dag.get_latest_execution_date(session=session) or timezone.utcnow()

    base_date = www_request.args.get('base_date')
    if base_date:
        base_date = timezone.parse(base_date)
    else:
        # The DateTimeField widget truncates milliseconds and would loose
        # the first dag run. Round to next second.
        base_date = (date_time + timedelta(seconds=1)).replace(microsecond=0)

    default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
    num_runs = www_request.args.get('num_runs', default=default_dag_run, type=int)

    # The most recent num_runs dag runs at or before base_date.
    drs = (
        session.query(DagRun)
        .filter(DagRun.dag_id == dag.dag_id, DagRun.execution_date <= base_date)
        .order_by(desc(DagRun.execution_date))
        .limit(num_runs)
        .all()
    )
    dr_choices = []
    dr_state = None
    for dr in drs:
        dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
        if date_time == dr.execution_date:
            dr_state = dr.state

    # Happens if base_date was changed and the selected dag run is not in result
    if not dr_state and drs:
        dr = drs[0]
        date_time = dr.execution_date
        dr_state = dr.state

    return {
        'dttm': date_time,
        'base_date': base_date,
        'num_runs': num_runs,
        'execution_date': date_time.isoformat(),
        'dr_choices': dr_choices,
        'dr_state': dr_state,
    }
def task_group_to_dict(task_group):
    """
    Create a nested dict representation of this TaskGroup and its children used to construct
    the Graph.
    """
    if isinstance(task_group, BaseOperator):
        # Leaf: a single task node styled with the operator's UI colors.
        return {
            'id': task_group.task_id,
            'value': {
                'label': task_group.label,
                'labelStyle': f"fill:{task_group.ui_fgcolor};",
                'style': f"fill:{task_group.ui_color};",
                'rx': 5,
                'ry': 5,
            },
        }
    # Children are rendered alphabetically by label for a stable layout.
    children = [
        task_group_to_dict(child) for child in sorted(task_group.children.values(), key=lambda t: t.label)
    ]

    if task_group.upstream_group_ids or task_group.upstream_task_ids:
        # Proxy node that fans incoming edges into the group (see dag_edges).
        children.append(
            {
                'id': task_group.upstream_join_id,
                'value': {
                    'label': '',
                    'labelStyle': f"fill:{task_group.ui_fgcolor};",
                    'style': f"fill:{task_group.ui_color};",
                    'shape': 'circle',
                },
            }
        )

    if task_group.downstream_group_ids or task_group.downstream_task_ids:
        # This is the join node used to reduce the number of edges between two TaskGroup.
        children.append(
            {
                'id': task_group.downstream_join_id,
                'value': {
                    'label': '',
                    'labelStyle': f"fill:{task_group.ui_fgcolor};",
                    'style': f"fill:{task_group.ui_color};",
                    'shape': 'circle',
                },
            }
        )

    return {
        "id": task_group.group_id,
        'value': {
            'label': task_group.label,
            'labelStyle': f"fill:{task_group.ui_fgcolor};",
            # NOTE(review): unlike the nodes above, no trailing ';' here — confirm intentional.
            'style': f"fill:{task_group.ui_color}",
            'rx': 5,
            'ry': 5,
            'clusterLabelPos': 'top',
        },
        'tooltip': task_group.tooltip,
        'children': children,
    }
def get_key_paths(input_dict):
    """Yield a dot-separated path for every leaf of a nested dictionary."""
    for key, value in input_dict.items():
        if not isinstance(value, dict):
            yield key
        else:
            # Recurse and prefix each nested path with the current key.
            for sub_key in get_key_paths(value):
                yield f'{key}.{sub_key}'
def get_value_from_path(key_path, content):
    """Return the value in nested *content* addressed by dot-separated *key_path*.

    Numeric path components index into sequences; everything else is looked
    up with dict.get.
    """
    node = content
    for part in key_path.strip(".").split("."):
        try:
            node = node[int(part)]
        except ValueError:
            # Not an integer: treat as a mapping key.
            node = node.get(part)
    return node
def dag_edges(dag):
    """
    Create the list of edges needed to construct the Graph view.

    A special case is made if a TaskGroup is immediately upstream/downstream of another
    TaskGroup or task. Two dummy nodes named upstream_join_id and downstream_join_id are
    created for the TaskGroup. Instead of drawing an edge onto every task in the TaskGroup,
    all edges are directed onto the dummy nodes. This is to cut down the number of edges on
    the graph.

    For example: A DAG with TaskGroups group1 and group2:
        group1: task1, task2, task3
        group2: task4, task5, task6

    group2 is downstream of group1:
        group1 >> group2

    Edges to add (This avoids having to create edges between every task in group1 and group2):
        task1 >> downstream_join_id
        task2 >> downstream_join_id
        task3 >> downstream_join_id
        downstream_join_id >> upstream_join_id
        upstream_join_id >> task4
        upstream_join_id >> task5
        upstream_join_id >> task6
    """
    # Edges to add between TaskGroup
    edges_to_add = set()
    # Edges to remove between individual tasks that are replaced by edges_to_add.
    edges_to_skip = set()

    task_group_map = dag.task_group.get_task_group_dict()

    def collect_edges(task_group):
        """Update edges_to_add and edges_to_skip according to TaskGroups."""
        if isinstance(task_group, BaseOperator):
            return

        for target_id in task_group.downstream_group_ids:
            # For every TaskGroup immediately downstream, add edges between downstream_join_id
            # and upstream_join_id. Skip edges between individual tasks of the TaskGroups.
            target_group = task_group_map[target_id]
            edges_to_add.add((task_group.downstream_join_id, target_group.upstream_join_id))

            for child in task_group.get_leaves():
                edges_to_add.add((child.task_id, task_group.downstream_join_id))
                for target in target_group.get_roots():
                    edges_to_skip.add((child.task_id, target.task_id))
                edges_to_skip.add((child.task_id, target_group.upstream_join_id))

            for child in target_group.get_roots():
                edges_to_add.add((target_group.upstream_join_id, child.task_id))
                edges_to_skip.add((task_group.downstream_join_id, child.task_id))

        # For every individual task immediately downstream, add edges between downstream_join_id and
        # the downstream task. Skip edges between individual tasks of the TaskGroup and the
        # downstream task.
        for target_id in task_group.downstream_task_ids:
            edges_to_add.add((task_group.downstream_join_id, target_id))

            for child in task_group.get_leaves():
                edges_to_add.add((child.task_id, task_group.downstream_join_id))
                edges_to_skip.add((child.task_id, target_id))

        # For every individual task immediately upstream, add edges between the upstream task
        # and upstream_join_id. Skip edges between the upstream task and individual tasks
        # of the TaskGroup.
        for source_id in task_group.upstream_task_ids:
            edges_to_add.add((source_id, task_group.upstream_join_id))
            for child in task_group.get_roots():
                edges_to_add.add((task_group.upstream_join_id, child.task_id))
                edges_to_skip.add((source_id, child.task_id))

        # Recurse into nested TaskGroups.
        for child in task_group.children.values():
            collect_edges(child)

    collect_edges(dag.task_group)

    # Collect all the edges between individual tasks
    edges = set()

    def get_downstream(task):
        # Depth-first walk; the membership check prevents re-visiting shared subtrees.
        for child in task.downstream_list:
            edge = (task.task_id, child.task_id)
            if edge not in edges:
                edges.add(edge)
                get_downstream(child)

    for root in dag.roots:
        get_downstream(root)

    result = []
    # Build result dicts with the two ends of the edge, plus any extra metadata
    # if we have it.
    for source_id, target_id in sorted(edges.union(edges_to_add) - edges_to_skip):
        record = {"source_id": source_id, "target_id": target_id}
        label = dag.get_edge_info(source_id, target_id).get("label")
        if label:
            record["label"] = label
        result.append(record)

    return result
######################################################################################
# Error handlers
######################################################################################
def not_found(error):
    """404 handler: render the generic not-found page for any missing route."""
    # Only reveal the real hostname when the operator has not opted out.
    hostname = (
        socket.getfqdn()
        if conf.getboolean('webserver', 'EXPOSE_HOSTNAME', fallback=True)
        else 'redact'
    )
    return render_template('airflow/not_found.html', hostname=hostname), 404
def show_traceback(error):
    """500 handler: render the traceback page, redacting details when configured."""
    expose_hostname = conf.getboolean('webserver', 'EXPOSE_HOSTNAME', fallback=True)
    expose_stacktrace = conf.getboolean('webserver', 'EXPOSE_STACKTRACE', fallback=True)
    page = render_template(
        'airflow/traceback.html',
        python_version=sys.version.split(" ")[0],
        airflow_version=version,
        hostname=socket.getfqdn() if expose_hostname else 'redact',
        info=traceback.format_exc() if expose_stacktrace else 'Error! Please contact server admin.',
    )
    return page, 500
######################################################################################
# BaseViews
######################################################################################
class AirflowBaseView(BaseView):
    """Base View to set Airflow related properties"""

    # Class-level import so templates can reach airflow.macros via extra_args.
    from airflow import macros

    route_base = ''

    # Make our macros available to our UI templates too.
    extra_args = {
        'macros': macros,
    }

    # Shared nvd3 line-chart options.
    line_chart_attr = {
        'legend.maxKeyLength': 200,
    }

    def render_template(self, *args, **kwargs):
        """Render a template, injecting scheduler/triggerer job info lazily."""
        # Add triggerer_job only if we need it
        if TriggererJob.is_needed():
            kwargs["triggerer_job"] = lazy_object_proxy.Proxy(TriggererJob.most_recent_job)
        return super().render_template(
            *args,
            # Cache this at most once per request, not for the lifetime of the view instance
            scheduler_job=lazy_object_proxy.Proxy(SchedulerJob.most_recent_job),
            **kwargs,
        )
def add_user_permissions_to_dag(sender, template, context, **extra):
    """
    Adds `.can_edit`, `.can_trigger`, and `.can_delete` properties
    to DAG based on current user's permissions.
    Located in `views.py` rather than the DAG model to keep
    permissions logic out of the Airflow core.
    """
    if 'dag' in context:
        dag = context['dag']
        can_create_dag_run = current_app.appbuilder.sm.has_access(
            permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN
        )

        dag.can_edit = current_app.appbuilder.sm.can_edit_dag(dag.dag_id)
        # Triggering requires both edit rights on the DAG and create rights on runs.
        dag.can_trigger = dag.can_edit and can_create_dag_run
        dag.can_delete = current_app.appbuilder.sm.has_access(
            permissions.ACTION_CAN_DELETE,
            permissions.RESOURCE_DAG,
        )
        context['dag'] = dag


# Run the permission decoration before every template render.
before_render_template.connect(add_user_permissions_to_dag)
class Airflow(AirflowBaseView):
"""Main Airflow application."""
    @expose('/health')
    def health(self):
        """
        An endpoint helping check the health status of the Airflow instance,
        including metadatabase and scheduler.

        Returns JSON: {'metadatabase': {'status': ...},
                       'scheduler': {'status': ..., 'latest_scheduler_heartbeat': ...}}.
        """
        payload = {'metadatabase': {'status': 'unhealthy'}}

        latest_scheduler_heartbeat = None
        scheduler_status = 'unhealthy'
        # Reaching this point means the DB-backed request worked, so flip to healthy;
        # the query below may still knock it back down.
        payload['metadatabase'] = {'status': 'healthy'}
        try:
            scheduler_job = SchedulerJob.most_recent_job()

            if scheduler_job:
                latest_scheduler_heartbeat = scheduler_job.latest_heartbeat.isoformat()
                if scheduler_job.is_alive():
                    scheduler_status = 'healthy'
        except Exception:
            # Any DB failure while probing the scheduler marks the metadatabase unhealthy.
            payload['metadatabase']['status'] = 'unhealthy'

        payload['scheduler'] = {
            'status': scheduler_status,
            'latest_scheduler_heartbeat': latest_scheduler_heartbeat,
        }

        return wwwutils.json_response(payload)
    @expose('/home')
    @auth.has_access(
        [
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
        ]
    )
    def index(self):
        """Home view: the paginated, filterable DAG list with warnings and import errors."""
        unit_test_mode: bool = conf.getboolean('core', 'UNIT_TEST_MODE')

        # Warn loudly about dev-only configurations (SQLite / SequentialExecutor).
        if not unit_test_mode and "sqlite" in conf.get("core", "sql_alchemy_conn"):
            db_doc_page = get_docs_url("howto/set-up-database.html")
            flash(
                Markup(
                    "Usage of <b>SQLite</b> detected. It should only be used for dev/testing. "
                    "Do not use <b>SQLite</b> as metadata DB in production. "
                    "We recommend using Postgres or MySQL. "
                    f"<a href='{db_doc_page}'><b>Click here</b></a> for more information."
                ),
                category="warning",
            )

        if not unit_test_mode and conf.get("core", "executor") == "SequentialExecutor":
            exec_doc_page = get_docs_url("executor/index.html")
            flash(
                Markup(
                    "Usage of <b>SequentialExecutor</b> detected. "
                    "Do not use <b>SequentialExecutor</b> in production. "
                    f"<a href='{exec_doc_page}'><b>Click here</b></a> for more information."
                ),
                category="warning",
            )

        hide_paused_dags_by_default = conf.getboolean('webserver', 'hide_paused_dags_by_default')
        default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')

        # Query-string parameters driving search, pagination and filters.
        num_runs = request.args.get('num_runs', default=default_dag_run, type=int)
        current_page = request.args.get('page', default=0, type=int)
        arg_search_query = request.args.get('search')
        arg_tags_filter = request.args.getlist('tags')
        arg_status_filter = request.args.get('status')

        if request.args.get('reset_tags') is not None:
            flask_session[FILTER_TAGS_COOKIE] = None
            # Remove the reset_tags=reset from the URL
            return redirect(url_for('Airflow.index'))

        cookie_val = flask_session.get(FILTER_TAGS_COOKIE)
        if arg_tags_filter:
            flask_session[FILTER_TAGS_COOKIE] = ','.join(arg_tags_filter)
        elif cookie_val:
            # If tags exist in cookie, but not URL, add them to the URL
            return redirect(url_for('Airflow.index', tags=cookie_val.split(',')))

        # Resolve the status filter: URL wins, then cookie, then config default.
        if arg_status_filter is None:
            cookie_val = flask_session.get(FILTER_STATUS_COOKIE)
            if cookie_val:
                arg_status_filter = cookie_val
            else:
                arg_status_filter = 'active' if hide_paused_dags_by_default else 'all'
                flask_session[FILTER_STATUS_COOKIE] = arg_status_filter
        else:
            status = arg_status_filter.strip().lower()
            flask_session[FILTER_STATUS_COOKIE] = status
            arg_status_filter = status

        dags_per_page = PAGE_SIZE
        start = current_page * dags_per_page
        end = start + dags_per_page

        # Get all the dag id the user could access
        filter_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)

        with create_session() as session:
            # read orm_dags from the db
            dags_query = session.query(DagModel).filter(~DagModel.is_subdag, DagModel.is_active)

            if arg_search_query:
                dags_query = dags_query.filter(
                    DagModel.dag_id.ilike('%' + arg_search_query + '%')
                    | DagModel.owners.ilike('%' + arg_search_query + '%')
                )

            if arg_tags_filter:
                dags_query = dags_query.filter(DagModel.tags.any(DagTag.name.in_(arg_tags_filter)))

            dags_query = dags_query.filter(DagModel.dag_id.in_(filter_dag_ids))

            all_dags = dags_query
            active_dags = dags_query.filter(~DagModel.is_paused)
            paused_dags = dags_query.filter(DagModel.is_paused)

            # One grouped count gives both the active and paused totals.
            is_paused_count = dict(
                all_dags.with_entities(DagModel.is_paused, func.count(DagModel.dag_id))
                .group_by(DagModel.is_paused)
                .all()
            )
            status_count_active = is_paused_count.get(False, 0)
            status_count_paused = is_paused_count.get(True, 0)

            all_dags_count = status_count_active + status_count_paused
            if arg_status_filter == 'active':
                current_dags = active_dags
                num_of_all_dags = status_count_active
            elif arg_status_filter == 'paused':
                current_dags = paused_dags
                num_of_all_dags = status_count_paused
            else:
                current_dags = all_dags
                num_of_all_dags = all_dags_count

            dags = (
                current_dags.order_by(DagModel.dag_id)
                .options(joinedload(DagModel.tags))
                .offset(start)
                .limit(dags_per_page)
                .all()
            )
            user_permissions = current_app.appbuilder.sm.get_current_user_permissions()
            all_dags_editable = (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG) in user_permissions

            can_create_dag_run = (
                permissions.ACTION_CAN_CREATE,
                permissions.RESOURCE_DAG_RUN,
            ) in user_permissions

            can_delete_dag = (
                permissions.ACTION_CAN_DELETE,
                permissions.RESOURCE_DAG,
            ) in user_permissions

            # Annotate each row with what the current user may do to it.
            for dag in dags:
                if all_dags_editable:
                    dag.can_edit = True
                else:
                    dag_resource_name = permissions.RESOURCE_DAG_PREFIX + dag.dag_id
                    dag.can_edit = (permissions.ACTION_CAN_EDIT, dag_resource_name) in user_permissions
                dag.can_trigger = dag.can_edit and can_create_dag_run
                dag.can_delete = can_delete_dag

            dagtags = session.query(DagTag.name).distinct(DagTag.name).all()
            tags = [
                {"name": name, "selected": bool(arg_tags_filter and name in arg_tags_filter)}
                for name, in dagtags
            ]

            import_errors = session.query(errors.ImportError).order_by(errors.ImportError.id)

            if (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG) not in user_permissions:
                # if the user doesn't have access to all DAGs, only display errors from visible DAGs
                import_errors = import_errors.join(
                    DagModel, DagModel.fileloc == errors.ImportError.filename
                ).filter(DagModel.dag_id.in_(filter_dag_ids))

            for import_error in import_errors:
                flash(
                    "Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=import_error),
                    "dag_import_error",
                )

        from airflow.plugins_manager import import_errors as plugin_import_errors

        # NOTE(review): `filename` is unpacked but unused and the message hardcodes
        # "(unknown)" — this looks like it was meant to interpolate {filename}; confirm.
        for filename, stacktrace in plugin_import_errors.items():
            flash(
                f"Broken plugin: [(unknown)] {stacktrace}",
                "error",
            )

        num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))

        # Templates/JS can't use None as a key; rename it to the string "null".
        state_color_mapping = State.state_color.copy()
        state_color_mapping["null"] = state_color_mapping.pop(None)

        page_title = conf.get(section="webserver", key="instance_name", fallback="DAGs")

        return self.render_template(
            'airflow/dags.html',
            dags=dags,
            current_page=current_page,
            search_query=arg_search_query if arg_search_query else '',
            page_title=page_title,
            page_size=dags_per_page,
            num_of_pages=num_of_pages,
            num_dag_from=min(start + 1, num_of_all_dags),
            num_dag_to=min(end, num_of_all_dags),
            num_of_all_dags=num_of_all_dags,
            paging=wwwutils.generate_pages(
                current_page,
                num_of_pages,
                search=escape(arg_search_query) if arg_search_query else None,
                status=arg_status_filter if arg_status_filter else None,
                tags=arg_tags_filter if arg_tags_filter else None,
            ),
            num_runs=num_runs,
            tags=tags,
            state_color=state_color_mapping,
            status_filter=arg_status_filter,
            status_count_all=all_dags_count,
            status_count_active=status_count_active,
            status_count_paused=status_count_paused,
            tags_filter=arg_tags_filter,
        )
    @expose('/dag_stats', methods=['POST'])
    @auth.has_access(
        [
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
        ]
    )
    @provide_session
    def dag_stats(self, session=None):
        """Dag statistics: per-DAG counts of dag runs grouped by run state.

        Returns JSON mapping dag_id -> [{'state': ..., 'count': ...}, ...]
        covering every state in State.dag_states.
        """
        dr = models.DagRun

        allowed_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)

        dag_state_stats = session.query(dr.dag_id, dr.state, sqla.func.count(dr.state)).group_by(
            dr.dag_id, dr.state
        )

        # Filter by post parameters
        selected_dag_ids = {unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id}

        if selected_dag_ids:
            # Only report DAGs both requested AND accessible to the user.
            filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
        else:
            filter_dag_ids = allowed_dag_ids

        if not filter_dag_ids:
            return wwwutils.json_response({})

        payload = {}
        dag_state_stats = dag_state_stats.filter(dr.dag_id.in_(filter_dag_ids))
        data = {}

        for dag_id, state, count in dag_state_stats:
            if dag_id not in data:
                data[dag_id] = {}
            data[dag_id][state] = count

        # Emit an entry for every known state, defaulting missing ones to 0.
        for dag_id in filter_dag_ids:
            payload[dag_id] = []
            for state in State.dag_states:
                count = data.get(dag_id, {}).get(state, 0)
                payload[dag_id].append({'state': state, 'count': count})

        return wwwutils.json_response(payload)
    @expose('/task_stats', methods=['POST'])
    @auth.has_access(
        [
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
        ]
    )
    @provide_session
    def task_stats(self, session=None):
        """Task Statistics: per-DAG task-instance counts grouped by task state.

        Counts tasks of running dag runs, optionally unioned with tasks of the
        most recent completed run per DAG (config-controlled). Returns JSON
        mapping dag_id -> [{'state': ..., 'count': ...}, ...].
        """
        allowed_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)

        if not allowed_dag_ids:
            return wwwutils.json_response({})

        # Filter by post parameters
        selected_dag_ids = {unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id}

        if selected_dag_ids:
            filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
        else:
            filter_dag_ids = allowed_dag_ids

        running_dag_run_query_result = (
            session.query(DagRun.dag_id, DagRun.execution_date)
            .join(DagModel, DagModel.dag_id == DagRun.dag_id)
            .filter(DagRun.state == State.RUNNING, DagModel.is_active)
        )

        running_dag_run_query_result = running_dag_run_query_result.filter(DagRun.dag_id.in_(filter_dag_ids))

        running_dag_run_query_result = running_dag_run_query_result.subquery('running_dag_run')

        # Select all task_instances from active dag_runs.
        running_task_instance_query_result = session.query(
            TaskInstance.dag_id.label('dag_id'), TaskInstance.state.label('state')
        ).join(
            running_dag_run_query_result,
            and_(
                running_dag_run_query_result.c.dag_id == TaskInstance.dag_id,
                running_dag_run_query_result.c.execution_date == TaskInstance.execution_date,
            ),
        )

        if conf.getboolean('webserver', 'SHOW_RECENT_STATS_FOR_COMPLETED_RUNS', fallback=True):

            # Latest non-running run per DAG.
            last_dag_run = (
                session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
                .join(DagModel, DagModel.dag_id == DagRun.dag_id)
                .filter(DagRun.state != State.RUNNING, DagModel.is_active)
                .group_by(DagRun.dag_id)
            )

            last_dag_run = last_dag_run.filter(DagRun.dag_id.in_(filter_dag_ids))
            last_dag_run = last_dag_run.subquery('last_dag_run')

            # Select all task_instances from active dag_runs.
            # If no dag_run is active, return task instances from most recent dag_run.
            last_task_instance_query_result = session.query(
                TaskInstance.dag_id.label('dag_id'), TaskInstance.state.label('state')
            ).join(
                last_dag_run,
                and_(
                    last_dag_run.c.dag_id == TaskInstance.dag_id,
                    last_dag_run.c.execution_date == TaskInstance.execution_date,
                ),
            )

            final_task_instance_query_result = union_all(
                last_task_instance_query_result, running_task_instance_query_result
            ).alias('final_ti')
        else:
            final_task_instance_query_result = running_task_instance_query_result.subquery('final_ti')

        qry = session.query(
            final_task_instance_query_result.c.dag_id,
            final_task_instance_query_result.c.state,
            sqla.func.count(),
        ).group_by(final_task_instance_query_result.c.dag_id, final_task_instance_query_result.c.state)

        data = {}
        for dag_id, state, count in qry:
            if dag_id not in data:
                data[dag_id] = {}
            data[dag_id][state] = count

        # Emit every known task state per DAG, defaulting missing counts to 0.
        payload = {}
        for dag_id in filter_dag_ids:
            payload[dag_id] = []
            for state in State.task_states:
                count = data.get(dag_id, {}).get(state, 0)
                payload[dag_id].append({'state': state, 'count': count})
        return wwwutils.json_response(payload)
    @expose('/last_dagruns', methods=['POST'])
    @auth.has_access(
        [
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
        ]
    )
    @provide_session
    def last_dagruns(self, session=None):
        """Last DAG runs: JSON summary of the most recent run of each requested DAG."""
        allowed_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)

        # Filter by post parameters
        selected_dag_ids = {unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id}

        if selected_dag_ids:
            filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
        else:
            filter_dag_ids = allowed_dag_ids

        if not filter_dag_ids:
            return wwwutils.json_response({})

        # Most recent execution_date per DAG.
        last_runs_subquery = (
            session.query(
                DagRun.dag_id,
                sqla.func.max(DagRun.execution_date).label("max_execution_date"),
            )
            .group_by(DagRun.dag_id)
            .filter(DagRun.dag_id.in_(filter_dag_ids))  # Only include accessible/selected DAGs.
            .subquery("last_runs")
        )

        query = session.query(
            DagRun.dag_id,
            DagRun.start_date,
            DagRun.end_date,
            DagRun.state,
            DagRun.execution_date,
            DagRun.data_interval_start,
            DagRun.data_interval_end,
        ).join(
            last_runs_subquery,
            and_(
                last_runs_subquery.c.dag_id == DagRun.dag_id,
                last_runs_subquery.c.max_execution_date == DagRun.execution_date,
            ),
        )

        def _datetime_to_string(value: Optional[DateTime]) -> Optional[str]:
            # None-safe ISO-8601 serialization for the JSON payload.
            if value is None:
                return None
            return value.isoformat()

        resp = {
            # Dots in dag_ids are escaped so the ids are safe for client-side selectors.
            r.dag_id.replace('.', '__dot__'): {
                "dag_id": r.dag_id,
                "state": r.state,
                "execution_date": _datetime_to_string(r.execution_date),
                "start_date": _datetime_to_string(r.start_date),
                "end_date": _datetime_to_string(r.end_date),
                "data_interval_start": _datetime_to_string(r.data_interval_start),
                "data_interval_end": _datetime_to_string(r.data_interval_end),
            }
            for r in query
        }
        return wwwutils.json_response(resp)
@expose('/code')
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),
    ]
)
@provide_session
def code(self, session=None):
    """Render the syntax-highlighted source of a DAG file.

    Any failure while looking up the DAG or highlighting its code is caught
    and shown inline on the page instead of failing the request.
    """
    all_errors = ""
    dag_orm = None
    dag_id = None

    try:
        dag_id = request.args.get('dag_id')
        dag_orm = DagModel.get_dagmodel(dag_id, session=session)
        code = DagCode.get_code_by_fileloc(dag_orm.fileloc)
        html_code = Markup(highlight(code, lexers.PythonLexer(), HtmlFormatter(linenos=True)))
    except Exception as e:
        all_errors += (
            "Exception encountered during "
            + f"dag_id retrieval/dag retrieval fallback/code highlighting:\n\n{e}\n"
        )
        html_code = Markup('<p>Failed to load DAG file Code.</p><p>Details: {}</p>').format(
            escape(all_errors)
        )

    # BUGFIX: dag_orm can still be None here (unknown dag_id, or the lookup
    # above raised) — the except branch deliberately renders an error page,
    # but dereferencing dag_orm unconditionally then turned that recoverable
    # failure into an AttributeError / HTTP 500.
    if dag_orm:
        wwwutils.check_import_errors(dag_orm.fileloc, session)

    return self.render_template(
        'airflow/dag_code.html',
        html_code=html_code,
        dag=dag_orm,
        dag_model=dag_orm,
        title=dag_id,
        root=request.args.get('root'),
        wrapped=conf.getboolean('webserver', 'default_wrap'),
    )
@expose('/dag_details')
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
    ]
)
@provide_session
def dag_details(self, session=None):
    """Render the DAG details page: per-state TI counts, active runs, tags."""
    dag_id = request.args.get('dag_id')
    dag = current_app.dag_bag.get_dag(dag_id)
    dag_model = DagModel.get_dagmodel(dag_id)
    title = "DAG Details"
    root = request.args.get('root', '')

    wwwutils.check_import_errors(dag.fileloc, session)

    # Count task instances grouped by state, across all runs of this DAG.
    states = (
        session.query(TaskInstance.state, sqla.func.count(TaskInstance.dag_id))
        .filter(TaskInstance.dag_id == dag_id)
        .group_by(TaskInstance.state)
        .all()
    )

    # Only scheduler-created runs count as "active" here (external_trigger=False).
    active_runs = models.DagRun.find(dag_id=dag_id, state=State.RUNNING, external_trigger=False)

    tags = session.query(models.DagTag).filter(models.DagTag.dag_id == dag_id).all()

    return self.render_template(
        'airflow/dag_details.html',
        dag=dag,
        title=title,
        root=root,
        states=states,
        State=State,
        active_runs=active_runs,
        tags=tags,
        dag_model=dag_model,
    )
@expose('/rendered-templates')
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
    ]
)
@action_logging
def rendered_templates(self):
    """Render a task instance's templated fields for one execution date.

    Each templated field is rendered via the renderer registered for it in
    ``wwwutils.get_attr_renderer()``; fields without a renderer fall back to
    a ``pformat``-ed ``<pre><code>`` block.  Rendering errors are flashed and
    the page still displays the (unrendered) field values.
    """
    dag_id = request.args.get('dag_id')
    task_id = request.args.get('task_id')
    execution_date = request.args.get('execution_date')
    dttm = timezone.parse(execution_date)
    form = DateTimeForm(data={'execution_date': dttm})
    root = request.args.get('root', '')

    logging.info("Retrieving rendered templates.")
    dag = current_app.dag_bag.get_dag(dag_id)

    # Copy so rendering does not mutate the task on the shared DAG object.
    task = copy.copy(dag.get_task(task_id))
    ti = models.TaskInstance(task=task, execution_date=dttm)

    try:
        ti.get_rendered_template_fields()
    except AirflowException as e:
        msg = "Error rendering template: " + escape(e)
        if e.__cause__:
            msg += Markup("<br><br>OriginalError: ") + escape(e.__cause__)
        flash(msg, "error")
    except Exception as e:
        flash("Error rendering template: " + str(e), "error")

    title = "Rendered Template"
    html_dict = {}
    renderers = wwwutils.get_attr_renderer()

    for template_field in task.template_fields:
        content = getattr(task, template_field)
        renderer = task.template_fields_renderers.get(template_field, template_field)
        if renderer in renderers:
            if isinstance(content, (dict, list)):
                json_content = json.dumps(content, sort_keys=True, indent=4)
                html_dict[template_field] = renderers[renderer](json_content)
            else:
                html_dict[template_field] = renderers[renderer](content)
        else:
            # BUGFIX: closing tags were previously emitted in the wrong order
            # ("</pre></code>"), producing invalid HTML.
            html_dict[template_field] = Markup("<pre><code>{}</code></pre>").format(pformat(content))

        if isinstance(content, dict):
            if template_field == 'op_kwargs':
                # Render each op_kwargs entry individually so per-key renderers apply.
                for key, value in content.items():
                    renderer = task.template_fields_renderers.get(key, key)
                    if renderer in renderers:
                        html_dict['.'.join([template_field, key])] = renderers[renderer](value)
                    else:
                        html_dict['.'.join([template_field, key])] = Markup(
                            "<pre><code>{}</code></pre>"
                        ).format(pformat(value))
            else:
                # For other dict fields, only nested paths with a registered
                # renderer are added.
                for dict_keys in get_key_paths(content):
                    template_path = '.'.join((template_field, dict_keys))
                    renderer = task.template_fields_renderers.get(template_path, template_path)
                    if renderer in renderers:
                        content_value = get_value_from_path(dict_keys, content)
                        html_dict[template_path] = renderers[renderer](content_value)

    return self.render_template(
        'airflow/ti_code.html',
        html_dict=html_dict,
        dag=dag,
        task_id=task_id,
        execution_date=execution_date,
        form=form,
        root=root,
        title=title,
    )
@expose('/rendered-k8s')
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
    ]
)
@action_logging
def rendered_k8s(self):
    """Render the Kubernetes pod spec a task instance would launch with.

    Returns 404 unless a Kubernetes-family executor is configured.  Spec
    rendering errors are flashed and a fallback message is shown instead.
    """
    if not settings.IS_K8S_OR_K8SCELERY_EXECUTOR:
        abort(404)
    dag_id = request.args.get('dag_id')
    task_id = request.args.get('task_id')
    execution_date = request.args.get('execution_date')
    dttm = timezone.parse(execution_date)
    form = DateTimeForm(data={'execution_date': dttm})
    root = request.args.get('root', '')
    logging.info("Retrieving rendered templates.")

    dag = current_app.dag_bag.get_dag(dag_id)
    task = dag.get_task(task_id)
    ti = models.TaskInstance(task=task, execution_date=dttm)

    pod_spec = None
    try:
        pod_spec = ti.get_rendered_k8s_spec()
    except AirflowException as e:
        msg = "Error rendering Kubernetes POD Spec: " + escape(e)
        if e.__cause__:
            msg += Markup("<br><br>OriginalError: ") + escape(e.__cause__)
        flash(msg, "error")
    except Exception as e:
        flash("Error rendering Kubernetes Pod Spec: " + str(e), "error")

    title = "Rendered K8s Pod Spec"
    html_dict = {}
    renderers = wwwutils.get_attr_renderer()
    if pod_spec:
        content = yaml.dump(pod_spec)
        content = renderers["yaml"](content)
    else:
        # BUGFIX: closing tags were emitted as "</pre></code>" (wrong order),
        # which is invalid HTML.
        content = Markup("<pre><code>Error rendering Kubernetes POD Spec</code></pre>")
    html_dict['k8s'] = content

    return self.render_template(
        'airflow/ti_code.html',
        html_dict=html_dict,
        dag=dag,
        task_id=task_id,
        execution_date=execution_date,
        form=form,
        root=root,
        title=title,
    )
@expose('/get_logs_with_metadata')
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
    ]
)
@action_logging
@provide_session
def get_logs_with_metadata(self, session=None):
    """Retrieve task logs plus pagination metadata.

    With ``format=json`` (the default) returns one JSON chunk per call; any
    other format streams the whole log as a plain-text attachment download.
    """
    dag_id = request.args.get('dag_id')
    task_id = request.args.get('task_id')
    execution_date = request.args.get('execution_date')
    try_number = request.args.get('try_number', type=int)
    metadata_arg = request.args.get('metadata')
    response_format = request.args.get('format', 'json')

    # BUGFIX: the metadata query parameter is optional and user-supplied; the
    # previous code called json.loads() unconditionally, so a missing value
    # raised TypeError and malformed JSON produced a 500 instead of a 400.
    try:
        metadata = json.loads(metadata_arg) if metadata_arg else {}
    except json.JSONDecodeError:
        response = jsonify({'error': 'Invalid JSON metadata'})
        response.status_code = 400
        return response
    # metadata may be null
    if not metadata:
        metadata = {}

    # Convert string datetime into actual datetime
    try:
        execution_date = timezone.parse(execution_date)
    except ValueError:
        error_message = (
            'Given execution date, {}, could not be identified '
            'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format(execution_date)
        )
        response = jsonify({'error': error_message})
        response.status_code = 400
        return response

    task_log_reader = TaskLogReader()
    if not task_log_reader.supports_read:
        return jsonify(
            message="Task log handler does not support read logs.",
            error=True,
            metadata={"end_of_log": True},
        )

    ti = (
        session.query(models.TaskInstance)
        .filter(
            models.TaskInstance.dag_id == dag_id,
            models.TaskInstance.task_id == task_id,
            models.TaskInstance.execution_date == execution_date,
        )
        .first()
    )

    if ti is None:
        return jsonify(
            message="*** Task instance did not exist in the DB\n",
            error=True,
            metadata={"end_of_log": True},
        )

    try:
        # Attach the task so the log reader can resolve operator-level config.
        dag = current_app.dag_bag.get_dag(dag_id)
        if dag:
            ti.task = dag.get_task(ti.task_id)

        if response_format == 'json':
            logs, metadata = task_log_reader.read_log_chunks(ti, try_number, metadata)
            # read_log_chunks returns a single chunk when try_number is given.
            message = logs[0] if try_number is not None else logs
            return jsonify(message=message, metadata=metadata)

        metadata['download_logs'] = True
        attachment_filename = task_log_reader.render_log_filename(ti, try_number)
        log_stream = task_log_reader.read_log_stream(ti, try_number, metadata)
        return Response(
            response=log_stream,
            mimetype="text/plain",
            headers={"Content-Disposition": f"attachment; filename={attachment_filename}"},
        )
    except AttributeError as e:
        error_message = [f"Task log handler does not support read logs.\n{str(e)}\n"]
        metadata['end_of_log'] = True
        return jsonify(message=error_message, error=True, metadata=metadata)
@expose('/log')
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
    ]
)
@action_logging
@provide_session
def log(self, session=None):
    """Render the log page shell; one tab per attempt.

    Only placeholder entries are rendered here — the actual log content is
    fetched asynchronously by the page (via the get_logs_with_metadata view).
    """
    dag_id = request.args.get('dag_id')
    task_id = request.args.get('task_id')
    execution_date = request.args.get('execution_date')
    dttm = timezone.parse(execution_date)
    form = DateTimeForm(data={'execution_date': dttm})
    dag_model = DagModel.get_dagmodel(dag_id)

    ti = (
        session.query(models.TaskInstance)
        .filter(
            models.TaskInstance.dag_id == dag_id,
            models.TaskInstance.task_id == task_id,
            models.TaskInstance.execution_date == dttm,
        )
        .first()
    )

    num_logs = 0
    if ti is not None:
        # next_try_number is one past the last attempt, so attempts = next - 1.
        num_logs = ti.next_try_number - 1
        if ti.state == State.UP_FOR_RESCHEDULE:
            # Tasks in reschedule state decremented the try number
            num_logs += 1
    logs = [''] * num_logs
    root = request.args.get('root', '')
    return self.render_template(
        'airflow/ti_log.html',
        logs=logs,
        dag=dag_model,
        title="Log by attempts",
        dag_id=dag_id,
        task_id=task_id,
        execution_date=execution_date,
        form=form,
        root=root,
        wrapped=conf.getboolean('webserver', 'default_wrap'),
    )
@expose('/redirect_to_external_log')
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
    ]
)
@action_logging
@provide_session
def redirect_to_external_log(self, session=None):
    """Redirect to the external log provider's URL for one task try.

    Query params: dag_id, task_id, execution_date (ISO-8601) and try_number.
    Falls back to the index page with a flash message when the task instance
    does not exist or the configured log handler has no external-link support.
    """
    dag_id = request.args.get('dag_id')
    task_id = request.args.get('task_id')
    execution_date = request.args.get('execution_date')
    dttm = timezone.parse(execution_date)
    # BUGFIX: the query value is a string; coerce it to int so the log
    # handler receives a real try number (defaults to 1 when absent/invalid).
    try_number = request.args.get('try_number', 1, type=int)

    ti = (
        session.query(models.TaskInstance)
        .filter(
            models.TaskInstance.dag_id == dag_id,
            models.TaskInstance.task_id == task_id,
            models.TaskInstance.execution_date == dttm,
        )
        .first()
    )

    if not ti:
        flash(f"Task [{dag_id}.{task_id}] does not exist", "error")
        return redirect(url_for('Airflow.index'))

    task_log_reader = TaskLogReader()
    if not task_log_reader.supports_external_link:
        flash("Task log handler does not support external links", "error")
        return redirect(url_for('Airflow.index'))

    handler = task_log_reader.log_handler
    url = handler.get_external_log_url(ti, try_number)
    return redirect(url)
@expose('/task')
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
    ]
)
@action_logging
def task(self):
    """Render the Task Instance Details page.

    Shows every public attribute of the task and its task instance, the
    code-like attributes rendered with their registered renderer, and the
    list of unmet scheduling dependencies (or a generic explanation when
    all dependencies are met but the task is not running).
    """
    dag_id = request.args.get('dag_id')
    task_id = request.args.get('task_id')
    execution_date = request.args.get('execution_date')
    dttm = timezone.parse(execution_date)
    form = DateTimeForm(data={'execution_date': dttm})
    root = request.args.get('root', '')
    dag = current_app.dag_bag.get_dag(dag_id)

    if not dag or task_id not in dag.task_ids:
        flash(f"Task [{dag_id}.{task_id}] doesn't seem to exist at the moment", "error")
        return redirect(url_for('Airflow.index'))

    # Copy so template-file resolution below doesn't mutate the shared DAG's task.
    task = copy.copy(dag.get_task(task_id))
    task.resolve_template_files()
    ti = TaskInstance(task=task, execution_date=dttm)
    ti.refresh_from_db()

    ti_attrs = []
    for attr_name in dir(ti):
        if not attr_name.startswith('_'):
            attr = getattr(ti, attr_name)
            # type(self.task) is the bound-method type (self.task is this very
            # view method), so this comparison filters out bound methods.
            if type(attr) != type(self.task):  # noqa
                ti_attrs.append((attr_name, str(attr)))

    task_attrs = []
    for attr_name in dir(task):
        if not attr_name.startswith('_'):
            attr = getattr(task, attr_name)
            # Skip methods and the code-like attrs rendered separately below.
            if type(attr) != type(self.task) and attr_name not in wwwutils.get_attr_renderer():  # noqa
                task_attrs.append((attr_name, str(attr)))

    # Color coding the special attributes that are code
    special_attrs_rendered = {}
    for attr_name in wwwutils.get_attr_renderer():
        if getattr(task, attr_name, None) is not None:
            source = getattr(task, attr_name)
            special_attrs_rendered[attr_name] = wwwutils.get_attr_renderer()[attr_name](source)

    no_failed_deps_result = [
        (
            "Unknown",
            "All dependencies are met but the task instance is not running. In most "
            "cases this just means that the task will probably be scheduled soon "
            "unless:<br>\n- The scheduler is down or under heavy load<br>\n{}\n"
            "<br>\nIf this task instance does not start soon please contact your "
            "Airflow administrator for assistance.".format(
                "- This task instance already ran and had it's state changed manually "
                "(e.g. cleared in the UI)<br>"
                if ti.state == State.NONE
                else ""
            ),
        )
    ]

    # Use the scheduler's context to figure out which dependencies are not met
    dep_context = DepContext(SCHEDULER_QUEUED_DEPS)
    failed_dep_reasons = [
        (dep.dep_name, dep.reason) for dep in ti.get_failed_dep_statuses(dep_context=dep_context)
    ]

    title = "Task Instance Details"
    return self.render_template(
        'airflow/task.html',
        task_attrs=task_attrs,
        ti_attrs=ti_attrs,
        failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
        task_id=task_id,
        execution_date=execution_date,
        special_attrs_rendered=special_attrs_rendered,
        form=form,
        root=root,
        dag=dag,
        title=title,
    )
@expose('/xcom')
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),
    ]
)
@action_logging
@provide_session
def xcom(self, session=None):
    """Render the XCom entries a task instance produced for one execution date."""
    dag_id = request.args.get('dag_id')
    task_id = request.args.get('task_id')
    # Carrying execution_date through, even though it's irrelevant for
    # this context
    execution_date = request.args.get('execution_date')
    dttm = timezone.parse(execution_date)
    form = DateTimeForm(data={'execution_date': dttm})
    root = request.args.get('root', '')
    ti_db = models.TaskInstance
    dag = DagModel.get_dagmodel(dag_id)
    # NOTE(review): this existence check matches on dag_id/task_id only, not
    # execution_date — any TI of the task passes it; confirm that's intended.
    ti = session.query(ti_db).filter(and_(ti_db.dag_id == dag_id, ti_db.task_id == task_id)).first()

    if not ti:
        flash(f"Task [{dag_id}.{task_id}] doesn't seem to exist at the moment", "error")
        return redirect(url_for('Airflow.index'))

    # The XCom rows themselves ARE filtered by execution date.
    xcomlist = (
        session.query(XCom)
        .filter(XCom.dag_id == dag_id, XCom.task_id == task_id, XCom.execution_date == dttm)
        .all()
    )

    attributes = []
    for xcom in xcomlist:
        # Underscore-prefixed keys are internal; hide them from the UI.
        if not xcom.key.startswith('_'):
            attributes.append((xcom.key, xcom.value))

    title = "XCom"
    return self.render_template(
        'airflow/xcom.html',
        attributes=attributes,
        task_id=task_id,
        execution_date=execution_date,
        form=form,
        root=root,
        dag=dag,
        title=title,
    )
@expose('/run', methods=['POST'])
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_TASK_INSTANCE),
    ]
)
@action_logging
def run(self):
    """Queue a single task instance for immediate execution.

    Only supported when the configured executor is Celery or Kubernetes —
    the executors that can accept a task queued from the webserver process.
    The ignore_* form flags relax dependency checks before queuing.
    """
    dag_id = request.form.get('dag_id')
    task_id = request.form.get('task_id')
    origin = get_safe_url(request.form.get('origin'))
    dag = current_app.dag_bag.get_dag(dag_id)
    task = dag.get_task(task_id)

    execution_date = request.form.get('execution_date')
    execution_date = timezone.parse(execution_date)
    ignore_all_deps = request.form.get('ignore_all_deps') == "true"
    ignore_task_deps = request.form.get('ignore_task_deps') == "true"
    ignore_ti_state = request.form.get('ignore_ti_state') == "true"

    executor = ExecutorLoader.get_default_executor()
    valid_celery_config = False
    valid_kubernetes_config = False

    # Executor packages are optional dependencies, hence the guarded imports.
    try:
        from airflow.executors.celery_executor import CeleryExecutor

        valid_celery_config = isinstance(executor, CeleryExecutor)
    except ImportError:
        pass

    try:
        from airflow.executors.kubernetes_executor import KubernetesExecutor

        valid_kubernetes_config = isinstance(executor, KubernetesExecutor)
    except ImportError:
        pass

    if not valid_celery_config and not valid_kubernetes_config:
        flash("Only works with the Celery or Kubernetes executors, sorry", "error")
        return redirect(origin)

    ti = models.TaskInstance(task=task, execution_date=execution_date)
    ti.refresh_from_db()

    # Make sure the task instance can be run
    dep_context = DepContext(
        deps=RUNNING_DEPS,
        ignore_all_deps=ignore_all_deps,
        ignore_task_deps=ignore_task_deps,
        ignore_ti_state=ignore_ti_state,
    )
    failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
    if failed_deps:
        failed_deps_str = ", ".join(f"{dep.dep_name}: {dep.reason}" for dep in failed_deps)
        flash(
            "Could not queue task instance for execution, dependencies not met: "
            "{}".format(failed_deps_str),
            "error",
        )
        return redirect(origin)

    executor.job_id = "manual"
    executor.start()
    executor.queue_task_instance(
        ti,
        ignore_all_deps=ignore_all_deps,
        ignore_task_deps=ignore_task_deps,
        ignore_ti_state=ignore_ti_state,
    )
    executor.heartbeat()
    flash(f"Sent {ti} to the message queue, it should start any moment now.")
    return redirect(origin)
@expose('/delete', methods=['POST'])
@auth.has_access(
    [
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
    ]
)
@action_logging
def delete(self):
    """Delete a DAG (metadata records) by id, then return to the origin page."""
    # Imported lazily; only this endpoint needs the experimental delete API.
    from airflow.api.common.experimental import delete_dag
    from airflow.exceptions import DagNotFound

    origin = get_safe_url(request.values.get('origin'))
    dag_id = request.values.get('dag_id')

    try:
        delete_dag.delete_dag(dag_id)
    except DagNotFound:
        flash(f"DAG with id {dag_id} not found. Cannot delete", 'error')
        return redirect(request.referrer)
    except AirflowException:
        # Running task instances block deletion; tell the user what to do.
        flash(
            f"Cannot delete DAG with id {dag_id} because some task instances of the DAG "
            "are still running. Please mark the task instances as "
            "failed/succeeded before deleting the DAG",
            "error",
        )
        return redirect(request.referrer)

    flash(f"Deleting DAG with id {dag_id}. May take a couple minutes to fully disappear.")
    # Upon success return to origin.
    return redirect(origin)
@expose('/trigger', methods=['POST', 'GET'])
@auth.has_access(
    [
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),
    ]
)
@action_logging
@provide_session
def trigger(self, session=None):
    """Trigger a manual DAG run.

    GET renders the trigger form (conf textarea pre-filled from the request
    or from dag.params); POST validates the execution date and conf JSON,
    rejects duplicate manual runs, optionally unpauses the DAG, and creates
    a QUEUED DagRun.
    """
    dag_id = request.values.get('dag_id')
    origin = get_safe_url(request.values.get('origin'))
    unpause = request.values.get('unpause')
    request_conf = request.values.get('conf')
    request_execution_date = request.values.get('execution_date', default=timezone.utcnow().isoformat())

    if request.method == 'GET':
        # Populate conf textarea with conf requests parameter, or dag.params
        default_conf = ''

        dag = current_app.dag_bag.get_dag(dag_id)
        doc_md = wwwutils.wrapped_markdown(getattr(dag, 'doc_md', None))
        form = DateTimeForm(data={'execution_date': request_execution_date})

        if request_conf:
            default_conf = request_conf
        else:
            try:
                default_conf = json.dumps(dag.params, indent=4)
            except TypeError:
                flash("Could not pre-populate conf field due to non-JSON-serializable data-types")
        return self.render_template(
            'airflow/trigger.html',
            dag_id=dag_id,
            origin=origin,
            conf=default_conf,
            doc_md=doc_md,
            form=form,
        )

    dag_orm = session.query(models.DagModel).filter(models.DagModel.dag_id == dag_id).first()
    if not dag_orm:
        flash(f"Cannot find dag {dag_id}")
        return redirect(origin)

    try:
        execution_date = timezone.parse(request_execution_date)
    except ParserError:
        # Re-render the form with an error instead of failing the request.
        flash("Invalid execution date", "error")
        form = DateTimeForm(data={'execution_date': timezone.utcnow().isoformat()})
        return self.render_template(
            'airflow/trigger.html', dag_id=dag_id, origin=origin, conf=request_conf, form=form
        )

    # A manual run already exists for this execution date — refuse a duplicate.
    dr = DagRun.find(dag_id=dag_id, execution_date=execution_date, run_type=DagRunType.MANUAL)
    if dr:
        flash(f"This run_id {dr.run_id} already exists")
        return redirect(origin)

    run_conf = {}
    if request_conf:
        try:
            run_conf = json.loads(request_conf)
            if not isinstance(run_conf, dict):
                flash("Invalid JSON configuration, must be a dict", "error")
                form = DateTimeForm(data={'execution_date': execution_date})
                return self.render_template(
                    'airflow/trigger.html', dag_id=dag_id, origin=origin, conf=request_conf, form=form
                )
        except json.decoder.JSONDecodeError:
            flash("Invalid JSON configuration, not parseable", "error")
            form = DateTimeForm(data={'execution_date': execution_date})
            return self.render_template(
                'airflow/trigger.html', dag_id=dag_id, origin=origin, conf=request_conf, form=form
            )

    dag = current_app.dag_bag.get_dag(dag_id)

    if unpause and dag.is_paused:
        models.DagModel.get_dagmodel(dag_id).set_is_paused(is_paused=False)

    dag.create_dagrun(
        run_type=DagRunType.MANUAL,
        execution_date=execution_date,
        state=State.QUEUED,
        conf=run_conf,
        external_trigger=True,
        dag_hash=current_app.dag_bag.dags_hash.get(dag_id),
    )

    flash(f"Triggered {dag_id}, it should start any moment now.")
    return redirect(origin)
def _clear_dag_tis(
    self, dag, start_date, end_date, origin, recursive=False, confirmed=False, only_failed=False
):
    """Clear task instances of ``dag`` between ``start_date`` and ``end_date``.

    When ``confirmed`` is True the clear is executed and the user is
    redirected back to ``origin``; otherwise a dry run is performed and a
    confirmation page listing the affected task instances is rendered.
    ``recursive`` extends the clear into sub- and parent DAGs.
    """
    if confirmed:
        count = dag.clear(
            start_date=start_date,
            end_date=end_date,
            include_subdags=recursive,
            include_parentdag=recursive,
            only_failed=only_failed,
        )

        flash(f"{count} task instances have been cleared")
        return redirect(origin)

    try:
        # Dry run: collect what WOULD be cleared, without touching state.
        tis = dag.clear(
            start_date=start_date,
            end_date=end_date,
            include_subdags=recursive,
            include_parentdag=recursive,
            only_failed=only_failed,
            dry_run=True,
        )
    except AirflowException as ex:
        flash(str(ex), 'error')
        return redirect(origin)

    if not tis:
        flash("No task instances to clear", 'error')
        response = redirect(origin)
    else:
        details = "\n".join(str(t) for t in tis)

        response = self.render_template(
            'airflow/confirm.html',
            endpoint=None,
            message="Here's the list of task instances you are about to clear:",
            details=details,
        )

    return response
@expose('/clear', methods=['POST'])
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
    ]
)
@action_logging
def clear(self):
    """Clear task instances for one task (optionally its relatives).

    The upstream/downstream flags widen the cleared task set; future/past
    widen the date range (an open end means "to the latest"/"from the
    earliest"); the actual clearing is delegated to _clear_dag_tis.
    """
    dag_id = request.form.get('dag_id')
    task_id = request.form.get('task_id')
    origin = get_safe_url(request.form.get('origin'))
    dag = current_app.dag_bag.get_dag(dag_id)

    execution_date = request.form.get('execution_date')
    execution_date = timezone.parse(execution_date)
    confirmed = request.form.get('confirmed') == "true"
    upstream = request.form.get('upstream') == "true"
    downstream = request.form.get('downstream') == "true"
    future = request.form.get('future') == "true"
    past = request.form.get('past') == "true"
    recursive = request.form.get('recursive') == "true"
    only_failed = request.form.get('only_failed') == "true"

    # Narrow the DAG to the selected task (exact-match regex) plus the
    # requested relatives before clearing.
    dag = dag.partial_subset(
        task_ids_or_regex=fr"^{task_id}$",
        include_downstream=downstream,
        include_upstream=upstream,
    )

    # future => no upper bound; past => no lower bound.
    end_date = execution_date if not future else None
    start_date = execution_date if not past else None

    return self._clear_dag_tis(
        dag,
        start_date,
        end_date,
        origin,
        recursive=recursive,
        confirmed=confirmed,
        only_failed=only_failed,
    )
@expose('/dagrun_clear', methods=['POST'])
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
    ]
)
@action_logging
def dagrun_clear(self):
    """Clears the DagRun"""
    form = request.form
    dag_id = form.get('dag_id')
    origin = get_safe_url(form.get('origin'))
    confirmed = form.get('confirmed') == "true"

    dag = current_app.dag_bag.get_dag(dag_id)
    # Pin both ends of the range to the run's execution date: clear one run.
    run_date = timezone.parse(form.get('execution_date'))

    return self._clear_dag_tis(dag, run_date, run_date, origin, recursive=True, confirmed=confirmed)
@expose('/blocked', methods=['POST'])
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
    ]
)
@provide_session
def blocked(self, session=None):
    """Return, per DAG, the count of running runs and its max_active_runs.

    The UI uses this to mark DAGs whose active run count has reached the
    configured limit.
    """
    allowed_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)

    # Filter by post parameters
    selected_dag_ids = {unquote(dag_id) for dag_id in request.form.getlist('dag_ids') if dag_id}

    if selected_dag_ids:
        filter_dag_ids = selected_dag_ids.intersection(allowed_dag_ids)
    else:
        filter_dag_ids = allowed_dag_ids

    if not filter_dag_ids:
        return wwwutils.json_response([])

    dags = (
        session.query(DagRun.dag_id, sqla.func.count(DagRun.id))
        .filter(DagRun.state == State.RUNNING)
        .filter(DagRun.dag_id.in_(filter_dag_ids))
        .group_by(DagRun.dag_id)
    )

    payload = []
    for dag_id, active_dag_runs in dags:
        max_active_runs = 0
        try:
            dag = current_app.dag_bag.get_dag(dag_id)
        except SerializedDagNotFound:
            # DAG vanished between the query and the lookup; report 0 limit.
            dag = None
        if dag:
            # TODO: Make max_active_runs a column so we can query for it directly
            max_active_runs = dag.max_active_runs
        payload.append(
            {
                'dag_id': dag_id,
                'active_dag_run': active_dag_runs,
                'max_active_runs': max_active_runs,
            }
        )
    return wwwutils.json_response(payload)
def _mark_dagrun_state_as_failed(self, dag_id, execution_date, confirmed, origin):
    """Mark a DagRun failed; show a confirmation page first unless confirmed."""
    if not execution_date:
        flash('Invalid execution date', 'error')
        return redirect(origin)

    run_date = timezone.parse(execution_date)
    dag = current_app.dag_bag.get_dag(dag_id)
    if not dag:
        flash(f'Cannot find DAG: {dag_id}', 'error')
        return redirect(origin)

    # With commit=False this is a dry run listing the affected TIs.
    altered = set_dag_run_state_to_failed(dag, run_date, commit=confirmed)

    if not confirmed:
        details = '\n'.join(str(t) for t in altered)
        return self.render_template(
            'airflow/confirm.html',
            message="Here's the list of task instances you are about to mark as failed",
            details=details,
        )

    flash(f'Marked failed on {len(altered)} task instances')
    return redirect(origin)
def _mark_dagrun_state_as_success(self, dag_id, execution_date, confirmed, origin):
    """Mark a DagRun success; show a confirmation page first unless confirmed."""
    if not execution_date:
        flash('Invalid execution date', 'error')
        return redirect(origin)

    run_date = timezone.parse(execution_date)
    dag = current_app.dag_bag.get_dag(dag_id)
    if not dag:
        flash(f'Cannot find DAG: {dag_id}', 'error')
        return redirect(origin)

    # With commit=False this is a dry run listing the affected TIs.
    altered = set_dag_run_state_to_success(dag, run_date, commit=confirmed)

    if not confirmed:
        details = '\n'.join(str(t) for t in altered)
        return self.render_template(
            'airflow/confirm.html',
            message="Here's the list of task instances you are about to mark as success",
            details=details,
        )

    flash(f'Marked success on {len(altered)} task instances')
    return redirect(origin)
@expose('/dagrun_failed', methods=['POST'])
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
    ]
)
@action_logging
def dagrun_failed(self):
    """Mark DagRun failed."""
    form = request.form
    # Thin wrapper: unpack the POST form and delegate to the shared helper.
    return self._mark_dagrun_state_as_failed(
        form.get('dag_id'),
        form.get('execution_date'),
        form.get('confirmed') == 'true',
        get_safe_url(form.get('origin')),
    )
@expose('/dagrun_success', methods=['POST'])
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
    ]
)
@action_logging
def dagrun_success(self):
    """Mark DagRun success"""
    form = request.form
    # Thin wrapper: unpack the POST form and delegate to the shared helper.
    return self._mark_dagrun_state_as_success(
        form.get('dag_id'),
        form.get('execution_date'),
        form.get('confirmed') == 'true',
        get_safe_url(form.get('origin')),
    )
def _mark_task_instance_state(
    self,
    dag_id,
    task_id,
    origin,
    execution_date,
    upstream,
    downstream,
    future,
    past,
    state,
):
    """Set ``state`` on a task instance (and optionally its relatives), then redirect to ``origin``."""
    dag = current_app.dag_bag.get_dag(dag_id)

    # A DAG with no runs has nothing to mark.
    if not dag.get_latest_execution_date():
        flash(f"Cannot mark tasks as {state}, seem that dag {dag_id} has never run", "error")
        return redirect(origin)

    altered = dag.set_task_instance_state(
        task_id,
        timezone.parse(execution_date),
        state,
        upstream=upstream,
        downstream=downstream,
        future=future,
        past=past,
    )

    flash(f"Marked {state} on {len(altered)} task instances")
    return redirect(origin)
@expose('/confirm', methods=['GET'])
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
    ]
)
@action_logging
def confirm(self):
    """Show confirmation page for marking tasks as success or failed.

    Performs a dry run (commit=False) of the state change and renders the
    list of task instances that would be altered; the confirm form then
    posts to the matching /success or /failed endpoint.
    """
    args = request.args
    dag_id = args.get('dag_id')
    task_id = args.get('task_id')
    execution_date = args.get('execution_date')
    state = args.get('state')

    upstream = to_boolean(args.get('upstream'))
    downstream = to_boolean(args.get('downstream'))
    future = to_boolean(args.get('future'))
    past = to_boolean(args.get('past'))

    try:
        dag = current_app.dag_bag.get_dag(dag_id)
    except airflow.exceptions.SerializedDagNotFound:
        flash(f'DAG {dag_id} not found', "error")
        return redirect(request.referrer or url_for('Airflow.index'))

    try:
        task = dag.get_task(task_id)
    except airflow.exceptions.TaskNotFound:
        flash(f"Task {task_id} not found", "error")
        return redirect(request.referrer or url_for('Airflow.index'))

    task.dag = dag

    # Only the two endpoint-backed states are valid targets.
    if state not in (
        'success',
        'failed',
    ):
        flash(f"Invalid state {state}, must be either 'success' or 'failed'", "error")
        return redirect(request.referrer or url_for('Airflow.index'))

    latest_execution_date = dag.get_latest_execution_date()
    if not latest_execution_date:
        flash(f"Cannot mark tasks as {state}, seem that dag {dag_id} has never run", "error")
        return redirect(request.referrer or url_for('Airflow.index'))

    execution_date = timezone.parse(execution_date)

    from airflow.api.common.experimental.mark_tasks import set_state

    # commit=False: dry run only — the real change happens on POST.
    to_be_altered = set_state(
        tasks=[task],
        execution_date=execution_date,
        upstream=upstream,
        downstream=downstream,
        future=future,
        past=past,
        state=state,
        commit=False,
    )

    details = "\n".join(str(t) for t in to_be_altered)

    response = self.render_template(
        "airflow/confirm.html",
        endpoint=url_for(f'Airflow.{state}'),
        message=f"Here's the list of task instances you are about to mark as {state}:",
        details=details,
    )

    return response
@expose('/failed', methods=['POST'])
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
    ]
)
@action_logging
def failed(self):
    """Mark task as failed."""
    form = request.form
    # Relatives of the selected TI to include in the state change.
    upstream, downstream, future, past = (
        to_boolean(form.get(flag)) for flag in ('upstream', 'downstream', 'future', 'past')
    )
    return self._mark_task_instance_state(
        form.get('dag_id'),
        form.get('task_id'),
        get_safe_url(form.get('origin')),
        form.get('execution_date'),
        upstream,
        downstream,
        future,
        past,
        State.FAILED,
    )
@expose('/success', methods=['POST'])
@auth.has_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
    ]
)
@action_logging
def success(self):
    """Mark task as success."""
    form = request.form
    # Relatives of the selected TI to include in the state change.
    upstream, downstream, future, past = (
        to_boolean(form.get(flag)) for flag in ('upstream', 'downstream', 'future', 'past')
    )
    return self._mark_task_instance_state(
        form.get('dag_id'),
        form.get('task_id'),
        get_safe_url(form.get('origin')),
        form.get('execution_date'),
        upstream,
        downstream,
        future,
        past,
        State.SUCCESS,
    )
    def _get_tree_data(self, dag_runs: Iterable[DagRun], dag: DAG, base_date: DateTime):
        """Returns formatted dag_runs for Tree view.

        ``dag_runs`` is a mapping of execution_date -> serialized DagRun dict
        (built by the callers via ``alchemy_to_dict``).  The result is a nested
        node structure consumed by the tree.html client-side JS.
        """
        dates = sorted(dag_runs.keys())
        min_date = min(dag_runs, default=None)
        # Index all task instances in the window by (task_id, execution_date)
        # for O(1) lookup while building each node's per-run instance list.
        task_instances = {
            (ti.task_id, ti.execution_date): ti
            for ti in dag.get_task_instances(start_date=min_date, end_date=base_date)
        }
        expanded = set()
        # The default recursion traces every path so that tree view has full
        # expand/collapse functionality. After 5,000 nodes we stop and fall
        # back on a quick DFS search for performance. See PR #320.
        node_count = 0
        node_limit = 5000 / max(1, len(dag.leaves))

        def encode_ti(task_instance: Optional[models.TaskInstance]) -> Optional[List]:
            # Serialize one task instance as a compact positional list.
            if not task_instance:
                return None
            # NOTE: order of entry is important here because client JS relies on it for
            # tree node reconstruction. Remember to change JS code in tree.html
            # whenever order is altered.
            task_instance_data = [
                task_instance.state,
                task_instance.try_number,
                None,  # start_ts
                None,  # duration
            ]
            if task_instance.start_date:
                # round to seconds to reduce payload size
                task_instance_data[2] = int(task_instance.start_date.timestamp())
            if task_instance.duration is not None:
                task_instance_data[3] = truncate_task_duration(task_instance.duration)
            return task_instance_data

        def recurse_nodes(task, visited):
            # Build the subtree rooted at `task`; `visited` prevents revisiting
            # tasks once the node budget (node_limit) has been exhausted.
            nonlocal node_count
            node_count += 1
            visited.add(task)
            task_id = task.task_id
            node = {
                'name': task.task_id,
                'instances': [encode_ti(task_instances.get((task_id, d))) for d in dates],
                'num_dep': len(task.downstream_list),
                'operator': task.task_type,
                'retries': task.retries,
                'owner': task.owner,
                'ui_color': task.ui_color,
            }
            if task.downstream_list:
                children = [
                    recurse_nodes(t, visited)
                    for t in task.downstream_list
                    if node_count < node_limit or t not in visited
                ]
                # D3 tree uses children vs _children to define what is
                # expanded or not. The following block makes it such that
                # repeated nodes are collapsed by default.
                if task.task_id not in expanded:
                    children_key = 'children'
                    expanded.add(task.task_id)
                else:
                    children_key = "_children"
                node[children_key] = children
            if task.depends_on_past:
                node['depends_on_past'] = task.depends_on_past
            if task.start_date:
                # round to seconds to reduce payload size
                node['start_ts'] = int(task.start_date.timestamp())
            if task.end_date:
                # round to seconds to reduce payload size
                node['end_ts'] = int(task.end_date.timestamp())
            if task.extra_links:
                node['extra_links'] = task.extra_links
            return node

        # Synthetic root node joins all of the DAG's root tasks; the
        # top-level 'instances' carry the DagRun dicts themselves.
        return {
            'name': '[DAG]',
            'children': [recurse_nodes(t, set()) for t in dag.roots],
            'instances': [dag_runs.get(d) or {'execution_date': d.isoformat()} for d in dates],
        }
    @expose('/tree')
    @auth.has_access(
        [
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
        ]
    )
    @gzipped
    @action_logging
    @provide_session
    def tree(self, session=None):
        """Get Dag as tree.

        Renders the Tree view: the last ``num_runs`` DagRuns up to
        ``base_date``, encoded via ``_get_tree_data``.
        """
        dag_id = request.args.get('dag_id')
        dag = current_app.dag_bag.get_dag(dag_id)
        dag_model = DagModel.get_dagmodel(dag_id)
        if not dag:
            flash(f'DAG "{dag_id}" seems to be missing from DagBag.', "error")
            return redirect(url_for('Airflow.index'))
        wwwutils.check_import_errors(dag.fileloc, session)
        root = request.args.get('root')
        if root:
            # Restrict the view to the subtree feeding into `root`.
            dag = dag.partial_subset(task_ids_or_regex=root, include_downstream=False, include_upstream=True)
        num_runs = request.args.get('num_runs', type=int)
        if num_runs is None:
            num_runs = conf.getint('webserver', 'default_dag_run_display_number')
        try:
            base_date = timezone.parse(request.args["base_date"])
        except (KeyError, ValueError):
            # Missing or unparsable base_date: default to the latest run (or now).
            base_date = dag.get_latest_execution_date() or timezone.utcnow()
        dag_runs = (
            session.query(DagRun)
            .filter(DagRun.dag_id == dag.dag_id, DagRun.execution_date <= base_date)
            .order_by(DagRun.execution_date.desc())
            .limit(num_runs)
            .all()
        )
        dag_runs = {dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
        max_date = max(dag_runs.keys(), default=None)
        form = DateTimeWithNumRunsForm(
            data={
                'base_date': max_date or timezone.utcnow(),
                'num_runs': num_runs,
            }
        )
        doc_md = wwwutils.wrapped_markdown(getattr(dag, 'doc_md', None))
        task_log_reader = TaskLogReader()
        if task_log_reader.supports_external_link:
            external_log_name = task_log_reader.log_handler.log_name
        else:
            external_log_name = None
        data = self._get_tree_data(dag_runs, dag, base_date)
        # avoid spaces to reduce payload size
        data = htmlsafe_json_dumps(data, separators=(',', ':'))
        return self.render_template(
            'airflow/tree.html',
            operators=sorted({op.task_type: op for op in dag.tasks}.values(), key=lambda x: x.task_type),
            root=root,
            form=form,
            dag=dag,
            doc_md=doc_md,
            data=data,
            num_runs=num_runs,
            show_external_log_redirect=task_log_reader.supports_external_link,
            external_log_name=external_log_name,
            dag_model=dag_model,
        )
    @expose('/calendar')
    @auth.has_access(
        [
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
        ]
    )
    @gzipped
    @action_logging
    @provide_session
    def calendar(self, session=None):
        """Get DAG runs as calendar.

        Aggregates DagRun counts per (date, state) and ships them to the
        calendar.html heat-map view as JSON.
        """

        def _convert_to_date(session, column):
            """Convert column to date."""
            # MSSQL has no DATE() function; cast instead.
            if session.bind.dialect.name == 'mssql':
                return column.cast(Date)
            else:
                return func.date(column)

        dag_id = request.args.get('dag_id')
        dag = current_app.dag_bag.get_dag(dag_id)
        dag_model = DagModel.get_dagmodel(dag_id)
        if not dag:
            flash(f'DAG "{dag_id}" seems to be missing from DagBag.', "error")
            return redirect(url_for('Airflow.index'))
        wwwutils.check_import_errors(dag.fileloc, session)
        root = request.args.get('root')
        if root:
            dag = dag.partial_subset(task_ids_or_regex=root, include_downstream=False, include_upstream=True)
        # One row per (calendar day, run state) with the run count.
        dag_states = (
            session.query(
                (_convert_to_date(session, DagRun.execution_date)).label('date'),
                DagRun.state,
                func.count('*').label('count'),
            )
            .filter(DagRun.dag_id == dag.dag_id)
            .group_by(_convert_to_date(session, DagRun.execution_date), DagRun.state)
            .order_by(_convert_to_date(session, DagRun.execution_date).asc())
            .all()
        )
        dag_states = [
            {
                # DATE() in SQLite and MySQL behave differently:
                # SQLite returns a string, MySQL returns a date.
                'date': dr.date if isinstance(dr.date, str) else dr.date.isoformat(),
                'state': dr.state,
                'count': dr.count,
            }
            for dr in dag_states
        ]
        data = {
            'dag_states': dag_states,
            'start_date': (dag.start_date or DateTime.utcnow()).date().isoformat(),
            'end_date': (dag.end_date or DateTime.utcnow()).date().isoformat(),
        }
        doc_md = wwwutils.wrapped_markdown(getattr(dag, 'doc_md', None))
        # avoid spaces to reduce payload size
        data = htmlsafe_json_dumps(data, separators=(',', ':'))
        return self.render_template(
            'airflow/calendar.html',
            dag=dag,
            doc_md=doc_md,
            data=data,
            root=root,
            dag_model=dag_model,
        )
    @expose('/graph')
    @auth.has_access(
        [
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
        ]
    )
    @gzipped
    @action_logging
    @provide_session
    def graph(self, session=None):
        """Get DAG as Graph.

        Renders the Graph view for one execution date: task-group nodes,
        edges, and per-task instance state for the selected run.
        """
        dag_id = request.args.get('dag_id')
        dag = current_app.dag_bag.get_dag(dag_id)
        dag_model = DagModel.get_dagmodel(dag_id)
        if not dag:
            flash(f'DAG "{dag_id}" seems to be missing.', "error")
            return redirect(url_for('Airflow.index'))
        wwwutils.check_import_errors(dag.fileloc, session)
        root = request.args.get('root')
        if root:
            dag = dag.partial_subset(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
        # Layout direction; falls back to the DAG's configured orientation.
        arrange = request.args.get('arrange', dag.orientation)
        nodes = task_group_to_dict(dag.task_group)
        edges = dag_edges(dag)
        dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
        dt_nr_dr_data['arrange'] = arrange
        dttm = dt_nr_dr_data['dttm']

        class GraphForm(DateTimeWithNumRunsWithDagRunsForm):
            """Graph Form class."""

            arrange = SelectField(
                "Layout",
                choices=(
                    ('LR', "Left > Right"),
                    ('RL', "Right > Left"),
                    ('TB', "Top > Bottom"),
                    ('BT', "Bottom > Top"),
                ),
            )

        form = GraphForm(data=dt_nr_dr_data)
        form.execution_date.choices = dt_nr_dr_data['dr_choices']
        # State of each task instance for the single selected execution date.
        task_instances = {ti.task_id: alchemy_to_dict(ti) for ti in dag.get_task_instances(dttm, dttm)}
        tasks = {
            t.task_id: {
                'dag_id': t.dag_id,
                'task_type': t.task_type,
                'extra_links': t.extra_links,
            }
            for t in dag.tasks
        }
        if not tasks:
            flash("No tasks found", "error")
        session.commit()
        doc_md = wwwutils.wrapped_markdown(getattr(dag, 'doc_md', None))
        task_log_reader = TaskLogReader()
        if task_log_reader.supports_external_link:
            external_log_name = task_log_reader.log_handler.log_name
        else:
            external_log_name = None
        return self.render_template(
            'airflow/graph.html',
            dag=dag,
            form=form,
            width=request.args.get('width', "100%"),
            height=request.args.get('height', "800"),
            execution_date=dttm.isoformat(),
            state_token=wwwutils.state_token(dt_nr_dr_data['dr_state']),
            doc_md=doc_md,
            arrange=arrange,
            operators=sorted({op.task_type: op for op in dag.tasks}.values(), key=lambda x: x.task_type),
            root=root or '',
            task_instances=task_instances,
            tasks=tasks,
            nodes=nodes,
            edges=edges,
            show_external_log_redirect=task_log_reader.supports_external_link,
            external_log_name=external_log_name,
            dag_run_state=dt_nr_dr_data['dr_state'],
            dag_model=dag_model,
        )
    @expose('/duration')
    @auth.has_access(
        [
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
        ]
    )
    @action_logging
    @provide_session
    def duration(self, session=None):
        """Get Dag as duration graph.

        Builds two nvd3 line charts over the last ``num_runs`` runs: raw task
        duration, and cumulative duration including prior failed attempts.
        """
        default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
        dag_id = request.args.get('dag_id')
        dag_model = DagModel.get_dagmodel(dag_id)
        try:
            dag = current_app.dag_bag.get_dag(dag_id)
        except airflow.exceptions.SerializedDagNotFound:
            dag = None
        if dag is None:
            flash(f'DAG "{dag_id}" seems to be missing.', "error")
            return redirect(url_for('Airflow.index'))
        wwwutils.check_import_errors(dag.fileloc, session)
        base_date = request.args.get('base_date')
        num_runs = request.args.get('num_runs', default=default_dag_run, type=int)
        if base_date:
            base_date = timezone.parse(base_date)
        else:
            base_date = dag.get_latest_execution_date() or timezone.utcnow()
        root = request.args.get('root')
        if root:
            dag = dag.partial_subset(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
        chart_height = wwwutils.get_chart_height(dag)
        chart = nvd3.lineChart(
            name="lineChart", x_is_date=True, height=chart_height, chart_attr=self.line_chart_attr
        )
        cum_chart = nvd3.lineChart(
            name="cumLineChart", x_is_date=True, height=chart_height, chart_attr=self.line_chart_attr
        )
        y_points = defaultdict(list)
        x_points = defaultdict(list)
        cumulative_y = defaultdict(list)
        task_instances = dag.get_task_instances_before(base_date, num_runs, session=session)
        if task_instances:
            min_date = task_instances[0].execution_date
        else:
            min_date = timezone.utc_epoch()
        # All recorded failures in the window; their durations are folded into
        # the cumulative chart below.
        ti_fails = (
            session.query(TaskFail)
            .filter(
                TaskFail.dag_id == dag.dag_id,
                TaskFail.execution_date >= min_date,
                TaskFail.execution_date <= base_date,
                TaskFail.task_id.in_([t.task_id for t in dag.tasks]),
            )
            .all()
        )
        # Sum failure durations per (dag_id, task_id, execution_date).
        fails_totals = defaultdict(int)
        for failed_task_instance in ti_fails:
            dict_key = (
                failed_task_instance.dag_id,
                failed_task_instance.task_id,
                failed_task_instance.execution_date,
            )
            if failed_task_instance.duration:
                fails_totals[dict_key] += failed_task_instance.duration
        for task_instance in task_instances:
            if task_instance.duration:
                date_time = wwwutils.epoch(task_instance.execution_date)
                x_points[task_instance.task_id].append(date_time)
                y_points[task_instance.task_id].append(float(task_instance.duration))
                fails_dict_key = (task_instance.dag_id, task_instance.task_id, task_instance.execution_date)
                fails_total = fails_totals[fails_dict_key]
                cumulative_y[task_instance.task_id].append(float(task_instance.duration + fails_total))
        # determine the most relevant time unit for the set of task instance
        # durations for the DAG
        y_unit = infer_time_unit([d for t in y_points.values() for d in t])
        cum_y_unit = infer_time_unit([d for t in cumulative_y.values() for d in t])
        # update the y Axis on both charts to have the correct time units
        chart.create_y_axis('yAxis', format='.02f', custom_format=False, label=f'Duration ({y_unit})')
        chart.axislist['yAxis']['axisLabelDistance'] = '-15'
        cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False, label=f'Duration ({cum_y_unit})')
        cum_chart.axislist['yAxis']['axisLabelDistance'] = '-15'
        for task_id in x_points:
            chart.add_serie(
                name=task_id,
                x=x_points[task_id],
                y=scale_time_units(y_points[task_id], y_unit),
            )
            cum_chart.add_serie(
                name=task_id,
                x=x_points[task_id],
                y=scale_time_units(cumulative_y[task_id], cum_y_unit),
            )
        dates = sorted({ti.execution_date for ti in task_instances})
        max_date = max(ti.execution_date for ti in task_instances) if dates else None
        session.commit()
        form = DateTimeWithNumRunsForm(
            data={
                'base_date': max_date or timezone.utcnow(),
                'num_runs': num_runs,
            }
        )
        chart.buildcontent()
        cum_chart.buildcontent()
        # Inject a 'chartload' trigger just before the closing '});' of the
        # generated cumulative-chart script so the page JS can hook it.
        s_index = cum_chart.htmlcontent.rfind('});')
        cum_chart.htmlcontent = (
            cum_chart.htmlcontent[:s_index]
            + "$( document ).trigger('chartload')"
            + cum_chart.htmlcontent[s_index:]
        )
        return self.render_template(
            'airflow/duration_chart.html',
            dag=dag,
            root=root,
            form=form,
            chart=Markup(chart.htmlcontent),
            cum_chart=Markup(cum_chart.htmlcontent),
            dag_model=dag_model,
        )
@expose('/tries')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def tries(self, session=None):
"""Shows all tries."""
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = current_app.dag_bag.get_dag(dag_id)
dag_model = DagModel.get_dagmodel(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs', default=default_dag_run, type=int)
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
wwwutils.check_import_errors(dag.fileloc, session)
root = request.args.get('root')
if root:
dag = dag.partial_subset(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart",
x_is_date=True,
y_axis_format='d',
height=chart_height,
chart_attr=self.line_chart_attr,
)
tis = dag.get_task_instances_before(base_date, num_runs, session=session)
for task in dag.tasks:
y_points = []
x_points = []
for ti in tis:
dttm = wwwutils.epoch(ti.execution_date)
x_points.append(dttm)
# y value should reflect completed tries to have a 0 baseline.
y_points.append(ti.prev_attempted_tries)
if x_points:
chart.add_serie(name=task.task_id, x=x_points, y=y_points)
tries = sorted({ti.try_number for ti in tis})
max_date = max(ti.execution_date for ti in tis) if tries else None
chart.create_y_axis('yAxis', format='.02f', custom_format=False, label='Tries')
chart.axislist['yAxis']['axisLabelDistance'] = '-15'
session.commit()
form = DateTimeWithNumRunsForm(
data={
'base_date': max_date or timezone.utcnow(),
'num_runs': num_runs,
}
)
chart.buildcontent()
return self.render_template(
'airflow/chart.html',
dag=dag,
root=root,
form=form,
chart=Markup(chart.htmlcontent),
tab_title='Tries',
dag_model=dag_model,
)
@expose('/landing_times')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
@provide_session
def landing_times(self, session=None):
"""Shows landing times."""
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = current_app.dag_bag.get_dag(dag_id)
dag_model = DagModel.get_dagmodel(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs', default=default_dag_run, type=int)
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.get_latest_execution_date() or timezone.utcnow()
wwwutils.check_import_errors(dag.fileloc, session)
root = request.args.get('root')
if root:
dag = dag.partial_subset(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
tis = dag.get_task_instances_before(base_date, num_runs, session=session)
chart_height = wwwutils.get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, chart_attr=self.line_chart_attr
)
y_points = {}
x_points = {}
for task in dag.tasks:
task_id = task.task_id
y_points[task_id] = []
x_points[task_id] = []
for ti in tis:
ts = ti.execution_date
if dag.following_schedule(ts):
ts = dag.following_schedule(ts)
if ti.end_date:
dttm = wwwutils.epoch(ti.execution_date)
secs = (ti.end_date - ts).total_seconds()
x_points[task_id].append(dttm)
y_points[task_id].append(secs)
# determine the most relevant time unit for the set of landing times
# for the DAG
y_unit = infer_time_unit([d for t in y_points.values() for d in t])
# update the y Axis to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False, label=f'Landing Time ({y_unit})')
chart.axislist['yAxis']['axisLabelDistance'] = '-15'
for task_id in x_points:
chart.add_serie(
name=task_id,
x=x_points[task_id],
y=scale_time_units(y_points[task_id], y_unit),
)
dates = sorted({ti.execution_date for ti in tis})
max_date = max(ti.execution_date for ti in tis) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(
data={
'base_date': max_date or timezone.utcnow(),
'num_runs': num_runs,
}
)
chart.buildcontent()
return self.render_template(
'airflow/chart.html',
dag=dag,
chart=Markup(chart.htmlcontent),
height=str(chart_height + 100) + "px",
root=root,
form=form,
tab_title='Landing times',
dag_model=dag_model,
)
@expose('/paused', methods=['POST'])
@auth.has_access(
[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
]
)
@action_logging
def paused(self):
"""Toggle paused."""
dag_id = request.args.get('dag_id')
is_paused = request.args.get('is_paused') == 'false'
models.DagModel.get_dagmodel(dag_id).set_is_paused(is_paused=is_paused)
return "OK"
    @expose('/gantt')
    @auth.has_access(
        [
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
        ]
    )
    @action_logging
    @provide_session
    def gantt(self, session=None):
        """Show GANTT chart.

        Plots each task instance of the selected run as a bar, plus one extra
        bar per recorded failed attempt (from the TaskFail table).
        """
        dag_id = request.args.get('dag_id')
        dag = current_app.dag_bag.get_dag(dag_id)
        dag_model = DagModel.get_dagmodel(dag_id)
        root = request.args.get('root')
        if root:
            dag = dag.partial_subset(task_ids_or_regex=root, include_upstream=True, include_downstream=False)
        wwwutils.check_import_errors(dag.fileloc, session)
        dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
        dttm = dt_nr_dr_data['dttm']
        form = DateTimeWithNumRunsWithDagRunsForm(data=dt_nr_dr_data)
        form.execution_date.choices = dt_nr_dr_data['dr_choices']
        # Only instances that actually started and have a state can be drawn.
        tis = [ti for ti in dag.get_task_instances(dttm, dttm) if ti.start_date and ti.state]
        tis = sorted(tis, key=lambda ti: ti.start_date)
        # Failed attempts for each displayed instance, fetched one query per
        # instance and flattened into a single list.
        ti_fails = list(
            itertools.chain(
                *(
                    (
                        session.query(TaskFail)
                        .filter(
                            TaskFail.dag_id == ti.dag_id,
                            TaskFail.task_id == ti.task_id,
                            TaskFail.execution_date == ti.execution_date,
                        )
                        .all()
                    )
                    for ti in tis
                )
            )
        )
        tasks = []
        for ti in tis:
            # prev_attempted_tries will reflect the currently running try_number
            # or the try_number of the last complete run
            # https://issues.apache.org/jira/browse/AIRFLOW-2143
            try_count = ti.prev_attempted_tries if ti.prev_attempted_tries != 0 else ti.try_number
            task_dict = alchemy_to_dict(ti)
            task_dict['end_date'] = task_dict['end_date'] or timezone.utcnow()
            task_dict['extraLinks'] = dag.get_task(ti.task_id).extra_links
            task_dict['try_number'] = try_count
            tasks.append(task_dict)
        # Number consecutive failures of the same task 1, 2, 3, ... — relies
        # on ti_fails being grouped by task (it is: one query per ti above).
        tf_count = 0
        try_count = 1
        prev_task_id = ""
        for failed_task_instance in ti_fails:
            if tf_count != 0 and failed_task_instance.task_id == prev_task_id:
                try_count += 1
            else:
                try_count = 1
            prev_task_id = failed_task_instance.task_id
            tf_count += 1
            task = dag.get_task(failed_task_instance.task_id)
            task_dict = alchemy_to_dict(failed_task_instance)
            end_date = task_dict['end_date'] or timezone.utcnow()
            task_dict['end_date'] = end_date
            task_dict['start_date'] = task_dict['start_date'] or end_date
            task_dict['state'] = State.FAILED
            task_dict['operator'] = task.task_type
            task_dict['try_number'] = try_count
            task_dict['extraLinks'] = task.extra_links
            tasks.append(task_dict)
        data = {
            'taskNames': [ti.task_id for ti in tis],
            'tasks': tasks,
            'height': len(tis) * 25 + 25,
        }
        session.commit()
        return self.render_template(
            'airflow/gantt.html',
            dag=dag,
            execution_date=dttm.isoformat(),
            form=form,
            data=data,
            base_date='',
            root=root,
            dag_model=dag_model,
        )
@expose('/extra_links')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def extra_links(self):
"""
A restful endpoint that returns external links for a given Operator
It queries the operator that sent the request for the links it wishes
to provide for a given external link name.
API: GET
Args: dag_id: The id of the dag containing the task in question
task_id: The id of the task in question
execution_date: The date of execution of the task
link_name: The name of the link reference to find the actual URL for
Returns:
200: {url: <url of link>, error: None} - returned when there was no problem
finding the URL
404: {url: None, error: <error message>} - returned when the operator does
not return a URL
"""
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
link_name = request.args.get('link_name')
dttm = timezone.parse(execution_date)
dag = current_app.dag_bag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
response = jsonify(
{
'url': None,
'error': f"can't find dag {dag} or task_id {task_id}",
}
)
response.status_code = 404
return response
task = dag.get_task(task_id)
try:
url = task.get_extra_links(dttm, link_name)
except ValueError as err:
response = jsonify({'url': None, 'error': str(err)})
response.status_code = 404
return response
if url:
response = jsonify({'error': None, 'url': url})
response.status_code = 200
return response
else:
response = jsonify({'url': None, 'error': f'No URL found for {link_name}'})
response.status_code = 404
return response
@expose('/object/task_instances')
@auth.has_access(
[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
]
)
@action_logging
def task_instances(self):
"""Shows task instances."""
dag_id = request.args.get('dag_id')
dag = current_app.dag_bag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = timezone.parse(dttm)
else:
return "Error: Invalid execution_date"
task_instances = {ti.task_id: alchemy_to_dict(ti) for ti in dag.get_task_instances(dttm, dttm)}
return json.dumps(task_instances, cls=utils_json.AirflowJsonEncoder)
    @expose('/object/tree_data')
    @auth.has_access(
        [
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
        ]
    )
    @action_logging
    def tree_data(self):
        """Returns tree data.

        JSON endpoint backing the Tree view auto-refresh; mirrors the data
        assembly done in the `tree` view but returns the payload directly.
        """
        dag_id = request.args.get('dag_id')
        dag = current_app.dag_bag.get_dag(dag_id)
        if not dag:
            response = jsonify({'error': f"can't find dag {dag_id}"})
            response.status_code = 404
            return response
        root = request.args.get('root')
        if root:
            dag = dag.partial_subset(task_ids_or_regex=root, include_downstream=False, include_upstream=True)
        num_runs = request.args.get('num_runs', type=int)
        if num_runs is None:
            num_runs = conf.getint('webserver', 'default_dag_run_display_number')
        try:
            base_date = timezone.parse(request.args["base_date"])
        except (KeyError, ValueError):
            # Missing or unparsable base_date: default to the latest run (or now).
            base_date = dag.get_latest_execution_date() or timezone.utcnow()
        with create_session() as session:
            dag_runs = (
                session.query(DagRun)
                .filter(DagRun.dag_id == dag.dag_id, DagRun.execution_date <= base_date)
                .order_by(DagRun.execution_date.desc())
                .limit(num_runs)
                .all()
            )
            dag_runs = {dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
            tree_data = self._get_tree_data(dag_runs, dag, base_date)
        # avoid spaces to reduce payload size
        return htmlsafe_json_dumps(tree_data, separators=(',', ':'))
@expose('/robots.txt')
@action_logging
def robots(self):
"""
Returns a robots.txt file for blocking certain search engine crawlers. This mitigates some
of the risk associated with exposing Airflow to the public internet, however it does not
address the real security risks associated with such a deployment.
"""
return send_from_directory(current_app.static_folder, 'robots.txt')
class ConfigurationView(AirflowBaseView):
    """View to show Airflow Configurations"""

    default_view = 'conf'

    class_permission_name = permissions.RESOURCE_CONFIG
    base_permissions = [
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_ACCESS_MENU,
    ]

    @expose('/configuration')
    @auth.has_access(
        [
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG),
        ]
    )
    def conf(self):
        """Shows configuration.

        Renders airflow.cfg (raw or highlighted) plus a resolved
        (section, key, value, source) table — unless the administrator has
        disabled `webserver.expose_config`.
        """
        raw = request.args.get('raw') == "true"
        title = "Airflow Configuration"
        subtitle = AIRFLOW_CONFIG
        # Don't show config when expose_config variable is False in airflow config
        if conf.getboolean("webserver", "expose_config"):
            with open(AIRFLOW_CONFIG) as file:
                config = file.read()
            table = [
                (section, key, value, source)
                for section, parameters in conf.as_dict(True, True).items()
                for key, (value, source) in parameters.items()
            ]
        else:
            config = (
                "# Your Airflow administrator chose not to expose the "
                "configuration, most likely for security reasons."
            )
            table = None
        if raw:
            # NOTE(review): "application/text" is not a registered MIME type
            # ("text/plain" would be standard) — confirm before changing, as
            # clients may depend on the current value.
            return Response(response=config, status=200, mimetype="application/text")
        else:
            code_html = Markup(
                highlight(
                    config,
                    lexers.IniLexer(),  # Lexer call
                    HtmlFormatter(noclasses=True),
                )
            )
            return self.render_template(
                'airflow/config.html',
                pre_subtitle=settings.HEADER + " v" + airflow.__version__,
                code_html=code_html,
                title=title,
                subtitle=subtitle,
                table=table,
            )
class RedocView(AirflowBaseView):
    """Serves the Redoc UI for the Airflow REST API's OpenAPI spec."""

    default_view = 'redoc'

    @expose('/redoc')
    def redoc(self):
        """Render the Redoc page, pointing it at the generated OpenAPI YAML."""
        spec_url = url_for("/api/v1./api/v1_openapi_yaml")
        return self.render_template('airflow/redoc.html', openapi_spec_url=spec_url)
######################################################################################
# ModelViews
######################################################################################
class DagFilter(BaseFilter):
    """Filter using DagIDs.

    Restricts a list-view query to the DAG ids the current user may read;
    users with all-DAGs access bypass the lookup entirely.
    """

    def apply(self, query, func):
        security_manager = current_app.appbuilder.sm
        if security_manager.has_all_dags_access():
            return query
        accessible_dag_ids = security_manager.get_accessible_dag_ids(g.user)
        return query.filter(self.model.dag_id.in_(accessible_dag_ids))
class AirflowModelView(ModelView):
    """Airflow Mode View.

    Common base for all Airflow CRUD model views: shared list widget,
    page size, and the SQLAlchemy interface subclass used by children.
    """

    list_widget = AirflowModelListWidget
    page_size = PAGE_SIZE

    # Exposed so subclasses can do `AirflowModelView.CustomSQLAInterface(Model)`.
    CustomSQLAInterface = wwwutils.CustomSQLAInterface
class SlaMissModelView(AirflowModelView):
    """View to show SlaMiss table.

    Read-only browse view over recorded SLA misses; rows are restricted to
    DAGs the current user can access via DagFilter.
    """

    route_base = '/slamiss'

    datamodel = AirflowModelView.CustomSQLAInterface(SlaMiss)  # type: ignore

    class_permission_name = permissions.RESOURCE_SLA_MISS
    method_permission_name = {
        'list': 'read',
    }

    base_permissions = [
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_ACCESS_MENU,
    ]

    list_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
    add_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
    edit_columns = ['dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp']
    search_columns = ['dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date']
    base_order = ('execution_date', 'desc')
    base_filters = [['dag_id', DagFilter, lambda: []]]

    # Render ids as links and datetimes with the shared formatter helpers.
    formatters_columns = {
        'task_id': wwwutils.task_instance_link,
        'execution_date': wwwutils.datetime_f('execution_date'),
        'timestamp': wwwutils.datetime_f('timestamp'),
        'dag_id': wwwutils.dag_link,
    }
class XComModelView(AirflowModelView):
    """View to show records from XCom table.

    Supports browsing, deleting (single and bulk) XCom entries; rows are
    restricted to accessible DAGs via DagFilter.
    """

    route_base = '/xcom'

    list_title = 'List XComs'

    datamodel = AirflowModelView.CustomSQLAInterface(XCom)

    class_permission_name = permissions.RESOURCE_XCOM
    method_permission_name = {
        'list': 'read',
        'delete': 'delete',
        'action_muldelete': 'delete',
    }
    base_permissions = [
        permissions.ACTION_CAN_CREATE,
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_DELETE,
        permissions.ACTION_CAN_ACCESS_MENU,
    ]

    search_columns = ['key', 'value', 'timestamp', 'execution_date', 'task_id', 'dag_id']
    list_columns = ['key', 'value', 'timestamp', 'execution_date', 'task_id', 'dag_id']
    base_order = ('execution_date', 'desc')

    base_filters = [['dag_id', DagFilter, lambda: []]]

    formatters_columns = {
        'task_id': wwwutils.task_instance_link,
        'execution_date': wwwutils.datetime_f('execution_date'),
        'timestamp': wwwutils.datetime_f('timestamp'),
        'dag_id': wwwutils.dag_link,
    }

    @action('muldelete', 'Delete', "Are you sure you want to delete selected records?", single=False)
    def action_muldelete(self, items):
        """Multiple delete action."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())

    def pre_add(self, item):
        """Pre add hook: normalize the date and serialize the value before insert."""
        item.execution_date = timezone.make_aware(item.execution_date)
        item.value = XCom.serialize_value(item.value)

    def pre_update(self, item):
        """Pre update hook: normalize the date and serialize the value before update."""
        item.execution_date = timezone.make_aware(item.execution_date)
        item.value = XCom.serialize_value(item.value)
def lazy_add_provider_discovered_options_to_connection_form():
    """Adds provider-discovered connection parameters as late as possible.

    Installs the conn_type choice list and any provider-contributed extra
    form fields onto ConnectionForm at call time, so provider discovery has
    already run.
    """

    def _get_connection_types() -> List[Tuple[str, str]]:
        """Returns connection types available."""
        # Start from the two built-in pseudo connection types.
        connection_types = [
            ('fs', 'File (path)'),
            ('mesos_framework-id', 'Mesos Framework ID'),
        ]
        providers_manager = ProvidersManager()
        for conn_type, hook_info in providers_manager.hooks.items():
            if not hook_info:
                continue
            connection_types.append((conn_type, hook_info.hook_name))
        return connection_types

    ConnectionForm.conn_type = SelectField(
        lazy_gettext('Conn Type'),
        choices=sorted(_get_connection_types(), key=itemgetter(1)),
        widget=Select2Widget(),
        validators=[InputRequired()],
        description="""
            Conn Type missing?
            Make sure you've installed the corresponding Airflow Provider Package.
        """,
    )

    # Attach every provider-contributed extra widget as a form field.
    for field_name, widget in ProvidersManager().connection_form_widgets.items():
        setattr(ConnectionForm, field_name, widget.field)
# Used to store a dictionary of field behaviours used to dynamically change available
# fields in ConnectionForm based on type of connection chosen
# See airflow.hooks.base_hook.DiscoverableHook for details on how to customize your Hooks.
# those field behaviours are rendered as scripts in the conn_create.html and conn_edit.html templates
class ConnectionFormWidget(FormWidget):
    """Form widget used to display connection.

    Exposes the provider field-behaviour map as JSON so the
    conn_create/conn_edit templates can toggle fields per connection type.
    """

    # Evaluated once at class-definition time from discovered providers.
    field_behaviours = json.dumps(ProvidersManager().field_behaviours)
class ConnectionModelView(AirflowModelView):
    """View to show records from Connections table.

    Full CRUD over Connection rows plus bulk delete/duplicate actions; the
    add/edit forms are extended with provider-discovered extra fields.
    """

    route_base = '/connection'

    datamodel = AirflowModelView.CustomSQLAInterface(Connection)  # type: ignore

    class_permission_name = permissions.RESOURCE_CONNECTION
    method_permission_name = {
        'add': 'create',
        'list': 'read',
        'edit': 'edit',
        'delete': 'delete',
        'action_muldelete': 'delete',
        'action_mulduplicate': 'create',
    }

    base_permissions = [
        permissions.ACTION_CAN_CREATE,
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_EDIT,
        permissions.ACTION_CAN_DELETE,
        permissions.ACTION_CAN_ACCESS_MENU,
    ]

    # Names of provider-contributed extra__<type>__ form fields.
    extra_fields = list(ProvidersManager().connection_form_widgets.keys())
    list_columns = [
        'conn_id',
        'conn_type',
        'description',
        'host',
        'port',
        'is_encrypted',
        'is_extra_encrypted',
    ]
    add_columns = edit_columns = [
        'conn_id',
        'conn_type',
        'description',
        'host',
        'schema',
        'login',
        'password',
        'port',
        'extra',
    ] + extra_fields
    add_form = edit_form = ConnectionForm
    add_template = 'airflow/conn_create.html'
    edit_template = 'airflow/conn_edit.html'

    add_widget = ConnectionFormWidget
    edit_widget = ConnectionFormWidget

    base_order = ('conn_id', 'asc')
    @action('muldelete', 'Delete', 'Are you sure you want to delete selected records?', single=False)
    @auth.has_access(
        [
            # NOTE(review): this checks DAG-edit permission for deleting
            # *connections* — looks inconsistent with the connection
            # permissions used elsewhere in this class; confirm intended.
            (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
        ]
    )
    def action_muldelete(self, items):
        """Multiple delete: remove all selected connection rows, then redirect back."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())
@action(
'mulduplicate',
'Duplicate',
'Are you sure you want to duplicate the selected connections?',
single=False,
)
@provide_session
@auth.has_access(
[
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION),
]
)
def action_mulduplicate(self, connections, session=None):
"""Duplicate Multiple connections"""
for selected_conn in connections:
new_conn_id = selected_conn.conn_id
match = re.search(r"_copy(\d+)$", selected_conn.conn_id)
if match:
conn_id_prefix = selected_conn.conn_id[: match.start()]
new_conn_id = f"{conn_id_prefix}_copy{int(match.group(1)) + 1}"
else:
new_conn_id += '_copy1'
dup_conn = Connection(
new_conn_id,
selected_conn.conn_type,
selected_conn.description,
selected_conn.host,
selected_conn.login,
selected_conn.password,
selected_conn.schema,
selected_conn.port,
selected_conn.extra,
)
try:
session.add(dup_conn)
session.commit()
flash(f"Connection {new_conn_id} added successfully.", "success")
except IntegrityError:
flash(
f"Connection {new_conn_id} can't be added. Integrity error, probably unique constraint.",
"warning",
)
session.rollback()
self.update_redirect()
return redirect(self.get_redirect())
def process_form(self, form, is_created):
"""Process form data."""
conn_type = form.data['conn_type']
conn_id = form.data["conn_id"]
extra = {
key: form.data[key]
for key in self.extra_fields
if key in form.data and key.startswith(f"extra__{conn_type}__")
}
# If parameters are added to the classic `Extra` field, include these values along with
# custom-field extras.
extra_conn_params = form.data.get("extra")
if extra_conn_params:
try:
extra.update(json.loads(extra_conn_params))
except (JSONDecodeError, TypeError):
flash(
Markup(
"<p>The <em>Extra</em> connection field contained an invalid value for Conn ID: "
f"<q>{conn_id}</q>.</p>"
"<p>If connection parameters need to be added to <em>Extra</em>, "
"please make sure they are in the form of a single, valid JSON object.</p><br>"
"The following <em>Extra</em> parameters were <b>not</b> added to the connection:<br>"
f"{extra_conn_params}",
),
category="error",
)
if extra.keys():
form.extra.data = json.dumps(extra)
def prefill_form(self, form, pk):
"""Prefill the form."""
try:
extra = form.data.get('extra')
if extra is None:
extra_dictionary = {}
else:
extra_dictionary = json.loads(extra)
except JSONDecodeError:
extra_dictionary = {}
if not isinstance(extra_dictionary, dict):
logging.warning('extra field for %s is not a dictionary', form.data.get('conn_id', '<unknown>'))
return
for field in self.extra_fields:
value = extra_dictionary.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class PluginView(AirflowBaseView):
    """Read-only view listing the Airflow plugins loaded in this webserver."""

    default_view = 'list'
    class_permission_name = permissions.RESOURCE_PLUGIN
    method_permission_name = {
        'list': 'read',
    }
    base_permissions = [
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_ACCESS_MENU,
    ]
    # Plugin attributes surfaced on the plugin page, one row per attribute.
    plugins_attributes_to_dump = [
        "hooks",
        "executors",
        "macros",
        "admin_views",
        "flask_blueprints",
        "menu_links",
        "appbuilder_views",
        "appbuilder_menu_items",
        "global_operator_extra_links",
        "operator_extra_links",
        "source",
    ]

    @expose('/plugin')
    @auth.has_access(
        [
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN),
        ]
    )
    def list(self):
        """Force-load all plugin categories, then render the plugin table."""
        plugins_manager.ensure_plugins_loaded()
        plugins_manager.integrate_executor_plugins()
        plugins_manager.initialize_extra_operators_links_plugins()
        plugins_manager.initialize_web_ui_plugins()
        plugins = []
        # 1-based numbering purely for display.
        for plugin_no, plugin in enumerate(plugins_manager.plugins, 1):
            plugin_data = {
                'plugin_no': plugin_no,
                'plugin_name': plugin.name,
                'attrs': {},
            }
            for attr_name in self.plugins_attributes_to_dump:
                attr_value = getattr(plugin, attr_name)
                plugin_data['attrs'][attr_name] = attr_value
            plugins.append(plugin_data)
        title = "Airflow Plugins"
        doc_url = get_docs_url("plugins.html")
        return self.render_template(
            'airflow/plugin.html',
            plugins=plugins,
            title=title,
            doc_url=doc_url,
        )
class ProviderView(AirflowBaseView):
    """Read-only view listing the Airflow provider packages installed in this environment."""

    default_view = 'list'
    class_permission_name = permissions.RESOURCE_PROVIDER
    method_permission_name = {
        'list': 'read',
    }
    base_permissions = [
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_ACCESS_MENU,
    ]

    @expose('/provider')
    @auth.has_access(
        [
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER),
        ]
    )
    def list(self):
        """Render the providers page: package name, description, version, docs link."""
        providers_manager = ProvidersManager()
        providers = []
        for pi in providers_manager.providers.values():
            # Each registry value is a (version, provider-info-dict) pair.
            provider_info = pi[1]
            provider_data = {
                "package_name": provider_info["package-name"],
                "description": self._clean_description(provider_info["description"]),
                "version": pi[0],
                "documentation_url": get_doc_url_for_provider(provider_info["package-name"], pi[0]),
            }
            providers.append(provider_data)
        title = "Providers"
        doc_url = get_docs_url("apache-airflow-providers/index.html")
        return self.render_template(
            'airflow/providers.html',
            providers=providers,
            title=title,
            doc_url=doc_url,
        )

    def _clean_description(self, description):
        """Escape *description* and convert RST links / newlines to safe HTML markup.

        :param description: raw provider description text (may contain RST
            ``\u0060text <url>\u0060__`` links and newlines).
        :return: a ``markupsafe.Markup`` string safe to embed in the template.
        """

        def _build_link(match_obj):
            # Turn an RST external link into an HTML anchor.
            text = match_obj.group(1)
            url = match_obj.group(2)
            return markupsafe.Markup(f'<a href="{url}">{text}</a>')

        cd = markupsafe.escape(description)
        # Fix: the previous pattern used the character class ``[\s+]+``
        # (one-or-more of "whitespace or a literal '+'"); the quantified
        # ``\s+`` was clearly intended to separate link text from <url>.
        cd = re.sub(r"`(.*)\s+<(.*)>`__", _build_link, cd)
        cd = re.sub(r"\n", r"<br>", cd)
        return markupsafe.Markup(cd)
class PoolModelView(AirflowModelView):
    """CRUD view over the Pool table with links into filtered task-instance lists."""

    route_base = '/pool'
    datamodel = AirflowModelView.CustomSQLAInterface(models.Pool)  # type: ignore
    class_permission_name = permissions.RESOURCE_POOL
    method_permission_name = {
        'add': 'create',
        'list': 'read',
        'edit': 'edit',
        'delete': 'delete',
        'action_muldelete': 'delete',
    }
    base_permissions = [
        permissions.ACTION_CAN_CREATE,
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_EDIT,
        permissions.ACTION_CAN_DELETE,
        permissions.ACTION_CAN_ACCESS_MENU,
    ]
    list_columns = ['pool', 'slots', 'running_slots', 'queued_slots']
    add_columns = ['pool', 'slots', 'description']
    edit_columns = ['pool', 'slots', 'description']
    base_order = ('pool', 'asc')

    @action('muldelete', 'Delete', 'Are you sure you want to delete selected records?', single=False)
    def action_muldelete(self, items):
        """Delete the selected pools; the default pool is protected and aborts the whole batch."""
        if any(item.pool == models.Pool.DEFAULT_POOL_NAME for item in items):
            flash("default_pool cannot be deleted", 'error')
            self.update_redirect()
            return redirect(self.get_redirect())
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())

    def pool_link(self):
        """Render the pool name as a link to its task instances."""
        pool_id = self.get('pool')
        if pool_id is not None:
            url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id)
            return Markup("<a href='{url}'>{pool_id}</a>").format(url=url, pool_id=pool_id)
        else:
            return Markup('<span class="label label-danger">Invalid</span>')

    def frunning_slots(self):
        """Render running-slot count as a link to this pool's running task instances."""
        pool_id = self.get('pool')
        running_slots = self.get('running_slots')
        if pool_id is not None and running_slots is not None:
            url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id, _flt_3_state='running')
            return Markup("<a href='{url}'>{running_slots}</a>").format(url=url, running_slots=running_slots)
        else:
            return Markup('<span class="label label-danger">Invalid</span>')

    def fqueued_slots(self):
        """Render queued-slot count as a link to this pool's queued task instances."""
        pool_id = self.get('pool')
        queued_slots = self.get('queued_slots')
        if pool_id is not None and queued_slots is not None:
            url = url_for('TaskInstanceModelView.list', _flt_3_pool=pool_id, _flt_3_state='queued')
            return Markup("<a href='{url}'>{queued_slots}</a>").format(url=url, queued_slots=queued_slots)
        else:
            return Markup('<span class="label label-danger">Invalid</span>')

    # Formatter methods above must be defined before this mapping is built.
    formatters_columns = {'pool': pool_link, 'running_slots': frunning_slots, 'queued_slots': fqueued_slots}
    validators_columns = {'pool': [validators.DataRequired()], 'slots': [validators.NumberRange(min=-1)]}
def _can_create_variable() -> bool:
    """Return True if the current user may create Variable records."""
    security_manager = current_app.appbuilder.sm
    return security_manager.has_access(
        permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE
    )
class VariableModelView(AirflowModelView):
    """CRUD view over the Variable table with masking, export and JSON import."""

    route_base = '/variable'
    list_template = 'airflow/variable_list.html'
    edit_template = 'airflow/variable_edit.html'
    datamodel = AirflowModelView.CustomSQLAInterface(models.Variable)  # type: ignore
    class_permission_name = permissions.RESOURCE_VARIABLE
    method_permission_name = {
        'add': 'create',
        'list': 'read',
        'edit': 'edit',
        'delete': 'delete',
        'action_muldelete': 'delete',
        'action_varexport': 'read',
    }
    base_permissions = [
        permissions.ACTION_CAN_CREATE,
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_EDIT,
        permissions.ACTION_CAN_DELETE,
        permissions.ACTION_CAN_ACCESS_MENU,
    ]
    list_columns = ['key', 'val', 'description', 'is_encrypted']
    add_columns = ['key', 'val', 'description']
    edit_columns = ['key', 'val', 'description']
    search_columns = ['key', 'val']
    base_order = ('key', 'asc')

    def hidden_field_formatter(self):
        """Mask values whose key looks sensitive; otherwise show the raw value."""
        key = self.get('key')
        val = self.get('val')
        if secrets_masker.should_hide_value_for_key(key):
            return Markup('*' * 8)
        if val:
            return val
        else:
            return Markup('<span class="label label-danger">Invalid</span>')

    formatters_columns = {
        'val': hidden_field_formatter,
    }
    validators_columns = {'key': [validators.DataRequired()]}

    def prefill_form(self, form, request_id):
        """Mask the value in the edit form when the key is sensitive."""
        if secrets_masker.should_hide_value_for_key(form.key.data):
            form.val.data = '*' * 8

    # Exposed to the list template so it can conditionally show the import UI.
    extra_args = {"can_create_variable": _can_create_variable}

    @action('muldelete', 'Delete', 'Are you sure you want to delete selected records?', single=False)
    def action_muldelete(self, items):
        """Delete all selected variables."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())

    @action('varexport', 'Export', '', single=False)
    def action_varexport(self, items):
        """Export selected variables as a downloadable variables.json file."""
        var_dict = {}
        decoder = json.JSONDecoder()
        for var in items:
            try:
                # Decode JSON values so the export round-trips; keep raw string otherwise.
                val = decoder.decode(var.val)
            except Exception:
                val = var.val
            var_dict[var.key] = val
        response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
        response.headers["Content-Disposition"] = "attachment; filename=variables.json"
        response.headers["Content-Type"] = "application/json; charset=utf-8"
        return response

    @expose('/varimport', methods=["POST"])
    @auth.has_access([(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE)])
    @action_logging
    def varimport(self):
        """Import variables from an uploaded JSON file; per-key failures are counted, not fatal."""
        try:
            variable_dict = json.loads(request.files['file'].read())
        except Exception:
            self.update_redirect()
            flash("Missing file or syntax error.", 'error')
            return redirect(self.get_redirect())
        else:
            suc_count = fail_count = 0
            for k, v in variable_dict.items():
                try:
                    # Non-string values are stored JSON-serialized.
                    models.Variable.set(k, v, serialize_json=not isinstance(v, str))
                except Exception as e:
                    logging.info('Variable import failed: %s', repr(e))
                    fail_count += 1
                else:
                    suc_count += 1
            flash(f"{suc_count} variable(s) successfully updated.")
            if fail_count:
                flash(f"{fail_count} variable(s) failed to be updated.", 'error')
            self.update_redirect()
            return redirect(self.get_redirect())
class JobModelView(AirflowModelView):
    """Read-only view over the Job table (scheduler/backfill/triggerer jobs)."""

    route_base = '/job'
    datamodel = AirflowModelView.CustomSQLAInterface(BaseJob)  # type: ignore
    class_permission_name = permissions.RESOURCE_JOB
    method_permission_name = {
        'list': 'read',
    }
    base_permissions = [
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_ACCESS_MENU,
    ]
    list_columns = [
        'id',
        'dag_id',
        'state',
        'job_type',
        'start_date',
        'end_date',
        'latest_heartbeat',
        'executor_class',
        'hostname',
        'unixname',
    ]
    search_columns = [
        'id',
        'dag_id',
        'state',
        'job_type',
        'start_date',
        'end_date',
        'latest_heartbeat',
        'executor_class',
        'hostname',
        'unixname',
    ]
    base_order = ('start_date', 'desc')
    # Restrict rows to DAGs the current user can see.
    base_filters = [['dag_id', DagFilter, lambda: []]]
    formatters_columns = {
        'start_date': wwwutils.datetime_f('start_date'),
        'end_date': wwwutils.datetime_f('end_date'),
        'hostname': wwwutils.nobr_f('hostname'),
        'state': wwwutils.state_f,
        'latest_heartbeat': wwwutils.datetime_f('latest_heartbeat'),
    }
class DagRunModelView(AirflowModelView):
    """View over the DagRun table with bulk state-change and clear actions."""

    route_base = '/dagrun'
    datamodel = AirflowModelView.CustomSQLAInterface(models.DagRun)  # type: ignore
    class_permission_name = permissions.RESOURCE_DAG_RUN
    method_permission_name = {
        'list': 'read',
        'action_clear': 'delete',
        'action_muldelete': 'delete',
        'action_set_running': 'edit',
        'action_set_failed': 'edit',
        'action_set_success': 'edit',
    }
    base_permissions = [
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_EDIT,
        permissions.ACTION_CAN_DELETE,
        permissions.ACTION_CAN_ACCESS_MENU,
    ]
    list_columns = [
        'state',
        'dag_id',
        'execution_date',
        'run_id',
        'run_type',
        'queued_at',
        'start_date',
        'end_date',
        'external_trigger',
        'conf',
    ]
    search_columns = [
        'state',
        'dag_id',
        'execution_date',
        'run_id',
        'run_type',
        'start_date',
        'end_date',
        'external_trigger',
    ]
    edit_columns = ['state', 'dag_id', 'execution_date', 'start_date', 'end_date', 'run_id', 'conf']
    base_order = ('execution_date', 'desc')
    # Restrict rows to DAGs the current user can see.
    base_filters = [['dag_id', DagFilter, lambda: []]]
    edit_form = DagRunEditForm
    formatters_columns = {
        'execution_date': wwwutils.datetime_f('execution_date'),
        'state': wwwutils.state_f,
        'start_date': wwwutils.datetime_f('start_date'),
        'end_date': wwwutils.datetime_f('end_date'),
        'dag_id': wwwutils.dag_link,
        'run_id': wwwutils.dag_run_link,
        'conf': wwwutils.json_f('conf'),
    }

    @action('muldelete', "Delete", "Are you sure you want to delete selected records?", single=False)
    @provide_session
    def action_muldelete(self, items, session=None):
        """Delete all selected DAG runs."""
        self.datamodel.delete_all(items)
        self.update_redirect()
        return redirect(self.get_redirect())

    @action('set_running', "Set state to 'running'", '', single=False)
    @provide_session
    def action_set_running(self, drs, session=None):
        """Set the selected DAG runs to RUNNING, resetting their start dates."""
        try:
            count = 0
            # Re-query by id so we mutate session-attached rows.
            for dr in session.query(DagRun).filter(DagRun.id.in_([dagrun.id for dagrun in drs])).all():
                count += 1
                dr.start_date = timezone.utcnow()
                dr.state = State.RUNNING
            session.commit()
            flash(f"{count} dag runs were set to running")
        except Exception as ex:
            flash(str(ex), 'error')
            flash('Failed to set state', 'error')
        return redirect(self.get_default_url())

    @action(
        'set_failed',
        "Set state to 'failed'",
        "All running task instances would also be marked as failed, are you sure?",
        single=False,
    )
    @provide_session
    def action_set_failed(self, drs, session=None):
        """Set the selected DAG runs (and their running task instances) to failed."""
        try:
            count = 0
            altered_tis = []
            for dr in session.query(DagRun).filter(DagRun.id.in_([dagrun.id for dagrun in drs])).all():
                count += 1
                altered_tis += set_dag_run_state_to_failed(
                    current_app.dag_bag.get_dag(dr.dag_id), dr.execution_date, commit=True, session=session
                )
            altered_ti_count = len(altered_tis)
            flash(
                "{count} dag runs and {altered_ti_count} task instances "
                "were set to failed".format(count=count, altered_ti_count=altered_ti_count)
            )
        except Exception:
            flash('Failed to set state', 'error')
        return redirect(self.get_default_url())

    @action(
        'set_success',
        "Set state to 'success'",
        "All task instances would also be marked as success, are you sure?",
        single=False,
    )
    @provide_session
    def action_set_success(self, drs, session=None):
        """Set the selected DAG runs (and their task instances) to success."""
        try:
            count = 0
            altered_tis = []
            for dr in session.query(DagRun).filter(DagRun.id.in_([dagrun.id for dagrun in drs])).all():
                count += 1
                altered_tis += set_dag_run_state_to_success(
                    current_app.dag_bag.get_dag(dr.dag_id), dr.execution_date, commit=True, session=session
                )
            altered_ti_count = len(altered_tis)
            flash(
                "{count} dag runs and {altered_ti_count} task instances "
                "were set to success".format(count=count, altered_ti_count=altered_ti_count)
            )
        except Exception:
            flash('Failed to set state', 'error')
        return redirect(self.get_default_url())

    @action('clear', "Clear the state", "All task instances would be cleared, are you sure?", single=False)
    @provide_session
    def action_clear(self, drs, session=None):
        """Clear every task instance of the selected DAG runs, grouped per DAG."""
        try:
            count = 0
            cleared_ti_count = 0
            dag_to_tis = {}
            for dr in session.query(DagRun).filter(DagRun.id.in_([dagrun.id for dagrun in drs])).all():
                count += 1
                dag = current_app.dag_bag.get_dag(dr.dag_id)
                tis_to_clear = dag_to_tis.setdefault(dag, [])
                tis_to_clear += dr.get_task_instances()
            for dag, tis in dag_to_tis.items():
                cleared_ti_count += len(tis)
                models.clear_task_instances(tis, session, dag=dag)
            flash(f"{count} dag runs and {cleared_ti_count} task instances were cleared")
        except Exception:
            flash('Failed to clear state', 'error')
        return redirect(self.get_default_url())
class LogModelView(AirflowModelView):
    """Read-only view over the audit Log table."""

    route_base = '/log'
    datamodel = AirflowModelView.CustomSQLAInterface(Log)  # type:ignore
    class_permission_name = permissions.RESOURCE_AUDIT_LOG
    method_permission_name = {
        'list': 'read',
    }
    base_permissions = [
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_ACCESS_MENU,
    ]
    list_columns = ['id', 'dttm', 'dag_id', 'task_id', 'event', 'execution_date', 'owner', 'extra']
    search_columns = ['dag_id', 'task_id', 'event', 'execution_date', 'owner', 'extra']
    base_order = ('dttm', 'desc')
    # Restrict rows to DAGs the current user can see.
    base_filters = [['dag_id', DagFilter, lambda: []]]
    formatters_columns = {
        'dttm': wwwutils.datetime_f('dttm'),
        'execution_date': wwwutils.datetime_f('execution_date'),
        'dag_id': wwwutils.dag_link,
    }
class TaskRescheduleModelView(AirflowModelView):
    """Read-only view over the TaskReschedule table."""

    route_base = '/taskreschedule'
    datamodel = AirflowModelView.CustomSQLAInterface(models.TaskReschedule)  # type: ignore
    related_views = [DagRunModelView]
    class_permission_name = permissions.RESOURCE_TASK_RESCHEDULE
    method_permission_name = {
        'list': 'read',
    }
    base_permissions = [
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_ACCESS_MENU,
    ]
    list_columns = [
        'id',
        'dag_id',
        'task_id',
        'execution_date',
        'try_number',
        'start_date',
        'end_date',
        'duration',
        'reschedule_date',
    ]
    search_columns = ['dag_id', 'task_id', 'execution_date', 'start_date', 'end_date', 'reschedule_date']
    base_order = ('id', 'desc')
    # Restrict rows to DAGs the current user can see.
    base_filters = [['dag_id', DagFilter, lambda: []]]

    def duration_f(self):
        """Render the stored duration (seconds) as a timedelta, or None while still running."""
        end_date = self.get('end_date')
        duration = self.get('duration')
        if end_date and duration:
            return timedelta(seconds=duration)
        return None

    # duration_f must be defined above before it is referenced here.
    formatters_columns = {
        'dag_id': wwwutils.dag_link,
        'task_id': wwwutils.task_instance_link,
        'start_date': wwwutils.datetime_f('start_date'),
        'end_date': wwwutils.datetime_f('end_date'),
        'execution_date': wwwutils.datetime_f('execution_date'),
        'reschedule_date': wwwutils.datetime_f('reschedule_date'),
        'duration': duration_f,
    }
class TaskInstanceModelView(AirflowModelView):
    """View over the TaskInstance table with bulk clear / state-change actions."""

    route_base = '/taskinstance'
    datamodel = AirflowModelView.CustomSQLAInterface(models.TaskInstance)  # type: ignore
    class_permission_name = permissions.RESOURCE_TASK_INSTANCE
    method_permission_name = {
        'list': 'read',
        'action_clear': 'edit',
        'action_set_running': 'edit',
        'action_set_failed': 'edit',
        'action_set_success': 'edit',
        'action_set_retry': 'edit',
    }
    base_permissions = [
        permissions.ACTION_CAN_CREATE,
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_EDIT,
        permissions.ACTION_CAN_DELETE,
        permissions.ACTION_CAN_ACCESS_MENU,
    ]
    page_size = PAGE_SIZE
    list_columns = [
        'state',
        'dag_id',
        'task_id',
        'execution_date',
        'operator',
        'start_date',
        'end_date',
        'duration',
        'job_id',
        'hostname',
        'unixname',
        'priority_weight',
        'queue',
        'queued_dttm',
        'try_number',
        'pool',
        'queued_by_job_id',
        'external_executor_id',
        'log_url',
    ]
    # Computed/synthetic columns cannot be sorted on.
    order_columns = [
        item for item in list_columns if item not in ['try_number', 'log_url', 'external_executor_id']
    ]
    search_columns = [
        'state',
        'dag_id',
        'task_id',
        'execution_date',
        'hostname',
        'queue',
        'pool',
        'operator',
        'start_date',
        'end_date',
        'queued_dttm',
    ]
    edit_columns = [
        'state',
        'dag_id',
        'task_id',
        'execution_date',
        'start_date',
        'end_date',
    ]
    add_exclude_columns = ["next_method", "next_kwargs", "trigger_id"]
    edit_form = TaskInstanceEditForm
    base_order = ('job_id', 'asc')
    # Restrict rows to DAGs the current user can see.
    base_filters = [['dag_id', DagFilter, lambda: []]]

    def log_url_formatter(self):
        """Render the log URL as an icon link."""
        log_url = self.get('log_url')
        return Markup(
            '<a href="{log_url}"><span class="material-icons" aria-hidden="true">reorder</span></a>'
        ).format(log_url=log_url)

    def duration_f(self):
        """Render the stored duration (seconds) as a timedelta, or None while still running."""
        end_date = self.get('end_date')
        duration = self.get('duration')
        if end_date and duration:
            return timedelta(seconds=duration)
        return None

    # Formatter methods above must be defined before this mapping is built.
    formatters_columns = {
        'log_url': log_url_formatter,
        'task_id': wwwutils.task_instance_link,
        'hostname': wwwutils.nobr_f('hostname'),
        'state': wwwutils.state_f,
        'execution_date': wwwutils.datetime_f('execution_date'),
        'start_date': wwwutils.datetime_f('start_date'),
        'end_date': wwwutils.datetime_f('end_date'),
        'queued_dttm': wwwutils.datetime_f('queued_dttm'),
        'dag_id': wwwutils.dag_link,
        'duration': duration_f,
    }

    @provide_session
    @action(
        'clear',
        lazy_gettext('Clear'),
        lazy_gettext(
            'Are you sure you want to clear the state of the selected task'
            ' instance(s) and set their dagruns to the QUEUED state?'
        ),
        single=False,
    )
    def action_clear(self, task_instances, session=None):
        """Clear the selected task instances, grouped by their DAG."""
        try:
            dag_to_tis = collections.defaultdict(list)
            for ti in task_instances:
                dag = current_app.dag_bag.get_dag(ti.dag_id)
                dag_to_tis[dag].append(ti)
            for dag, task_instances_list in dag_to_tis.items():
                models.clear_task_instances(task_instances_list, session, dag=dag)
            session.commit()
            flash(f"{len(task_instances)} task instances have been cleared")
        except Exception as e:
            flash(f'Failed to clear task instances: "{e}"', 'error')
        self.update_redirect()
        return redirect(self.get_redirect())

    @provide_session
    def set_task_instance_state(self, tis, target_state, session=None):
        """Set every given task instance to *target_state* in one transaction."""
        try:
            count = len(tis)
            for ti in tis:
                ti.set_state(target_state, session)
            session.commit()
            flash(f"{count} task instances were set to '{target_state}'")
        except Exception:
            flash('Failed to set state', 'error')

    @action('set_running', "Set state to 'running'", '', single=False)
    @auth.has_access(
        [
            (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
        ]
    )
    def action_set_running(self, tis):
        """Set the selected task instances to RUNNING."""
        self.set_task_instance_state(tis, State.RUNNING)
        self.update_redirect()
        return redirect(self.get_redirect())

    @action('set_failed', "Set state to 'failed'", '', single=False)
    @auth.has_access(
        [
            (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
        ]
    )
    def action_set_failed(self, tis):
        """Set the selected task instances to FAILED."""
        self.set_task_instance_state(tis, State.FAILED)
        self.update_redirect()
        return redirect(self.get_redirect())

    @action('set_success', "Set state to 'success'", '', single=False)
    @auth.has_access(
        [
            (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
        ]
    )
    def action_set_success(self, tis):
        """Set the selected task instances to SUCCESS."""
        self.set_task_instance_state(tis, State.SUCCESS)
        self.update_redirect()
        return redirect(self.get_redirect())

    @action('set_retry', "Set state to 'up_for_retry'", '', single=False)
    @auth.has_access(
        [
            (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
        ]
    )
    def action_set_retry(self, tis):
        """Set the selected task instances to UP_FOR_RETRY."""
        self.set_task_instance_state(tis, State.UP_FOR_RETRY)
        self.update_redirect()
        return redirect(self.get_redirect())
class DagModelView(AirflowModelView):
    """Read-only view over the DagModel table, plus the DAG/owner autocomplete endpoint."""

    route_base = '/dagmodel'
    datamodel = AirflowModelView.CustomSQLAInterface(DagModel)  # type: ignore
    class_permission_name = permissions.RESOURCE_DAG
    method_permission_name = {
        'list': 'read',
        'show': 'read',
    }
    base_permissions = [
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_EDIT,
        permissions.ACTION_CAN_DELETE,
    ]
    list_columns = [
        'dag_id',
        'is_paused',
        'last_parsed_time',
        'last_expired',
        'scheduler_lock',
        'fileloc',
        'owners',
    ]
    formatters_columns = {'dag_id': wwwutils.dag_link}
    # Restrict rows to DAGs the current user can see.
    base_filters = [['dag_id', DagFilter, lambda: []]]

    def get_query(self):
        """Default filters: active or paused DAGs, excluding subdags."""
        return (
            super()
            .get_query()
            .filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
            .filter(~models.DagModel.is_subdag)
        )

    def get_count_query(self):
        """Apply the same default filters as :meth:`get_query`.

        Fix: the previous implementation filtered only on ``is_active``, so
        the reported record count could disagree with the rows actually
        listed (which also include paused-but-inactive DAGs).
        """
        return (
            super()
            .get_count_query()
            .filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
            .filter(~models.DagModel.is_subdag)
        )

    @auth.has_access(
        [
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        ]
    )
    @provide_session
    @expose('/autocomplete')
    def autocomplete(self, session=None):
        """Return up to 10 dag_id/owner suggestions matching the ``query`` arg."""
        query = unquote(request.args.get('query', ''))
        if not query:
            return wwwutils.json_response([])
        # Provide suggestions of dag_ids and owners
        dag_ids_query = session.query(DagModel.dag_id.label('item')).filter(
            ~DagModel.is_subdag, DagModel.is_active, DagModel.dag_id.ilike('%' + query + '%')
        )
        owners_query = session.query(func.distinct(DagModel.owners).label('item')).filter(
            ~DagModel.is_subdag, DagModel.is_active, DagModel.owners.ilike('%' + query + '%')
        )
        # Hide DAGs if not showing status: "all"
        status = flask_session.get(FILTER_STATUS_COOKIE)
        if status == 'active':
            dag_ids_query = dag_ids_query.filter(~DagModel.is_paused)
            owners_query = owners_query.filter(~DagModel.is_paused)
        elif status == 'paused':
            dag_ids_query = dag_ids_query.filter(DagModel.is_paused)
            owners_query = owners_query.filter(DagModel.is_paused)
        # Only suggest DAGs the current user may read.
        filter_dag_ids = current_app.appbuilder.sm.get_accessible_dag_ids(g.user)
        dag_ids_query = dag_ids_query.filter(DagModel.dag_id.in_(filter_dag_ids))
        owners_query = owners_query.filter(DagModel.dag_id.in_(filter_dag_ids))
        payload = [row[0] for row in dag_ids_query.union(owners_query).limit(10).all()]
        return wwwutils.json_response(payload)
class DagDependenciesView(AirflowBaseView):
    """View rendering the cross-DAG dependency graph, with a cached layout."""

    # How long the cached graph stays valid before checking for newer
    # serialized DAGs.
    refresh_interval = timedelta(
        seconds=conf.getint(
            "webserver",
            "dag_dependencies_refresh_interval",
            fallback=conf.getint("scheduler", "dag_dir_list_interval"),
        )
    )
    # Class-level cache shared by all requests; initialized stale so the first
    # request computes the graph.
    last_refresh = timezone.utcnow() - refresh_interval
    nodes = []
    edges = []

    @expose('/dag-dependencies')
    @auth.has_access(
        [
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_DEPENDENCIES),
        ]
    )
    @gzipped
    @action_logging
    def list(self):
        """Render the dependency graph, recomputing it only when stale."""
        title = "DAG Dependencies"
        if not self.nodes or not self.edges:
            self._calculate_graph()
            self.last_refresh = timezone.utcnow()
        elif timezone.utcnow() > self.last_refresh + self.refresh_interval:
            # Only rebuild if a serialized DAG changed since the last refresh.
            max_last_updated = SerializedDagModel.get_max_last_updated_datetime()
            if max_last_updated is None or max_last_updated > self.last_refresh:
                self._calculate_graph()
            self.last_refresh = timezone.utcnow()
        return self.render_template(
            "airflow/dag_dependencies.html",
            title=title,
            nodes=self.nodes,
            edges=self.edges,
            last_refresh=self.last_refresh,
            arrange=conf.get("webserver", "dag_orientation"),
            width=request.args.get("width", "100%"),
            height=request.args.get("height", "800"),
        )

    def _calculate_graph(self):
        # Rebuild the node/edge lists from the serialized-DAG dependency map.
        nodes = []
        edges = []
        for dag, dependencies in SerializedDagModel.get_dag_dependencies().items():
            dag_node_id = f"dag:{dag}"
            nodes.append(self._node_dict(dag_node_id, dag, "dag"))
            for dep in dependencies:
                # Each dependency becomes an intermediate node between
                # source DAG and target DAG.
                nodes.append(self._node_dict(dep.node_id, dep.dependency_id, dep.dependency_type))
                edges.extend(
                    [
                        {"u": f"dag:{dep.source}", "v": dep.node_id},
                        {"u": dep.node_id, "v": f"dag:{dep.target}"},
                    ]
                )
        self.nodes = nodes
        self.edges = edges

    @staticmethod
    def _node_dict(node_id, label, node_class):
        # Shape expected by the dag_dependencies.html graph renderer.
        return {
            "id": node_id,
            "value": {"label": label, "rx": 5, "ry": 5, "class": node_class},
        }
class CustomPermissionModelView(PermissionModelView):
    """Customize permission names for FAB's builtin PermissionModelView (read-only)."""

    class_permission_name = permissions.RESOURCE_PERMISSION
    method_permission_name = {
        'list': 'read',
    }
    base_permissions = [
        permissions.ACTION_CAN_READ,
    ]
class CustomPermissionViewModelView(PermissionViewModelView):
    """Customize permission names for FAB's builtin PermissionViewModelView (read-only)."""

    class_permission_name = permissions.RESOURCE_PERMISSION_VIEW
    method_permission_name = {
        'list': 'read',
    }
    base_permissions = [
        permissions.ACTION_CAN_READ,
    ]
class CustomResetMyPasswordView(ResetMyPasswordView):
    """Customize permission names for FAB's builtin ResetMyPasswordView."""

    class_permission_name = permissions.RESOURCE_MY_PASSWORD
    # GET shows the form (read); POST submits the change (edit).
    method_permission_name = {
        'this_form_get': 'read',
        'this_form_post': 'edit',
    }
    base_permissions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]
class CustomResetPasswordView(ResetPasswordView):
    """Customize permission names for FAB's builtin ResetPasswordView."""

    class_permission_name = permissions.RESOURCE_PASSWORD
    # GET shows the form (read); POST submits the change (edit).
    method_permission_name = {
        'this_form_get': 'read',
        'this_form_post': 'edit',
    }
    base_permissions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]
class CustomRoleModelView(RoleModelView):
    """Customize permission names for FAB's builtin RoleModelView."""

    class_permission_name = permissions.RESOURCE_ROLE
    method_permission_name = {
        'delete': 'delete',
        'download': 'read',
        'show': 'read',
        'list': 'read',
        'edit': 'edit',
        'add': 'create',
        'copy_role': 'create',
    }
    base_permissions = [
        permissions.ACTION_CAN_CREATE,
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_EDIT,
        permissions.ACTION_CAN_DELETE,
    ]
class CustomViewMenuModelView(ViewMenuModelView):
    """Customize permission names for FAB's builtin ViewMenuModelView (read-only)."""

    class_permission_name = permissions.RESOURCE_VIEW_MENU
    method_permission_name = {
        'list': 'read',
    }
    base_permissions = [
        permissions.ACTION_CAN_READ,
    ]
class CustomUserInfoEditView(UserInfoEditView):
    """Customize permission names for FAB's builtin UserInfoEditView."""

    class_permission_name = permissions.RESOURCE_MY_PROFILE
    route_base = "/userinfoeditview"
    method_permission_name = {
        'this_form_get': 'edit',
        'this_form_post': 'edit',
    }
    base_permissions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]
class CustomUserStatsChartView(UserStatsChartView):
    """Customize permission names for FAB's builtin UserStatsChartView (read-only)."""

    class_permission_name = permissions.RESOURCE_USER_STATS_CHART
    route_base = "/userstatschartview"
    method_permission_name = {
        'chart': 'read',
        'list': 'read',
    }
    base_permissions = [permissions.ACTION_CAN_READ]
class MultiResourceUserMixin:
    """Remaps UserModelView permissions to new resources and actions."""

    # Fallback resource name; per-endpoint overrides live in the mapping below.
    _class_permission_name = permissions.RESOURCE_USER
    class_permission_name_mapping = {
        'userinfoedit': permissions.RESOURCE_MY_PROFILE,
        'userinfo': permissions.RESOURCE_MY_PROFILE,
    }
    method_permission_name = {
        'userinfo': 'read',
        'download': 'read',
        'show': 'read',
        'list': 'read',
        'edit': 'edit',
        'userinfoedit': 'edit',
        'delete': 'delete',
    }
    base_permissions = [
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_EDIT,
        permissions.ACTION_CAN_DELETE,
    ]

    @expose("/show/<pk>", methods=["GET"])
    @has_access
    def show(self, pk):
        """Render the user detail page without the 'userinfoedit' action button."""
        pk = self._deserialize_pk_if_composite(pk)
        widgets = self._show(pk)
        # Remove the profile-edit action from the rendered widget's action list.
        widgets['show'].template_args['actions'].pop('userinfoedit')
        return self.render_template(
            self.show_template,
            pk=pk,
            title=self.show_title,
            widgets=widgets,
            related_views=self._related_views,
        )
class CustomUserDBModelView(MultiResourceUserMixin, UserDBModelView):
    """Customize permission names for FAB's builtin UserDBModelView."""

    _class_permission_name = permissions.RESOURCE_USER
    class_permission_name_mapping = {
        'resetmypassword': permissions.RESOURCE_MY_PASSWORD,
        'resetpasswords': permissions.RESOURCE_PASSWORD,
        'userinfoedit': permissions.RESOURCE_MY_PROFILE,
        'userinfo': permissions.RESOURCE_MY_PROFILE,
    }
    method_permission_name = {
        'add': 'create',
        'download': 'read',
        'show': 'read',
        'list': 'read',
        'edit': 'edit',
        'delete': 'delete',
        'resetmypassword': 'read',
        'resetpasswords': 'read',
        'userinfo': 'read',
        'userinfoedit': 'read',
    }
    base_permissions = [
        permissions.ACTION_CAN_CREATE,
        permissions.ACTION_CAN_READ,
        permissions.ACTION_CAN_EDIT,
        permissions.ACTION_CAN_DELETE,
    ]

    @property
    def class_permission_name(self):
        """Returns appropriate permission name depending on request method name."""
        if request:
            # Resolve per-endpoint overrides from the current request's
            # URL rule (the endpoint's final segment is the method name).
            action_name = request.view_args.get("name")
            _, method_name = request.url_rule.endpoint.rsplit(".", 1)
            if method_name == 'action' and action_name:
                return self.class_permission_name_mapping.get(action_name, self._class_permission_name)
            if method_name:
                return self.class_permission_name_mapping.get(method_name, self._class_permission_name)
        return self._class_permission_name

    @class_permission_name.setter
    def class_permission_name(self, name):
        # FAB assigns this attribute during registration; store the fallback.
        self._class_permission_name = name
class CustomUserLDAPModelView(MultiResourceUserMixin, UserLDAPModelView):
    """Customize permission names for FAB's builtin UserLDAPModelView."""
class CustomUserOAuthModelView(MultiResourceUserMixin, UserOAuthModelView):
    """Customize permission names for FAB's builtin UserOAuthModelView."""
class CustomUserOIDModelView(MultiResourceUserMixin, UserOIDModelView):
    """Customize permission names for FAB's builtin UserOIDModelView."""
class CustomUserRemoteUserModelView(MultiResourceUserMixin, UserRemoteUserModelView):
    """Customize permission names for FAB's builtin UserRemoteUserModelView."""
| 35.280771 | 110 | 0.60304 |
42c12d3d7be513cb6d59d552b0a6f850c6aec7af | 606 | py | Python | mycal/attendance/migrations/0005_auto_20201112_2000.py | mjhow4/newattendanceapp | 685173631fc7cf7e56923e27f47d405629633386 | [
"MIT"
] | null | null | null | mycal/attendance/migrations/0005_auto_20201112_2000.py | mjhow4/newattendanceapp | 685173631fc7cf7e56923e27f47d405629633386 | [
"MIT"
] | null | null | null | mycal/attendance/migrations/0005_auto_20201112_2000.py | mjhow4/newattendanceapp | 685173631fc7cf7e56923e27f47d405629633386 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.3 on 2020-11-13 01:00
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: loosens two fields on the Case model so
    # they may be stored as NULL and left blank in forms. Do not edit the
    # operations by hand; they must match the model state at 0005.
    dependencies = [
        ('attendance', '0004_case_continuance_request'),
    ]
    operations = [
        migrations.AlterField(
            model_name='case',
            name='continuance_request',
            # NOTE(review): default='' on a DateField looks suspect — an
            # empty string is not a valid date; presumably only null/blank
            # matter here. Confirm against the model definition.
            field=models.DateField(blank=True, default='', null=True),
        ),
        migrations.AlterField(
            model_name='case',
            name='response_by',
            field=models.TextField(blank=True, max_length=400, null=True),
        ),
    ]
| 25.25 | 74 | 0.59736 |
6c25b396e4b6a97fa4688bf32f76a4b937ca8842 | 4,122 | py | Python | nova/tests/test_crypto.py | armaan/nova | 22859fccb95502efcb73ecf2bd827c45c0886bd3 | [
"Apache-2.0"
] | 1 | 2021-11-08T10:11:44.000Z | 2021-11-08T10:11:44.000Z | nova/tests/test_crypto.py | armaan/nova | 22859fccb95502efcb73ecf2bd827c45c0886bd3 | [
"Apache-2.0"
] | null | null | null | nova/tests/test_crypto.py | armaan/nova | 22859fccb95502efcb73ecf2bd827c45c0886bd3 | [
"Apache-2.0"
] | 1 | 2020-05-10T16:36:03.000Z | 2020-05-10T16:36:03.000Z | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Crypto module.
"""
import mox
import stubout
from nova import crypto
from nova import db
from nova import test
class SymmetricKeyTestCase(test.TestCase):
    """Round-trip tests for crypto.encryptor / crypto.decryptor."""

    def test_encrypt_decrypt(self):
        key = 'c286696d887c9aa0611bbb3e2025a45a'
        message = "The quick brown fox jumped over the lazy dog."
        # First with the implicit (all-zero) IV, then with an explicit one.
        for iv in (None, '562e17996d093d28ddb3ba695a2e6f58'):
            args = (key,) if iv is None else (key, iv)
            ciphered = crypto.encryptor(*args)(message)
            # Ciphertext must differ from the plaintext...
            self.assertNotEquals(message, ciphered)
            # ...and decrypting with the same key/IV must restore it.
            self.assertEquals(message, crypto.decryptor(*args)(ciphered))
class RevokeCertsTest(test.TestCase):
    """Exercise the crypto.revoke_certs_by_* helpers against stubbed DB data."""

    def setUp(self):
        super(RevokeCertsTest, self).setUp()
        self.stubs = stubout.StubOutForTesting()

    def tearDown(self):
        self.stubs.UnsetAll()
        super(RevokeCertsTest, self).tearDown()

    def test_revoke_certs_by_user_and_project(self):
        user_id = 'test_user'
        project_id = 2
        file_name = 'test_file'

        def fake_lookup(context, user_id, project_id):
            # One matching certificate row for this user/project pair.
            return [{"user_id": user_id, "project_id": project_id,
                     "file_name": file_name}]

        self.stubs.Set(db, 'certificate_get_all_by_user_and_project',
                       fake_lookup)
        # Expect exactly one revocation, with the stubbed file name.
        self.mox.StubOutWithMock(crypto, 'revoke_cert')
        crypto.revoke_cert(project_id, file_name)
        self.mox.ReplayAll()
        crypto.revoke_certs_by_user_and_project(user_id, project_id)
        self.mox.VerifyAll()

    def test_revoke_certs_by_user(self):
        user_id = 'test_user'
        project_id = 2
        file_name = 'test_file'

        def fake_lookup(context, user_id):
            return [{"user_id": user_id, "project_id": project_id,
                     "file_name": file_name}]

        self.stubs.Set(db, 'certificate_get_all_by_user', fake_lookup)
        # File name is not pinned here; only the project id matters.
        self.mox.StubOutWithMock(crypto, 'revoke_cert')
        crypto.revoke_cert(project_id, mox.IgnoreArg())
        self.mox.ReplayAll()
        crypto.revoke_certs_by_user(user_id)
        self.mox.VerifyAll()

    def test_revoke_certs_by_project(self):
        user_id = 'test_user'
        project_id = 2
        file_name = 'test_file'

        def fake_lookup(context, project_id):
            return [{"user_id": user_id, "project_id": project_id,
                     "file_name": file_name}]

        self.stubs.Set(db, 'certificate_get_all_by_project', fake_lookup)
        self.mox.StubOutWithMock(crypto, 'revoke_cert')
        crypto.revoke_cert(project_id, mox.IgnoreArg())
        self.mox.ReplayAll()
        crypto.revoke_certs_by_project(project_id)
        self.mox.VerifyAll()
| 31.227273 | 78 | 0.631004 |
c6a2c487cb2dcd48e3610137e0bf01f21cf00237 | 4,401 | py | Python | rx/core/operators/merge.py | mmpio/RxPY | 4ed60bb5c04aa85de5210e5537a6adfe1b667d50 | [
"MIT"
] | 4,342 | 2015-01-06T09:00:23.000Z | 2022-03-28T15:05:50.000Z | rx/core/operators/merge.py | mmpio/RxPY | 4ed60bb5c04aa85de5210e5537a6adfe1b667d50 | [
"MIT"
] | 613 | 2015-01-07T20:44:56.000Z | 2022-03-20T06:14:20.000Z | rx/core/operators/merge.py | mmpio/RxPY | 4ed60bb5c04aa85de5210e5537a6adfe1b667d50 | [
"MIT"
] | 420 | 2015-01-07T14:30:30.000Z | 2022-03-11T22:47:46.000Z | from typing import Callable, Optional
import rx
from rx import from_future
from rx.core import Observable
from rx.disposable import CompositeDisposable, SingleAssignmentDisposable
from rx.internal.concurrency import synchronized
from rx.internal.utils import is_future
def _merge(*sources: Observable,
max_concurrent: Optional[int] = None
) -> Callable[[Observable], Observable]:
def merge(source: Observable) -> Observable:
"""Merges an observable sequence of observable sequences into
an observable sequence, limiting the number of concurrent
subscriptions to inner sequences. Or merges two observable
sequences into a single observable sequence.
Examples:
>>> res = merge(sources)
Args:
source: Source observable.
Returns:
The observable sequence that merges the elements of the
inner sequences.
"""
if max_concurrent is None:
sources_ = tuple([source]) + sources
return rx.merge(*sources_)
def subscribe(observer, scheduler=None):
active_count = [0]
group = CompositeDisposable()
is_stopped = [False]
queue = []
def subscribe(xs):
subscription = SingleAssignmentDisposable()
group.add(subscription)
@synchronized(source.lock)
def on_completed():
group.remove(subscription)
if queue:
s = queue.pop(0)
subscribe(s)
else:
active_count[0] -= 1
if is_stopped[0] and active_count[0] == 0:
observer.on_completed()
on_next = synchronized(source.lock)(observer.on_next)
on_error = synchronized(source.lock)(observer.on_error)
subscription.disposable = xs.subscribe_(on_next, on_error, on_completed, scheduler)
def on_next(inner_source):
if active_count[0] < max_concurrent:
active_count[0] += 1
subscribe(inner_source)
else:
queue.append(inner_source)
def on_completed():
is_stopped[0] = True
if active_count[0] == 0:
observer.on_completed()
group.add(source.subscribe_(on_next, observer.on_error, on_completed, scheduler))
return group
return Observable(subscribe)
return merge
def _merge_all() -> Callable[[Observable], Observable]:
    def merge_all(source: Observable) -> Observable:
        """Partially applied merge_all operator.
        Merges an observable sequence of observable sequences into an
        observable sequence.
        Args:
            source: Source observable to merge.
        Returns:
            The observable sequence that merges the elements of the inner
            sequences.
        """
        def subscribe(observer, scheduler=None):
            group = CompositeDisposable()
            is_stopped = [False]  # mutable cell: outer source has completed
            m = SingleAssignmentDisposable()
            group.add(m)
            def on_next(inner_source):
                inner_subscription = SingleAssignmentDisposable()
                group.add(inner_subscription)
                # Futures emitted by the outer source are wrapped so they
                # behave like single-element observables.
                inner_source = from_future(inner_source) if is_future(inner_source) else inner_source
                @synchronized(source.lock)
                def on_completed():
                    group.remove(inner_subscription)
                    # len(group) == 1 means only the outer subscription (m)
                    # remains, i.e. every inner sequence has finished.
                    if is_stopped[0] and len(group) == 1:
                        observer.on_completed()
                on_next = synchronized(source.lock)(observer.on_next)
                on_error = synchronized(source.lock)(observer.on_error)
                subscription = inner_source.subscribe_(on_next, on_error, on_completed, scheduler)
                inner_subscription.disposable = subscription
            def on_completed():
                is_stopped[0] = True
                if len(group) == 1:
                    observer.on_completed()
            m.disposable = source.subscribe_(on_next, observer.on_error, on_completed, scheduler)
            return group
        return Observable(subscribe)
    return merge_all
| 34.928571 | 101 | 0.578732 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.