content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
from reqlist import *
import random
from catalog.models import Course
def ceiling_thresh(progress, maximum):
    """Builds a Progress object whose value is clamped to the range [0, maximum].

    Negative progress is treated as zero; when maximum is positive, progress is
    additionally capped at maximum."""
    clamped = progress if progress > 0 else 0
    if maximum <= 0:
        return Progress(clamped, maximum)
    return Progress(min(clamped, maximum), maximum)
def total_units(courses):
    """Returns the sum of the total_units attribute over a list of Course objects."""
    return sum(course.total_units for course in courses)
def sum_progresses(progresses, criterion_type, maxFunc):
    """Folds a list of RequirementsProgress objects into a single combined Progress.

    criterion_type: either CRITERION_SUBJECTS or CRITERION_UNITS, selecting which
        fulfillment (subject or unit) each element contributes
    maxFunc: describes how to combine the maximums of the Progress objects
        (passed through to Progress.combine)"""
    if criterion_type == CRITERION_SUBJECTS:
        extract = lambda p: p.subject_fulfillment
    elif criterion_type == CRITERION_UNITS:
        extract = lambda p: p.unit_fulfillment
    # Fold left-to-right, seeded with the first element's fulfillment.
    combined = extract(progresses[0])
    for prog in progresses[1:]:
        combined = combined.combine(extract(prog), maxFunc)
    return combined
def force_unfill_progresses(satisfied_by_category, current_distinct_threshold, current_threshold):
    """Adjusts the fulfillment and progress of a RequirementsProgress object with both
    distinct thresholds and thresholds.
    These requirements follow the form "X subjects/units from at least N categories".

    satisfied_by_category: list of lists of Courses for each category
    current_distinct_threshold: threshold object for the distinct (category) threshold
    current_threshold: threshold object for the regular (subject/unit) threshold
    Returns a (subject_progress, unit_progress) tuple of Progress objects."""
    subject_cutoff = current_threshold.cutoff_for_criterion(CRITERION_SUBJECTS)
    unit_cutoff = current_threshold.cutoff_for_criterion(CRITERION_UNITS)
    # Subjects in each category sorted ascending by units, so .pop() yields the
    # max-unit subject. A list comprehension (rather than map()) is used so the
    # result is a reusable, mutable list on both Python 2 and Python 3.
    max_unit_subjects = [sorted(sat_cat, key=lambda s: s.total_units) for sat_cat in satisfied_by_category]
    # Split subjects into two sections: fixed and free.
    # Fixed subjects: must have one subject from each category.
    # Free subjects: remaining subjects to fill the requirement can come from any category.
    # Choose maximum-unit courses to fulfill the requirement with the fewest courses possible.
    fixed_subject_progress = 0
    fixed_subject_max = current_distinct_threshold.get_actual_cutoff()
    fixed_unit_progress = 0
    fixed_unit_max = 0
    # Fill fixed slots with the maximum-unit course in each category.
    for category_subjects in max_unit_subjects:
        if len(category_subjects) > 0:
            subject_to_count = category_subjects.pop()
            fixed_subject_progress += 1
            fixed_unit_progress += subject_to_count.total_units
            fixed_unit_max += subject_to_count.total_units
        else:
            # Empty category contributes no progress but still widens the unit maximum.
            fixed_unit_max += DEFAULT_UNIT_COUNT
    # Choose free courses from all remaining courses, highest-unit first.
    free_courses = sorted([course for category in max_unit_subjects for course in category], key = lambda s: s.total_units, reverse = True)
    free_subject_max = subject_cutoff - fixed_subject_max
    free_unit_max = unit_cutoff - fixed_unit_max
    free_subject_progress = min(len(free_courses), free_subject_max)
    free_unit_progress = min(total_units(free_courses), free_unit_max)
    # Add fixed and free contributions to get the total progress.
    subject_progress = Progress(fixed_subject_progress + free_subject_progress, subject_cutoff)
    unit_progress = Progress(fixed_unit_progress + free_unit_progress, unit_cutoff)
    return (subject_progress, unit_progress)
class JSONProgressConstants:
    """Each of these keys will be filled in a RequirementsStatement JSON
    representation decorated by a RequirementsProgress object."""
    # Whether the requirement is fulfilled
    is_fulfilled = "fulfilled"
    # Progress toward the requirement, in the unit given by its threshold type
    progress = "progress"
    # Maximum value the progress can reach
    progress_max = "max"
    # Integer percentage fulfilled (or "N/A" when the maximum is not positive)
    percent_fulfilled = "percent_fulfilled"
    # Subject IDs of the courses that satisfy this requirement
    satisfied_courses = "sat_courses"
    # Progress assertions
    is_bypassed = "is_bypassed"
    assertion = "assertion"
class Progress(object):
    """A simple progress/maximum pair describing advancement toward a requirement.

    Different from the RequirementsProgress object in that it carries only the
    numeric progress data — no nested progress objects, fulfillment status, or
    title information.
    progress: number of units/subjects completed
    max: number of units/subjects needed to fulfill the requirement"""
    def __init__(self, progress, max):
        self.progress = progress
        self.max = max
    def get_percent(self):
        """Integer percent complete, capped at 100; "N/A" when max is not positive."""
        if self.max <= 0:
            return "N/A"
        return min(100, int(round((self.progress / float(self.max)) * 100)))
    def get_fraction(self):
        """Fraction complete as a float; "N/A" when max is not positive."""
        if self.max <= 0:
            return "N/A"
        return self.progress / float(self.max)
    def get_raw_fraction(self, unit):
        """Fraction against a floored denominator so a zero max is safe to divide by."""
        floor = DEFAULT_UNIT_COUNT if unit == CRITERION_UNITS else 1
        denom = max(self.max, floor)
        return self.progress/denom
    def combine(self, p2, maxFunc):
        """Returns a new Progress summing both progresses; maxFunc, when given,
        transforms p2's maximum before it is added."""
        other_max = maxFunc(p2.max) if maxFunc is not None else p2.max
        return Progress(self.progress + p2.progress, self.max + other_max)
    def __repr__(self):
        return "{} / {}".format(self.progress, self.max)
class RequirementsProgress(object):
    """
    Stores a user's progress towards a given requirements statement. This object
    wraps a requirements statement and has a to_json_object() method which
    returns the statement's own JSON dictionary representation with progress
    information added.
    Note: This class is maintained separately from the Django model so that
    persistent information can be stored in a database-friendly format, while
    information specific to a user's request is transient.
    """
    def __init__(self, statement, list_path):
        """Initializes a progress object with the given requirements statement.

        statement: the RequirementsStatement to track progress for
        list_path: dotted path identifying this statement within its list,
            extended with each child's index for nested statements"""
        self.statement = statement
        self.threshold = self.statement.get_threshold()
        self.distinct_threshold = self.statement.get_distinct_threshold()
        self.list_path = list_path
        self.children = []
        # Only compound statements (no plain requirement string) have children.
        if self.statement.requirement is None:
            for index, child in enumerate(self.statement.requirements.iterator()):
                self.children.append(RequirementsProgress(child, list_path + "." + str(index)))
    def courses_satisfying_req(self, courses):
        """
        Returns the whole courses and the half courses satisfying this requirement
        separately, as a (whole_courses, half_courses) pair of lists.
        """
        if self.statement.requirement is not None:
            req = self.statement.requirement
            if "GIR:" in req or "HASS" in req or "CI-" in req:
                # Separate whole and half courses
                whole_courses = []
                half_courses = []
                for c in courses:
                    if not c.satisfies(req, courses):
                        continue
                    if c.is_half_class:
                        half_courses.append(c)
                    else:
                        whole_courses.append(c)
                return whole_courses, half_courses
            else:
                return [c for c in courses if c.satisfies(req, courses)], []
        return [], []
    def override_requirement(self, manual_progress):
        """
        Sets the progress fulfillment variables based on a manual progress value, which is
        expressed in either units or subjects depending on the requirement's threshold.
        """
        self.is_fulfilled = manual_progress >= self.threshold.get_actual_cutoff()
        subjects = 0
        units = 0
        satisfied_courses = set()
        # Convert the manual value into both subjects and units using the
        # default unit count as the conversion factor.
        if self.threshold.criterion == CRITERION_UNITS:
            units = manual_progress
            subjects = manual_progress / DEFAULT_UNIT_COUNT
        else:
            units = manual_progress * DEFAULT_UNIT_COUNT
            subjects = manual_progress
        subject_progress = ceiling_thresh(subjects, self.threshold.cutoff_for_criterion(CRITERION_SUBJECTS))
        unit_progress = ceiling_thresh(units, self.threshold.cutoff_for_criterion(CRITERION_UNITS))
        #fill with dummy courses
        random_ids = random.sample(range(1000, max(10000, subject_progress.progress + 1000)), subject_progress.progress)
        for rand_id in random_ids:
            dummy_course = Course(id = self.list_path + "_" + str(rand_id), subject_id = "gen_course_" + self.list_path + "_" + str(rand_id), title = "Generated Course " + self.list_path + " " + str(rand_id))
            satisfied_courses.add(dummy_course)
        self.subject_fulfillment = subject_progress
        self.subject_progress = subject_progress.progress
        self.subject_max = subject_progress.max
        self.unit_fulfillment = unit_progress
        self.unit_progress = unit_progress.progress
        self.unit_max = unit_progress.max
        progress = unit_progress if self.threshold is not None and self.threshold.criterion == CRITERION_UNITS else subject_progress
        self.progress = progress.progress
        self.progress_max = progress.max
        self.percent_fulfilled = progress.get_percent()
        self.fraction_fulfilled = progress.get_fraction()
        self.satisfied_courses = list(satisfied_courses)
    def compute_assertions(self, courses, progress_assertions):
        """
        Computes the fulfillment of this requirement based on progress assertions, and returns
        True if the requirement has an assertion available or False otherwise.
        Assertions are in the format of a dictionary keyed by requirements list paths, where the
        values are dictionaries containing three possible keys: "substitutions", which should be a
        list of course IDs that combine to substitute for the requirement, "ignore", which
        indicates that the requirement is not to be used when satisfying later requirements, and
        "override", which is equivalent to the old manual progress value and indicates a progress
        toward the requirement in the unit specified by the requirement's threshold type (only
        used if the requirement is a plain string requirement and has a threshold). The order of
        precedence is override, ignore, substitutions.
        """
        self.assertion = progress_assertions.get(self.list_path, None)
        self.is_bypassed = False
        if self.assertion is not None:
            substitutions = self.assertion.get("substitutions", None) #List of substitutions
            ignore = self.assertion.get("ignore", False) #Boolean
            override = self.assertion.get("override", 0)
        else:
            substitutions = None
            ignore = False
            override = 0
        # Precedence 1: override (only for plain-string requirements with a threshold)
        if self.statement.is_plain_string and self.threshold is not None and override:
            self.override_requirement(override)
            return True
        # Precedence 2: ignore — zero out all progress
        if ignore:
            self.is_fulfilled = False
            subject_progress = Progress(0, 0)
            self.subject_fulfillment = subject_progress
            self.subject_progress = subject_progress.progress
            self.subject_max = subject_progress.max
            unit_progress = Progress(0, 0)
            self.unit_fulfillment = unit_progress
            self.unit_progress = unit_progress.progress
            self.unit_max = unit_progress.max
            progress = Progress(0, 0)
            self.progress = progress.progress
            self.progress_max = progress.max
            self.percent_fulfilled = progress.get_percent()
            self.fraction_fulfilled = progress.get_fraction()
            self.satisfied_courses = []
            return True
        # Precedence 3: substitutions — each substitution is satisfied by at most
        # one matching course from the user's selection
        if substitutions is not None:
            satisfied_courses = set()
            subs_satisfied = 0
            units_satisfied = 0
            for sub in substitutions:
                for course in courses:
                    if course.satisfies(sub, courses):
                        subs_satisfied += 1
                        units_satisfied += course.total_units
                        satisfied_courses.add(course)
                        break
            if self.statement.is_plain_string and self.threshold is not None:
                subject_progress = Progress(subs_satisfied,
                                            self.threshold.cutoff_for_criterion(CRITERION_SUBJECTS))
                unit_progress = Progress(units_satisfied,
                                         self.threshold.cutoff_for_criterion(CRITERION_UNITS))
                progress = subject_progress if self.threshold.criterion == CRITERION_SUBJECTS else unit_progress
                self.is_fulfilled = progress.progress == progress.max
            else:
                subject_progress = Progress(subs_satisfied, len(substitutions))
                self.is_fulfilled = subs_satisfied == len(substitutions)
                unit_progress = Progress(subs_satisfied * DEFAULT_UNIT_COUNT, len(substitutions) * DEFAULT_UNIT_COUNT)
                progress = subject_progress
            self.subject_fulfillment = subject_progress
            self.subject_progress = subject_progress.progress
            self.subject_max = subject_progress.max
            self.unit_fulfillment = unit_progress
            self.unit_progress = unit_progress.progress
            self.unit_max = unit_progress.max
            self.progress = progress.progress
            self.progress_max = progress.max
            self.percent_fulfilled = progress.get_percent()
            self.fraction_fulfilled = progress.get_fraction()
            self.satisfied_courses = list(satisfied_courses)
            return True
        return False
    def bypass_children(self):
        """Sets the is_bypassed flag of the recursive children of this progress object to True."""
        for child in self.children:
            child.is_bypassed = True
            child.is_fulfilled = False
            child.subject_fulfillment = Progress(0, 0)
            child.subject_progress = 0
            child.subject_max = 0
            child.unit_fulfillment = Progress(0, 0)
            child.unit_progress = 0
            child.unit_max = 0
            child.progress = 0
            child.progress_max = 0
            child.percent_fulfilled = 0
            child.fraction_fulfilled = 0
            child.satisfied_courses = []
            child.assertion = None
            child.bypass_children()
    def compute(self, courses, progress_overrides, progress_assertions):
        """Computes and stores the status of the requirements statement using the
        given list of Course objects."""
        # Compute status of children and then self, adapted from mobile apps' computeRequirementsStatus method
        satisfied_courses = set()
        if self.compute_assertions(courses, progress_assertions):
            self.bypass_children()
            return
        if self.list_path in progress_overrides:
            manual_progress = progress_overrides[self.list_path]
        else:
            manual_progress = 0
        self.is_bypassed = False
        self.assertion = None
        if self.statement.requirement is not None:
            #it is a basic requirement
            if self.statement.is_plain_string and manual_progress != 0 and self.threshold is not None:
                #use manual progress
                self.override_requirement(manual_progress)
                return
            else:
                #Example: requirement CI-H, we want to show how many have been fulfilled
                whole_courses, half_courses = self.courses_satisfying_req(courses)
                satisfied_courses = whole_courses + half_courses
                if not self.threshold is None:
                    #A specific number of courses is required; two half classes count as one subject
                    subject_progress = ceiling_thresh(len(whole_courses) + len(half_courses) // 2, self.threshold.cutoff_for_criterion(CRITERION_SUBJECTS))
                    unit_progress = ceiling_thresh(total_units(satisfied_courses), self.threshold.cutoff_for_criterion(CRITERION_UNITS))
                    is_fulfilled = self.threshold.is_satisfied_by(subject_progress.progress, unit_progress.progress)
                else:
                    #Only one is needed
                    progress_subjects = min(len(satisfied_courses), 1)
                    is_fulfilled = len(satisfied_courses) > 0
                    subject_progress = ceiling_thresh(progress_subjects, 1)
                    if len(satisfied_courses) > 0:
                        unit_progress = ceiling_thresh(list(satisfied_courses)[0].total_units, DEFAULT_UNIT_COUNT)
                    else:
                        unit_progress = ceiling_thresh(0, DEFAULT_UNIT_COUNT)
        if len(self.children) > 0:
            #It's a compound requirement
            num_reqs_satisfied = 0
            satisfied_by_category = []
            satisfied_courses = set()
            num_courses_satisfied = 0
            open_children = []
            for req_progress in self.children:
                req_progress.compute(courses, progress_overrides, progress_assertions)
                req_satisfied_courses = req_progress.satisfied_courses
                # Don't count anything from a requirement that is ignored
                if req_progress.assertion and req_progress.assertion.get("ignore", False):
                    continue
                open_children.append(req_progress)
                if req_progress.is_fulfilled and len(req_progress.satisfied_courses) > 0:
                    num_reqs_satisfied += 1
                satisfied_courses.update(req_satisfied_courses)
                satisfied_by_category.append(list(req_satisfied_courses))
                # For thresholded ANY statements, children that are ALL statements
                # count as a single satisfied course. ANY children count for
                # all of their satisfied courses.
                if req_progress.statement.connection_type == CONNECTION_TYPE_ALL and req_progress.children:
                    num_courses_satisfied += req_progress.is_fulfilled and len(req_progress.satisfied_courses) > 0
                else:
                    num_courses_satisfied += len(req_satisfied_courses)
            satisfied_by_category = [sat for prog, sat in sorted(zip(open_children, satisfied_by_category), key = lambda z: z[0].raw_fraction_fulfilled, reverse = True)]
            sorted_progresses = sorted(open_children, key = lambda req: req.raw_fraction_fulfilled, reverse = True)
            if self.threshold is None and self.distinct_threshold is None:
                is_fulfilled = (num_reqs_satisfied > 0)
                if self.statement.connection_type == CONNECTION_TYPE_ANY:
                    #Simple "any" statement
                    if len(sorted_progresses) > 0:
                        subject_progress = sorted_progresses[0].subject_fulfillment
                        unit_progress = sorted_progresses[0].unit_fulfillment
                    else:
                        subject_progress = Progress(0, 0)
                        unit_progress = Progress(0, 0)
                else:
                    #"All" statement, will be finalized later
                    subject_progress = sum_progresses(sorted_progresses, CRITERION_SUBJECTS, None)
                    unit_progress = sum_progresses(sorted_progresses, CRITERION_UNITS, None)
            else:
                if self.distinct_threshold is not None:
                    #Clip the progresses to the ones which the user is closest to completing
                    num_progresses_to_count = min(self.distinct_threshold.get_actual_cutoff(), len(sorted_progresses))
                    sorted_progresses = sorted_progresses[:num_progresses_to_count]
                    satisfied_by_category = satisfied_by_category[:num_progresses_to_count]
                    satisfied_courses = set()
                    num_courses_satisfied = 0
                    # NOTE(review): satisfied_by_category was re-sorted by raw fraction above,
                    # but open_children here is in the original (unsorted) order — confirm
                    # whether `child` should instead come from sorted_progresses.
                    for i, child in zip(range(num_progresses_to_count), open_children):
                        satisfied_courses.update(satisfied_by_category[i])
                        if child.statement.connection_type == CONNECTION_TYPE_ALL:
                            num_courses_satisfied += (child.is_fulfilled and len(child.satisfied_courses) > 0)
                        else:
                            num_courses_satisfied += len(satisfied_by_category[i])
                if self.threshold is None and self.distinct_threshold is not None:
                    #Required number of statements
                    # Fixed: compare the distinct threshold's *type* to the threshold-type
                    # constants. Previously the threshold object itself was compared to
                    # THRESHOLD_TYPE_GTE, which could never match, so the else-branch
                    # always marked the requirement fulfilled. See the equivalent check
                    # on the distinct threshold below, which uses .type.
                    if self.distinct_threshold.type == THRESHOLD_TYPE_GTE or self.distinct_threshold.type == THRESHOLD_TYPE_GT:
                        is_fulfilled = num_reqs_satisfied >= self.distinct_threshold.get_actual_cutoff()
                    else:
                        is_fulfilled = True
                    subject_progress = sum_progresses(sorted_progresses, CRITERION_SUBJECTS, lambda x: max(x, 1))
                    unit_progress = sum_progresses(sorted_progresses, CRITERION_UNITS, lambda x: (x, DEFAULT_UNIT_COUNT)[x == 0])
                elif self.threshold is not None:
                    #Required number of subjects or units
                    subject_progress = Progress(num_courses_satisfied, self.threshold.cutoff_for_criterion(CRITERION_SUBJECTS))
                    unit_progress = Progress(total_units(satisfied_courses), self.threshold.cutoff_for_criterion(CRITERION_UNITS))
                    if self.distinct_threshold is not None and (self.distinct_threshold.type == THRESHOLD_TYPE_GT or self.distinct_threshold.type == THRESHOLD_TYPE_GTE):
                        is_fulfilled = self.threshold.is_satisfied_by(subject_progress.progress, unit_progress.progress) and num_reqs_satisfied >= self.distinct_threshold.get_actual_cutoff()
                        if num_reqs_satisfied < self.distinct_threshold.get_actual_cutoff():
                            (subject_progress, unit_progress) = force_unfill_progresses(satisfied_by_category, self.distinct_threshold, self.threshold)
                    else:
                        is_fulfilled = self.threshold.is_satisfied_by(subject_progress.progress, unit_progress.progress)
            if self.statement.connection_type == CONNECTION_TYPE_ALL:
                #"All" statement - make above progresses more stringent
                is_fulfilled = is_fulfilled and (num_reqs_satisfied == len(open_children))
                if subject_progress.progress == subject_progress.max and len(open_children) > num_reqs_satisfied:
                    subject_progress.max += len(open_children) - num_reqs_satisfied
                    unit_progress.max += (len(open_children) - num_reqs_satisfied) * DEFAULT_UNIT_COUNT
            #Polish up values
            subject_progress = ceiling_thresh(subject_progress.progress, subject_progress.max)
            unit_progress = ceiling_thresh(unit_progress.progress, unit_progress.max)
        progress = unit_progress if self.threshold is not None and self.threshold.criterion == CRITERION_UNITS else subject_progress
        progress_units = CRITERION_SUBJECTS if self.threshold is None else self.threshold.criterion
        self.is_fulfilled = is_fulfilled
        self.subject_fulfillment = subject_progress
        self.subject_progress = subject_progress.progress
        self.subject_max = subject_progress.max
        self.unit_fulfillment = unit_progress
        self.unit_progress = unit_progress.progress
        self.unit_max = unit_progress.max
        self.progress = progress.progress
        self.progress_max = progress.max
        self.percent_fulfilled = progress.get_percent()
        self.fraction_fulfilled = progress.get_fraction()
        self.raw_fraction_fulfilled = progress.get_raw_fraction(progress_units)
        self.satisfied_courses = list(satisfied_courses)
    def to_json_object(self, full = True, child_fn = None):
        """Returns a JSON dictionary containing the dictionary representation of
        the enclosed requirements statement, as well as progress information."""
        # Recursively decorate the JSON output of the children
        # Add custom keys indicating progress for this statement
        stmt_json = self.statement.to_json_object(full=False)
        stmt_json[JSONProgressConstants.is_fulfilled] = self.is_fulfilled
        stmt_json[JSONProgressConstants.progress] = self.progress
        stmt_json[JSONProgressConstants.progress_max] = self.progress_max
        stmt_json[JSONProgressConstants.percent_fulfilled] = self.percent_fulfilled
        # List comprehension (not map) so the value is JSON-serializable on Python 3 as well
        stmt_json[JSONProgressConstants.satisfied_courses] = [c.subject_id for c in self.satisfied_courses]
        if self.is_bypassed:
            stmt_json[JSONProgressConstants.is_bypassed] = self.is_bypassed
        if self.assertion:
            stmt_json[JSONProgressConstants.assertion] = self.assertion
        if full:
            if self.children:
                if child_fn is None:
                    child_fn = lambda c: c.to_json_object(full=full)
                stmt_json[JSONConstants.requirements] = [child_fn(child) for child in self.children]
        return stmt_json
| requirements/progress.py | 25,548 | Each of these keys will be filled in a RequirementsStatement JSON
representation decorated by a RequirementsProgress object.
An object describing simple progress towards a requirement
Different from RequirementsProgress object as it only includes progress information,
not nested RequirementsProgress objects, fulfillment status, title, and other information
progress: number of units/subjects completed
max: number of units/subjects needed to fulfill requirement
Stores a user's progress towards a given requirements statement. This object
wraps a requirements statement and has a to_json_object() method which
returns the statement's own JSON dictionary representation with progress
information added.
Note: This class is maintained separately from the Django model so that
persistent information can be stored in a database-friendly format, while
information specific to a user's request is transient.
Initializes a progress object with the given requirements statement.
Sets the is_bypassed flag of the recursive children of this progress object to True.
Creates a progress object
Ensures that 0 < progress < maximum
Computes and stores the status of the requirements statement using the
given list of Course objects.
Computes the fulfillment of this requirement based on progress assertions, and returns
True if the requirement has an assertion available or False otherwise.
Assertions are in the format of a dictionary keyed by requirements list paths, where the
values are dictionaries containing three possible keys: "substitutions", which should be a
list of course IDs that combine to substitute for the requirement, "ignore", which
indicates that the requirement is not to be used when satisfying later requirements, and
"override", which is equivalent to the old manual progress value and indicates a progress
toward the requirement in the unit specified by the requirement's threshold type (only
used if the requirement is a plain string requirement and has a threshold). The order of
precedence is override, ignore, substitutions.
Returns the whole courses and the half courses satisfying this requirement
separately.
Adjusts the fulfillment and progress of RequirementsProgress object with both distinct thresholds and thresholds
These requirements follow the form "X subjects/units from at least N categories"
satisfied_by_category: list of lists of Courses for each category
current_distinct_threshold: threshold object for distinct threshold
current_threshold: threshold object for regular threshold
Sets the progress fulfillment variables based on a manual progress value, which is
expressed in either units or subjects depending on the requirement's threshold.
Adds together a list of Progress objects by combining them one by one
criterion_type: either subjects or units
maxFunc: describes how to combine the maximums of the Progress objects
Returns a JSON dictionary containing the dictionary representation of
the enclosed requirements statement, as well as progress information.
Finds the total units in a list of Course objects
list of subjects by category sorted by unitssplit subjects into two sections: fixed and freefixed subjects: must have one subject from each categoryfree subjects: remaining subjects to fill requirement can come from any categorychoose maximum-unit courses to fulfill requirement with least amount of courses possiblefill fixed subjects with maximum-unit course in each categoryremaining subjects/units to fillchoose free courses from all remaining coursesadd fixed and free courses to get total progress Progress assertions Separate whole and half coursesfill with dummy coursesList of substitutionsBoolean Compute status of children and then self, adapted from mobile apps' computeRequirementsStatus methodit is a basic requirementuse manual progressExample: requirement CI-H, we want to show how many have been fulfilledA specific number of courses is requiredOnly one is neededIt's a compound requirement Don't count anything from a requirement that is ignored For thresholded ANY statements, children that are ALL statements count as a single satisfied course. ANY children count for all of their satisfied courses.Simple "any" statement"All" statement, will be finalized laterClip the progresses to the ones which the user is closest to completingRequired number of statementsRequired number of subjects or units"All" statement - make above progresses more stringentPolish up values Recursively decorate the JSON output of the children Add custom keys indicating progress for this statement | 4,551 | en | 0.858053 |
"""Constants for the AVM FRITZ!SmartHome integration."""
from __future__ import annotations
import logging
from typing import Final
from homeassistant.components.binary_sensor import DEVICE_CLASS_WINDOW
from homeassistant.components.fritzbox.model import (
FritzBinarySensorEntityDescription,
FritzSensorEntityDescription,
)
from homeassistant.components.sensor import (
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
)
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
ENERGY_KILO_WATT_HOUR,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
)
# Extra state attributes exposed on FRITZ!SmartHome entities.
ATTR_STATE_BATTERY_LOW: Final = "battery_low"
ATTR_STATE_DEVICE_LOCKED: Final = "device_locked"
ATTR_STATE_HOLIDAY_MODE: Final = "holiday_mode"
ATTR_STATE_LOCKED: Final = "locked"
ATTR_STATE_SUMMER_MODE: Final = "summer_mode"
ATTR_STATE_WINDOW_OPEN: Final = "window_open"
ATTR_TEMPERATURE_UNIT: Final = "temperature_unit"
# Keys used in hass.data for this integration.
CONF_CONNECTIONS: Final = "connections"
CONF_COORDINATOR: Final = "coordinator"
# Defaults for the config flow.
DEFAULT_HOST: Final = "fritz.box"
DEFAULT_USERNAME: Final = "admin"
DOMAIN: Final = "fritzbox"
LOGGER: Final[logging.Logger] = logging.getLogger(__package__)
# Entity platforms set up by this integration.
PLATFORMS: Final[list[str]] = ["binary_sensor", "climate", "switch", "sensor"]
# Binary sensor descriptions; `suitable` decides whether a device gets the
# entity, `is_on` maps the device to the entity state.
BINARY_SENSOR_TYPES: Final[tuple[FritzBinarySensorEntityDescription, ...]] = (
    FritzBinarySensorEntityDescription(
        key="alarm",
        name="Alarm",
        device_class=DEVICE_CLASS_WINDOW,
        suitable=lambda device: device.has_alarm,  # type: ignore[no-any-return]
        is_on=lambda device: device.alert_state,  # type: ignore[no-any-return]
    ),
)
# Sensor descriptions; `native_value` maps the device to the sensor reading.
SENSOR_TYPES: Final[tuple[FritzSensorEntityDescription, ...]] = (
    FritzSensorEntityDescription(
        key="temperature",
        name="Temperature",
        native_unit_of_measurement=TEMP_CELSIUS,
        device_class=DEVICE_CLASS_TEMPERATURE,
        state_class=STATE_CLASS_MEASUREMENT,
        # Thermostats expose temperature through the climate entity instead.
        suitable=lambda device: (
            device.has_temperature_sensor and not device.has_thermostat
        ),
        native_value=lambda device: device.temperature,  # type: ignore[no-any-return]
    ),
    FritzSensorEntityDescription(
        key="battery",
        name="Battery",
        native_unit_of_measurement=PERCENTAGE,
        device_class=DEVICE_CLASS_BATTERY,
        suitable=lambda device: device.battery_level is not None,
        native_value=lambda device: device.battery_level,  # type: ignore[no-any-return]
    ),
    FritzSensorEntityDescription(
        key="power_consumption",
        name="Power Consumption",
        native_unit_of_measurement=POWER_WATT,
        device_class=DEVICE_CLASS_POWER,
        state_class=STATE_CLASS_MEASUREMENT,
        suitable=lambda device: device.has_powermeter,  # type: ignore[no-any-return]
        # Device reports milliwatts; convert to watts.
        native_value=lambda device: device.power / 1000 if device.power else 0.0,
    ),
    FritzSensorEntityDescription(
        key="total_energy",
        name="Total Energy",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        device_class=DEVICE_CLASS_ENERGY,
        state_class=STATE_CLASS_TOTAL_INCREASING,
        suitable=lambda device: device.has_powermeter,  # type: ignore[no-any-return]
        # Device reports watt-hours; convert to kWh.
        native_value=lambda device: device.energy / 1000 if device.energy else 0.0,
    ),
)
| homeassistant/components/fritzbox/const.py | 3,355 | Constants for the AVM FRITZ!SmartHome integration.
type: ignore[no-any-return] type: ignore[no-any-return] type: ignore[no-any-return] type: ignore[no-any-return] type: ignore[no-any-return] type: ignore[no-any-return] | 220 | en | 0.34959 |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test addressindex generation and fetching
#
import time
from test_framework.test_framework import SagbitTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
class AddressIndexTest(SagbitTestFramework):
    """Functional test for the -addressindex RPC interface.

    Covers per-address txid lookup, balances, deltas, utxos, mempool
    entries, and index consistency across a chain reorg.  Node 0 runs
    without the index and only generates blocks/transactions; nodes 1-3
    run with -addressindex enabled.

    NOTE: Python 2 source (print statements, str.decode("hex")).
    """

    def setup_chain(self):
        # Fresh 4-node chain so the block heights asserted below are deterministic.
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)

    def setup_network(self):
        """Start four nodes, all connected through node 0 (no network split)."""
        self.nodes = []
        # Nodes 0/1 are "wallet" nodes
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-relaypriority=0"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-addressindex"]))
        # Nodes 2/3 are used for testing
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-addressindex", "-relaypriority=0"]))
        self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-addressindex"]))
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        """Drive the whole addressindex scenario end to end."""
        print "Mining blocks..."
        self.nodes[0].generate(105)
        self.sync_all()
        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)
        # Check that balances are correct
        balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(balance0["balance"], 0)
        # Check p2pkh and p2sh address indexes
        print "Testing p2pkh and p2sh address index..."
        txid0 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 10)
        self.nodes[0].generate(1)
        txidb0 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 10)
        self.nodes[0].generate(1)
        txid1 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 15)
        self.nodes[0].generate(1)
        txidb1 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 15)
        self.nodes[0].generate(1)
        txid2 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 20)
        self.nodes[0].generate(1)
        txidb2 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 20)
        self.nodes[0].generate(1)
        self.sync_all()
        # Txids are expected in confirmation order.
        txids = self.nodes[1].getaddresstxids("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs")
        assert_equal(len(txids), 3)
        assert_equal(txids[0], txid0)
        assert_equal(txids[1], txid1)
        assert_equal(txids[2], txid2)
        txidsb = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(len(txidsb), 3)
        assert_equal(txidsb[0], txidb0)
        assert_equal(txidsb[1], txidb1)
        assert_equal(txidsb[2], txidb2)
        # Check that limiting by height works
        print "Testing querying txids by range of block heights.."
        height_txids = self.nodes[1].getaddresstxids({
            "addresses": ["2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br"],
            "start": 105,
            "end": 110
        })
        assert_equal(len(height_txids), 2)
        assert_equal(height_txids[0], txidb0)
        assert_equal(height_txids[1], txidb1)
        # Check that multiple addresses works
        multitxids = self.nodes[1].getaddresstxids({"addresses": ["2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", "mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs"]})
        assert_equal(len(multitxids), 6)
        assert_equal(multitxids[0], txid0)
        assert_equal(multitxids[1], txidb0)
        assert_equal(multitxids[2], txid1)
        assert_equal(multitxids[3], txidb1)
        assert_equal(multitxids[4], txid2)
        assert_equal(multitxids[5], txidb2)
        # Check that balances are correct
        balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(balance0["balance"], 45 * 100000000)
        # Check that outputs with the same address will only return one txid
        print "Testing for txid uniqueness..."
        addressHash = "6349a418fc4578d10a372b54b45c280cc8c4382f".decode("hex")
        # p2sh script paying twice to the same hash within one transaction.
        scriptPubKey = CScript([OP_HASH160, addressHash, OP_EQUAL])
        unspent = self.nodes[0].listunspent()
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        tx.vout = [CTxOut(10, scriptPubKey), CTxOut(11, scriptPubKey)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        sent_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        # Despite two outputs, the txid must appear only once.
        txidsmany = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(len(txidsmany), 4)
        assert_equal(txidsmany[3], sent_txid)
        # Check that balances are correct
        print "Testing balances..."
        balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(balance0["balance"], 45 * 100000000 + 21)
        # Check that balances are correct after spending
        print "Testing balances after spending..."
        privkey2 = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
        address2 = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
        addressHash2 = "0b2f0a0c31bfe0406b0ccc1381fdbe311946dadc".decode("hex")
        scriptPubKey2 = CScript([OP_DUP, OP_HASH160, addressHash2, OP_EQUALVERIFY, OP_CHECKSIG])
        self.nodes[0].importprivkey(privkey2)
        unspent = self.nodes[0].listunspent()
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        # Amounts below are tracked in satoshis (coin amount * 1e8).
        amount = unspent[0]["amount"] * 100000000
        tx.vout = [CTxOut(amount, scriptPubKey2)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        spending_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        balance1 = self.nodes[1].getaddressbalance(address2)
        assert_equal(balance1["balance"], amount)
        # Spend it again, sending change back to address2.
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(spending_txid, 16), 0))]
        send_amount = 1 * 100000000 + 12840
        change_amount = amount - send_amount - 10000
        tx.vout = [CTxOut(change_amount, scriptPubKey2), CTxOut(send_amount, scriptPubKey)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        sent_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        balance2 = self.nodes[1].getaddressbalance(address2)
        assert_equal(balance2["balance"], change_amount)
        # Check that deltas are returned correctly
        deltas = self.nodes[1].getaddressdeltas({"addresses": [address2], "start": 1, "end": 200})
        balance3 = 0
        for delta in deltas:
            balance3 += delta["satoshis"]
        # The sum of all deltas must equal the current balance.
        assert_equal(balance3, change_amount)
        assert_equal(deltas[0]["address"], address2)
        assert_equal(deltas[0]["blockindex"], 1)
        # Check that entire range will be queried
        deltasAll = self.nodes[1].getaddressdeltas({"addresses": [address2]})
        assert_equal(len(deltasAll), len(deltas))
        # Check that deltas can be returned from range of block heights
        deltas = self.nodes[1].getaddressdeltas({"addresses": [address2], "start": 113, "end": 113})
        assert_equal(len(deltas), 1)
        # Check that unspent outputs can be queried
        print "Testing utxos..."
        utxos = self.nodes[1].getaddressutxos({"addresses": [address2]})
        assert_equal(len(utxos), 1)
        assert_equal(utxos[0]["satoshis"], change_amount)
        # Check that indexes will be updated with a reorg
        print "Testing reorg..."
        best_hash = self.nodes[0].getbestblockhash()
        # Invalidate the tip on every node so the last spend is rolled back.
        self.nodes[0].invalidateblock(best_hash)
        self.nodes[1].invalidateblock(best_hash)
        self.nodes[2].invalidateblock(best_hash)
        self.nodes[3].invalidateblock(best_hash)
        self.sync_all()
        balance4 = self.nodes[1].getaddressbalance(address2)
        assert_equal(balance4, balance1)
        utxos2 = self.nodes[1].getaddressutxos({"addresses": [address2]})
        assert_equal(len(utxos2), 1)
        assert_equal(utxos2[0]["satoshis"], amount)
        # Check sorting of utxos
        self.nodes[2].generate(150)
        txidsort1 = self.nodes[2].sendtoaddress(address2, 50)
        self.nodes[2].generate(1)
        txidsort2 = self.nodes[2].sendtoaddress(address2, 50)
        self.nodes[2].generate(1)
        self.sync_all()
        # Utxos are expected sorted ascending by confirmation height.
        utxos3 = self.nodes[1].getaddressutxos({"addresses": [address2]})
        assert_equal(len(utxos3), 3)
        assert_equal(utxos3[0]["height"], 114)
        assert_equal(utxos3[1]["height"], 264)
        assert_equal(utxos3[2]["height"], 265)
        # Check mempool indexing
        print "Testing mempool indexing..."
        privKey3 = "cVfUn53hAbRrDEuMexyfgDpZPhF7KqXpS8UZevsyTDaugB7HZ3CD"
        address3 = "mw4ynwhS7MmrQ27hr82kgqu7zryNDK26JB"
        addressHash3 = "aa9872b5bbcdb511d89e0e11aa27da73fd2c3f50".decode("hex")
        scriptPubKey3 = CScript([OP_DUP, OP_HASH160, addressHash3, OP_EQUALVERIFY, OP_CHECKSIG])
        address4 = "2N8oFVB2vThAKury4vnLquW2zVjsYjjAkYQ"
        scriptPubKey4 = CScript([OP_HASH160, addressHash3, OP_EQUAL])
        unspent = self.nodes[2].listunspent()
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        amount = unspent[0]["amount"] * 100000000
        tx.vout = [CTxOut(amount, scriptPubKey3)]
        tx.rehash()
        signed_tx = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        memtxid1 = self.nodes[2].sendrawtransaction(signed_tx["hex"], True)
        # Timing-based wait for the mempool index to pick up the tx; could flake.
        time.sleep(2)
        tx2 = CTransaction()
        tx2.vin = [CTxIn(COutPoint(int(unspent[1]["txid"], 16), unspent[1]["vout"]))]
        amount = unspent[1]["amount"] * 100000000
        tx2.vout = [
            CTxOut(amount / 4, scriptPubKey3),
            CTxOut(amount / 4, scriptPubKey3),
            CTxOut(amount / 4, scriptPubKey4),
            CTxOut(amount / 4, scriptPubKey4)
        ]
        tx2.rehash()
        signed_tx2 = self.nodes[2].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8"))
        memtxid2 = self.nodes[2].sendrawtransaction(signed_tx2["hex"], True)
        time.sleep(2)
        mempool = self.nodes[2].getaddressmempool({"addresses": [address3]})
        assert_equal(len(mempool), 3)
        assert_equal(mempool[0]["txid"], memtxid1)
        assert_equal(mempool[0]["address"], address3)
        assert_equal(mempool[0]["index"], 0)
        assert_equal(mempool[1]["txid"], memtxid2)
        assert_equal(mempool[1]["index"], 0)
        assert_equal(mempool[2]["txid"], memtxid2)
        assert_equal(mempool[2]["index"], 1)
        # Confirming the txs must clear them from the mempool index.
        self.nodes[2].generate(1);
        self.sync_all();
        mempool2 = self.nodes[2].getaddressmempool({"addresses": [address3]})
        assert_equal(len(mempool2), 0)
        tx = CTransaction()
        tx.vin = [
            CTxIn(COutPoint(int(memtxid2, 16), 0)),
            CTxIn(COutPoint(int(memtxid2, 16), 1))
        ]
        tx.vout = [CTxOut(amount / 2 - 10000, scriptPubKey2)]
        tx.rehash()
        self.nodes[2].importprivkey(privKey3)
        signed_tx3 = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        memtxid3 = self.nodes[2].sendrawtransaction(signed_tx3["hex"], True)
        time.sleep(2)
        # Spends show up as mempool entries referencing the previous outputs.
        mempool3 = self.nodes[2].getaddressmempool({"addresses": [address3]})
        assert_equal(len(mempool3), 2)
        assert_equal(mempool3[0]["prevtxid"], memtxid2)
        assert_equal(mempool3[0]["prevout"], 0)
        assert_equal(mempool3[1]["prevtxid"], memtxid2)
        assert_equal(mempool3[1]["prevout"], 1)
        # sending and receiving to the same address
        privkey1 = "cQY2s58LhzUCmEXN8jtAp1Etnijx78YRZ466w4ikX1V4UpTpbsf8"
        address1 = "myAUWSHnwsQrhuMWv4Br6QsCnpB41vFwHn"
        address1hash = "c192bff751af8efec15135d42bfeedf91a6f3e34".decode("hex")
        address1script = CScript([OP_DUP, OP_HASH160, address1hash, OP_EQUALVERIFY, OP_CHECKSIG])
        self.nodes[0].sendtoaddress(address1, 10)
        self.nodes[0].generate(1)
        self.sync_all()
        utxos = self.nodes[1].getaddressutxos({"addresses": [address1]})
        assert_equal(len(utxos), 1)
        tx = CTransaction()
        tx.vin = [
            CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["outputIndex"]))
        ]
        amount = utxos[0]["satoshis"] - 1000
        tx.vout = [CTxOut(amount, address1script)]
        tx.rehash()
        self.nodes[0].importprivkey(privkey1)
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        mem_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.sync_all()
        # One spend delta plus one receive delta for the same address.
        mempool_deltas = self.nodes[2].getaddressmempool({"addresses": [address1]})
        assert_equal(len(mempool_deltas), 2)
        # Include chaininfo in results
        print "Testing results with chain info..."
        deltas_with_info = self.nodes[1].getaddressdeltas({
            "addresses": [address2],
            "start": 1,
            "end": 200,
            "chainInfo": True
        })
        start_block_hash = self.nodes[1].getblockhash(1);
        end_block_hash = self.nodes[1].getblockhash(200);
        assert_equal(deltas_with_info["start"]["height"], 1)
        assert_equal(deltas_with_info["start"]["hash"], start_block_hash)
        assert_equal(deltas_with_info["end"]["height"], 200)
        assert_equal(deltas_with_info["end"]["hash"], end_block_hash)
        utxos_with_info = self.nodes[1].getaddressutxos({"addresses": [address2], "chainInfo": True})
        expected_tip_block_hash = self.nodes[1].getblockhash(267);
        assert_equal(utxos_with_info["height"], 267)
        assert_equal(utxos_with_info["hash"], expected_tip_block_hash)
        print "Passed\n"
# Run the test directly as a script.
if __name__ == '__main__':
    AddressIndexTest().main()
| qa/rpc-tests/addressindex.py | 14,751 | !/usr/bin/env python2 Copyright (c) 2014-2015 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Test addressindex generation and fetching Nodes 0/1 are "wallet" nodes Nodes 2/3 are used for testing Check that balances are correct Check p2pkh and p2sh address indexes Check that limiting by height works Check that multiple addresses works Check that balances are correct Check that outputs with the same address will only return one txid Check that balances are correct Check that balances are correct after spending Check that deltas are returned correctly Check that entire range will be queried Check that deltas can be returned from range of block heights Check that unspent outputs can be queried Check that indexes will be updated with a reorg Check sorting of utxos Check mempool indexing sending and receiving to the same address Include chaininfo in results | 977 | en | 0.839172 |
# Copyright (c) 2008-2013 Szczepan Faber, Serhiy Oplakanets, Herr Kaste
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from test_base import *
from mockito import mock, when, verify, VerificationError, verifyNoMoreInteractions
from mockito.verification import never
class VerificationErrorsTest(TestBase):
    """Asserts the exact wording of mockito's verification failure messages.

    NOTE(review): each test relies on verify()/verifyNoMoreInteractions()
    raising; if verification unexpectedly succeeds, the except body is
    skipped and the test silently passes -- consider failing explicitly
    after the verified call.  (Python 2 `except X, e` syntax throughout.)
    """

    def testPrintsNicely(self):
        # Zero-argument wanted call, no actual invocations recorded.
        theMock = mock()
        try:
            verify(theMock).foo()
        except VerificationError, e:
            self.assertEquals('\nWanted but not invoked: foo()\nInstead got: []', str(e))

    def testPrintsNicelyOneArgument(self):
        # String argument is rendered with repr-style quotes.
        theMock = mock()
        try:
            verify(theMock).foo("bar")
        except VerificationError, e:
            self.assertEquals("\nWanted but not invoked: foo('bar')\nInstead got: []", str(e))

    def testPrintsNicelyArguments(self):
        theMock = mock()
        try:
            verify(theMock).foo(1, 2)
        except VerificationError, e:
            self.assertEquals('\nWanted but not invoked: foo(1, 2)\nInstead got: []', str(e))

    def testPrintsNicelyStringArguments(self):
        theMock = mock()
        try:
            verify(theMock).foo(1, 'foo')
        except VerificationError, e:
            self.assertEquals("\nWanted but not invoked: foo(1, 'foo')\nInstead got: []", str(e))

    def testPrintsOutThatTheActualAndExpectedInvocationCountDiffers(self):
        # Default verify() expects exactly one call; two were made.
        theMock = mock()
        when(theMock).foo().thenReturn(0)
        theMock.foo()
        theMock.foo()
        try:
            verify(theMock).foo()
        except VerificationError, e:
            self.assertEquals("\nWanted times: 1, actual times: 2", str(e))

    # TODO: implement
    def disabled_PrintsNicelyWhenArgumentsDifferent(self):
        # Disabled (name does not match test discovery): message for the
        # same method invoked with different arguments.
        theMock = mock()
        theMock.foo('foo', 1)
        try:
            verify(theMock).foo(1, 'foo')
        except VerificationError, e:
            self.assertEquals(
"""Arguments are different.
Wanted: foo(1, 'foo')
Actual: foo('foo', 1)""", str(e))

    def testPrintsUnwantedInteraction(self):
        theMock = mock()
        theMock.foo(1, 'foo')
        try:
            verifyNoMoreInteractions(theMock)
        except VerificationError, e:
            self.assertEquals("\nUnwanted interaction: foo(1, 'foo')", str(e))

    def testPrintsNeverWantedInteractionsNicely(self):
        # verify(mock, never) must report an invocation that did happen.
        theMock = mock()
        theMock.foo()
        self.assertRaisesMessage("\nUnwanted invocation of foo(), times: 1", verify(theMock, never).foo)
# Run the test suite directly as a script.
if __name__ == '__main__':
    unittest.main()
| mockito-0.5.2/mockito_test/verification_errors_test.py | 3,501 | Copyright (c) 2008-2013 Szczepan Faber, Serhiy Oplakanets, Herr Kaste Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. TODO: implement | 1,136 | en | 0.851545 |
# version 0.1
# by DrLecter
import sys
from com.l2jfrozen import Config
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest
qn = "420_LittleWings"

# variables section
REQUIRED_EGGS = 20  # eggs needed before a dragon exchanges them for a fertile one
#Drop rates in %
BACK_DROP = 30  # toad-lord back-skin drop chance (scaled by RATE_DROP_QUEST in onKill)
EGG_DROP = 50   # egg drop chance (scaled likewise)
#Quest items
# Item ids 3816..3831 plus 3499 (fairy dust), unpacked positionally.
FRY_STN,FRY_STN_DLX,FSN_LIST,FSN_LIST_DLX,TD_BCK_SKN,JUICE,SCALE_1,EX_EGG,\
SCALE_2,ZW_EGG,SCALE_3,KA_EGG,SCALE_4,SU_EGG,SCALE_5,SH_EGG,FRY_DUST = range(3816,3832)+[3499]
#NPCs
PM_COOPER,SG_CRONOS,GD_BYRON,MC_MARIA,FR_MYMYU = 30829,30610,30711,30608,30747
DK_EXARION,DK_ZWOV,DK_KALIBRAN,WM_SUZET,WM_SHAMHAI = range(30748,30753)
#mobs
TD_LORD = 20231 #toad lord
LO_LZRD_W = 20580 #exarion's
MS_SPIDER = 20233 #zwov's
RD_SCVNGR = 20551 #kalibran's
BO_OVERLD = 20270 #suzet's
DD_SEEKER = 20202 #shamhai's
#Rewards
FOOD = 4038   # bonus reward given in stacks of 100 (see "give_dust" handler)
ARMOR = 3912  # alternative bonus reward, quantity 1
# helper functions section
def check_level(st):
    """Entry gate for the quest: players below level 35 are turned away
    and the quest state is reset.

    Returns the HTML page shown by the starter NPC.
    """
    player = st.getPlayer()
    if player.getLevel() >= 35:
        return "Start.htm"
    st.exitQuest(True)
    return "420_low_level.htm"
def check_stone(st,progress) :
    """Cronos' check for a finished fairy stone in the player's inventory.

    Advances "progress" (3/4 on the first pass, 10/11 on the repeat pass
    after the deluxe stone was destroyed) and returns his dialogue page.
    Falls through (returns None) when a stone is present but progress is
    at an unexpected step.
    """
    if st.getQuestItemsCount(FRY_STN) == 1 :
        st.set("cond","3")
        if progress == 1 :
            st.set("progress","3")
            return "420_cronos_8.htm"
        elif progress == 8 :
            st.set("progress","10")
            return "420_cronos_14.htm"
    elif st.getQuestItemsCount(FRY_STN_DLX) == 1 :
        # NOTE(review): unlike the regular-stone branch, this one does not
        # set cond to 3 -- confirm whether that is intentional.
        if progress == 2 :
            st.set("progress","4")
            return "420_cronos_8.htm"
        elif progress == 9 :
            st.set("progress","11")
            return "420_cronos_14.htm"
    else :
        # No stone at all yet.
        return "420_cronos_7.htm"
def check_elements(st,progress) :
    """Maria's ingredient check for the chosen fairy-stone recipe.

    progress 1/8 -> regular stone recipe, 2/9 -> deluxe recipe.
    Returns the "missing materials" page or the crafting confirmation page.
    The numeric ids are raw item ids (1870, 1871, 2130, 2131, 1873, 1875)
    -- names not visible here; quantities match craft_stone() below.
    """
    coal = st.getQuestItemsCount(1870)
    char = st.getQuestItemsCount(1871)
    gemd = st.getQuestItemsCount(2130)
    gemc = st.getQuestItemsCount(2131)
    snug = st.getQuestItemsCount(1873)
    sofp = st.getQuestItemsCount(1875)
    tdbk = st.getQuestItemsCount(TD_BCK_SKN)
    if progress in [1,8] :
        # Regular recipe: 10 coal-type, 10 char-type, 1 gem (2130),
        # 3 nuggets (1873), 10 toad back skins.
        if coal >= 10 and char >= 10 and gemd >= 1 and snug >= 3 and tdbk >= 10 :
            return "420_maria_2.htm"
        else :
            return "420_maria_1.htm"
    elif progress in [2,9] :
        # Deluxe recipe: higher-grade gem (2131), 5 nuggets, 1 of item 1875,
        # 20 toad back skins.
        if coal >= 10 and char >= 10 and gemc >= 1 and snug >= 5 and sofp >= 1 and tdbk >= 20 :
            return "420_maria_4.htm"
        else :
            return "420_maria_1.htm"
def craft_stone(st,progress) :
    """Consume the recipe list plus materials and hand over the crafted
    fairy stone (regular for progress 1/8, deluxe for progress 2/9).

    Quantities mirror the checks in check_elements(); returns Maria's
    confirmation page, or None for any other progress value.
    """
    if progress in [1,8]:
        st.takeItems(1870,10)
        st.takeItems(1871,10)
        st.takeItems(2130,1)
        st.takeItems(1873,3)
        st.takeItems(TD_BCK_SKN,10)
        st.takeItems(FSN_LIST,1)
        st.giveItems(FRY_STN,1)
        st.playSound("ItemSound.quest_itemget")
        return "420_maria_3.htm"
    elif progress in [2,9]:
        st.takeItems(1870,10)
        st.takeItems(1871,10)
        st.takeItems(2131,1)
        st.takeItems(1873,5)
        st.takeItems(1875,1)
        st.takeItems(TD_BCK_SKN,20)
        st.takeItems(FSN_LIST_DLX,1)
        st.giveItems(FRY_STN_DLX,1)
        st.playSound("ItemSound.quest_itemget")
        return "420_maria_5.htm"
def check_eggs(st, npc, progress) :
    """Handle the 20-eggs-for-one-fertile-egg exchange at each dragon NPC.

    "dragon" (1-5) selects which egg item applies; the exchange takes the
    20 eggs plus the NPC's scale, advances progress 14/21 -> 19 or
    15/22 -> 20, gives one egg back and sets cond to 7.

    Branch quirks (faithful to the original):
      * "mymyu" only reports status, never exchanges.
      * "kalibran" takes the 20 eggs here but defers the scale/progress/
        reward step to the "kalibran_2" event in Quest.onEvent (his
        st.takeItems(SCALE_3,1) is deliberately commented out below).
      * "suzet" uses pages _4/_5 where the others use _3/_4.
    The trailing return is a debug fallback for an NPC/dragon mismatch.
    """
    whom = int(st.get("dragon"))
    if whom == 1 : eggs = EX_EGG
    elif whom == 2 : eggs = ZW_EGG
    elif whom == 3 : eggs = KA_EGG
    elif whom == 4 : eggs = SU_EGG
    elif whom == 5 : eggs = SH_EGG
    if npc == "mymyu" :
        # Status only: one exchanged egg, enough raw eggs, or keep hunting.
        if progress in [19,20] and st.getQuestItemsCount(eggs) == 1 :
            return "420_"+npc+"_10.htm"
        else :
            if st.getQuestItemsCount(eggs) >= 20 :
                return "420_"+npc+"_9.htm"
            else :
                return "420_"+npc+"_8.htm"
    elif npc == "exarion" and whom == 1 :
        if st.getQuestItemsCount(eggs) < 20 :
            return "420_"+npc+"_3.htm"
        else :
            st.takeItems(eggs,20)
            st.takeItems(SCALE_1,1)
            if progress in [14,21] :
                st.set("progress","19")
            elif progress in [15,22] :
                st.set("progress","20")
            st.giveItems(eggs,1)
            st.playSound("ItemSound.quest_itemget")
            st.set("cond","7")
            return "420_"+npc+"_4.htm"
    elif npc == "zwov" and whom == 2 :
        if st.getQuestItemsCount(eggs) < 20 :
            return "420_"+npc+"_3.htm"
        else :
            st.takeItems(eggs,20)
            st.takeItems(SCALE_2,1)
            if progress in [14,21] :
                st.set("progress","19")
            elif progress in [15,22] :
                st.set("progress","20")
            st.giveItems(eggs,1)
            st.set("cond","7")
            st.playSound("ItemSound.quest_itemget")
            return "420_"+npc+"_4.htm"
    elif npc == "kalibran" and whom == 3 :
        if st.getQuestItemsCount(eggs) < 20 :
            return "420_"+npc+"_3.htm"
        else :
            st.takeItems(eggs,20)
            # st.takeItems(SCALE_3,1)
            return "420_"+npc+"_4.htm"
    elif npc == "suzet" and whom == 4 :
        if st.getQuestItemsCount(eggs) < 20 :
            return "420_"+npc+"_4.htm"
        else :
            st.takeItems(eggs,20)
            st.takeItems(SCALE_4,1)
            if progress in [14,21] :
                st.set("progress","19")
            elif progress in [15,22] :
                st.set("progress","20")
            st.giveItems(eggs,1)
            st.set("cond","7")
            st.playSound("ItemSound.quest_itemget")
            return "420_"+npc+"_5.htm"
    elif npc == "shamhai" and whom == 5 :
        if st.getQuestItemsCount(eggs) < 20 :
            return "420_"+npc+"_3.htm"
        else :
            st.takeItems(eggs,20)
            st.takeItems(SCALE_5,1)
            if progress in [14,21] :
                st.set("progress","19")
            elif progress in [15,22] :
                st.set("progress","20")
            st.giveItems(eggs,1)
            st.set("cond","7")
            st.playSound("ItemSound.quest_itemget")
            return "420_"+npc+"_4.htm"
    # Debug fallback: NPC name does not match the recorded dragon.
    return "check_eggs sux"
# Main Quest Code
class Quest (JQuest):
    """Quest 420 "Little Wings": craft a fairy stone, collect dragonfly
    eggs for one of five dragons and hatch a hatchling pet.

    Script state lives in two quest variables: "progress" (internal step
    counter; 0-13 pre-egg phase, 14+ egg phase, 19/20 ready to hatch)
    and "dragon" (1-5, which egg-giving NPC the player committed to).
    """

    def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)

    def onEvent (self,event,st):
        """Dispatch dialogue-link events; returns the next HTML page
        (or None for unknown event/state combinations)."""
        id = st.getState()
        progress = st.getInt("progress")
        if id == CREATED :
            # Any event while CREATED first resets cond.
            st.set("cond","0")
            if event == "ido" :
                # Accept the quest.
                st.setState(STARTING)
                st.set("progress","0")
                st.set("cond","1")
                st.set("dragon","0")
                st.playSound("ItemSound.quest_accept")
                return "Starting.htm"
        elif id == STARTING :
            if event == "wait" :
                return craft_stone(st,progress)
            elif event == "cronos_2" :
                return "420_cronos_2.htm"
            elif event == "cronos_3" :
                return "420_cronos_3.htm"
            elif event == "cronos_4" :
                return "420_cronos_4.htm"
            elif event == "fsn" :
                # Choose the regular fairy-stone recipe.
                st.set("cond","2")
                if progress == 0:
                    st.set("progress","1")
                    st.giveItems(FSN_LIST,1)
                    st.playSound("ItemSound.quest_itemget")
                    return "420_cronos_5.htm"
                elif progress == 7:
                    # Repeat pass after the deluxe stone was destroyed.
                    st.set("progress","8")
                    st.giveItems(FSN_LIST,1)
                    st.playSound("ItemSound.quest_itemget")
                    return "420_cronos_12.htm"
            elif event == "fsn_dlx" :
                # Choose the deluxe fairy-stone recipe.
                st.set("cond","2")
                if progress == 0:
                    st.set("progress","2")
                    st.giveItems(FSN_LIST_DLX,1)
                    st.playSound("ItemSound.quest_itemget")
                    return "420_cronos_6.htm"
                if progress == 7:
                    st.set("progress","9")
                    st.giveItems(FSN_LIST_DLX,1)
                    st.playSound("ItemSound.quest_itemget")
                    return "420_cronos_13.htm"
            elif event == "showfsn" :
                return "420_byron_2.htm"
            elif event == "askmore" :
                st.set("cond","4")
                if progress == 3 :
                    st.set("progress","5")
                    return "420_byron_3.htm"
                elif progress == 4 :
                    st.set("progress","6")
                    return "420_byron_4.htm"
            elif event == "give_fsn" :
                st.takeItems(FRY_STN,1)
                return "420_mymyu_2.htm"
            elif event == "give_fsn_dlx" :
                # The deluxe stone additionally yields fairy dust, which
                # unlocks the bonus reward at hatch time ("give_dust").
                st.takeItems(FRY_STN_DLX,1)
                st.giveItems(FRY_DUST,1)
                st.playSound("ItemSound.quest_itemget")
                return "420_mymyu_4.htm"
            elif event == "fry_ask" :
                return "420_mymyu_5.htm"
            elif event == "ask_abt" :
                # Move to the egg-collection phase; juice is the pass item
                # that the dragon NPCs consume on first talk.
                st.setState(STARTED)
                st.set("cond","5")
                st.giveItems(JUICE,1)
                st.playSound("ItemSound.quest_itemget")
                return "420_mymyu_6.htm"
        elif id == STARTED :
            if event == "exarion_1" :
                # Commit to Exarion: record the dragon and hand out his scale.
                st.giveItems(SCALE_1,1)
                st.playSound("ItemSound.quest_itemget")
                st.set("dragon","1")
                st.set("cond","6")
                st.set("progress",str(progress+9))
                return "420_exarion_2.htm"
            elif event == "kalibran_1" :
                st.set("dragon","3")
                st.set("cond","6")
                st.giveItems(SCALE_3,1)
                st.playSound("ItemSound.quest_itemget")
                st.set("progress",str(progress+9))
                return "420_kalibran_2.htm"
            elif event == "kalibran_2" :
                # Kalibran's egg exchange completes here rather than in
                # check_eggs(), which only takes the 20 eggs for him.
                if st.getQuestItemsCount(SCALE_3) :
                    if progress in [14,21] :
                        st.set("progress","19")
                    elif progress in [15,22] :
                        st.set("progress","20")
                    st.takeItems(SCALE_3,1)
                    st.giveItems(KA_EGG,1)
                    st.set("cond","7")
                    st.playSound("ItemSound.quest_itemget")
                    return "420_kalibran_5.htm"
            elif event == "zwov_1" :
                st.set("dragon","2")
                st.set("cond","6")
                st.giveItems(SCALE_2,1)
                st.playSound("ItemSound.quest_itemget")
                st.set("progress",str(progress+9))
                return "420_zwov_2.htm"
            elif event == "shamhai_1" :
                st.set("dragon","5")
                st.set("cond","6")
                st.giveItems(SCALE_5,1)
                st.playSound("ItemSound.quest_itemget")
                st.set("progress",str(progress+9))
                return "420_shamhai_2.htm"
            elif event == "suzet_1" :
                return "420_suzet_2.htm"
            elif event == "suzet_2" :
                st.set("dragon","4")
                st.set("cond","6")
                st.giveItems(SCALE_4,1)
                st.playSound("ItemSound.quest_itemget")
                st.set("progress",str(progress+9))
                return "420_suzet_3.htm"
            elif event == "hatch" :
                # "dragon" is set by the commit events above, so whom is
                # assumed to be 1-5 here -- a value of 0 would leave eggs
                # unbound (NameError); TODO confirm this cannot happen.
                whom = int(st.get("dragon"))
                if whom == 1 : eggs = EX_EGG
                elif whom == 2 : eggs = ZW_EGG
                elif whom == 3 : eggs = KA_EGG
                elif whom == 4 : eggs = SU_EGG
                elif whom == 5 : eggs = SH_EGG
                if st.getQuestItemsCount(eggs) and progress in [19,20] :
                    st.takeItems(eggs,1)
                    st.set("cond","8")
                    if progress == 19 :
                        # Regular path: random hatchling item 3500-3502, done.
                        st.giveItems(3500+st.getRandom(3),1)
                        st.exitQuest(True)
                        st.playSound("ItemSound.quest_finish")
                        return "420_mymyu_15.htm"
                    elif progress == 20 :
                        # Deluxe path: offer the fairy-dust bonus dialogue.
                        return "420_mymyu_11.htm"
            elif event == "give_dust" :
                # Trade the fairy dust for a 50/50 bonus on top of the pet.
                if st.getQuestItemsCount(FRY_DUST):
                    st.takeItems(FRY_DUST,1)
                    luck = st.getRandom(2)
                    if luck == 0 :
                        extra = ARMOR
                        qty = 1
                        htmltext = "420_mymyu_13.htm"
                    else :
                        extra = FOOD
                        qty = 100
                        htmltext = "420_mymyu_14.htm"
                    st.giveItems(3500+st.getRandom(3),1)
                    st.giveItems(extra,qty)
                    st.exitQuest(True)
                    st.playSound("ItemSound.quest_finish")
                    return htmltext
            elif event == "no_dust" :
                # Keep the dust; just take the pet and finish.
                st.giveItems(3500+st.getRandom(3),1)
                st.exitQuest(True)
                st.playSound("ItemSound.quest_finish")
                return "420_mymyu_12.htm"

    def onTalk (self,npc,player):
        """Return the HTML page for a talk interaction, keyed on NPC id
        and the player's current state/progress."""
        htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
        st = player.getQuestState(qn)
        if not st : return htmltext
        npcId = npc.getNpcId()
        id = st.getState()
        if id == COMPLETED:
            # Quest is repeatable: fall back to CREATED on the next talk.
            st.setState(CREATED)
            id = CREATED
        progress = st.getInt("progress")
        if npcId == PM_COOPER :
            if id == CREATED :
                return check_level(st)
            elif id == STARTING and progress == 0 :
                return "Starting.htm"
            else :
                return "Started.htm"
        elif npcId == SG_CRONOS :
            if id == STARTING :
                if progress == 0 :
                    return "420_cronos_1.htm"
                elif progress in [ 1,2,8,9 ] :
                    return check_stone(st,progress)
                elif progress in [ 3,4,10,11 ] :
                    return "420_cronos_9.htm"
                elif progress in [5,6,12,13 ]:
                    return "420_cronos_11.htm"
                elif progress == 7 :
                    return "420_cronos_10.htm"
        elif npcId == MC_MARIA :
            if id == STARTING :
                if ((progress in [ 1,8 ] ) and st.getQuestItemsCount(FSN_LIST)==1) or ((progress in [ 2,9 ] ) and st.getQuestItemsCount(FSN_LIST_DLX)==1):
                    return check_elements(st,progress)
                elif progress in [ 3,4,5,6,7,10,11 ] :
                    return "420_maria_6.htm"
        elif npcId == GD_BYRON :
            if id == STARTING :
                if ((progress in [ 1,8 ] ) and st.getQuestItemsCount(FSN_LIST)==1) or ((progress in [ 2,9 ] ) and st.getQuestItemsCount(FSN_LIST_DLX)==1):
                    return "420_byron_10.htm"
                elif progress == 7 :
                    return "420_byron_9.htm"
                elif (progress == 3 and st.getQuestItemsCount(FRY_STN)==1) or (progress == 4 and st.getQuestItemsCount(FRY_STN_DLX)==1):
                    return "420_byron_1.htm"
                elif progress == 10 and st.getQuestItemsCount(FRY_STN)==1 :
                    # Repeat pass: skip straight to the "go see Mymyu" step.
                    st.set("progress","12")
                    return "420_byron_5.htm"
                elif progress == 11 and st.getQuestItemsCount(FRY_STN_DLX)==1 :
                    st.set("progress","13")
                    return "420_byron_6.htm"
                elif progress in [5,12] :
                    return "420_byron_7.htm"
                elif progress in [6,13] :
                    return "420_byron_8.htm"
        elif npcId == FR_MYMYU :
            if id == STARTING :
                if ( progress in [5,12] ) and st.getQuestItemsCount(FRY_STN) == 1 :
                    return "420_mymyu_1.htm"
                elif ( progress in [6,13] ) and st.getQuestItemsCount(FRY_STN_DLX) == 1 :
                    return "420_mymyu_3.htm"
            elif id == STARTED :
                if progress < 14 and st.getQuestItemsCount(JUICE) == 1 :
                    return "420_mymyu_7.htm"
                elif progress > 13 :
                    return check_eggs(st,"mymyu",progress)
        elif npcId == DK_EXARION :
            if id == STARTED :
                if progress in [ 5,6,12,13 ] and st.getQuestItemsCount(JUICE) == 1:
                    # First visit to any dragon consumes the juice pass.
                    st.takeItems(JUICE,1)
                    return "420_exarion_1.htm"
                elif progress > 13 and st.getQuestItemsCount(SCALE_1) == 1:
                    return check_eggs(st,"exarion",progress)
                elif progress in [ 19,20 ] and st.getQuestItemsCount(EX_EGG) == 1 :
                    return "420_exarion_5.htm"
        elif npcId == DK_ZWOV :
            if id == STARTED :
                if progress in [ 5,6,12,13 ] and st.getQuestItemsCount(JUICE) == 1:
                    st.takeItems(JUICE,1)
                    return "420_zwov_1.htm"
                elif progress > 13 and st.getQuestItemsCount(SCALE_2) == 1:
                    return check_eggs(st,"zwov",progress)
                elif progress in [ 19,20 ] and st.getQuestItemsCount(ZW_EGG) == 1 :
                    return "420_zwov_5.htm"
        elif npcId == DK_KALIBRAN :
            if id == STARTED :
                if progress in [ 5,6,12,13 ] and st.getQuestItemsCount(JUICE) == 1:
                    st.takeItems(JUICE,1)
                    return "420_kalibran_1.htm"
                elif progress > 13 and st.getQuestItemsCount(SCALE_3) == 1:
                    return check_eggs(st,"kalibran",progress)
                elif progress in [ 19,20 ] and st.getQuestItemsCount(KA_EGG) == 1 :
                    return "420_kalibran_6.htm"
        elif npcId == WM_SUZET :
            if id == STARTED :
                if progress in [ 5,6,12,13 ] and st.getQuestItemsCount(JUICE) == 1:
                    st.takeItems(JUICE,1)
                    return "420_suzet_1.htm"
                elif progress > 13 and st.getQuestItemsCount(SCALE_4) == 1:
                    return check_eggs(st,"suzet",progress)
                elif progress in [ 19,20 ] and st.getQuestItemsCount(SU_EGG) == 1 :
                    return "420_suzet_6.htm"
        elif npcId == WM_SHAMHAI :
            if id == STARTED :
                if progress in [ 5,6,12,13 ] and st.getQuestItemsCount(JUICE) == 1:
                    st.takeItems(JUICE,1)
                    return "420_shamhai_1.htm"
                elif progress > 13 and st.getQuestItemsCount(SCALE_5) == 1:
                    return check_eggs(st,"shamhai",progress)
                elif progress in [ 19,20 ] and st.getQuestItemsCount(SH_EGG) == 1 :
                    return "420_shamhai_5.htm"
        return "<html><body>I have nothing to say to you</body></html>"

    def onKill(self,npc,player,isPet):
        """Kill handler: toad back-skin drops, egg drops, and deluxe
        fairy-stone destruction, depending on the quest phase."""
        st = player.getQuestState(qn)
        if not st : return
        id = st.getState()
        npcId = npc.getNpcId()
        #incipios drop
        skins = st.getQuestItemsCount(TD_BCK_SKN)
        # NOTE(review): this condition parses as
        #   (id == STARTING and regular-list check) or (deluxe-list check)
        # so the deluxe clause is NOT gated on id == STARTING; the intent
        # was most likely id == STARTING and (regular or deluxe). Confirm.
        if id == STARTING and (st.getQuestItemsCount(FSN_LIST) == 1 and skins < 10) or (st.getQuestItemsCount(FSN_LIST_DLX) == 1 and skins < 20) :
            if npcId == TD_LORD :
                # Required skin count: 10 for the regular list, 20 for deluxe.
                count = 0
                if st.getQuestItemsCount(FSN_LIST) == 1 :
                    count = 10
                else :
                    count = 20
                # Scale the drop chance by the server quest rate: the
                # integer quotient is guaranteed drops, the remainder a
                # percentage chance for one more.
                numItems, chance = divmod(BACK_DROP*Config.RATE_DROP_QUEST,100)
                if st.getRandom(100) <= chance :
                    numItems += 1
                numItems = int(numItems)
                if numItems != 0 :
                    if count <= (skins + numItems) :
                        # Cap at the requirement and play the milestone sound.
                        numItems = count - skins
                        st.playSound("ItemSound.quest_middle")
                    else :
                        st.playSound("ItemSound.quest_itemget")
                    st.giveItems(TD_BCK_SKN,numItems)
        #dragon detection
        elif id == STARTED and (st.get("progress") in [ "14","15","21","22" ]) :
            # Map the committed dragon to its egg item, scale and mob.
            whom = int(st.get("dragon"))
            if whom == 1 :
                eggs = EX_EGG
                scale = SCALE_1
                eggdropper = LO_LZRD_W
            elif whom == 2 :
                eggs = ZW_EGG
                scale = SCALE_2
                eggdropper = MS_SPIDER
            elif whom == 3 :
                eggs = KA_EGG
                scale = SCALE_3
                eggdropper = RD_SCVNGR
            elif whom == 4 :
                eggs = SU_EGG
                scale = SCALE_4
                eggdropper = BO_OVERLD
            elif whom == 5 :
                eggs = SH_EGG
                scale = SCALE_5
                eggdropper = DD_SEEKER
            prevItems = st.getQuestItemsCount(eggs)
            if st.getQuestItemsCount(scale) == 1 and prevItems < REQUIRED_EGGS :
                if npcId == eggdropper :
                    # Same rate-scaled drop scheme as the back skins above.
                    chance = EGG_DROP*Config.RATE_DROP_QUEST
                    numItems, chance = divmod(chance,100)
                    if st.getRandom(100) <= chance :
                        numItems += 1
                    numItems = int(numItems)
                    if numItems != 0 :
                        if REQUIRED_EGGS <= (prevItems + numItems) :
                            numItems = REQUIRED_EGGS - prevItems
                            st.playSound("ItemSound.quest_middle")
                        else:
                            st.playSound("ItemSound.quest_itemget")
                        st.giveItems(eggs,numItems)
        #fairy stone destruction
        elif id == STARTING and st.getQuestItemsCount(FRY_STN_DLX) == 1 :
            # NOTE(review): the module-level addKillId loop registers
            # range(20589,20600)+[21797], not 20719, so kills of 20719
            # never reach this handler -- see the note at that loop.
            if npcId in range(20589,20600)+[20719]:
                st.takeItems(FRY_STN_DLX,1)
                st.set("progress","7")
                return "you lost fairy stone deluxe!"
# Quest class and state definition
QUEST = Quest(420, qn, "Little Wings")
CREATED = State('Start', QUEST)
STARTING = State('Starting', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)

# Quest initialization
QUEST.setInitialState(CREATED)

# Quest NPC starter initialization
QUEST.addStartNpc(PM_COOPER)

# Quest Item Drop initialization
# Register every quest item id (3499 plus 3816..3831) for the STARTING state.
for i in [3499]+range(3816,3832):
    STARTING.addQuestDrop(PM_COOPER,i,1)

# Quest mob initialization
#back skins
QUEST.addKillId(TD_LORD)
#fairy stone dlx destroyers
# NOTE(review): onKill checks npcId in range(20589,20600)+[20719], but this
# loop registers range(20589,20600)+[21797]: 20719 is never registered (its
# kills cannot reach onKill) while 21797 is registered but unhandled.
# Confirm which mob id is intended.
for i in range(20589,20600)+[21797]:
    QUEST.addKillId(i)
#eggs
QUEST.addKillId(LO_LZRD_W)
QUEST.addKillId(RD_SCVNGR)
QUEST.addKillId(MS_SPIDER)
QUEST.addKillId(DD_SEEKER)
QUEST.addKillId(BO_OVERLD)

# Quest NPC initialization
QUEST.addTalkId(PM_COOPER)
QUEST.addTalkId(SG_CRONOS)
QUEST.addTalkId(GD_BYRON)
QUEST.addTalkId(MC_MARIA)
QUEST.addTalkId(FR_MYMYU)
# Dragon/watcher NPC ids 30748-30752 (DK_EXARION .. WM_SHAMHAI).
for i in range(30748,30753):
    QUEST.addTalkId(i)
# Noughts and crosses (tic-tac-toe) board demo.
# The board is a flat list of 9 cells, row-major:
#   0 1 2
#   3 4 5
#   6 7 8
# A cell holds 'X', 'O' or ' ' (blank).

# All eight winning lines: three rows, three columns, two diagonals.
WIN_LINES = [
    (0, 1, 2), (3, 4, 5), (6, 7, 8),  # rows
    (0, 3, 6), (1, 4, 7), (2, 5, 8),  # columns
    (0, 4, 8), (2, 4, 6),             # diagonals
]


def draw_board(cells):
    """Print the 3x3 board using the original ASCII layout."""
    for row in range(3):
        print(' | | ')
        print(' {} | {} | {} '.format(cells[3 * row], cells[3 * row + 1], cells[3 * row + 2]))
        print(' | | ')
        if row < 2:
            print('-----------')


def winning_lines(cells):
    """Return the winning lines: index triples whose cells all hold the same non-blank mark."""
    return [line for line in WIN_LINES
            if cells[line[0]] != ' ' and cells[line[0]] == cells[line[1]] == cells[line[2]]]


board = ['X', 'X', 'X',
         'X', 'X', ' ',
         'O', ' ', ' ']

draw_board(board)
# The original only checked the top row ("#do the other winning options");
# every row, column and diagonal is now tested.
for _ in winning_lines(board):
    print('WIN')
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
    """Tests for the custom user model manager (create_user / create_superuser)."""
    def test_create_user_with_email_successful(self):
        """Test creating a new user with an email is successful."""
        payload = {'email': 'pudgeinvonyx@gmail.com', 'password': '1111qqqq='}
        user = get_user_model().objects.create_user(
            email=payload['email'],
            password=payload['password']
        )
        # Email is stored verbatim; the password is hashed, so compare via check_password.
        self.assertEqual(user.email, payload['email'])
        self.assertTrue(user.check_password(payload['password']))
    def test_create_user_with_lowercase_email(self):
        """Test that a mixed-case email is normalized to lower case."""
        payload = {'email': 'pudgeinvonyx@GMAIL.com', 'password': '1111qqqq='}
        user = get_user_model().objects.create_user(
            email=payload['email'],
            password=payload['password']
        )
        self.assertEqual(user.email, payload['email'].lower())
    def test_create_user_with_invalid_email(self):
        """Test creating a new user with no email raises ValueError."""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, "1234325")
    def test_create_superuser_is_successful(self):
        """Test creating a new superuser sets the superuser and staff flags."""
        user = get_user_model().objects.create_superuser("pudge@com.com", '1234')
        self.assertTrue(user.is_superuser)
        self.assertTrue(user.is_staff)
| app/core/tests/test_models.py | 1,479 | Test creating a new user with an email is successful
Test that create a new superuser
Test creating a new user with an invalid email address
Test creating a new user with an lowercase email words | 198 | en | 0.884014 |
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc.textTools import safeEval, readHex
from fontTools.misc.encodingTools import getEncoding
from fontTools.ttLib import getSearchRange
from fontTools.unicode import Unicode
from . import DefaultTable
import sys
import struct
import array
import operator
class table__c_m_a_p(DefaultTable.DefaultTable):
	"""The 'cmap' table: character code to glyph mappings.

	Holds a list of CmapSubtable objects in self.tables.  Subtables are
	decompiled lazily (header only at first), and subtables that share the
	same binary offset share a single mapping dict.
	"""
	def getcmap(self, platformID, platEncID):
		"""Return the first subtable matching (platformID, platEncID), or None."""
		for subtable in self.tables:
			if (subtable.platformID == platformID and
				subtable.platEncID == platEncID):
				return subtable
		return None # not found
	def decompile(self, data, ttFont):
		"""Parse the binary 'cmap' header and create header-only subtables."""
		tableVersion, numSubTables = struct.unpack(">HH", data[:4])
		self.tableVersion = int(tableVersion)
		self.tables = tables = []
		seenOffsets = {}
		for i in range(numSubTables):
			platformID, platEncID, offset = struct.unpack(
				">HHl", data[4+i*8:4+(i+1)*8])
			platformID, platEncID = int(platformID), int(platEncID)
			format, length = struct.unpack(">HH", data[offset:offset+4])
			# Formats 8/10/12/13 use a 32-bit length (after a reserved pad);
			# format 14 has a 32-bit length immediately after the format word.
			if format in [8,10,12,13]:
				format, reserved, length = struct.unpack(">HHL", data[offset:offset+8])
			elif format in [14]:
				format, length = struct.unpack(">HL", data[offset:offset+6])
			if not length:
				print("Error: cmap subtable is reported as having zero length: platformID %s, platEncID %s, format %s offset %s. Skipping table." % (platformID, platEncID,format, offset))
				continue
			table = CmapSubtable.newSubtable(format)
			table.platformID = platformID
			table.platEncID = platEncID
			# Note that by default we decompile only the subtable header info;
			# any other data gets decompiled only when an attribute of the
			# subtable is referenced.
			table.decompileHeader(data[offset:offset+int(length)], ttFont)
			# Subtables pointed at by the same offset share one cmap dict.
			if offset in seenOffsets:
				table.cmap = tables[seenOffsets[offset]].cmap
			else:
				seenOffsets[offset] = i
			tables.append(table)
	def compile(self, ttFont):
		"""Serialize all subtables, de-duplicating identical mappings/chunks."""
		self.tables.sort() # sort according to the spec; see CmapSubtable.__lt__()
		numSubTables = len(self.tables)
		totalOffset = 4 + 8 * numSubTables
		data = struct.pack(">HH", self.tableVersion, numSubTables)
		tableData = b""
		seen = {} # Some tables are the same object reference. Don't compile them twice.
		done = {} # Some tables are different objects, but compile to the same data chunk
		for table in self.tables:
			try:
				offset = seen[id(table.cmap)]
			except KeyError:
				chunk = table.compile(ttFont)
				if chunk in done:
					offset = done[chunk]
				else:
					offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len(tableData)
					tableData = tableData + chunk
			data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset)
		return data + tableData
	def toXML(self, writer, ttFont):
		"""Write the table version and each subtable as XML."""
		writer.simpletag("tableVersion", version=self.tableVersion)
		writer.newline()
		for table in self.tables:
			table.toXML(writer, ttFont)
	def fromXML(self, name, attrs, content, ttFont):
		"""Rebuild the table from XML; elements named cmap_format_N become subtables."""
		if name == "tableVersion":
			self.tableVersion = safeEval(attrs["version"])
			return
		if name[:12] != "cmap_format_":
			return
		if not hasattr(self, "tables"):
			self.tables = []
		format = safeEval(name[12:])
		table = CmapSubtable.newSubtable(format)
		table.platformID = safeEval(attrs["platformID"])
		table.platEncID = safeEval(attrs["platEncID"])
		table.fromXML(name, attrs, content, ttFont)
		self.tables.append(table)
class CmapSubtable(object):
	"""Base class for all cmap subtable formats.

	Implements lazy decompilation: decompileHeader() stashes the raw
	post-header bytes in self.data, and the first access to a missing
	attribute (e.g. .cmap) triggers a full decompile via __getattr__.
	"""
	@staticmethod
	def getSubtableClass(format):
		"""Return the subtable class for a format."""
		return cmap_classes.get(format, cmap_format_unknown)
	@staticmethod
	def newSubtable(format):
		"""Return a new instance of a subtable for format."""
		subtableClass = CmapSubtable.getSubtableClass(format)
		return subtableClass(format)
	def __init__(self, format):
		self.format = format
		self.data = None    # raw bytes pending lazy decompilation, or None
		self.ttFont = None
	def __getattr__(self, attr):
		# allow lazy decompilation of subtables.
		if attr[:2] == '__': # don't handle requests for member functions like '__lt__'
			raise AttributeError(attr)
		if self.data is None:
			raise AttributeError(attr)
		self.decompile(None, None) # use saved data.
		self.data = None # Once this table has been decompiled, make sure we don't
						# just return the original data. Also avoids recursion when
						# called with an attribute that the cmap subtable doesn't have.
		return getattr(self, attr)
	def decompileHeader(self, data, ttFont):
		"""Parse the shared 6-byte subtable header; keep the rest for lazy decompile."""
		format, length, language = struct.unpack(">HHH", data[:6])
		assert len(data) == length, "corrupt cmap table format %d (data length: %d, header length: %d)" % (format, len(data), length)
		self.format = int(format)
		self.length = int(length)
		self.language = int(language)
		self.data = data[6:]
		self.ttFont = ttFont
	def toXML(self, writer, ttFont):
		"""Write this subtable as XML, one <map> element per code point."""
		writer.begintag(self.__class__.__name__, [
				("platformID", self.platformID),
				("platEncID", self.platEncID),
				("language", self.language),
				])
		writer.newline()
		codes = sorted(self.cmap.items())
		self._writeCodes(codes, writer)
		writer.endtag(self.__class__.__name__)
		writer.newline()
	def getEncoding(self, default=None):
		"""Returns the Python encoding name for this cmap subtable based on its platformID,
		platEncID, and language.  If encoding for these values is not known, by default
		None is returned.  That can be overriden by passing a value to the default
		argument.

		Note that if you want to choose a "preferred" cmap subtable, most of the time
		self.isUnicode() is what you want as that one only returns true for the modern,
		commonly used, Unicode-compatible triplets, not the legacy ones.
		"""
		return getEncoding(self.platformID, self.platEncID, self.language, default)
	def isUnicode(self):
		"""True for the Unicode-compatible (platformID, platEncID) triplets."""
		return (self.platformID == 0 or
			(self.platformID == 3 and self.platEncID in [0, 1, 10]))
	def isSymbol(self):
		"""True for the Windows Symbol (3, 0) encoding."""
		return self.platformID == 3 and self.platEncID == 0
	def _writeCodes(self, codes, writer):
		"""Emit <map> tags for (code, name) pairs; annotate with Unicode names when applicable."""
		isUnicode = self.isUnicode()
		for code, name in codes:
			writer.simpletag("map", code=hex(code), name=name)
			if isUnicode:
				writer.comment(Unicode[code])
			writer.newline()
	def __lt__(self, other):
		if not isinstance(other, CmapSubtable):
			return NotImplemented
		# implemented so that list.sort() sorts according to the spec.
		selfTuple = (
			getattr(self, "platformID", None),
			getattr(self, "platEncID", None),
			getattr(self, "language", None),
			self.__dict__)
		otherTuple = (
			getattr(other, "platformID", None),
			getattr(other, "platEncID", None),
			getattr(other, "language", None),
			other.__dict__)
		return selfTuple < otherTuple
class cmap_format_0(CmapSubtable):
	"""Format 0: byte encoding table — char codes 0..255 map directly to glyph IDs."""
	def decompile(self, data, ttFont):
		# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
		# If not, someone is calling the subtable decompile() directly, and must provide both args.
		if data is not None and ttFont is not None:
			self.decompileHeader(data, ttFont)
		else:
			assert (data is None and ttFont is None), "Need both data and ttFont arguments"
		data = self.data # decompileHeader assigns the data after the header to self.data
		assert 262 == self.length, "Format 0 cmap subtable not 262 bytes"
		glyphIdArray = array.array("B")
		# NOTE(review): array.fromstring/tostring were removed in Python 3.9;
		# newer fontTools uses frombytes/tobytes — confirm target Python version.
		glyphIdArray.fromstring(self.data)
		self.cmap = cmap = {}
		lenArray = len(glyphIdArray)
		charCodes = list(range(lenArray))
		names = map(self.ttFont.getGlyphName, glyphIdArray)
		list(map(operator.setitem, [cmap]*lenArray, charCodes, names))
	def compile(self, ttFont):
		"""Serialize to 262 bytes: 6-byte header + one glyph ID byte per code 0..255."""
		if self.data:
			return struct.pack(">HHH", 0, 262, self.language) + self.data
		charCodeList = sorted(self.cmap.items())
		charCodes = [entry[0] for entry in charCodeList]
		valueList = [entry[1] for entry in charCodeList]
		assert charCodes == list(range(256))
		valueList = map(ttFont.getGlyphID, valueList)
		glyphIdArray = array.array("B", valueList)
		data = struct.pack(">HHH", 0, 262, self.language) + glyphIdArray.tostring()
		assert len(data) == 262
		return data
	def fromXML(self, name, attrs, content, ttFont):
		"""Rebuild self.cmap from <map code=... name=...> child elements."""
		self.language = safeEval(attrs["language"])
		if not hasattr(self, "cmap"):
			self.cmap = {}
		cmap = self.cmap
		for element in content:
			if not isinstance(element, tuple):
				continue
			name, attrs, content = element
			if name != "map":
				continue
			cmap[safeEval(attrs["code"])] = attrs["name"]
subHeaderFormat = ">HHhH" # firstCode, entryCount, idDelta (signed!), idRangeOffset
class SubHeader(object):
	"""Mutable record for one cmap format-2 subheader.

	Field names mirror the binary layout packed via subHeaderFormat;
	glyphIndexArray collects the glyph IDs covered by this subheader.
	"""
	def __init__(self):
		# Scalar fields start unset; the format-2 (de)compiler fills them in.
		for fieldName in ('firstCode', 'entryCount', 'idDelta', 'idRangeOffset'):
			setattr(self, fieldName, None)
		self.glyphIndexArray = []
class cmap_format_2(CmapSubtable):
	"""Format 2: high-byte mapping through subheaders (mixed 8/16-bit CJK encodings)."""
	def setIDDelta(self, subHeader):
		"""Pick an idDelta that rebases subHeader.glyphIndexArray so its lowest
		non-zero entry becomes 1 (mod 0x10000), maximizing subrange sharing."""
		subHeader.idDelta = 0
		# find the minGI which is not zero.
		minGI = subHeader.glyphIndexArray[0]
		for gid in subHeader.glyphIndexArray:
			if (gid != 0) and (gid < minGI):
				minGI = gid
		# The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1.
		# idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K.
		# We would like to pick an idDelta such that the first glyphArray GID is 1,
		# so that we are more likely to be able to combine glypharray GID subranges.
		# This means that we have a problem when minGI is > 32K
		# Since the final gi is reconstructed from the glyphArray GID by:
		# (short)finalGID = (gid + idDelta) % 0x10000),
		# we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the
		# negative number to an unsigned short.
		if (minGI > 1):
			if minGI > 0x7FFF:
				subHeader.idDelta = -(0x10000 - minGI) -1
			else:
				subHeader.idDelta = minGI -1
			idDelta = subHeader.idDelta
			for i in range(subHeader.entryCount):
				gid = subHeader.glyphIndexArray[i]
				if gid > 0:
					subHeader.glyphIndexArray[i] = gid - idDelta
	def decompile(self, data, ttFont):
		"""Parse the 256 subheader keys plus subheaders into self.cmap (code -> name)."""
		# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
		# If not, someone is calling the subtable decompile() directly, and must provide both args.
		if data is not None and ttFont is not None:
			self.decompileHeader(data, ttFont)
		else:
			assert (data is None and ttFont is None), "Need both data and ttFont arguments"
		data = self.data # decompileHeader assigns the data after the header to self.data
		subHeaderKeys = []
		maxSubHeaderindex = 0
		# get the key array, and determine the number of subHeaders.
		allKeys = array.array("H")
		allKeys.fromstring(data[:512])
		data = data[512:]
		if sys.byteorder != "big":
			allKeys.byteswap()
		subHeaderKeys = [ key//8 for key in allKeys]
		maxSubHeaderindex = max(subHeaderKeys)
		#Load subHeaders
		subHeaderList = []
		pos = 0
		for i in range(maxSubHeaderindex + 1):
			subHeader = SubHeader()
			(subHeader.firstCode, subHeader.entryCount, subHeader.idDelta, \
				subHeader.idRangeOffset) = struct.unpack(subHeaderFormat, data[pos:pos + 8])
			pos += 8
			giDataPos = pos + subHeader.idRangeOffset-2
			giList = array.array("H")
			giList.fromstring(data[giDataPos:giDataPos + subHeader.entryCount*2])
			if sys.byteorder != "big":
				giList.byteswap()
			subHeader.glyphIndexArray = giList
			subHeaderList.append(subHeader)
		# How this gets processed.
		# Charcodes may be one or two bytes.
		# The first byte of a charcode is mapped through the subHeaderKeys, to select
		# a subHeader. For any subheader but 0, the next byte is then mapped through the
		# selected subheader. If subheader Index 0 is selected, then the byte itself is
		# mapped through the subheader, and there is no second byte.
		# Then assume that the subsequent byte is the first byte of the next charcode,and repeat.
		#
		# Each subheader references a range in the glyphIndexArray whose length is entryCount.
		# The range in glyphIndexArray referenced by a sunheader may overlap with the range in glyphIndexArray
		# referenced by another subheader.
		# The only subheader that will be referenced by more than one first-byte value is the subheader
		# that maps the entire range of glyphID values to glyphIndex 0, e.g notdef:
		#	 {firstChar 0, EntryCount 0,idDelta 0,idRangeOffset xx}
		# A byte being mapped though a subheader is treated as in index into a mapping of array index to font glyphIndex.
		# A subheader specifies a subrange within (0...256) by the
		# firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero
		# (e.g. glyph not in font).
		# If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar).
		# The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by
		# counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the
		# glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex.
		# Example for Logocut-Medium
		# first byte of charcode = 129; selects subheader 1.
		# subheader 1 = {firstChar 64, EntryCount 108,idDelta 42,idRangeOffset 0252}
		# second byte of charCode = 66
		# the index offset = 66-64 = 2.
		# The subrange of the glyphIndexArray starting at 0x0252 bytes from the idRangeOffset word is:
		# [glyphIndexArray index], [subrange array index] = glyphIndex
		# [256], [0]=1	from charcode [129, 64]
		# [257], [1]=2	from charcode [129, 65]
		# [258], [2]=3	from charcode [129, 66]
		# [259], [3]=4	from charcode [129, 67]
		# So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero,
		# add it to the glyphID to get the final glyphIndex
		# value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew!
		self.data = b""
		self.cmap = cmap = {}
		notdefGI = 0
		for firstByte in range(256):
			subHeadindex = subHeaderKeys[firstByte]
			subHeader = subHeaderList[subHeadindex]
			if subHeadindex == 0:
				if (firstByte < subHeader.firstCode) or (firstByte >= subHeader.firstCode + subHeader.entryCount):
					continue # gi is notdef.
				else:
					charCode = firstByte
					offsetIndex = firstByte - subHeader.firstCode
					gi = subHeader.glyphIndexArray[offsetIndex]
					if gi != 0:
						gi = (gi + subHeader.idDelta) % 0x10000
					else:
						continue # gi is notdef.
				cmap[charCode] = gi
			else:
				if subHeader.entryCount:
					charCodeOffset = firstByte * 256 + subHeader.firstCode
					for offsetIndex in range(subHeader.entryCount):
						charCode = charCodeOffset + offsetIndex
						gi = subHeader.glyphIndexArray[offsetIndex]
						if gi != 0:
							gi = (gi + subHeader.idDelta) % 0x10000
						else:
							continue
						cmap[charCode] = gi
				# If not subHeader.entryCount, then all char codes with this first byte are
				# mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the
				# same as mapping it to .notdef.
		# cmap values are GID's.
		glyphOrder = self.ttFont.getGlyphOrder()
		gids = list(cmap.values())
		charCodes = list(cmap.keys())
		lenCmap = len(gids)
		try:
			names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids ))
		except IndexError:
			getGlyphName = self.ttFont.getGlyphName
			names = list(map(getGlyphName, gids ))
		list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))
	def compile(self, ttFont):
		"""Serialize self.cmap back into subheader keys, subheaders and glyph subranges."""
		if self.data:
			return struct.pack(">HHH", self.format, self.length, self.language) + self.data
		kEmptyTwoCharCodeRange = -1
		notdefGI = 0
		items = sorted(self.cmap.items())
		charCodes = [item[0] for item in items]
		names = [item[1] for item in items]
		nameMap = ttFont.getReverseGlyphMap()
		lenCharCodes = len(charCodes)
		try:
			gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
		except KeyError:
			nameMap = ttFont.getReverseGlyphMap(rebuild=True)
			try:
				gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
			except KeyError:
				# allow virtual GIDs in format 2 tables
				gids = []
				for name in names:
					try:
						gid = nameMap[name]
					except KeyError:
						try:
							if (name[:3] == 'gid'):
								gid = eval(name[3:])
							else:
								gid = ttFont.getGlyphID(name)
						except:
							raise KeyError(name)
					gids.append(gid)
		# Process the (char code to gid) item list in char code order.
		# By definition, all one byte char codes map to subheader 0.
		# For all the two byte char codes, we assume that the first byte maps maps to the empty subhead (with an entry count of 0,
		# which defines all char codes in its range to map to notdef) unless proven otherwise.
		# Note that since the char code items are processed in char code order, all the char codes with the
		# same first byte are in sequential order.
		subHeaderKeys = [ kEmptyTwoCharCodeRange for x in range(256)] # list of indices into subHeaderList.
		subHeaderList = []
		# We force this subheader entry 0 to exist in the subHeaderList in the case where some one comes up
		# with a cmap where all the one byte char codes map to notdef,
		# with the result that the subhead 0 would not get created just by processing the item list.
		charCode = charCodes[0]
		if charCode > 255:
			subHeader = SubHeader()
			subHeader.firstCode = 0
			subHeader.entryCount = 0
			subHeader.idDelta = 0
			subHeader.idRangeOffset = 0
			subHeaderList.append(subHeader)
		lastFirstByte = -1
		items = zip(charCodes, gids)
		for charCode, gid in items:
			if gid == 0:
				continue
			firstbyte = charCode >> 8
			secondByte = charCode & 0x00FF
			if firstbyte != lastFirstByte: # Need to update the current subhead, and start a new one.
				if lastFirstByte > -1:
					# fix GI's and iDelta of current subheader.
					self.setIDDelta(subHeader)
					# If it was sunheader 0 for one-byte charCodes, then we need to set the subHeaderKeys value to zero
					# for the indices matching the char codes.
					if lastFirstByte == 0:
						for index in range(subHeader.entryCount):
							charCode = subHeader.firstCode + index
							subHeaderKeys[charCode] = 0
					assert (subHeader.entryCount == len(subHeader.glyphIndexArray)), "Error - subhead entry count does not match len of glyphID subrange."
				# init new subheader
				subHeader = SubHeader()
				subHeader.firstCode = secondByte
				subHeader.entryCount = 1
				subHeader.glyphIndexArray.append(gid)
				subHeaderList.append(subHeader)
				subHeaderKeys[firstbyte] = len(subHeaderList) -1
				lastFirstByte = firstbyte
			else:
				# need to fill in with notdefs all the code points between the last charCode and the current charCode.
				codeDiff = secondByte - (subHeader.firstCode + subHeader.entryCount)
				for i in range(codeDiff):
					subHeader.glyphIndexArray.append(notdefGI)
				subHeader.glyphIndexArray.append(gid)
				subHeader.entryCount = subHeader.entryCount + codeDiff + 1
		# fix GI's and iDelta of last subheader that we we added to the subheader array.
		self.setIDDelta(subHeader)
		# Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges.
		subHeader = SubHeader()
		subHeader.firstCode = 0
		subHeader.entryCount = 0
		subHeader.idDelta = 0
		subHeader.idRangeOffset = 2
		subHeaderList.append(subHeader)
		emptySubheadIndex = len(subHeaderList) - 1
		for index in range(256):
			if subHeaderKeys[index] == kEmptyTwoCharCodeRange:
				subHeaderKeys[index] = emptySubheadIndex
		# Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the
		# idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray,
		# since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with
		# charcode 0 and GID 0.
		idRangeOffset = (len(subHeaderList)-1)*8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset.
		subheadRangeLen = len(subHeaderList) -1 # skip last special empty-set subheader; we've already hardocodes its idRangeOffset to 2.
		for index in range(subheadRangeLen):
			subHeader = subHeaderList[index]
			subHeader.idRangeOffset = 0
			for j in range(index):
				prevSubhead = subHeaderList[j]
				if prevSubhead.glyphIndexArray == subHeader.glyphIndexArray: # use the glyphIndexArray subarray
					subHeader.idRangeOffset = prevSubhead.idRangeOffset - (index-j)*8
					subHeader.glyphIndexArray = []
					break
			if subHeader.idRangeOffset == 0: # didn't find one.
				subHeader.idRangeOffset = idRangeOffset
				idRangeOffset = (idRangeOffset - 8) + subHeader.entryCount*2 # one less subheader, one more subArray.
			else:
				idRangeOffset = idRangeOffset - 8 # one less subheader
		# Now we can write out the data!
		length = 6 + 512 + 8*len(subHeaderList) # header, 256 subHeaderKeys, and subheader array.
		for subhead in subHeaderList[:-1]:
			length = length + len(subhead.glyphIndexArray)*2 # We can't use subhead.entryCount, as some of the subhead may share subArrays.
		dataList = [struct.pack(">HHH", 2, length, self.language)]
		for index in subHeaderKeys:
			dataList.append(struct.pack(">H", index*8))
		for subhead in subHeaderList:
			dataList.append(struct.pack(subHeaderFormat, subhead.firstCode, subhead.entryCount, subhead.idDelta, subhead.idRangeOffset))
		for subhead in subHeaderList[:-1]:
			for gi in subhead.glyphIndexArray:
				dataList.append(struct.pack(">H", gi))
		data = bytesjoin(dataList)
		assert (len(data) == length), "Error: cmap format 2 is not same length as calculated! actual: " + str(len(data))+ " calc : " + str(length)
		return data
	def fromXML(self, name, attrs, content, ttFont):
		"""Rebuild self.cmap from <map code=... name=...> child elements."""
		self.language = safeEval(attrs["language"])
		if not hasattr(self, "cmap"):
			self.cmap = {}
		cmap = self.cmap
		for element in content:
			if not isinstance(element, tuple):
				continue
			name, attrs, content = element
			if name != "map":
				continue
			cmap[safeEval(attrs["code"])] = attrs["name"]
cmap_format_4_format = ">7H" # format, length, language, segCountX2, searchRange, entrySelector, rangeShift
# Fields that follow the 7-word header above:
#uint16 endCode[segCount] # Ending character code for each segment, last = 0xFFFF.
#uint16 reservedPad # This value should be zero
#uint16 startCode[segCount] # Starting character code for each segment
#uint16 idDelta[segCount] # Delta for all character codes in segment
#uint16 idRangeOffset[segCount] # Offset in bytes to glyph indexArray, or 0
#uint16 glyphIndexArray[variable] # Glyph index array
def splitRange(startCode, endCode, cmap):
	"""Heuristically split [startCode, endCode] into cmap format-4 segments.

	Returns (start, end): the extra segment start codes and ALL segment end
	codes, with len(start) + 1 == len(end).  Runs of consecutive glyph IDs
	get their own segment only when they amortize the 8-byte segment cost;
	not provably optimal, but in practice never larger and often smaller.
	"""
	# A single code can never be split.
	if startCode == endCode:
		return [], [endCode]

	prevGID = cmap[startCode]
	prevCode = startCode
	consecutive = None
	runStart = None
	runs = []
	# Collect maximal runs in which the glyph IDs increase by exactly one.
	for code in range(startCode + 1, endCode + 1):
		gid = cmap[code]
		if gid - 1 == prevGID:
			if not consecutive:
				consecutive = 1
				runStart = prevCode
		elif consecutive:
			consecutive = 0
			runs.append((runStart, prevCode))
			runStart = None
		prevGID = gid
		prevCode = code
	if consecutive:
		runs.append((runStart, prevCode))
	assert prevCode == endCode

	# Keep only runs that pay for the segment(s) they introduce: a new
	# segment costs 8 bytes, staying merged costs 2 bytes per character.
	kept = []
	for lo, hi in runs:
		if lo == startCode and hi == endCode:
			break # a single run spanning the whole range: nothing to split
		if lo == startCode or hi == endCode:
			threshold = 4 # split adds one segment
		else:
			threshold = 8 # split adds two segments
		if hi - lo + 1 > threshold:
			kept.append((lo, hi))
	runs = kept
	if not runs:
		return [], [endCode]

	# Pad out to the full range, then plug every gap between runs — gaps are
	# the stretches whose glyph IDs are NOT consecutive.
	if runs[0][0] != startCode:
		runs.insert(0, (startCode, runs[0][0] - 1))
	if runs[-1][1] != endCode:
		runs.append((runs[-1][1] + 1, endCode))
	i = 1
	while i < len(runs):
		if runs[i - 1][1] + 1 != runs[i][0]:
			runs.insert(i, (runs[i - 1][1] + 1, runs[i][0] - 1))
			i += 1
		i += 1

	# Flatten into the start/end lists format 4 wants (first start implicit).
	start = [lo for lo, hi in runs[1:]]
	end = [hi for lo, hi in runs]
	assert len(start) + 1 == len(end)
	return start, end
class cmap_format_4(CmapSubtable):
	"""Format 4: segment mapping to delta values — the standard BMP Unicode subtable."""
	def decompile(self, data, ttFont):
		"""Parse the segment arrays into self.cmap (code -> glyph name)."""
		# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
		# If not, someone is calling the subtable decompile() directly, and must provide both args.
		if data is not None and ttFont is not None:
			self.decompileHeader(data, ttFont)
		else:
			assert (data is None and ttFont is None), "Need both data and ttFont arguments"
		data = self.data # decompileHeader assigns the data after the header to self.data
		(segCountX2, searchRange, entrySelector, rangeShift) = \
					struct.unpack(">4H", data[:8])
		data = data[8:]
		segCount = segCountX2 // 2
		allCodes = array.array("H")
		allCodes.fromstring(data)
		self.data = data = None
		if sys.byteorder != "big":
			allCodes.byteswap()
		# divide the data
		endCode = allCodes[:segCount]
		allCodes = allCodes[segCount+1:] # the +1 is skipping the reservedPad field
		startCode = allCodes[:segCount]
		allCodes = allCodes[segCount:]
		idDelta = allCodes[:segCount]
		allCodes = allCodes[segCount:]
		idRangeOffset = allCodes[:segCount]
		glyphIndexArray = allCodes[segCount:]
		lenGIArray = len(glyphIndexArray)
		# build 2-byte character mapping
		charCodes = []
		gids = []
		for i in range(len(startCode) - 1): # don't do 0xffff!
			start = startCode[i]
			delta = idDelta[i]
			rangeOffset = idRangeOffset[i]
			# *someone* needs to get killed.
			# (idRangeOffset is specified in bytes relative to its own word in
			# the file; 'partial' converts that into an index into our
			# already-sliced glyphIndexArray.)
			partial = rangeOffset // 2 - start + i - len(idRangeOffset)
			rangeCharCodes = list(range(startCode[i], endCode[i] + 1))
			charCodes.extend(rangeCharCodes)
			if rangeOffset == 0:
				gids.extend([(charCode + delta) & 0xFFFF for charCode in rangeCharCodes])
			else:
				for charCode in rangeCharCodes:
					index = charCode + partial
					assert (index < lenGIArray), "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array is not less than the length of the array (%d) !" % (i, index, lenGIArray)
					if glyphIndexArray[index] != 0: # if not missing glyph
						glyphID = glyphIndexArray[index] + delta
					else:
						glyphID = 0 # missing glyph
					gids.append(glyphID & 0xFFFF)
		self.cmap = cmap = {}
		lenCmap = len(gids)
		glyphOrder = self.ttFont.getGlyphOrder()
		try:
			names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids ))
		except IndexError:
			getGlyphName = self.ttFont.getGlyphName
			names = list(map(getGlyphName, gids ))
		list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))
	def compile(self, ttFont):
		"""Serialize self.cmap into segments (see splitRange for the segmentation)."""
		if self.data:
			return struct.pack(">HHH", self.format, self.length, self.language) + self.data
		charCodes = list(self.cmap.keys())
		lenCharCodes = len(charCodes)
		if lenCharCodes == 0:
			startCode = [0xffff]
			endCode = [0xffff]
		else:
			charCodes.sort()
			names = list(map(operator.getitem, [self.cmap]*lenCharCodes, charCodes))
			nameMap = ttFont.getReverseGlyphMap()
			try:
				gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
			except KeyError:
				nameMap = ttFont.getReverseGlyphMap(rebuild=True)
				try:
					gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
				except KeyError:
					# allow virtual GIDs in format 4 tables
					gids = []
					for name in names:
						try:
							gid = nameMap[name]
						except KeyError:
							try:
								if (name[:3] == 'gid'):
									gid = eval(name[3:])
								else:
									gid = ttFont.getGlyphID(name)
							except:
								raise KeyError(name)
						gids.append(gid)
			cmap = {} # code:glyphID mapping
			list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids))
			# Build startCode and endCode lists.
			# Split the char codes in ranges of consecutive char codes, then split
			# each range in more ranges of consecutive/not consecutive glyph IDs.
			# See splitRange().
			lastCode = charCodes[0]
			endCode = []
			startCode = [lastCode]
			for charCode in charCodes[1:]: # skip the first code, it's the first start code
				if charCode == lastCode + 1:
					lastCode = charCode
					continue
				start, end = splitRange(startCode[-1], lastCode, cmap)
				startCode.extend(start)
				endCode.extend(end)
				startCode.append(charCode)
				lastCode = charCode
			start, end = splitRange(startCode[-1], lastCode, cmap)
			startCode.extend(start)
			endCode.extend(end)
			startCode.append(0xffff)
			endCode.append(0xffff)
		# build up rest of cruft
		idDelta = []
		idRangeOffset = []
		glyphIndexArray = []
		for i in range(len(endCode)-1): # skip the closing codes (0xffff)
			indices = []
			for charCode in range(startCode[i], endCode[i] + 1):
				indices.append(cmap[charCode])
			if (indices == list(range(indices[0], indices[0] + len(indices)))):
				# glyph IDs are consecutive: encode the whole segment as a delta.
				idDelta.append((indices[0] - startCode[i]) % 0x10000)
				idRangeOffset.append(0)
			else:
				# someone *definitely* needs to get killed.
				idDelta.append(0)
				idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i))
				glyphIndexArray.extend(indices)
		idDelta.append(1) # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef
		idRangeOffset.append(0)
		# Insane.
		segCount = len(endCode)
		segCountX2 = segCount * 2
		searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2)
		charCodeArray = array.array("H", endCode + [0] + startCode)
		idDeltaArray = array.array("H", idDelta)
		restArray = array.array("H", idRangeOffset + glyphIndexArray)
		if sys.byteorder != "big":
			charCodeArray.byteswap()
			idDeltaArray.byteswap()
			restArray.byteswap()
		data = charCodeArray.tostring() + idDeltaArray.tostring() + restArray.tostring()
		length = struct.calcsize(cmap_format_4_format) + len(data)
		header = struct.pack(cmap_format_4_format, self.format, length, self.language,
				segCountX2, searchRange, entrySelector, rangeShift)
		return header + data
	def fromXML(self, name, attrs, content, ttFont):
		"""Rebuild self.cmap from <map code=... name=...> child elements."""
		self.language = safeEval(attrs["language"])
		if not hasattr(self, "cmap"):
			self.cmap = {}
		cmap = self.cmap
		for element in content:
			if not isinstance(element, tuple):
				continue
			nameMap, attrsMap, dummyContent = element
			if nameMap != "map":
				assert 0, "Unrecognized keyword in cmap subtable"
			cmap[safeEval(attrsMap["code"])] = attrsMap["name"]
class cmap_format_6(CmapSubtable):
    """Format 6: 'trimmed table mapping'.

    A dense run of 16-bit character codes starting at ``firstCode`` is
    mapped to consecutive entries of a glyph-index array.
    """

    def decompile(self, data, ttFont):
        # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
        # If not, someone is calling the subtable decompile() directly, and must provide both args.
        if data is not None and ttFont is not None:
            self.decompileHeader(data, ttFont)
        else:
            assert (data is None and ttFont is None), "Need both data and ttFont arguments"
        data = self.data  # decompileHeader assigns the data after the header to self.data
        firstCode, entryCount = struct.unpack(">HH", data[:4])
        firstCode = int(firstCode)
        data = data[4:]
        # assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!!
        glyphIndexArray = array.array("H")
        # BUGFIX: array.array.fromstring() was removed in Python 3.9;
        # frombytes() is the supported spelling (available since 3.2).
        glyphIndexArray.frombytes(data[:2 * int(entryCount)])
        if sys.byteorder != "big":
            glyphIndexArray.byteswap()
        self.data = data = None
        self.cmap = cmap = {}
        lenArray = len(glyphIndexArray)
        charCodes = list(range(firstCode, firstCode + lenArray))
        glyphOrder = self.ttFont.getGlyphOrder()
        try:
            # Fast path: map GIDs straight through the glyph order list.
            names = list(map(operator.getitem, [glyphOrder]*lenArray, glyphIndexArray))
        except IndexError:
            # A GID points past the glyph order (e.g. virtual glyphs);
            # fall back to per-glyph name lookup.
            getGlyphName = self.ttFont.getGlyphName
            names = list(map(getGlyphName, glyphIndexArray))
        list(map(operator.setitem, [cmap]*lenArray, charCodes, names))

    def compile(self, ttFont):
        """Serialize to binary; returns header + glyph-index data."""
        if self.data:
            # Never decompiled: pass the original bytes straight through.
            return struct.pack(">HHH", self.format, self.length, self.language) + self.data
        cmap = self.cmap
        # BUGFIX: the codes must be sorted before taking codes[0]/codes[-1]
        # to build the contiguous range; plain dict key order is insertion
        # order and is not guaranteed to be ascending.
        codes = sorted(cmap.keys())
        if codes:  # yes, there are empty cmap tables.
            codes = list(range(codes[0], codes[-1] + 1))
            firstCode = codes[0]
            # Holes in the range map to .notdef.
            valueList = [cmap.get(code, ".notdef") for code in codes]
            valueList = map(ttFont.getGlyphID, valueList)
            glyphIndexArray = array.array("H", valueList)
            if sys.byteorder != "big":
                glyphIndexArray.byteswap()
            # BUGFIX: tostring() was removed in Python 3.9; use tobytes().
            data = glyphIndexArray.tobytes()
        else:
            data = b""
            firstCode = 0
        header = struct.pack(">HHHHH",
                6, len(data) + 10, self.language, firstCode, len(codes))
        return header + data

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the character map from TTX <map> elements."""
        self.language = safeEval(attrs["language"])
        if not hasattr(self, "cmap"):
            self.cmap = {}
        cmap = self.cmap
        for element in content:
            if not isinstance(element, tuple):
                continue
            elemName, elemAttrs, elemContent = element
            if elemName != "map":
                continue
            cmap[safeEval(elemAttrs["code"])] = elemAttrs["name"]
class cmap_format_12_or_13(CmapSubtable):
    """Shared implementation of cmap formats 12 and 13.

    Both formats store 32-bit character codes as (startCharCode,
    endCharCode, glyphID) groups. Subclasses supply:
      _format_step  -- GID increment within a group (1 for fmt 12, 0 for 13)
      _computeGIDs  -- expand a group into its glyph IDs
      _IsInSameRun  -- whether a (charCode, glyphID) pair extends a group
    """

    def __init__(self, format):
        self.format = format
        self.reserved = 0
        self.data = None
        self.ttFont = None

    def decompileHeader(self, data, ttFont):
        # Header: format, reserved (uint16); length, language, nGroups (uint32).
        format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16])
        assert len(data) == (16 + nGroups*12) == (length), "corrupt cmap table format %d (data length: %d, header length: %d)" % (self.format, len(data), length)
        self.format = format
        self.reserved = reserved
        self.length = length
        self.language = language
        self.nGroups = nGroups
        self.data = data[16:]
        self.ttFont = ttFont

    def decompile(self, data, ttFont):
        # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
        # If not, someone is calling the subtable decompile() directly, and must provide both args.
        if data is not None and ttFont is not None:
            self.decompileHeader(data, ttFont)
        else:
            assert (data is None and ttFont is None), "Need both data and ttFont arguments"
        data = self.data  # decompileHeader assigns the data after the header to self.data
        charCodes = []
        gids = []
        pos = 0
        for i in range(self.nGroups):
            startCharCode, endCharCode, glyphID = struct.unpack(">LLL", data[pos:pos+12])
            pos += 12
            lenGroup = 1 + endCharCode - startCharCode
            charCodes.extend(list(range(startCharCode, endCharCode + 1)))
            gids.extend(self._computeGIDs(glyphID, lenGroup))
        self.data = data = None
        self.cmap = cmap = {}
        lenCmap = len(gids)
        glyphOrder = self.ttFont.getGlyphOrder()
        try:
            names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids))
        except IndexError:
            # GID beyond the glyph order; resolve names one at a time.
            getGlyphName = self.ttFont.getGlyphName
            names = list(map(getGlyphName, gids))
        list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))

    def compile(self, ttFont):
        """Serialize: greedily pack sorted char codes into groups."""
        if self.data:
            # Never decompiled: emit the original bytes unchanged.
            return struct.pack(">HHLLL", self.format, self.reserved, self.length, self.language, self.nGroups) + self.data
        charCodes = list(self.cmap.keys())
        lenCharCodes = len(charCodes)
        names = list(self.cmap.values())
        nameMap = ttFont.getReverseGlyphMap()
        try:
            gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
        except KeyError:
            nameMap = ttFont.getReverseGlyphMap(rebuild=True)
            try:
                gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
            except KeyError:
                # allow virtual GIDs in format 12 tables
                gids = []
                for name in names:
                    try:
                        gid = nameMap[name]
                    except KeyError:
                        try:
                            if name[:3] == 'gid':
                                # BUGFIX: parse the numeric suffix with int()
                                # instead of eval(); eval() would execute any
                                # expression embedded in a glyph name.
                                gid = int(name[3:])
                            else:
                                gid = ttFont.getGlyphID(name)
                        except Exception:
                            # Narrowed from a bare except: so that e.g.
                            # KeyboardInterrupt is no longer swallowed.
                            raise KeyError(name)
                    gids.append(gid)
        cmap = {}  # code:glyphID mapping
        list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids))
        charCodes.sort()
        index = 0
        startCharCode = charCodes[0]
        startGlyphID = cmap[startCharCode]
        # Seed the "previous" values so the first code always starts a group.
        lastGlyphID = startGlyphID - self._format_step
        lastCharCode = startCharCode - 1
        nGroups = 0
        dataList = []
        maxIndex = len(charCodes)
        for index in range(maxIndex):
            charCode = charCodes[index]
            glyphID = cmap[charCode]
            if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode):
                # Close the current group and open a new one.
                dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
                startCharCode = charCode
                startGlyphID = glyphID
                nGroups = nGroups + 1
            lastGlyphID = glyphID
            lastCharCode = charCode
        # Flush the final (still-open) group.
        dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
        nGroups = nGroups + 1
        data = bytesjoin(dataList)
        lengthSubtable = len(data) + 16
        assert len(data) == (nGroups*12) == (lengthSubtable-16)
        return struct.pack(">HHLLL", self.format, self.reserved, lengthSubtable, self.language, nGroups) + data

    def toXML(self, writer, ttFont):
        """Dump header attributes and the sorted code->name map as TTX."""
        writer.begintag(self.__class__.__name__, [
                ("platformID", self.platformID),
                ("platEncID", self.platEncID),
                ("format", self.format),
                ("reserved", self.reserved),
                ("length", self.length),
                ("language", self.language),
                ("nGroups", self.nGroups),
                ])
        writer.newline()
        codes = sorted(self.cmap.items())
        self._writeCodes(codes, writer)
        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild header fields and character map from TTX elements."""
        self.format = safeEval(attrs["format"])
        self.reserved = safeEval(attrs["reserved"])
        self.length = safeEval(attrs["length"])
        self.language = safeEval(attrs["language"])
        self.nGroups = safeEval(attrs["nGroups"])
        if not hasattr(self, "cmap"):
            self.cmap = {}
        cmap = self.cmap
        for element in content:
            if not isinstance(element, tuple):
                continue
            # Renamed from name/attrs/content, which shadowed the parameters.
            elemName, elemAttrs, elemContent = element
            if elemName != "map":
                continue
            cmap[safeEval(elemAttrs["code"])] = elemAttrs["name"]
class cmap_format_12(cmap_format_12_or_13):
    """Format 12: segmented coverage.

    Within a group the glyph ID advances in lockstep with the
    character code.
    """

    _format_step = 1

    def __init__(self, format=12):
        super(cmap_format_12, self).__init__(format)

    def _computeGIDs(self, startingGlyph, numberOfGlyphs):
        # Consecutive glyph IDs, one per code point in the group.
        return [startingGlyph + offset for offset in range(numberOfGlyphs)]

    def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
        # Both the code point and the glyph ID must advance by exactly one.
        return charCode == lastCharCode + 1 and glyphID == lastGlyphID + 1
class cmap_format_13(cmap_format_12_or_13):
    """Format 13: many-to-one mapping.

    Every code point in a group maps to the same single glyph.
    """

    _format_step = 0

    def __init__(self, format=13):
        super(cmap_format_13, self).__init__(format)

    def _computeGIDs(self, startingGlyph, numberOfGlyphs):
        # The same glyph repeated for each code point in the group.
        return [startingGlyph for _ in range(numberOfGlyphs)]

    def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
        # Code point advances by one while the glyph ID stays fixed.
        return charCode == lastCharCode + 1 and glyphID == lastGlyphID
def cvtToUVS(threeByteString):
    """Decode a 3-byte big-endian string into its integer value."""
    hi, mid, lo = struct.unpack(">3B", threeByteString)
    return (hi << 16) | (mid << 8) | lo
def cvtFromUVS(val):
    """Encode an integer below 2**24 as a 3-byte big-endian string."""
    assert 0 <= val < 0x1000000
    return struct.pack(">3B", (val >> 16) & 0xFF, (val >> 8) & 0xFF, val & 0xFF)
class cmap_format_14(CmapSubtable):
    """Format 14: Unicode Variation Sequences.

    Maps (base character, variation selector) pairs either to the font's
    default glyph for the base character ("default UVS": glyph name None)
    or to an explicit glyph ("non-default UVS"). The mapping lives in
    self.uvsDict: {variationSelector: [[baseUV, glyphName-or-None], ...]}.
    """

    def decompileHeader(self, data, ttFont):
        # Header: format (uint16), length (uint32), numVarSelectorRecords (uint32).
        format, length, numVarSelectorRecords = struct.unpack(">HLL", data[:10])
        self.data = data[10:]  # body only; offsets below compensate with -10
        self.length = length
        self.numVarSelectorRecords = numVarSelectorRecords
        self.ttFont = ttFont
        self.language = 0xFF # has no language.

    def decompile(self, data, ttFont):
        # Called lazily (both args None, header already consumed) or
        # directly (both args required).
        if data is not None and ttFont is not None:
            self.decompileHeader(data, ttFont)
        else:
            assert (data is None and ttFont is None), "Need both data and ttFont arguments"
        data = self.data

        self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail.
        uvsDict = {}
        recOffset = 0
        for n in range(self.numVarSelectorRecords):
            # VariationSelector record: uint24 selector + two uint32 offsets.
            uvs, defOVSOffset, nonDefUVSOffset = struct.unpack(">3sLL", data[recOffset:recOffset +11])
            recOffset += 11
            varUVS = cvtToUVS(uvs)
            if defOVSOffset:
                # Offsets are relative to the subtable start; self.data
                # begins after the 10-byte header, hence the -10.
                startOffset = defOVSOffset - 10
                numValues, = struct.unpack(">L", data[startOffset:startOffset+4])
                startOffset +=4
                for r in range(numValues):
                    # UnicodeRange: uint24 start + byte count of additional codes.
                    uv, addtlCnt = struct.unpack(">3sB", data[startOffset:startOffset+4])
                    startOffset += 4
                    firstBaseUV = cvtToUVS(uv)
                    cnt = addtlCnt+1
                    baseUVList = list(range(firstBaseUV, firstBaseUV+cnt))
                    glyphList = [None]*cnt  # None marks "use the default glyph"
                    localUVList = zip(baseUVList, glyphList)
                    try:
                        uvsDict[varUVS].extend(localUVList)
                    except KeyError:
                        uvsDict[varUVS] = list(localUVList)
            if nonDefUVSOffset:
                # Same -10 correction as above for the non-default table.
                startOffset = nonDefUVSOffset - 10
                numRecs, = struct.unpack(">L", data[startOffset:startOffset+4])
                startOffset +=4
                localUVList = []
                for r in range(numRecs):
                    # UVSMapping: uint24 base char + uint16 glyph ID.
                    uv, gid = struct.unpack(">3sH", data[startOffset:startOffset+5])
                    startOffset += 5
                    uv = cvtToUVS(uv)
                    glyphName = self.ttFont.getGlyphName(gid)
                    localUVList.append( [uv, glyphName] )
                try:
                    uvsDict[varUVS].extend(localUVList)
                except KeyError:
                    uvsDict[varUVS] = localUVList
        self.uvsDict = uvsDict

    def toXML(self, writer, ttFont):
        """Dump header attributes and one <map> element per UVS entry."""
        writer.begintag(self.__class__.__name__, [
                ("platformID", self.platformID),
                ("platEncID", self.platEncID),
                ("format", self.format),
                ("length", self.length),
                ("numVarSelectorRecords", self.numVarSelectorRecords),
                ])
        writer.newline()
        uvsDict = self.uvsDict
        uvsList = sorted(uvsDict.keys())
        for uvs in uvsList:
            uvList = uvsDict[uvs]
            # Default entries (glyph None) first, then by code point / name.
            uvList.sort(key=lambda item: (item[1] is not None, item[0], item[1]))
            for uv, gname in uvList:
                if gname is None:
                    gname = "None"
                # I use the arg rather than th keyword syntax in order to preserve the attribute order.
                writer.simpletag("map", [ ("uvs",hex(uvs)), ("uv",hex(uv)), ("name", gname)] )
                writer.newline()
        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild self.uvsDict from TTX <map uvs=... uv=... name=...> elements.

        NOTE(review): the loop variables below shadow the name/attrs/content
        parameters; harmless here since the parameters are read first.
        """
        self.format = safeEval(attrs["format"])
        self.length = safeEval(attrs["length"])
        self.numVarSelectorRecords = safeEval(attrs["numVarSelectorRecords"])
        self.language = 0xFF # provide a value so that CmapSubtable.__lt__() won't fail
        if not hasattr(self, "cmap"):
            self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail.
        if not hasattr(self, "uvsDict"):
            self.uvsDict = {}
        uvsDict = self.uvsDict
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name != "map":
                continue
            uvs = safeEval(attrs["uvs"])
            uv = safeEval(attrs["uv"])
            gname = attrs["name"]
            if gname == "None":
                gname = None  # "None" round-trips to a default-UVS entry
            try:
                uvsDict[uvs].append( [uv, gname])
            except KeyError:
                uvsDict[uvs] = [ [uv, gname] ]

    def compile(self, ttFont):
        """Serialize; default ranges are run-length encoded as (start, extraCount)."""
        if self.data:
            # Never decompiled: emit the original bytes unchanged.
            return struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords) + self.data

        uvsDict = self.uvsDict
        uvsList = sorted(uvsDict.keys())
        self.numVarSelectorRecords = len(uvsList)
        offset = 10 + self.numVarSelectorRecords*11 # current value is end of VarSelectorRecords block.
        data = []
        varSelectorRecords =[]
        for uvs in uvsList:
            entryList = uvsDict[uvs]

            # Default UVS entries are those with glyph name None.
            defList = [entry for entry in entryList if entry[1] is None]
            if defList:
                defList = [entry[0] for entry in defList]
                defOVSOffset = offset
                defList.sort()

                # Run-length encode consecutive code points: each record is
                # (first code, count-1); cnt tracks the current run length.
                lastUV = defList[0]
                cnt = -1
                defRecs = []
                for defEntry in defList:
                    cnt +=1
                    if (lastUV+cnt) != defEntry:
                        # Run broken: flush the previous run, start a new one.
                        rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt-1)
                        lastUV = defEntry
                        defRecs.append(rec)
                        cnt = 0

                # Flush the final run.
                rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt)
                defRecs.append(rec)

                numDefRecs = len(defRecs)
                data.append(struct.pack(">L", numDefRecs))
                data.extend(defRecs)
                offset += 4 + numDefRecs*4  # count field + 4 bytes per record
            else:
                defOVSOffset = 0

            # Non-default UVS entries carry an explicit glyph.
            ndefList = [entry for entry in entryList if entry[1] is not None]
            if ndefList:
                nonDefUVSOffset = offset
                ndefList.sort()
                numNonDefRecs = len(ndefList)
                data.append(struct.pack(">L", numNonDefRecs))
                offset += 4 + numNonDefRecs*5  # count field + 5 bytes per record

                for uv, gname in ndefList:
                    gid = ttFont.getGlyphID(gname)
                    ndrec = struct.pack(">3sH", cvtFromUVS(uv), gid)
                    data.append(ndrec)
            else:
                nonDefUVSOffset = 0

            vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset)
            varSelectorRecords.append(vrec)

        data = bytesjoin(varSelectorRecords) + bytesjoin(data)
        self.length = 10 + len(data)
        headerdata = struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords)
        self.data = headerdata + data

        return self.data
class cmap_format_unknown(CmapSubtable):
    """Fallback subtable for cmap formats we cannot parse.

    The raw bytes are carried through unchanged and dumped as hex in TTX.
    """

    def toXML(self, writer, ttFont):
        tagName = self.__class__.__name__[:12] + str(self.format)
        headerAttrs = [
            ("platformID", self.platformID),
            ("platEncID", self.platEncID),
        ]
        writer.begintag(tagName, headerAttrs)
        writer.newline()
        writer.dumphex(self.data)
        writer.endtag(tagName)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.data = readHex(content)
        self.cmap = {}

    def decompileHeader(self, data, ttFont):
        self.language = 0  # dummy value; the unknown format carries no language info
        self.data = data

    def decompile(self, data, ttFont):
        # Invoked lazily (both args None, header already consumed) or
        # directly (both args required). Nothing is actually parsed.
        if data is None and ttFont is None:
            return
        assert data is not None and ttFont is not None, "Need both data and ttFont arguments"
        self.decompileHeader(data, ttFont)

    def compile(self, ttFont):
        # Pass the original bytes straight through; None when there are none.
        return self.data if self.data else None
# Dispatch table: cmap subtable format number -> implementing class.
# Presumably formats missing here are handled by cmap_format_unknown —
# confirm in the subtable-construction code elsewhere in this module.
cmap_classes = {
    0: cmap_format_0,
    2: cmap_format_2,
    4: cmap_format_4,
    6: cmap_format_6,
    12: cmap_format_12,
    13: cmap_format_13,
    14: cmap_format_14,
}
| FontTools/fontTools/ttLib/tables/_c_m_a_p.py | 45,664 | Returns the Python encoding name for this cmap subtable based on its platformID,
platEncID, and language. If encoding for these values is not known, by default
None is returned. That can be overriden by passing a value to the default
argument.
Note that if you want to choose a "preferred" cmap subtable, most of the time
self.isUnicode() is what you want as that one only returns true for the modern,
commonly used, Unicode-compatible triplets, not the legacy ones.
Return the subtable class for a format.
Return a new instance of a subtable for format.
not found Note that by default we decompile only the subtable header info; any other data gets decompiled only when an attribute of the subtable is referenced. sort according to the spec; see CmapSubtable.__lt__() Some tables are the same object reference. Don't compile them twice. Some tables are different objects, but compile to the same data chunk allow lazy decompilation of subtables. don't handle requests for member functions like '__lt__' use saved data. Once this table has been decompiled, make sure we don't just return the original data. Also avoids recursion when called with an attribute that the cmap subtable doesn't have. implemented so that list.sort() sorts according to the spec. we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. If not, someone is calling the subtable decompile() directly, and must provide both args. decompileHeader assigns the data after the header to self.data find the minGI which is not zero. The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1. idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K. We would like to pick an idDelta such that the first glyphArray GID is 1, so that we are more likely to be able to combine glypharray GID subranges. This means that we have a problem when minGI is > 32K Since the final gi is reconstructed from the glyphArray GID by: (short)finalGID = (gid + idDelta) % 0x10000), we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the negative number to an unsigned short. we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. If not, someone is calling the subtable decompile() directly, and must provide both args. decompileHeader assigns the data after the header to self.data get the key array, and determine the number of subHeaders.Load subHeaders How this gets processed. Charcodes may be one or two bytes. 
The first byte of a charcode is mapped through the subHeaderKeys, to select a subHeader. For any subheader but 0, the next byte is then mapped through the selected subheader. If subheader Index 0 is selected, then the byte itself is mapped through the subheader, and there is no second byte. Then assume that the subsequent byte is the first byte of the next charcode,and repeat. Each subheader references a range in the glyphIndexArray whose length is entryCount. The range in glyphIndexArray referenced by a sunheader may overlap with the range in glyphIndexArray referenced by another subheader. The only subheader that will be referenced by more than one first-byte value is the subheader that maps the entire range of glyphID values to glyphIndex 0, e.g notdef: {firstChar 0, EntryCount 0,idDelta 0,idRangeOffset xx} A byte being mapped though a subheader is treated as in index into a mapping of array index to font glyphIndex. A subheader specifies a subrange within (0...256) by the firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero (e.g. glyph not in font). If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar). The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex. Example for Logocut-Medium first byte of charcode = 129; selects subheader 1. subheader 1 = {firstChar 64, EntryCount 108,idDelta 42,idRangeOffset 0252} second byte of charCode = 66 the index offset = 66-64 = 2. 
The subrange of the glyphIndexArray starting at 0x0252 bytes from the idRangeOffset word is: [glyphIndexArray index], [subrange array index] = glyphIndex [256], [0]=1 from charcode [129, 64] [257], [1]=2 from charcode [129, 65] [258], [2]=3 from charcode [129, 66] [259], [3]=4 from charcode [129, 67] So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero, add it to the glyphID to get the final glyphIndex value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew! gi is notdef. gi is notdef. If not subHeader.entryCount, then all char codes with this first byte are mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the same as mapping it to .notdef. cmap values are GID's. allow virtual GIDs in format 2 tables Process the (char code to gid) item list in char code order. By definition, all one byte char codes map to subheader 0. For all the two byte char codes, we assume that the first byte maps maps to the empty subhead (with an entry count of 0, which defines all char codes in its range to map to notdef) unless proven otherwise. Note that since the char code items are processed in char code order, all the char codes with the same first byte are in sequential order. list of indices into subHeaderList. We force this subheader entry 0 to exist in the subHeaderList in the case where some one comes up with a cmap where all the one byte char codes map to notdef, with the result that the subhead 0 would not get created just by processing the item list. Need to update the current subhead, and start a new one. fix GI's and iDelta of current subheader. If it was sunheader 0 for one-byte charCodes, then we need to set the subHeaderKeys value to zero for the indices matching the char codes. init new subheader need to fill in with notdefs all the code points between the last charCode and the current charCode. 
fix GI's and iDelta of last subheader that we we added to the subheader array. Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges. Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray, since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with charcode 0 and GID 0. offset to beginning of glyphIDArray from first subheader idRangeOffset. skip last special empty-set subheader; we've already hardocodes its idRangeOffset to 2. use the glyphIndexArray subarray didn't find one. one less subheader, one more subArray. one less subheader Now we can write out the data! header, 256 subHeaderKeys, and subheader array. We can't use subhead.entryCount, as some of the subhead may share subArrays.uint16 endCode[segCount] Ending character code for each segment, last = 0xFFFF.uint16 reservedPad This value should be zerouint16 startCode[segCount] Starting character code for each segmentuint16 idDelta[segCount] Delta for all character codes in segmentuint16 idRangeOffset[segCount] Offset in bytes to glyph indexArray, or 0uint16 glyphIndexArray[variable] Glyph index array Try to split a range of character codes into subranges with consecutive glyph IDs in such a way that the cmap4 subtable can be stored "most" efficiently. I can't prove I've got the optimal solution, but it seems to do well with the fonts I tested: none became bigger, many became smaller. Gather subranges in which the glyph IDs are consecutive. Now filter out those new subranges that would only make the data bigger. A new segment cost 8 bytes, not using a new segment costs 2 bytes per character. the whole range, we're fine split costs one more segment split costs two more segments Fill the "holes" in the segments list -- those are the segments in which the glyph IDs are _not_ consecutive. 
Transform the ranges into startCode/endCode lists. we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. If not, someone is calling the subtable decompile() directly, and must provide both args. decompileHeader assigns the data after the header to self.data divide the data the +1 is skipping the reservedPad field build 2-byte character mapping don't do 0xffff! *someone* needs to get killed. if not missing glyph missing glyph allow virtual GIDs in format 4 tables code:glyphID mapping Build startCode and endCode lists. Split the char codes in ranges of consecutive char codes, then split each range in more ranges of consecutive/not consecutive glyph IDs. See splitRange(). skip the first code, it's the first start code build up rest of cruft skip the closing codes (0xffff) someone *definitely* needs to get killed. 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef Insane. we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. If not, someone is calling the subtable decompile() directly, and must provide both args. decompileHeader assigns the data after the header to self.dataassert len(data) == 2 * entryCount XXX not true in Apple's Helvetica!!! yes, there are empty cmap tables. we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. If not, someone is calling the subtable decompile() directly, and must provide both args. decompileHeader assigns the data after the header to self.data allow virtual GIDs in format 12 tables code:glyphID mapping has no language. so that clients that expect this to exist in a cmap table won't fail. I use the arg rather than th keyword syntax in order to preserve the attribute order. provide a value so that CmapSubtable.__lt__() won't fail so that clients that expect this to exist in a cmap table won't fail. current value is end of VarSelectorRecords block. 
dummy value we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. If not, someone is calling the subtable decompile() directly, and must provide both args. | 10,498 | en | 0.852224 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Exponential distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import gamma
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Exponential",
"ExponentialWithSoftplusRate",
]
@tf_export("distributions.Exponential")
class Exponential(gamma.Gamma):
  """Exponential distribution.

  The Exponential distribution is parameterized by an event `rate` parameter.

  #### Mathematical Details

  The probability density function (pdf) is,

  ```none
  pdf(x; lambda, x > 0) = exp(-lambda x) / Z
  Z = 1 / lambda
  ```

  where `rate = lambda` and `Z` is the normalizaing constant.

  The Exponential distribution is a special case of the Gamma distribution,
  i.e.,

  ```python
  Exponential(rate) = Gamma(concentration=1., rate)
  ```

  The Exponential distribution uses a `rate` parameter, or "inverse scale",
  which can be intuited as,

  ```none
  X ~ Exponential(rate=1)
  Y = X / rate
  ```

  """

  def __init__(self,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="Exponential"):
    """Construct Exponential distribution with parameter `rate`.

    Args:
      rate: Floating point tensor, equivalent to `1 / mean`. Must contain only
        positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    """
    # Must be the first statement: snapshot only the caller's arguments.
    parameters = locals()
    # Even though all statistics of are defined for valid inputs, this is not
    # true in the parent class "Gamma."  Therefore, passing
    # allow_nan_stats=True
    # through to the parent class results in unnecessary asserts.
    with ops.name_scope(name, values=[rate]):
      self._rate = ops.convert_to_tensor(rate, name="rate")
    super(Exponential, self).__init__(
        concentration=array_ops.ones([], dtype=self._rate.dtype),
        rate=self._rate,
        allow_nan_stats=allow_nan_stats,
        validate_args=validate_args,
        name=name)
    # While the Gamma distribution is not reparameterizable, the exponential
    # distribution is.
    # BUGFIX: this attribute must be a `ReparameterizationType` instance, not
    # the bool `True` — the `reparameterization_type` property is compared
    # against FULLY_REPARAMETERIZED by client code per the Distribution API.
    self._reparameterization_type = distribution.FULLY_REPARAMETERIZED
    self._parameters = parameters
    self._graph_parents += [self._rate]

  @staticmethod
  def _param_shapes(sample_shape):
    return {"rate": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}

  @property
  def rate(self):
    """Rate parameter (`1 / mean`) of the distribution."""
    return self._rate

  def _sample_n(self, n, seed=None):
    """Draw `n` samples via inverse-CDF: `-log(U) / rate`, U ~ Uniform(0,1)."""
    shape = array_ops.concat([[n], array_ops.shape(self._rate)], 0)
    # Uniform variates must be sampled from the open-interval `(0, 1)` rather
    # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
    # because it is the smallest, positive, "normal" number. A "normal" number
    # is such that the mantissa has an implicit leading 1. Normal, positive
    # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
    # this case, a subnormal number (i.e., np.nextafter) can cause us to sample
    # 0.
    sampled = random_ops.random_uniform(
        shape,
        minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
        maxval=1.,
        seed=seed,
        dtype=self.dtype)
    return -math_ops.log(sampled) / self._rate
class ExponentialWithSoftplusRate(Exponential):
  """Exponential with softplus transform on `rate`."""

  def __init__(self,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="ExponentialWithSoftplusRate"):
    # Snapshot the caller's arguments before any other locals are created.
    parameters = locals()
    with ops.name_scope(name, values=[rate]):
      # softplus guarantees a strictly positive rate for any real input.
      positive_rate = nn.softplus(rate, name="softplus_rate")
      super(ExponentialWithSoftplusRate, self).__init__(
          rate=positive_rate,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    self._parameters = parameters
| venv1/Lib/site-packages/tensorflow/python/ops/distributions/exponential.py | 5,527 | Exponential distribution.
The Exponential distribution is parameterized by an event `rate` parameter.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; lambda, x > 0) = exp(-lambda x) / Z
Z = 1 / lambda
```
where `rate = lambda` and `Z` is the normalizaing constant.
The Exponential distribution is a special case of the Gamma distribution,
i.e.,
```python
Exponential(rate) = Gamma(concentration=1., rate)
```
The Exponential distribution uses a `rate` parameter, or "inverse scale",
which can be intuited as,
```none
X ~ Exponential(rate=1)
Y = X / rate
```
Exponential with softplus transform on `rate`.
Construct Exponential distribution with parameter `rate`.
Args:
rate: Floating point tensor, equivalent to `1 / mean`. Must contain only
positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
The Exponential distribution class.
Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== Even though all statistics of are defined for valid inputs, this is not true in the parent class "Gamma." Therefore, passing allow_nan_stats=True through to the parent class results in unnecessary asserts. While the Gamma distribution is not reparameterizable, the exponential distribution is. Uniform variates must be sampled from the open-interval `(0, 1)` rather than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny` because it is the smallest, positive, "normal" number. A "normal" number is such that the mantissa has an implicit leading 1. Normal, positive numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In this case, a subnormal number (i.e., np.nextafter) can cause us to sample 0. | 2,820 | en | 0.750303 |
#from numba import jit
import numpy as np
#from joblib import Parallel, delayed, parallel_backend
#from joblib import load, dump
#import tempfile
#import shutil
#import os
#
#import sys
#sys.path.append('pyunicorn_timeseries')
#from pyunicorn_timeseries.surrogates import Surrogates
def set_model_constants(xx=50.E3,nx=100,va=10.,tmax=60*360*24*3600.,avep=24*3600.,dt=3600.,period=3600*24*360*1,B=2.,T0=273.15+6,dT=2.,Cs=1.E-3,Cp=1030.,ra=1.5,ro=1030.,ri=900.,Cpo=4.E3,Cpi=2.9E3,H=200.,vo=0.2,Hb=1.E3,Li=3.3E6,Tf=273.15-1.8,SW0=50.,SW_anom=100.,emissivity=0.99,Da=1.E6,Do=5.E2,tau_entrainment=30*24*3600.,**args):
    '''Setup model constants.

    All of the constants have fixed default values, but one can override them
    or even add arbitrary extra entries via **args.

    Returns
    -------
    C : dict
        Dictionary of model constants keyed by parameter name.
    '''
    C = {}
    C['xx']     = xx     #grid size in [m]
    C['nx']     = nx     #number of grid cells - the total width of the domain is xx*nx long
    C['va']     = va     #wind in m/s
    #
    C['tmax']   = tmax   #tmax seconds
    C['dt']     = dt     #timestep
    #
    C['avep']   = avep   #averaging period in seconds
    #
    C['period'] = period #period of boundary restoring
    C['Cs']     = Cs     #exchange coefficient for bulk formula
    C['Cp']     = Cp     #air heat capacity
    C['ra']     = ra     #density of air [kg/m3]
    C['ro']     = ro     #density of sea water [kg/m3]
    C['ri']     = ri     #density of sea ice [kg/m3]
    C['Cpo']    = Cpo    #sea water heat capacity
    C['T0']     = T0     #initial temp in degC
    C['dT']     = dT     #initial temp perturbation
    C['H']      = H      #mixed layer depth in ocean [m]
    C['vo']     = vo     #ocean current speed [m/s]
    C['Hb']     = Hb     #boundary layer height in the atmosphere [m]
    C['Cpi']    = Cpi    #sea ice heat capacity [J/ Kg K]
    C['Li']     = Li     #Latent heat of fusion of sea water [J / kg K]
    C['Tf']     = Tf     #Freezing point of sea water [C]
    C['B']      = B      # long-wave radiation constant [W/m2]
    C['emissivity'] = emissivity #surface emissivity
    C['SW0']    = SW0    # background net downwelling SW radiation
    C['SW_anom']= SW_anom # amplitude of annual cycle in SW radiation
    C['Da']     = Da     # atmospheric diffusion [m2/s]
    C['Do']     = Do     # ocean diffusion [m2/s]
    C['tau_entrainment'] = tau_entrainment # ocean entrainment/damping timescale
    # merge in any user-supplied overrides or extra constants
    C.update(args)
    #
    return C
def CoupledChannel(C,forcing, T_boundary=None, dt_f=30*24*3600, restoring=False,ice_model=True,atm_adv=True,spatial_pattern=None,atm_DA_tendencies=None,ocn_DA_tendencies=None, return_coupled_fluxes=False,random_amp=0.1):
'''
This is the main function for the coupled ocean--atm channel model.
## INPUT VARIABLES ##
tmax: running time in seconds
avep: averaging period for the ouput
T0: initial temperature
forcing: dimensionless scaling for the heat flux forcing - default strength is 5 W/m2
dt_f: timestep of the forcing
atm_adv: boolean, advective atmosphere
atm_ocn: boolean, advective ocean
'''
#
# number of simulation timesteps and output timesteps
nt = int(C['tmax']/C['dt']) #simulation
nt1 = int(C['tmax']/C['avep']) #output
# rtas = np.random.rand(C['nx'])
# intitialize the model variables, first dimension is due to 2 timesteps deep scheme
sst = C['T0']*np.ones((2,C['nx']))
tas = C['T0']*np.ones((2,C['nx'])) #+rtas
hice = np.zeros((2,C['nx']))
# INCOMING SHORTWAVE RADIATION
SW0 = np.tile(C['SW0'][:,np.newaxis],(1,nt))
naxis = np.tile(np.arange(nt)[np.newaxis,],(C['nx'],1))
SW_warming = np.max(np.concatenate([(SW0-C['SW_anom']*np.cos(2*np.pi*(naxis*C['dt'])/(360*24*3600)))[np.newaxis,],np.zeros((C['nx'],nt))[np.newaxis,]],axis=0),0)
# If boundary conditions are not defined, then set initially to T0
if np.all(T_boundary==None):
T_boundary=C['T0']*np.ones(nt)
#
sst_boundary=T_boundary[0]*np.ones((2)) #nt+1
# evolve_boundary=True
#else:
# sst_boundary=np.concatenate((sst_boundary[np.newaxis,],sst_boundary[np.newaxis,]),axis=0)
# evolve_boundary=False
#
# interpolate forcing to the new timescale
if np.all(forcing!=None):
forcing = np.interp(np.arange(0,len(forcing)*dt_f,C['dt']),np.arange(0,len(forcing)*dt_f,dt_f),forcing)
else:
forcing = np.zeros(nt+1)
#
# initialize outputs
sst_out = np.zeros((nt1,C['nx']))
tas_out = np.zeros((nt1,C['nx']))
hice_out = np.zeros((nt1,C['nx']))
sflx_f_out = np.zeros((nt1,C['nx'])) #forcing
sflx_out = np.zeros((nt1,C['nx']))
# spatial pattern of the forcing - assume a sine wave
if np.all(spatial_pattern==None):
spatial_pattern=np.ones(C['nx'])
#
if np.all(atm_DA_tendencies!=None):
use_atm_tendencies=True
else:
use_atm_tendencies=False
if np.all(ocn_DA_tendencies!=None):
use_ocn_tendencies=True
else:
use_ocn_tendencies=False
#
if return_coupled_fluxes:
atm_DA_tendencies = np.zeros((nt,C['nx']))
ocn_DA_tendencies = np.zeros((nt,C['nx']))
# initialize counters
c=0; c2=0; c3=0; n=1
#####################
# --- TIME LOOP ---
#####################
for nn in range(nt):
#
# FORCING - WILL BE ZERO IF NOT SPECIFIED, no spatial pattern if not specified
sflx=forcing[nn]*spatial_pattern #+ forcing[nn]*random_amp*np.random.rand(C['nx'])
#
# save the forcing component
#
sflx_f_out[c,:]=sflx_f_out[c,:]+sflx
#
# SURFACE HEAT FLUXES
# Add sensible heat flux to the total surface flux in W/m**-2
sflx=sflx+C['ra']*C['Cp']*C['va']*C['Cs']*(sst[n-1,:]-tas[n-1,:])
# RADIATIVE FLUXES - LW will cool the atmosphere, SW will warm the ocean
LW_cooling = C['emissivity']*5.67E-8*(tas[n-1,:]**4)
#
# OCEAN BOUNDARY CONDITION
#if evolve_boundary:
sst_boundary_tendency=SW_warming[0,nn]*C['dt']/(C['H']*C['Cpo']*C['ro'])-C['emissivity']*5.67E-8*(sst_boundary[n-1]**4)*C['dt']/(C['H']*C['Cpo']*C['ro'])+(T_boundary[nn]-sst_boundary[n-1])*C['dt']/C['period']
############################################
#
# ATMOSPHERE
#
############################################
#
# ADVECTION
#
# set atm_adv=False is no atmospheric advection - note that we still need to know the wind speed to resolve heat fluxes
if atm_adv:
a_adv = np.concatenate([sst_boundary[n-1]-tas[n-1,:1],tas[n-1,:-1]-tas[n-1,1:]],axis=0)*(C['va']*C['dt']/C['xx'])
else:
a_adv = 0
#
# DIFFUSION
#
a_diff = (tas[n-1,2:]+tas[n-1,:-2]-2*tas[n-1,1:-1])*(C['Da']*C['dt']/(C['xx']**2))
a_diff0 = (tas[n-1,1]+sst_boundary[n-1]-2*tas[n-1,0])*(C['Da']*C['dt']/(C['xx']**2))
a_diff = np.concatenate([np.array([a_diff0]),a_diff,a_diff[-1:]],axis=0)
#
# SURFACE FLUXES
#
a_netsflx = (sflx*C['dt'])/(C['Hb']*C['Cp']*C['ra']) - LW_cooling*C['dt']/(C['Hb']*C['Cp']*C['ra'])
#
#
if return_coupled_fluxes:
atm_DA_tendencies[nn,:] = a_adv + a_diff
#
# ATM UPDATE
#
if use_atm_tendencies:
tas[n,:] = tas[n-1,:] + a_netsflx + atm_DA_tendencies[c3,:]
else:
tas[n,:] = tas[n-1,:] + a_netsflx + a_adv + a_diff
#
################################################
#
# OCEAN
#
################################################
# AND DIFFUSION + ENTRAINMENT
# ocean advection
#
# ADVECTION set vo=0 for stagnant ocean (slab)
#
o_adv = np.concatenate([sst_boundary[n-1]-sst[n-1,:1],sst[n-1,:-1]-sst[n-1,1:]],axis=0)*(C['vo']*C['dt']/C['xx'])
#
# DIFFUSION
#
o_diff = (sst[n-1,2:]+sst[n-1,:-2]-2*sst[n-1,1:-1])*(C['Do']*C['dt']/(C['xx']**2))
o_diff0 = (sst[n-1,1]+sst_boundary[n-1]-2*sst[n-1,0])*(C['Do']*C['dt']/(C['xx']**2))
o_diff = np.concatenate([np.array([o_diff0]),o_diff,o_diff[-1:]],axis=0)
#
# ENTRAINMENT - RESTORING TO AN AMBIENT WATER MASS (CAN BE SEEN AS LATERAL OR VERTICAL MIXING)
# set tau_entrainment=0 for no entrainment
if C['tau_entrainment']>0:
o_entrain = (C['T0']-sst[n-1,:])*C['dt']/C['tau_entrainment']
else:
o_entrain = 0
#
# SURFACE FLUXES
#
o_netsflx = -sflx*C['dt']/(C['H']*C['Cpo']*C['ro'])+SW_warming[:,nn]*C['dt']/(C['H']*C['Cpo']*C['ro'])
#
if return_coupled_fluxes:
ocn_DA_tendencies[nn,:] = o_adv + o_diff + o_entrain
#
# OCN update
if use_ocn_tendencies:
sst[n,:] = sst[n-1,:] + o_netsflx + ocn_DA_tendencies[c3,:]
else:
sst[n,:] = sst[n-1,:] + o_netsflx + o_adv + o_diff + o_entrain
#
if ice_model:
# THIS IS A DIAGNOSTIC SEA ICE MODEL
#
# SST is first allowed to cool below freezing and then we form sea ice from the excess_freeze
# i.e the amount that heat that is used to cool SST below freezing is converted to ice instead.
# Similarly, SST is allowed to warm above Tf even if there is ice, and then excess_melt,
# i.e. the amount of heat that is used to warm the water is first used to melt ice,
# and then the rest can warm the water.
#
# This scheme conserves energy - it simply switches it between ocean and ice storages
#
# advection
#hice[n-1,1:]=hice[n-1,1:]-(hice[n-1,:-1]-hice[n-1,1:])*(C['vo']*C['dt']/C['xx'])
#dhice = (hice[n-1,:-1]-hice[n-1,1:])*(C['vo']*C['dt']/C['xx'])
#hice[n-1,:-1] = hice[n-1,:-1] -dhice
#hice[n-1,-1] = hice[n-1,-1] + dhice[-1]
#
ice_mask = (hice[n-1,:]>0).astype(np.float) #cells where there is ice to melt
freezing_mask = (sst[n,:]<C['Tf']).astype(np.float) #cells where freezing will happen
# change in energy
dEdt = C['H']*C['ro']*C['Cpo']*(sst[n,:]-sst[n-1,:])/C['dt']
# negative change in energy will produce ice whenver the water would otherwise cool below freezing
excess_freeze = freezing_mask*np.max([-dEdt,np.zeros(C['nx'])],axis=0)
# positive change will melt ice where there is ice
excess_melt = ice_mask*np.max([dEdt,np.zeros(C['nx'])],axis=0)
# note that freezing and melting will never happen at the same time in the same cell
# freezing
dhice_freeze = C['dt']*excess_freeze/(C['Li']*C['ri'])
# melting
dhice_melt= C['dt']*excess_melt/(C['Li']*C['ri'])
# update
hice[n,:] = hice[n-1,:] + dhice_freeze - dhice_melt
# check how much energy was used for melting sea ice - remove this energy from ocean
hice_melt = (dhice_melt>0).astype(np.float)*np.min([dhice_melt,hice[n-1,:]],axis=0)
# Do not allow ice to be negative - that energy is kept in the ocean all the time.
# The line above ensures that not more energy than is needed to melt the whole ice cover
# is removed from the ocean at any given time
hice[n,:] = np.max([hice[n,:],np.zeros(C['nx'])],axis=0)
#
# Update SST
# Give back the energy that was used for freezing (will keep the water temperature above freezing)
sst[n,:] = sst[n,:] + C['dt']*excess_freeze/(C['H']*C['Cpo']*C['ro'])
# take out the heat that was used to melt ice
# (need to cap to hice, the extra heat is never used and will stay in the ocean)
sst[n,:] = sst[n,:] - hice_melt*(C['Li']*C['ri'])/(C['ro']*C['Cpo']*C['H'])
#
#############################
# --- PREPARE OUTPUT ----
#############################
# accumulate output
tas_out[c,:] = tas_out[c,:]+tas[n,:]
sst_out[c,:] = sst_out[c,:]+sst[n,:]
hice_out[c,:] = hice_out[c,:]+hice[n,:]
sflx_out[c,:] = sflx_out[c,:]+sflx
# accumulate averaging counter
c2=c2+1
c3=c3+1
if ((nn+1)*C['dt'])%(360*24*3600)==0:
#print(nn)
c3=0
#calculate the average for the output
if (((nn+1)*C['dt'])%C['avep']==0 and nn>0):
tas_out[c,:] = tas_out[c,:]/c2
sst_out[c,:] = sst_out[c,:]/c2
sflx_out[c,:] = sflx_out[c,:]/c2
sflx_f_out[c,:] = sflx_f_out[c,:]/c2
hice_out[c,:] = hice_out[c,:]/c2
# update counters
c = c+1
c2 = 0
if ((nn+1)*C['dt'])%(360*24*3600)==0:
print('Year ', (nn+1)*C['dt']/(360*24*3600), sst[1,int(C['nx']/4)], sst[1,int(3*C['nx']/4)])
#update the variables
tas[0,:] = tas[1,:].copy()
sst[0,:] = sst[1,:].copy()
hice[0,:] = hice[1,:].copy()
# SST at the boundary
sst_boundary[n-1]=sst_boundary[n-1]+sst_boundary_tendency
#
#
# if there is no ice, set to nan
hice_out[np.where(hice_out==0)]=np.nan
#
if return_coupled_fluxes:
return tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt, atm_DA_tendencies, ocn_DA_tendencies
else:
return tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt
#@jit(nopython=True)
def CoupledChannel_time(nt,nx,xx,dt,avep,sst,tas,hice,sst_boundary,sst_out,tas_out,hice_out,sflx_f_out,sflx_out,forcing,spatial_pattern,ra,Cp,va,vo,Da,Do,Cs,T0,Tf,emissivity,SW0,SW_anom,H,Hb,Cpo,ro,tau_entrainment,Li,ri,use_ocn_tendencies,use_atm_tendencies, atm_DA_tendencies, ocn_DA_tendencies,ice_model,atm_adv,return_coupled_fluxes):
'''
Separate time loop to enable numba
'''
#initialize counters
c=0; c2=0; c3=0; n=1
#####################
# --- TIME LOOP ---
#####################
for nn in range(nt):
#
# FORCING - WILL BE ZERO IF NOT SPECIFIED, no spatial pattern if not specified
sflx=forcing[nn]*spatial_pattern #+ forcing[nn]*random_amp*np.random.rand(C['nx'])
#
# save the forcing component
#
sflx_f_out[c,:]=sflx_f_out[c,:]+sflx
#
# SURFACE HEAT FLUXES
# Add sensible heat flux to the total surface flux in W/m**-2
sflx=sflx+ra*Cp*va*Cs*(sst[n-1,:]-tas[n-1,:])
# RADIATIVE FLUXES - LW will cool the atmosphere, SW will warm the ocean
LW_cooling = emissivity*5.67E-8*(tas[n-1,:]**4)
SW_warming = SW0+max(SW_anom*np.sin(2*float(nn)*dt*np.pi/(360*24*3600)),0.0)
#net_radiation = SW_warming-LW_cooling
net_radiation = -LW_cooling
#
# OCEAN BOUNDARY CONDITION - SET dT to zero to suppress the sin
sst_boundary[n]=sst_boundary[n-1]+SW_warming[0]*dt/(H*Cpo*ro)-emissivity*5.67E-8*(sst_boundary[n-1]**4)*dt/(H*Cpo*ro)+(T0-sst_boundary[n-1])*dt/(360*24*3600) #C['T0']+C['dT']*np.sin(nn*C['dt']*np.pi/C['period']) +
#
# ATMOSPHERE - ADVECTION AND DIFFUSION
# set atm_adv=False is no atmospheric advection - note that we need to know the wind speed to resolve heat fluxes
if atm_adv:
a_adv = np.concatenate((sst_boundary[n-1]-tas[n-1,:1],tas[n-1,:-1]-tas[n-1,1:]),axis=0)*(va*dt/xx)
#tas[n,0]=tas[n-1,0]+(C['T0']-tas[n-1,0])*(C['va']*C['dt']/C['xx']) #always constant temperature blowing over the ocean from land
#tas[n,0]=tas[n-1,0]+(sst[n,0]-tas[n-1,0])*(C['va']*C['dt']/C['xx']) #atmospheric temperature at the boundary is in equilibrium with the ocean
#tas[n,1:]=tas[n-1,1:]+(tas[n-1,:-1]-tas[n-1,1:])*(C['va']*C['dt']/C['xx'])
else:
#tas[n,:] = tas[n-1,0]
a_adv = np.zeros(nx)
#
# DIFFUSION
#
#tas[n,1:-1] = tas[n,1:-1] + (tas[n-1,2:]+tas[n-1,:-2]-2*tas[n-1,1:-1])*(C['Da']*C['dt']/(C['xx']**2))
a_diff = (tas[n-1,2:]+tas[n-1,:-2]-2*tas[n-1,1:-1])*(Da*dt/(xx**2))
a_diff0 = (tas[n-1,1]+sst_boundary[n-1]-2*tas[n-1,0])*(Da*dt/(xx**2))
a_diff = np.concatenate((np.array([a_diff0]),a_diff,a_diff[-1:]),axis=0)
#
# ATMOSPHERE - SURFACE FLUXES
#
a_netsflx = (sflx*dt)/(Hb*Cp*ra) + net_radiation*dt/(Hb*Cp*ra)
#
# full update
#
#
if return_coupled_fluxes:
atm_DA_tendencies[nn,:]=np.sum((a_adv,a_diff),axis=0)
#
if use_atm_tendencies:
tas[n,:] = tas[n-1,:] + a_netsflx + atm_DA_tendencies[c3,:]
else:
tas[n,:] = tas[n-1,:] + a_netsflx + a_adv + a_diff
#
# OCEAN - ADVECTION AND DIFFUSION + ENTRAINMENT
# ocean advection
# set vo=0 for stagnant ocean (slab)
#
#sst[n,1:] = sst[n-1,1:]+(sst[n-1,:-1]-sst[n-1,1:])*(1-ocn_mixing_ratio)*(C['vo']*C['dt']/C['xx'])+(C['T0']-sst[n-1,1:])*ocn_mixing_ratio*(C['vo']*C['dt']/C['xx'])
o_adv = np.concatenate((sst_boundary[n-1]-sst[n-1,:1],sst[n-1,:-1]-sst[n-1,1:]),axis=0)*(vo*dt/xx)
# DIFFUSION
#sst[n,1:-1] = sst[n,1:-1] + (sst[n-1,2:]+sst[n-1,:-2]-2*sst[n-1,1:-1])*(C['Do']*C['dt']/(C['xx']**2))
o_diff = (sst[n-1,2:]+sst[n-1,:-2]-2*sst[n-1,1:-1])*(Do*dt/(xx**2))
o_diff0 = (sst[n-1,1]+sst_boundary[n-1]-2*sst[n-1,0])*(Do*dt/(xx**2))
o_diff = np.concatenate((np.array([o_diff0]),o_diff,o_diff[-1:]),axis=0)
# ENTRAINMENT (damping by a lower layer)
o_entrain = (T0-sst[n-1,:])*dt/tau_entrainment
#sst[n,1:]=sst[n,1:]+(C['T0']-sst[n-1,1:])*C['dt']/C['tau_entrainment']
#
# OCEAN - SURFACE FLUXES
#
o_netsflx = -sflx*dt/(H*Cpo*ro)+SW_warming*dt/(H*Cpo*ro)
#sst[n,:]=sst[n,:]-(sflx*C['dt'])/(C['H']*C['Cpo']*C['ro'])
if return_coupled_fluxes:
ocn_DA_tendencies[nn,:] = o_adv + o_diff + o_entrain
# OCN update
if use_ocn_tendencies:
sst[n,:] = sst[n-1,:] + o_netsflx + ocn_DA_tendencies[c3,:]
else:
sst[n,:] = sst[n-1,:] + o_netsflx + o_adv + o_diff + o_entrain
#
if ice_model:
# THIS IS A DIAGNOSTIC SEA ICE MODEL
#
# sst is first allowed to cool below freezing and then we forM sea ice from the excess_freeze
# i.e the amount that heat that is used to cool sst below freezing is converted to ice instead
# similarly sst is allowed to warm above Tf even if there is ice, and then excess_melt,
# i.e. the amount of heat that is used to warm the water is first used to melt ice,
# and then the rest can warm water. This scheme conserves energy - it simply switches it between ocean and ice
#
ice_mask = (hice[n-1,:]>0).astype(np.float) #cells where there is ice to melt
freezing_mask = (sst[n,:]<Tf).astype(np.float) #cells where freezing will happen
# change in energy
dEdt = H*ro*Cpo*(sst[n,:]-sst[n-1,:])/dt
# negative change in energy will produce ice whenver the water would otherwise cool below freezing
excess_freeze = freezing_mask*np.max([-dEdt,np.zeros(nx)],axis=0)
# positive change will melt ice where there is ice
excess_melt = ice_mask*np.max([dEdt,np.zeros(nx)],axis=0)
# note that freezing and melting will never happen at the same time in the same cell
# freezing
dhice_freeze = dt*excess_freeze/(Li*ri)
# melting
dhice_melt= dt*excess_melt/(Li*ri)
# update
hice[n,:] = hice[n-1,:] + dhice_freeze - dhice_melt
# check how much energy was used for melting sea ice - remove this energy from ocean
hice_melt = (dhice_melt>0).astype(np.float)*np.min([dhice_melt,hice[n-1,:]],axis=0)
# Do not allow ice to be negative - that energy is kept in the ocean all the time.
# The line above ensures that not more energy than is needed to melt the whole ice cover
# is removed from the ocean at any given time
hice[n,:] = np.max([hice[n,:],np.zeros(nx)],axis=0)
#
# Update SST
# Give back the energy that was used for freezing (will keep the water temperature above freezing)
sst[n,:] = sst[n,:] + dt*excess_freeze/(H*Cpo*ro)
# take out the heat that was used to melt ice
# (need to cap to hice, the extra heat is never used and will stay in the ocean)
sst[n,:] = sst[n,:] - hice_melt*(Li*ri)/(ro*Cpo*H)
#
#############################
# --- PREPARE OUTPUT ----
#############################
#accumulate
tas_out[c,:] = tas_out[c,:]+tas[n,:]
sst_out[c,:] = sst_out[c,:]+sst[n,:]
hice_out[c,:] = hice_out[c,:]+hice[n,:]
sflx_out[c,:] = sflx_out[c,:]+sflx
# accumulate averaging counter
c2=c2+1
c3=c3+1
if ((nn+1)*dt)%(360*24*3600)==0:
#print(nn)
c3=0
#calculate the average for the output
if (((nn+1)*dt)%avep==0 and nn>0):
tas_out[c,:] = tas_out[c,:]/c2
sst_out[c,:] = sst_out[c,:]/c2
sflx_out[c,:] = sflx_out[c,:]/c2
sflx_f_out[c,:] = sflx_f_out[c,:]/c2
hice_out[c,:] = hice_out[c,:]/c2
# update counters
c = c+1
c2 = 0
#if ((nn+1)*C['dt'])%(360*24*3600)==0:
# print('Year ', (nn+1)*C['dt']/(360*24*3600), sst[1,int(C['nx']/4)], sst[1,int(3*C['nx']/4)])
#update the variables
tas[0,:] = tas[1,:].copy()
sst[0,:] = sst[1,:].copy()
hice[0,:] = hice[1,:].copy()
sst_boundary[0]=sst_boundary[1].copy()
#
hice_out[np.where(hice_out==0)]=np.nan
#
return tas_out, sst_out, hice_out, sflx_out, sflx_f_out, atm_DA_tendencies, ocn_DA_tendencies
def CoupledChannel2(C,forcing, dt_f=30*24*3600, ocn_mixing_ratio=0, restoring=False,ice_model=True,atm_adv=True,spatial_pattern=None,atm_DA_tendencies=None,ocn_DA_tendencies=None, return_coupled_fluxes=False,random_amp=0.1):
    '''
    This is the main function for the coupled ocean--atm channel model
    (wrapper around the numba-compatible time loop CoupledChannel_time).

    ## INPUT VARIABLES ##
    C:       dict of model constants (see set_model_constants); C['SW0'] must
             be an array of length C['nx']
    forcing: heat flux forcing time series [W/m2] at timestep dt_f, or None
    dt_f:    timestep of the forcing
    ocn_mixing_ratio: 0-1 ratio between advection and mixing
             (kept for interface compatibility; currently unused)
    restoring: kept for interface compatibility (unused)
    ice_model: boolean, enable the diagnostic sea ice model
    atm_adv: boolean, advective atmosphere
    spatial_pattern: spatial pattern of the forcing (uniform if None)
    atm_DA_tendencies, ocn_DA_tendencies: prescribed tendencies used instead
             of the dynamically computed advection+diffusion terms when given
    return_coupled_fluxes: boolean, also return the computed tendencies
    random_amp: amplitude of the (currently disabled) random forcing component

    ## RETURNS ##
    tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt
    (and additionally atm_DA_tendencies, ocn_DA_tendencies when
     return_coupled_fluxes is True)
    '''
    #
    nt  = int(C['tmax']/C['dt'])   #simulation steps
    nt1 = int(C['tmax']/C['avep']) #output steps
    # small random perturbation of the initial atmospheric temperature
    rtas = np.random.rand(C['nx'])
    # intitialize the model variables, only 2 timesteps deep scheme
    sst  = C['T0']*np.ones((2,C['nx']))
    tas  = C['T0']*np.ones((2,C['nx']))+rtas
    hice = np.zeros((2,C['nx']))
    sst_boundary = C['T0']*np.ones((2))
    #
    # interpolate forcing to the model timestep
    if forcing is not None:
        forcing = np.interp(np.arange(0,len(forcing)*dt_f,C['dt']),np.arange(0,len(forcing)*dt_f,dt_f),forcing)
    else:
        forcing = np.zeros(nt+1)
    #
    # initialize outputs
    sst_out    = np.zeros((nt1,C['nx']))
    tas_out    = np.zeros((nt1,C['nx']))
    hice_out   = np.zeros((nt1,C['nx']))
    sflx_f_out = np.zeros((nt1,C['nx'])) #forcing
    sflx_out   = np.zeros((nt1,C['nx']))
    # spatial pattern of the forcing - uniform if not specified
    if spatial_pattern is None:
        spatial_pattern = np.ones(C['nx'])
    #
    if atm_DA_tendencies is not None:
        use_atm_tendencies = True
    else:
        use_atm_tendencies = False
        # allocate only when not user-supplied: the original code zeroed the
        # arrays unconditionally, silently discarding prescribed tendencies
        atm_DA_tendencies = np.zeros((nt,C['nx']))
    if ocn_DA_tendencies is not None:
        use_ocn_tendencies = True
    else:
        use_ocn_tendencies = False
        ocn_DA_tendencies = np.zeros((nt,C['nx']))
    #
    tas_out, sst_out, hice_out, sflx_out, sflx_f_out, atm_DA_tendencies, ocn_DA_tendencies = CoupledChannel_time(nt,C['nx'],C['xx'],C['dt'],C['avep'],sst,tas,hice,sst_boundary,sst_out,tas_out,hice_out,sflx_f_out,sflx_out,forcing,spatial_pattern,C['ra'],C['Cp'],C['va'],C['vo'],C['Da'],C['Do'],C['Cs'],C['T0'],C['Tf'],C['emissivity'],C['SW0'],C['SW_anom'],C['H'],C['Hb'],C['Cpo'],C['ro'],C['tau_entrainment'],C['Li'],C['ri'],use_ocn_tendencies,use_atm_tendencies, atm_DA_tendencies, ocn_DA_tendencies,ice_model,atm_adv,return_coupled_fluxes)
    #
    if return_coupled_fluxes:
        return tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt, atm_DA_tendencies, ocn_DA_tendencies
    else:
        return tas_out, sst_out, hice_out, sflx_out, sflx_f_out, nt1, nt
| coupled_channel/cutils.py | 25,330 | This is the main function for the coupled ocean--atm channel model.
## INPUT VARIABLES ##
tmax: running time in seconds
avep: averaging period for the ouput
T0: initial temperature
forcing: dimensionless scaling for the heat flux forcing - default strength is 5 W/m2
dt_f: timestep of the forcing
atm_adv: boolean, advective atmosphere
atm_ocn: boolean, advective ocean
This is the main function for the coupled ocean--atm channel model.
## INPUT VARIABLES ##
tmax: running time in seconds
avep: averaging period for the ouput
T0: initial temperature
forcing: dimensionless scaling for the heat flux forcing - default strength is 5 W/m2
dt_f: timestep of the forcing
atm_adv: boolean, advective atmosphere
atm_ocn: boolean, advective ocean
ocn_mixing: add non-local mixing to ocean
ocn_mixing_ratio: 0-1 ratio between advection and mixing (0 only advection; 1 only mixing)
Separate time loop to enable numba
Setup model constants. All of the constants have fixed values, but one can pass in own values or even some arbitrary values via **args.
from numba import jitfrom joblib import Parallel, delayed, parallel_backendfrom joblib import load, dumpimport tempfileimport shutilimport osimport syssys.path.append('pyunicorn_timeseries')from pyunicorn_timeseries.surrogates import Surrogatesgrid size in [m]number of grid cell - the total width of the domain is xx*nx longwind in m/stmax secondstimestepaveraging period in seconds period of boundary restoringexchange coefficient for bulk formulaair heat capacitydensity of air [kg/m3]density of sea water [kg/m3]density of sea ice [kg/m3]sea water heat capacityinitial temp in degCinitial temp perturbationHb=2E3mixed layer depth in ocean [m]ocean current speed [m/s]boundary layer height in the atmosphere [m]sea ice heat capacity [J/ Kg K]Latent heat of fusion of sea water [J / kg K]Freezing point of sea water [C] long-wave radiation constant [W/m2]surface emissivity background net downwelling SW radiation amplitude of annual cycle in SW radiation atmospheric diffusion [m2/s] ocean diffusion [m2/s] ocean entrainment/damping timescale number of simulation timesteps and output timestepssimulationoutput rtas = np.random.rand(C['nx']) intitialize the model variables, first dimension is due to 2 timesteps deep scheme+rtas INCOMING SHORTWAVE RADIATION If boundary conditions are not defined, then set initially to T0nt+1 evolve_boundary=Trueelse: sst_boundary=np.concatenate((sst_boundary[np.newaxis,],sst_boundary[np.newaxis,]),axis=0) evolve_boundary=False interpolate forcing to the new timescale initialize outputsforcing spatial pattern of the forcing - assume a sine wave initialize counters --- TIME LOOP --- FORCING - WILL BE ZERO IF NOT SPECIFIED, no spatial pattern if not specified+ forcing[nn]*random_amp*np.random.rand(C['nx']) save the forcing component SURFACE HEAT FLUXES Add sensible heat flux to the total surface flux in W/m**-2 RADIATIVE FLUXES - LW will cool the atmosphere, SW will warm the ocean OCEAN BOUNDARY CONDITIONif evolve_boundary: ATMOSPHERE ADVECTION set 
atm_adv=False is no atmospheric advection - note that we still need to know the wind speed to resolve heat fluxes DIFFUSION SURFACE FLUXES ATM UPDATE OCEAN AND DIFFUSION + ENTRAINMENT ocean advection ADVECTION set vo=0 for stagnant ocean (slab) DIFFUSION ENTRAINMENT - RESTORING TO AN AMBIENT WATER MASS (CAN BE SEEN AS LATERAL OR VERTICAL MIXING) set tau_entrainment=0 for no entrainment SURFACE FLUXES OCN update THIS IS A DIAGNOSTIC SEA ICE MODEL SST is first allowed to cool below freezing and then we form sea ice from the excess_freeze i.e the amount that heat that is used to cool SST below freezing is converted to ice instead. Similarly, SST is allowed to warm above Tf even if there is ice, and then excess_melt, i.e. the amount of heat that is used to warm the water is first used to melt ice, and then the rest can warm the water. This scheme conserves energy - it simply switches it between ocean and ice storages advectionhice[n-1,1:]=hice[n-1,1:]-(hice[n-1,:-1]-hice[n-1,1:])*(C['vo']*C['dt']/C['xx'])dhice = (hice[n-1,:-1]-hice[n-1,1:])*(C['vo']*C['dt']/C['xx'])hice[n-1,:-1] = hice[n-1,:-1] -dhicehice[n-1,-1] = hice[n-1,-1] + dhice[-1]cells where there is ice to meltcells where freezing will happen change in energy negative change in energy will produce ice whenver the water would otherwise cool below freezing positive change will melt ice where there is ice note that freezing and melting will never happen at the same time in the same cell freezing melting update check how much energy was used for melting sea ice - remove this energy from ocean Do not allow ice to be negative - that energy is kept in the ocean all the time. 
The line above ensures that not more energy than is needed to melt the whole ice cover is removed from the ocean at any given time Update SST Give back the energy that was used for freezing (will keep the water temperature above freezing) take out the heat that was used to melt ice (need to cap to hice, the extra heat is never used and will stay in the ocean) --- PREPARE OUTPUT ---- accumulate output accumulate averaging counterprint(nn)calculate the average for the output update countersupdate the variables SST at the boundary if there is no ice, set to nan@jit(nopython=True)initialize counters --- TIME LOOP --- FORCING - WILL BE ZERO IF NOT SPECIFIED, no spatial pattern if not specified+ forcing[nn]*random_amp*np.random.rand(C['nx']) save the forcing component SURFACE HEAT FLUXES Add sensible heat flux to the total surface flux in W/m**-2 RADIATIVE FLUXES - LW will cool the atmosphere, SW will warm the oceannet_radiation = SW_warming-LW_cooling OCEAN BOUNDARY CONDITION - SET dT to zero to suppress the sinC['T0']+C['dT']*np.sin(nn*C['dt']*np.pi/C['period']) + ATMOSPHERE - ADVECTION AND DIFFUSION set atm_adv=False is no atmospheric advection - note that we need to know the wind speed to resolve heat fluxestas[n,0]=tas[n-1,0]+(C['T0']-tas[n-1,0])*(C['va']*C['dt']/C['xx']) always constant temperature blowing over the ocean from landtas[n,0]=tas[n-1,0]+(sst[n,0]-tas[n-1,0])*(C['va']*C['dt']/C['xx']) atmospheric temperature at the boundary is in equilibrium with the oceantas[n,1:]=tas[n-1,1:]+(tas[n-1,:-1]-tas[n-1,1:])*(C['va']*C['dt']/C['xx'])tas[n,:] = tas[n-1,0] DIFFUSION tas[n,1:-1] = tas[n,1:-1] + (tas[n-1,2:]+tas[n-1,:-2]-2*tas[n-1,1:-1])*(C['Da']*C['dt']/(C['xx']**2)) ATMOSPHERE - SURFACE FLUXES full update OCEAN - ADVECTION AND DIFFUSION + ENTRAINMENT ocean advection set vo=0 for stagnant ocean (slab)sst[n,1:] = sst[n-1,1:]+(sst[n-1,:-1]-sst[n-1,1:])*(1-ocn_mixing_ratio)*(C['vo']*C['dt']/C['xx'])+(C['T0']-sst[n-1,1:])*ocn_mixing_ratio*(C['vo']*C['dt']/C['xx']) 
DIFFUSIONsst[n,1:-1] = sst[n,1:-1] + (sst[n-1,2:]+sst[n-1,:-2]-2*sst[n-1,1:-1])*(C['Do']*C['dt']/(C['xx']**2)) ENTRAINMENT (damping by a lower layer)sst[n,1:]=sst[n,1:]+(C['T0']-sst[n-1,1:])*C['dt']/C['tau_entrainment'] OCEAN - SURFACE FLUXES sst[n,:]=sst[n,:]-(sflx*C['dt'])/(C['H']*C['Cpo']*C['ro']) OCN update THIS IS A DIAGNOSTIC SEA ICE MODEL sst is first allowed to cool below freezing and then we forM sea ice from the excess_freeze i.e the amount that heat that is used to cool sst below freezing is converted to ice instead similarly sst is allowed to warm above Tf even if there is ice, and then excess_melt, i.e. the amount of heat that is used to warm the water is first used to melt ice, and then the rest can warm water. This scheme conserves energy - it simply switches it between ocean and icecells where there is ice to meltcells where freezing will happen change in energy negative change in energy will produce ice whenver the water would otherwise cool below freezing positive change will melt ice where there is ice note that freezing and melting will never happen at the same time in the same cell freezing melting update check how much energy was used for melting sea ice - remove this energy from ocean Do not allow ice to be negative - that energy is kept in the ocean all the time. 
The line above ensures that not more energy than is needed to melt the whole ice cover is removed from the ocean at any given time Update SST Give back the energy that was used for freezing (will keep the water temperature above freezing) take out the heat that was used to melt ice (need to cap to hice, the extra heat is never used and will stay in the ocean) --- PREPARE OUTPUT ----accumulate accumulate averaging counterprint(nn)calculate the average for the output update countersif ((nn+1)*C['dt'])%(360*24*3600)==0: print('Year ', (nn+1)*C['dt']/(360*24*3600), sst[1,int(C['nx']/4)], sst[1,int(3*C['nx']/4)])update the variablesprint(C)print(C['T0'],C['SW0'],C['Da'],C['xx'])stepsthis is period/dt, previously nt/8 print(rtas.max())intitialize the model variables, only 2 timesteps deep schemeprint(sst.max(),tas.max())interpolate forcing to the new timescaleinitialize outputsforcingspatial pattern of the forcing - assume a sine wave | 9,043 | en | 0.774505 |
"""
Subdivide Cells
~~~~~~~~~~~~~~~
Increase the number of triangles in a single, connected triangular mesh.
The :func:`pyvista.PolyDataFilters.subdivide` filter utilizes three different
subdivision algorithms to subdivide a mesh's cells: `butterfly`, `loop`,
or `linear`.
"""
from pyvista import examples
import pyvista as pv
###############################################################################
# First, let's load a **triangulated** mesh to subdivide. We can use the
# :func:`pyvista.DataSetFilters.triangulate` filter to ensure the mesh we are
# using is purely triangles.
# Load a coarse bunny mesh; .triangulate() guarantees the mesh contains only
# triangles, which the subdivision filters below require.
mesh = examples.download_bunny_coarse().triangulate()
# Hand-picked camera position (position, focal point, view-up) applied before
# rendering at the end of this example.
cpos = [(-0.02788175062966399, 0.19293295656233056, 0.4334449972621349),
        (-0.053260899930287015, 0.08881197167521734, -9.016948161029588e-05),
        (-0.10170607813337212, 0.9686438023715356, -0.22668272496584665)]
###############################################################################
# Now, let's do a few subdivisions with the mesh and compare the results.
# Below is a helper function to make a comparison plot of three different
# subdivisions.
def plot_subdivisions(mesh, a, b):
    """Build a 3x3 comparison plot of subdivision filters.

    Column 0 shows the original mesh; columns 1 and 2 show the mesh
    subdivided ``a`` and ``b`` times with the ``linear``, ``butterfly``
    and ``loop`` filters (one filter per row). Returns the linked Plotter.
    """
    opts = dict(show_edges=True, color=True)
    pl = pv.Plotter(shape=(3, 3))
    # first column: the unmodified input mesh, repeated on every row
    for row in range(3):
        pl.subplot(row, 0)
        pl.add_mesh(mesh, **opts)
        pl.add_text("Original Mesh")
    # remaining columns: one subdivision filter per row, two levels per filter
    levels = (a, b)
    for row, method in enumerate(("linear", "butterfly", "loop")):
        for col, nsub in enumerate(levels, start=1):
            pl.subplot(row, col)
            pl.add_mesh(mesh.subdivide(nsub, subfilter=method), **opts)
            pl.add_text(f"{method} subdivision of {nsub}")
    pl.link_views()
    pl.view_isometric()
    return pl
###############################################################################
# Run the subdivisions for 1 and 3 levels.
# Build the comparison figure with 1 and 3 subdivision levels per filter.
plotter = plot_subdivisions(mesh, 1, 3)
# Apply the hand-picked camera position and render the linked views.
plotter.camera_position = cpos
plotter.show()
| examples/01-filter/subdivide.py | 1,957 | Subdivide Cells
~~~~~~~~~~~~~~~
Increase the number of triangles in a single, connected triangular mesh.
The :func:`pyvista.PolyDataFilters.subdivide` filter utilitizes three different
subdivision algorithms to subdivide a mesh's cells: `butterfly`, `loop`,
or `linear`.
First, let's load a **triangulated** mesh to subdivide. We can use the :func:`pyvista.DataSetFilters.triangulate` filter to ensure the mesh we are using is purely triangles. Now, lets do a few subdivisions with the mesh and compare the results. Below is a helper function to make a comparison plot of thee different subdivisions. Run the subdivisions for 1 and 3 levels. | 645 | en | 0.741904 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@package EFIT2D_Classes
Support Library: efit2d-pyopencl
Manuscript Title: Optimized OpenCL implementation of the Elastodynamic Finite Integration Technique for viscoelastic media
Authors: M Molero, U Iturraran-Viveros, S Aparicio, M.G. Hernández
Program title: EFIT2D-PyOpenCL
Journal reference: Comput. Phys. Commun.
Programming language: Python.
External routines: numpy, scipy, matplotlib, glumpy, pyopencl
Computer: computers having GPU or Multicore CPU with OpenCL drivers.
All classes here defined are used to define:
- The scenario,
- Material objects,
- Input sources,
- Inspection setup,
- Simulation parameters
"""
import numpy as np
from math import sin, cos, sqrt, pi, exp
import random
import time
from scipy import signal
from scipy.fftpack import fftshift
from skimage.transform import rotate
try:
from Image import Image
except:
from PIL import Image
from matplotlib import cm
import matplotlib.pyplot as plt
def imresize(arr, size, **kwargs):
    """
    Scale a 2-D array by the factor *size* (replacement for scipy.misc.imresize).

    @param arr    2-D numpy array to resize
    @param size   scale factor applied to both dimensions
    @param kwargs ignored; accepted for call-site compatibility (e.g. interp=)
    @return resized numpy array of shape (rows*size, cols*size)
    """
    from PIL import Image
    # BUGFIX: PIL's Image.resize expects (width, height) == (cols, rows); the
    # previous code passed (rows, cols), transposing the output whenever the
    # input array was not square.
    new_size = (int(arr.shape[1] * size), int(arr.shape[0] * size))
    return np.array(Image.fromarray(arr).resize(new_size))
def imrotate(arr, angle, **kwargs):
    """
    Rotate a 2-D array by *angle* degrees (replacement for scipy.misc.imrotate).

    @param arr    2-D numpy array to rotate
    @param angle  rotation angle in degrees (counter-clockwise)
    @param kwargs interp='nearest' selects order-0 interpolation; other keys ignored
    @return rotated array with the same dtype and value range as *arr*
    """
    # BUGFIX: the previous wrapper dropped the interp= hint and used skimage's
    # defaults, which apply bilinear interpolation and rescale integer images
    # to floats in [0, 1] -- corrupting label maps such as NewImage.I.
    order = 0 if kwargs.get('interp') == 'nearest' else 1
    return rotate(arr, angle=angle, order=order, preserve_range=True).astype(arr.dtype)
def RaisedCosinePulse(t, Freq, Amplitude):
    """
    Raised-Cosine Pulse

    @param t          time vector
    @param Freq       frequency in Hz
    @param Amplitude  real amplitude value
    @return output signal vector (float32) of the same length as ``t``
    """
    cutoff = 2.0 / Freq
    num_samples = np.size(t, 0)
    pulse = np.zeros((num_samples,), dtype=np.float32)
    for idx in range(num_samples):
        sample_time = t[idx]
        # The pulse is windowed to the first two carrier periods; later samples stay zero.
        if sample_time <= cutoff:
            pulse[idx] = Amplitude * (1 - cos(pi * Freq * sample_time)) * cos(2 * pi * Freq * sample_time)
    return pulse
def ricker(t, ts, fsavg):
    """
    Ricker Pulse

    @param t      time vector
    @param ts     temporal delay
    @param fsavg  pulse width parameter
    @return output signal vector
    """
    # Standard Ricker wavelet: (1 - 2*a^2) * exp(-a^2) with a = fsavg*pi*(t - ts).
    arg = fsavg * pi * (t - ts)
    arg_sq = arg * arg
    return (1.0 - 2.0 * arg_sq) * np.exp(-arg_sq)
##
class NewImage:
    """
    Class NewImage: Definition of the Main Geometric Scenario.
    """
    def __init__(self, Width=40, Height=40, Pixel_mm=10, label=0, SPML=False):
        """
        Constructor of the Class NewImage

        @param Width    Width of the Scenario (mm)
        @param Height   Height of the Scenario (mm)
        @param Pixel_mm Ratio Pixel per mm
        @param label    Label used to fill the scenario matrix
        @param SPML     Flag used to indicate the boundary conditions
        """
        ## Width of the Scenario (mm)
        self.Width = Width
        ## Height of the Scenario (mm)
        self.Height = Height
        ## Ratio Pixel per mm
        self.Pixel_mm = Pixel_mm
        ## Label
        self.Label = label
        ## Flag used to indicate the boundary conditions
        self.SPML = SPML
        ## Dimension 1 (rows) of the Scenario Matrix
        self.M = int(self.Height * self.Pixel_mm)
        ## Dimension 2 (columns) of the Scenario Matrix
        self.N = int(self.Width * self.Pixel_mm)
        ## Scenario Matrix (MxN), filled with the background label
        self.I = np.ones((self.M, self.N), dtype=np.uint8) * label
        ## Scenario matrix extended with boundary layers (built by createABS)
        self.Itemp = 0
        ## Size of the Boundary Layer (mm)
        self.Tap = 0
        ## Configure if boundary layers will be treated as absorbing layers or air layers.
        #
        # False: Absorbing layers
        #
        # True : Air boundaries
        self.AirBoundary = False

    def createLayer(self, centerW, centerH, Width, Height, label, Theta=0):
        """
        Create a rectangular layer inside the scenario matrix.

        @param centerW center in width-axis of the Layer (mm)
        @param centerH center in height-axis of the Layer (mm)
        @param Width   Width of the Layer (mm)
        @param Height  Height of the Layer (mm)
        @param label   Label of the layer
        @param Theta   Rotation Angle (degrees); 0 means no rotation
        """
        a = int(Height * self.Pixel_mm / 2.0)
        b = int(Width * self.Pixel_mm / 2.0)
        for x in range(-a, a):
            for y in range(-b, b):
                tempX = round(x + centerH * self.Pixel_mm)
                tempY = round(y + centerW * self.Pixel_mm)
                self.I[tempX, tempY] = label
        if Theta != 0:
            # Rotate the whole scenario; nearest-neighbour keeps the labels intact.
            self.I = imrotate(self.I, Theta, interp='nearest')

    def createABS(self, Tap):
        """
        Create the boundary layers depending on the boundary conditions required.

        @param Tap Layer Size (mm)
        """
        self.Tap = Tap
        self.SPML = True
        self.AirBoundary = False
        self.M, self.N = np.shape(self.I)
        TP = round(Tap * self.Pixel_mm)
        M_pml = int(self.M + 2 * TP)
        N_pml = int(self.N + 2 * TP)
        # BUGFIX: the previous `255.0 * np.ones(..., dtype=np.uint8)` promoted
        # Itemp to float64 (255.0 is a Python float), which breaks the later
        # PIL-based resize in createNumericalModel.  Build the uint8 frame
        # directly instead.
        self.Itemp = np.full((M_pml, N_pml), 255, dtype=np.uint8)
        self.Itemp[TP : M_pml - TP, TP : N_pml - TP] = np.copy(self.I)
class Material:
    """
    Class Material: elastic/viscoelastic properties of a single medium.

    @param name   Material Name
    @param rho    Density (kg/m3)
    @param c11    C11 elastic constant (Pa)
    @param c12    C12 elastic constant (Pa)
    @param c22    C22 elastic constant (Pa)
    @param c44    C44 elastic constant (Pa)
    @param eta_v  Bulk Viscosity Constant (Pa s)
    @param eta_s  Shear Viscosity Constant (Pa s)
    @param label  Material Label
    """
    def __init__(self, name="Water", rho=1000, c11=2.19e9, c12=0.0, c22=0.0, c44=0.0,
                 eta_v=0, eta_s=0, label=0):
        """Store the material constants and derive the wave velocities."""
        self.name = name    # material name
        self.rho = rho      # density (kg/m3)
        self.c11 = c11      # elastic constant C11 (Pa)
        self.c12 = c12      # elastic constant C12 (Pa)
        self.c22 = c22      # elastic constant C22 (Pa)
        self.c44 = c44      # elastic constant C44 (Pa)
        # Longitudinal and shear wave velocities (m/s) derived from the constants.
        self.VL = sqrt(c11 / rho)
        self.VT = sqrt(c44 / rho)
        self.eta_v = eta_v  # bulk viscosity (Pa s)
        self.eta_s = eta_s  # shear viscosity (Pa s)
        self.Label = label  # numeric label used in the scenario matrix

    def __str__(self):
        return "Material:"

    def __repr__(self):
        return "Material:"
class Source:
    """
    Class Source: Define the Inspection Type.

    @param TypeLaunch Type of Inspection: 'Transmission' or 'PulseEcho'
    """
    def __init__(self, TypeLaunch='Transmission'):
        ## Type of Inspection: Transmission or PulseEcho
        self.TypeLaunch = TypeLaunch
        ## Transducer angles (radians); set by the launch-type helpers below.
        self.Theta = 0
        # Dispatch on the requested inspection type; unknown values leave Theta at 0.
        handlers = {'PulseEcho': self.pulseEcho, 'Transmission': self.transmission}
        handler = handlers.get(self.TypeLaunch)
        if handler is not None:
            handler()

    def __str__(self):
        return "Source: "

    def __repr__(self):
        return "Source: "

    def pulseEcho(self):
        """Place both angles at 270 deg: one transducer acts as emitter and receiver."""
        self.Theta = [270 * pi / 180, 270 * pi / 180]

    def transmission(self):
        """Place the emitter at 270 deg and the receiver at 90 deg (opposite sides)."""
        self.Theta = [270 * pi / 180, 90 * pi / 180]
class Transducer:
    """
    Class Transducer: geometry and placement of a single transducer.

    @param Size         Transducer Size (mm)
    @param Offset       Offset position of the Transducer (default 0)
    @param BorderOffset Border offset position of the Transducer (default 0)
    @param Location     0 indicates the Top location (only supported value)
    @param name         Transducer Name
    """
    def __init__(self, Size=10, Offset=0, BorderOffset=0, Location=0, name='emisor'):
        """Store the transducer configuration; no geometry is computed here."""
        ## Transducer Size (mm)
        self.Size = Size
        ## Offset measured from the center of the scenario along the width axis:
        ## positive values shift right, negative values shift left.
        self.Offset = Offset
        ## Offset measured along the height axis from the scenario center;
        ## only positive values must be defined.
        self.BorderOffset = BorderOffset
        ## Size of the transducer in pixels (computed later by the simulation setup)
        self.SizePixel = 0
        ## Location -> 0: Top. This version only works when Location == 0.
        self.Location = Location
        ## Name of the transducer
        self.name = name

    def __str__(self):
        return "Transducer: "

    def __repr__(self):
        return "Transducer: "
####################################################################################
class Signal:
    """
    Class Signal: Signal Definition (Source Input for the Simulation)

    @param Amplitude Signal Amplitude
    @param Frequency Signal Frequency (Hz)
    @param name      Name of the Signal: RaisedCosinePulse or RickerPulse
    @param ts        Time Delay, used only for RickerPulse. The default
                     sentinel 1 selects three periods of the carrier.
    """
    def __init__(self, Amplitude=1, Frequency=1e6, name="RaisedCosinePulse", ts=1):
        ## Signal Amplitude
        self.Amplitude = Amplitude
        ## Signal Frequency (Hz)
        self.Frequency = Frequency
        ## Name of the Signal: RaisedCosinePulse or RickerPulse
        self.name = name
        ## Time Delay (s): used only for RickerPulse.
        # BUGFIX: previously self.ts was assigned only when ts == 1 (the default
        # sentinel), so constructing a Signal with an explicit delay left the
        # attribute undefined and generate() crashed for RickerPulse.  Keep the
        # sentinel behaviour and honour explicit values.
        if ts == 1:
            self.ts = 3.0 / Frequency
        else:
            self.ts = ts

    def __str__(self):
        return "Signal: "

    def __repr__(self):
        return "Signal: "

    def generate(self, t):
        """
        Generate the signal waveform.

        @param t time vector
        @return signal vector with the same length as the vector time
        """
        if self.name == "RaisedCosinePulse":
            return RaisedCosinePulse(t, self.Frequency, self.Amplitude)
        elif self.name == "RickerPulse":
            return ricker(t, self.ts, self.Frequency)

    def saveSignal(self, t):
        """
        Save the signal waveform into the object (as self.time_signal).

        @param t time vector
        """
        self.time_signal = self.generate(t)
######################################
class Inspection:
    """
    Class Inspection: used for the configuration of the inspections to be emulated
    """
    def __init__(self):
        """
        Constructor of the Class Inspection
        """
        ## Position of the Transducer (Angle, radians)
        self.Theta = 0
        ## Vector x-axis Position of the Transducer
        self.XL = 0
        ## Vector y-axis Position of the Transducer
        self.YL = 0
        ## Receiver-index permutation table (filled in by addOffset)
        self.IR = 0
    def __str__(self):
        return "Inspection: "
    def __repr__(self):
        return "Inspection: "
    def setTransmisor(self, source, transducer, x2, y2, X0, Y0):
        """
        Compute the pixel coordinates (self.XL, self.YL) of every transducer element.

        @param source     Source object providing the transducer angles (source.Theta)
        @param transducer Transducer object; its SizePixel fixes the element count
        @param x2         x-coordinates of the transducer centers, one per angle
        @param y2         y-coordinates of the transducer centers, one per angle
        @param X0         x-coordinate of the scenario center
        @param Y0         y-coordinate of the scenario center
        """
        self.Theta = source.Theta
        Ntheta = np.size(self.Theta,0)
        NXL = int(2*transducer.SizePixel)
        xL = np.zeros((NXL,),dtype=np.float32)
        yL = np.zeros((NXL,),dtype=np.float32)
        for m in range(0,Ntheta):
            # Near-vertical aperture (cos(theta) ~ 0): spread elements along y at fixed x.
            if np.abs(np.cos(self.Theta[m])) < 1e-5:
                yL = np.linspace(y2[m]-transducer.SizePixel,y2[m]+transducer.SizePixel,num=NXL, endpoint=True)
                xL[:] = x2[m]*np.ones((NXL,),dtype=np.float32)
            # |cos(theta)| == 1: spread along x, project y through the scenario center.
            elif np.abs(np.cos(self.Theta[m])) == 1:
                xL[:] = np.linspace(x2[m]-transducer.SizePixel, x2[m]+transducer.SizePixel,num=NXL, endpoint=True)
                yL[:] = y2[m] - ( (x2[m]-X0 )/( y2[m]-Y0 ) )*( xL[:]-x2[m] )
            # Oblique case: shrink the x-span by |cos(theta)| and project y likewise.
            else:
                xL[:] = np.linspace(x2[m]-(transducer.SizePixel*np.abs(np.cos(self.Theta[m]))),x2[m]+(transducer.SizePixel*np.abs(np.cos(self.Theta[m]))), num=NXL, endpoint=True )
                yL[:] = y2[m] - ( (x2[m]-X0 )/( y2[m]-Y0 ) )*( xL[:]-x2[m] )
            if m==0:
                # Allocate the output matrices once the element count is known.
                self.XL = np.zeros((np.size(xL,0),Ntheta),dtype=np.float32)
                self.YL = np.zeros((np.size(xL,0),Ntheta),dtype=np.float32)
            # Store rounded (integer-valued) pixel coordinates for this angle.
            self.XL[:,m] = (np.around(xL[:]))
            self.YL[:,m] = (np.around(yL[:]))
    def addOffset(self, image, transducer, NRI):
        """
        Handle Offset: shift the element y-coordinates by the transducer offset
        (scaled to the resized grid) and build the receiver permutation table self.IR.

        @param image      NewImage scenario (provides Itemp shape and Pixel_mm)
        @param transducer Transducer providing the Offset (mm)
        @param NRI        number of columns of the resized simulation grid
        """
        NXL = np.size(self.XL,0)
        Ntheta = np.size(self.Theta,0)
        M_pml, N_pml = np.shape(image.Itemp)
        # Scale the millimetre offset to pixels of the resized (NRI-wide) grid.
        self.YL += (np.around(transducer.Offset * image.Pixel_mm * NRI / float(N_pml)))
        self.IR = np.zeros((Ntheta,Ntheta),dtype=np.float32)
        B = list(range(0,Ntheta))
        self.IR[:,0] = np.int32(B[:])
        # Each subsequent column is the previous index list cyclically rotated by one.
        for i in range(1,Ntheta):
            B = np.roll(B,-1)
            self.IR[:,i] = np.int32(B)
    def addBorderOffset(self, image, transducer, MRI):
        """
        Handle Border Offset: move the two transducer columns toward each other
        along the x-axis by the border offset, scaled to the resized grid.

        @param image      NewImage scenario (provides Itemp shape and Pixel_mm)
        @param transducer Transducer providing the BorderOffset (mm)
        @param MRI        number of rows of the resized simulation grid
        """
        M_pml, N_pml = np.shape(image.Itemp)
        ratio = float(MRI) / float(M_pml)
        self.XL[:,0] += (np.around(transducer.BorderOffset * image.Pixel_mm * ratio) )
        self.XL[:,1] -= (np.around(transducer.BorderOffset * image.Pixel_mm * ratio) )
    def flip(self):
        # Mirror the element columns left/right (swap emitter and receiver sides).
        self.XL = np.fliplr(self.XL)
    def SetReception(self,T):
        """
        Sample the field matrix T at the receiver element positions.

        @param T 2-D field matrix indexed as T[x, y]
        @return per-element traces (transposed) when self.Field is truthy,
                otherwise the mean over the transducer aperture
        """
        # NOTE(review): self.Field is not initialized in __init__; callers appear
        # to be expected to set it before invoking SetReception -- confirm.
        ReceptorX = (self.XL)
        ReceptorY = (self.YL)
        M,N = np.shape(ReceptorX)
        temp = np.zeros((M,N-1),dtype=np.float32)
        for mm in range(0,M):
            for ir in range(0,N-1):
                # IR[0, ir+1] selects which transducer acts as the receiver for trace ir.
                temp[mm,ir] = T[ int(ReceptorX[ mm,int(self.IR[0,ir+1]) ] ) , int(ReceptorY[ mm,int(self.IR[0,ir+1]) ]) ]
        if self.Field:
            return temp.transpose()
        else:
            return np.mean(temp,0)
    def SetReceptionVector(self, T, x, y):
        """
        Sample the field matrix T at arbitrary (x, y) pixel positions.

        @param T 2-D field matrix
        @param x vector of x indices
        @param y vector of y indices
        @return float32 vector of sampled values, one per (x, y) pair
        """
        M = np.size(x)
        temp = np.zeros((M,),dtype=np.float32)
        for mm in range(0,M):
            temp[mm] = T[(int(x[mm])),(int(y[mm]))]
        return temp
class SimulationModel:
    """
    Class Simulation: setup the parameters for the numerical simulation

    Usage:
        - First Define an Instance of the SimulationModel Object
        - Execute the method class: jobParameters using as input the materials list
        - Execute the method class: createNumerical Model using as input the scenario
        - Execute the method class: initReceivers to initialize the receivers
        - Execute the method class: save signal using as input the attribute simModel.t
        - Save the Device into the simModel.Device attribute

    @param TimeScale    Scale Time Factor
    @param MaxFreq      Maximum Frequency
    @param PointCycle   Points per Cycle
    @param SimTime      Simulation Time
    @param SpatialScale Spatial Scale: 1 -> meters, 1e-3 -> millimeters
    """
    def __init__(self,TimeScale=1, MaxFreq=2e6, PointCycle=10, SimTime=50e6, SpatialScale=1e-3):
        ## Scale Time Factor
        self.TimeScale = TimeScale
        ## Maximum Frequency
        self.MaxFreq = MaxFreq # MHz
        ## Points per Cycle
        self.PointCycle = PointCycle
        ## Simulation Time
        self.SimTime = SimTime # microseconds
        ## Spatial Scale: 1 -> meters, 1e-3 -> millimeters
        self.SpatialScale = SpatialScale
        ## Spatial Discretization (grid step; computed in jobParameters)
        self.dx = 0
        ## Temporal Discretization (time step; computed in jobParameters)
        self.dt = 0
        ## Ratio between the resized grid and the original image (createNumericalModel)
        self.Rgrid = 0
        ## Absorbing-layer thickness in resized-grid pixels
        self.TapG = 0
        ## Simulation time vector (filled by jobParameters)
        self.t = 0
        ## Number of time steps (filled by jobParameters)
        self.Ntiempo = 0
        ## Grid dimensions (rows, cols) of the resized model
        self.MRI,self.NRI = (0,0)
        ## Receiver traces (reset by initReceivers)
        self.receiver_signals = 0
        ## Computation device name: 'CPU', 'GPU_Global' or 'GPU_Local'
        self.Device = 'CPU'
        # Transducer element coordinates; presumably copied from an Inspection
        # instance by the driver code -- confirm against callers.
        self.XL = 0
        self.YL = 0
    def __str__(self):
        return "Simulation Model: "
    def __repr__(self):
        return "Simulation Model: "
    def jobParameters(self,materiales):
        """
        Define Main Simulation Parameters

        @param materiales Materials List
        """
        # Keep only wave speeds above 400 m/s when choosing the discretization
        # (very slow media, e.g. air/vacuum placeholders, would shrink dx needlessly).
        indVL = [mat.VL for mat in materiales if mat.VL > 400]
        indVT = [mat.VT for mat in materiales if mat.VT > 400]
        VL = np.array(indVL)
        VT = np.array(indVT)
        V = np.hstack( (VL, VT) )
        # Grid step from the slowest retained speed and the points-per-cycle rule.
        self.dx = np.float32( np.min([V]) / (self.PointCycle*self.MaxFreq) )
        # Stability-style condition: dt = TimeScale * 0.7071 * dx / Vmax.
        self.dt = self.TimeScale * np.float32( 0.7071 * self.dx / ( np.max([V]) ) )
        self.Ntiempo = int(round(self.SimTime/self.dt))
        self.t = self.dt*np.arange(0,self.Ntiempo)
    def createNumericalModel(self, image):
        """
        Create the Numerical Model

        @param image The Scenario Object (NewImage with Itemp already built)
        """
        #Spatial Scale
        # Rows the scenario needs so that one grid cell equals dx, then the
        # resize ratio relative to the original pixel image.
        Mp = np.shape(image.Itemp)[0]*self.SpatialScale/image.Pixel_mm/self.dx
        self.Rgrid = Mp/np.shape(image.Itemp)[0]
        self.TapG = np.around(image.Tap * self.Rgrid * image.Pixel_mm)
        # Resize the labelled scenario to the simulation grid resolution.
        self.Im = imresize(image.Itemp, self.Rgrid, interp='nearest')
        self.MRI,self.NRI = np.shape(self.Im)
        print("dt: " + str(self.dt) + " dx: " + str(self.dx) + " Grid: " + str(self.MRI) + " x " + str(self.NRI))
    def initReceivers(self):
        """
        Initialize the receivers
        """
        self.receiver_signals = 0
    def setDevice(self,Device):
        """
        Set the Computation Device

        @param Device integer selector: 0, 1 or 2 (other values leave Device unchanged)

        Define the device used to compute the simulations:
            - 0 -> "CPU"        : uses the global memory in the CPU
            - 1 -> "GPU_Global" : uses the global memory in the GPU
            - 2 -> "GPU_Local"  : uses the local memory in the GPU
        """
        if Device == 0:
            self.Device = 'CPU'
        elif Device ==1:
            self.Device = 'GPU_Global'
        elif Device ==2:
            self.Device = 'GPU_Local'
| EFIT2D_Classes.py | 16,007 | Class Inspection: used for the configuration of the inspections to be emulated
Class Material: Definition of a material
@param name Material Name
@param rho Density (kg/m3)
@param c11 C11 (Pa)
@param c12 C12 (Pa)
@param c22 C22 (Pa)
@param c44 C44 (Pa)
@param eta_v Bulk Viscosity Constant (Pa s)
@param eta_s Shear Viscosity Constant (Pa s)
@param label Material Label
Class NewImage: Definition of the Main Geometric Scenario.
Class Signal: Signal Definition (Source Input for the Simulation)
@param Amplitude Signal Amplitude
@param Frequency Frequency Amplitude
@param Name Name of the Signal: RaisedCosinePulse or RickerPulse
@param ts Time Delay: used only for RickerPulse
Class Simulation: setup the parameters for the numerical simulation
Usage:
- First Define an Instance of the SimulationModel Object
- Execute the method class: jobParameters using as input the materials list
- Execute the method class: createNumerical Model using as input the scenario
- Execute the method class: initReceivers to initialize the receivers
- Execute the method class: save signal using as input the attribute simModel.t
- Save the Device into the simModel.Device attribute
@param TimeScale Scale Time Factor
@param MaxFreq Maximum Frequency
@param PointCycle Points per Cycle
@param SimTime Time Simuation
@param SpatialScale Spatial Scale: 1 -> meters, 1e-3 -> millimeters
Class Source: Define the Inspection Type
@param TypeLaunch Type of Inspection: Transmission or PulseEcho
Class Transducer: Definition of the Transducer Object
@param Size Transducer Size
@param Offset Offset position of the Transducer. By default is set to zero
@param BorderOffset Border offset position of the Transducer. By default is set to zero
@param Location Location is set to zero that indicates Up location
@param name Transducer Name
Raised-Cosine Pulse
@param t time vector
@param Freq Frequency in Hz
@param Amplitude Real Value of Amplitude
@return Output signal vector
@retval P vector of length equals to the time vector t
Constructor of the Class NewImage
@param Width Width of the Scenario
@param Height Height of the Scenario
@param Pixel_mm Ratio Pixel per mm
@param label Label
@param SPML Flag used to indicate the boundary conditions
Constructor of the Material object
Constructor of the Class Transducer
Constructor of the Class Inspection
Handle Border Offset
Handle Offset
Create the boundary layers depending on the boundary conditions required
@param Tap Layer Size
Create a Layer
@param centerW center in width-axis of the Layer
@param centerH center in height-axis of the Layer
@param Width Width of the Layer
@param Height Height of the Layer
@param label Label of the layer
@param Theta Rotation Angle
Create the Numerical Model
@param image The Scenario Object
Generate the signal waveform
@param t vector time
@return signal vector with the same length as the vector time
Initialize the receivers
Define Main Simulation Parameters
@parm materiales Materials List
Define Theta for PulseEcho Inspection. PulseEcho Inspection uses the same transducer acting as emitter and as receiver
Ricker Pulse
@param t time vector
@param ts temporal delay
@param fsavg pulse width parameter
@return Output signal vector
Save the signal waveform into the object
@param t vector time
Set the Computation Device
@param Device Device to be used
Define the device used to compute the simulations:
- "CPU" : uses the global memory in th CPU
- "GPU_Global" : uses the global memory in the GPU
- "GPU_Local" : uses the local memory in the GPU
Define Theta for Transmission Inspection. Transmision uses two transducers, one used as emitter and another as receiver
@package EFIT2D_Classes
Support Library: efit2d-pyopencl
Manuscript Title: Optimized OpenCL implementation of the Elastodynamic Finite Integration Technique for viscoelastic media
Authors: M Molero, U Iturraran-Viveros, S Aparicio, M.G. Hernández
Program title: EFIT2D-PyOpenCL
Journal reference: Comput. Phys. Commun.
Programming language: Python.
External routines: numpy, scipy, matplotlib, glumpy, pyopencl
Computer: computers having GPU or Multicore CPU with OpenCL drivers.
All classes here defined are used to define:
- The scenario,
- Material objects,
- Input sources,
- Inspection setup,
- Simulation parameters
!/usr/bin/env python -*- coding: utf-8 -*- Width of the Scenario Height of the Scenario Ratio Pixel per mm Label Flag used to indicate the boundary conditions Dimension 1 of the Scenario Matrix Dimension 2 od the Scenario Matrix Scenarion Matrix (MxN) Size of the Boundary Layer Configure if boundary layers will be treated as absorbing layers or air layers. False: Absorbing layers True : Air boundaries Material NameDensity (kg/m3) C11 (Pa) C12 (Pa) C22 (Pa) C44 (Pa) Longitudinal Velocity (m/s) Shear Velocity (m/s) Bulk Viscosity Constant (Pa s) Shear Viscosity Constant (Pa s) Material Label Type of Inspection: Transmission or PulseEcho Define the location of the transducers in function of the type of the Inspection Location = 0 => Top Transducer Size Offset position of the Transducer. By default is set to zero This offset is measured taking into account the center of the Scenario in the width-axis Positive Values indicate offsets toward the right Negative values indicate offsets toward the left Border offset position of the Transducer. By default is set to zero This border offset takes into account the center od the Scenario in the width axis but this offset is measured in direction of the height-axis Only Positive values must be defined.Size of the trasnducer in Pixels Location-> 0: Top. This version only works when the location=0 Name of the transducer Signal Amplitude Frequency Amplitude Name of the Signal: RaisedCosinePulse or RickerPulse Time Delay: used only for RickerPulse Position of the Transducer (Angle) Vector x-axis Position of the Transducer Vector y-axis Position of the Transducer Scale Time Factor Maximum Frequency MHz Points per Cycle Time Simuation microseconds Spatial Scale: 1 -> meters, 1e-3 -> millimeters Spatial Discretization Temporal DiscretizationSpatial Scale | 6,446 | en | 0.638515 |
import csv
import enum
class Usability(enum.Enum):
    """Who may invoke an API method: users, bots, both, or unknown."""
    UNKNOWN = 0
    USER = 1
    BOT = 2
    # NOTE(review): the value jumps from 2 to 4 (flag-style spacing); confirm no
    # caller expects USER | BOT == BOTH, which would require the value 3.
    BOTH = 4
class MethodInfo:
    """Parsed information for one API method: its name, who can use it
    (user/bot/both/unknown) and the errors it may raise."""

    def __init__(self, name, usability, errors):
        self.name = name
        self.errors = errors
        usability_by_key = {
            'unknown': Usability.UNKNOWN,
            'user': Usability.USER,
            'bot': Usability.BOT,
            'both': Usability.BOTH,
        }
        try:
            self.usability = usability_by_key[usability.lower()]
        except KeyError:
            raise ValueError('Usability must be either user, bot, both or '
                             'unknown, not {}'.format(usability)) from None
def parse_methods(csv_file, errors_dict):
    """
    Parses the input CSV file with columns (method, usability, errors)
    and yields `MethodInfo` instances as a result.
    """
    with csv_file.open(newline='') as handle:
        rows = csv.reader(handle)
        next(rows, None)  # skip the header row
        # `line` starts at 2 because row 1 is the header (kept for diagnostics).
        for line, (method, usability, errors) in enumerate(rows, start=2):
            try:
                error_list = [errors_dict[x] for x in errors.split()]
            except KeyError:
                raise ValueError('Method {} references unknown errors {}'
                                 .format(method, errors)) from None
            yield MethodInfo(method, usability, error_list)
| telethon_generator/parsers/methods.py | 1,312 | Parses the input CSV file with columns (method, usability, errors)
and yields `MethodInfo` instances as a result.
header | 122 | en | 0.699982 |
from math import pi
import pandas as pd
from bokeh.plotting import figure, output_file, show
from bokeh.sampledata.stocks import MSFT
df = pd.DataFrame(MSFT)[:50]  # first 50 rows of the MSFT sample data
df["date"] = pd.to_datetime(df["date"])
# Boolean masks: sessions that closed above the open (inc) and below it (dec).
inc = df.close > df.open
dec = df.open > df.close
w = 12*60*60*1000 # half day in ms
TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
p = figure(x_axis_type="datetime", tools=TOOLS, plot_width=1000, title = "MSFT Candlestick")
p.xaxis.major_label_orientation = pi/4
p.grid.grid_line_alpha=0.3
# Wicks: vertical segments from each session's low to its high.
p.segment(df.date, df.high, df.date, df.low, color="black")
# Candle bodies: up sessions in light grey-green, down sessions in red.
p.vbar(df.date[inc], w, df.open[inc], df.close[inc], fill_color="#D5E1DD", line_color="black")
p.vbar(df.date[dec], w, df.open[dec], df.close[dec], fill_color="#F2583E", line_color="black")
output_file("candlestick.html", title="candlestick.py example")
show(p) # open a browser
| examples/plotting/file/candlestick.py | 841 | half day in ms open a browser | 29 | en | 0.631442 |
"""Classes for more complex applications that have tabbed or paged navigation."""
from collections import OrderedDict
from copy import deepcopy
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from implements import implements
from .utils_app import AppBase, AppInterface
TODO_CLIENT_CALLBACK = '''
TODO: Create clientside callbacks dynamically to update the title on navigation
See: http://dash.plotly.com/external-resources
```py
app.clientside_callback(
"""
function(tab_value) {
if (tab_value === 'tab-1') {
document.title = 'Tab 1'
} else if (tab_value === 'tab-2') {
document.title = 'Tab 2'
}
}
""",
Output('blank-output', 'children'),
[Input('tabs-example', 'value')]
)
```
'''
# TODO: Try to see if I can resolve the interface differences or if I need make a subclass interface
# @implements(AppInterface) # noqa: H601
class AppWithNavigation(AppBase):
    """Shared base for Dash applications that navigate between child views (tabs or pages)."""

    app = None
    """Main Dash application to pass to all child tabs."""

    nav_lookup = None
    """OrderedDict based on the list of tuples from `self.define_nav_elements()`."""

    nav_layouts = None
    """Dictionary with nav_names as keys and corresponding layout as value."""

    def define_nav_elements(self):
        """Return the list of initialized child apps, in render order.

        Should return, list: each item is an initialized app (ex `[AppBase(self.app)]`)

        Raises:
            NotImplementedError: Child class must implement this method

        """
        raise NotImplementedError('define_nav_elements must be implemented by child class')  # pragma: no cover

    def create(self, **kwargs):
        """Build each navigation component and store its layout, then let the parent build the app.

        Args:
            kwargs: keyword arguments passed to `self.create`

        """
        nav_items = self.define_nav_elements()
        self.nav_lookup = OrderedDict((item.name, item) for item in nav_items)
        self.nav_layouts = {}
        for nav_name, nav_item in self.nav_lookup.items():
            nav_item.create(assign_layout=False)
            self.nav_layouts[nav_name] = nav_item.return_layout()
        # Validation layout lets the base class verify callbacks against every view's components
        self.validation_layout = [deepcopy(layout) for layout in self.nav_layouts.values()]
        # Initialize the parent application that handles the navigation itself
        super().create(**kwargs)

    def initialization(self) -> None:
        """Initialize ids with `self.register_uniq_ids([...])` and other one-time actions."""
        super().initialization()
        self.register_uniq_ids(self.app_ids)

    def create_elements(self) -> None:
        """No-op: element creation happens inside each child view."""
        ...  # pragma: no cover

    def create_callbacks(self) -> None:
        """No-op at navigation level: subclasses register the navigation callback."""
        ...  # pragma: no cover
@implements(AppInterface)  # noqa: H601
class StaticTab(AppBase):
    """Tab that renders fixed content: no charts and no callbacks."""

    basic_style = dict(
        marginLeft='auto',
        marginRight='auto',
        maxWidth='1000px',
        paddingTop='10px',
    )
    """Default centered-column styling available to subclasses."""

    def initialization(self) -> None:
        """Initialize ids with `self.register_uniq_ids([...])` and other one-time actions."""
        super().initialization()
        # A placeholder id keeps the base-class bookkeeping satisfied.
        self.register_uniq_ids(['N/A'])

    def create_elements(self) -> None:
        """No-op: a static tab has no Dash elements to build."""
        ...

    def create_callbacks(self) -> None:
        """No-op: a static tab registers no callbacks."""
        ...
class AppWithTabs(AppWithNavigation):
    """Dash application whose child views are selected through a `dcc.Tabs` bar."""

    # App ids
    id_tabs_content = 'tabs-wrapper'
    id_tabs_select = 'tabs-content'

    app_ids = [id_tabs_content, id_tabs_select]
    """List of all ids for the top-level tab view. Will be mapped to `self._il` for globally unique ids."""

    def return_layout(self) -> dict:
        """Return Dash application layout.

        Returns:
            dict: Dash HTML object

        """
        tab_bar = dcc.Tabs(
            id=self._il[self.id_tabs_select],
            value=list(self.nav_lookup.keys())[0],  # first tab is selected initially
            children=[dcc.Tab(label=nav_name, value=nav_name) for nav_name in self.nav_lookup],
        )
        return html.Div(children=[tab_bar, html.Div(id=self._il[self.id_tabs_content])])

    def create_callbacks(self) -> None:
        """Register the tab-switch callback that swaps in the selected view's layout."""
        @self.callback([(self.id_tabs_content, 'children')], [(self.id_tabs_select, 'value')], [])
        def render_tab(tab_name):
            return [self.nav_layouts[tab_name]]
# > PLANNED: Make the tabs and chart compact as well when the compact argument is set to True
class FullScreenAppWithTabs(AppWithTabs):  # noqa: H601
    """Base class for building Dash Application with tabs that uses the full window."""
    tabs_location = 'left'
    """Tab orientation setting. One of `(left, top, bottom, right)`."""
    tabs_margin = '10%'
    """Adjust this setting based on the width or height of the tabs to prevent the content from overlapping the tabs."""
    tabs_compact = False
    """Boolean setting to toggle between a padded tab layout if False and a minimal compact version if True."""
    def verify_app_initialization(self):
        """Check that the app was properly initialized.

        Raises:
            RuntimeError: if child class has not called `self.register_uniq_ids`

        """
        super().verify_app_initialization()
        allowed_locations = ('left', 'top', 'bottom', 'right')
        if self.tabs_location not in allowed_locations:  # pragma: no cover
            raise RuntimeError(f'`self.tabs_location = {self.tabs_location}` is not in {allowed_locations}')
    def return_layout(self) -> dict:
        """Return Dash application layout.

        Returns:
            dict: Dash HTML object

        """
        return html.Div(
            children=[
                self.tab_menu(),
                html.Div(
                    # Push the content away from the fixed tab bar so they do not overlap.
                    style={f'margin-{self.tabs_location}': self.tabs_margin},
                    children=[html.Div(id=self._il[self.id_tabs_content])],
                ),
            ],
        )
    def generate_tab_kwargs(self):
        """Create the tab keyword arguments. Intended to be modified through inheritance.

        Returns:
            tuple: keyword arguments and styling for the dcc.Tab elements
            - tab_kwargs: with at minimum keys `(style, selected_style)` for dcc.Tab
            - tabs_kwargs: to be passed to dcc.Tabs
            - tabs_style: style for the dcc.Tabs HTML element

        """
        # Unselected tab style
        if self.tabs_compact:
            tab_style = {'padding': '2px 4px 2px 4px'}
            tabs_padding = '6px 0 0 2px'
        else:
            tab_style = {'padding': '10px 20px 10px 20px'}
            tabs_padding = '15px 0 0 5px'
        # Extend tab style for selected case
        selected_style = deepcopy(tab_style)
        # The divider between tab bar and content sits on the opposite edge of the bar.
        opposite_lookup = {'top': 'bottom', 'bottom': 'top', 'left': 'right', 'right': 'left'}
        tabs_style = {  # noqa: ECE001
            'backgroundColor': '#F9F9F9',
            'padding': tabs_padding,
            'position': 'fixed',
            'zIndex': '999',
            f'border{opposite_lookup[self.tabs_location].title()}': '1px solid #d6d6d6',
            self.tabs_location: '0',
        }
        if self.tabs_location in ['left', 'right']:
            # Configure for vertical case
            selected_style['border-left'] = '3px solid #119DFF'
            tabs_kwargs = {
                'vertical': True,
                'style': {'width': '100%'},
                'parent_style': {'width': '100%'},
            }
            tabs_style['top'] = '0'
            tabs_style['bottom'] = '0'
            tabs_style['width'] = 'auto'
        else:
            # Configure for horizontal case
            selected_style['border-top'] = '3px solid #119DFF'
            tabs_kwargs = {}
            tabs_style['height'] = 'auto'
            tabs_style['right'] = '0'
            tabs_style['left'] = '0'
        tab_kwargs = {'style': tab_style, 'selected_style': selected_style}
        return (tab_kwargs, tabs_kwargs, tabs_style)
    def tab_menu(self):
        """Return the HTML elements for the tab menu.

        Returns:
            dict: Dash HTML object

        """
        tab_kwargs, tabs_kwargs, tabs_style = self.generate_tab_kwargs()
        tabs = [dcc.Tab(label=name, value=name, **tab_kwargs) for name, tab in self.nav_lookup.items()]
        return html.Div(
            children=[
                dcc.Tabs(
                    id=self._il[self.id_tabs_select], value=list(self.nav_lookup.keys())[0],
                    children=tabs, **tabs_kwargs,
                ),
            ], style=tabs_style,
        )
class AppMultiPage(AppWithNavigation):  # noqa: H601
    """Base class for building Dash Application with multiple pages."""
    navbar_links = None
    """Base class must create list of tuples `[('Link Name', '/link'), ]` to use default `self.nav_bar()`."""
    dropdown_links = None
    """Base class must create list of tuples `[('Link Name', '/link'), ]` to use default `self.nav_bar()`."""
    logo = None
    """Optional path to logo. If None, no logo will be shown in navbar."""
    # App ids
    id_url = 'pages-url'
    id_pages_content = 'pages-wrapper'
    id_toggler = 'nav-toggle'
    id_collapse = 'nav-collapse'
    app_ids = [id_url, id_pages_content, id_toggler, id_collapse]
    """List of all ids for the top-level pages view. Will be mapped to `self._il` for globally unique ids."""
    def return_layout(self) -> dict:
        """Return Dash application layout.

        Returns:
            dict: Dash HTML object

        """
        return html.Div(
            children=[
                # dcc.Location tracks the browser URL; the render callback reacts to pathname.
                dcc.Location(id=self._il[self.id_url], refresh=False),
                self.nav_bar(),
                html.Div(id=self._il[self.id_pages_content]),
            ],
        )
    def nav_bar(self):
        """Return the HTML elements for the navigation menu.

        Returns:
            dict: Dash HTML object

        """
        # Create brand icon and name where icon is optional
        brand = []
        if self.logo:
            brand.append(dbc.Col(html.Img(src=self.logo, height='25px')))
        brand.append(dbc.Col(dbc.NavbarBrand(self.name, className='ml-2')))
        # Create links in navbar and dropdown. Both are optional
        links = []
        if self.navbar_links:
            links.append(
                dbc.Nav(
                    children=[dbc.NavItem(dbc.NavLink(name, href=link)) for name, link in self.navbar_links],
                    fill=True,
                    navbar=True,
                ),
            )
        if self.dropdown_links:
            links.append(
                dbc.Nav(
                    dbc.DropdownMenu(
                        children=[dbc.DropdownMenuItem(name, href=link) for name, link in self.dropdown_links],
                        in_navbar=True,
                        label='Links',
                        nav=True,
                    ),
                    navbar=True,
                ),
            )
        # Layout default navbar
        return dbc.Navbar(
            children=[
                dbc.NavLink(
                    [
                        dbc.Row(
                            children=brand,
                            align='center',
                            no_gutters=True,
                        ),
                    ], href='/',
                ),
                dbc.NavbarToggler(id=self._il[self.id_toggler]),
                dbc.Collapse(
                    dbc.Row(
                        children=links,
                        no_gutters=True,
                        className='flex-nowrap mt-3 mt-md-0',
                        align='center',
                    ),
                    id=self._il[self.id_collapse],
                    navbar=True,
                ),
            ],
            sticky='top',
            color='dark',
            dark=True,
        )
    def create_callbacks(self) -> None:
        """Register the navigation callback."""
        outputs = [(self.id_pages_content, 'children')]
        inputs = [(self.id_url, 'pathname')]
        @self.callback(outputs, inputs, [])
        def render_page(pathname):
            try:
                # TODO: Demo how pages could use parameters from pathname
                return [self.nav_layouts[self.select_page_name(pathname)]]
            except Exception as err:
                # Render the failure inline rather than crashing the whole app.
                return [html.Div(children=[f'Error rendering "{pathname}":\n{err}'])]
        @self.callback(
            [(self.id_collapse, 'is_open')],
            [(self.id_toggler, 'n_clicks')],
            [(self.id_collapse, 'is_open')],
        )
        def toggle_navbar_collapse(n_clicks, is_open):
            # Flip the collapse state on each toggler click; unchanged before the first click.
            return [not is_open if n_clicks else is_open]
    def select_page_name(self, pathname):
        """Return the page name determined based on the pathname.

        Should return str: page name

        Args:
            pathname: relative pathname from URL

        Raises:
            NotImplementedError: Child class must implement this method

        """
        # NOTE(review): the message names 'nav_bar' but this method is
        # select_page_name -- likely a copy-paste slip in the message text.
        raise NotImplementedError('nav_bar must be implemented by child class')  # pragma: no cover
| dash_charts/utils_app_with_navigation.py | 13,863 | Base class for building Dash Application with multiple pages.
Base class for building Dash Application with tabs or URL routing.
Base class for building Dash Application with tabs.
Base class for building Dash Application with tabs that uses the full window.
Simple App without charts or callbacks.
Create each navigation componet, storing the layout. Then parent class to create application.
Args:
kwargs: keyword arguments passed to `self.create`
Override method as not needed at navigation-level.
Register callbacks necessary for this tab.
Register the navigation callback.
Register the navigation callback.
Override method as not needed at navigation-level.
Initialize the charts, tables, and other Dash elements..
Return list of initialized pages or tabs accordingly.
Should return, list: each item is an initialized app (ex `[AppBase(self.app)]` in the order each tab is rendered
Raises:
NotImplementedError: Child class must implement this method
Create the tab keyword arguments. Intended to be modified through inheritance.
Returns:
tuple: keyword arguments and styling for the dcc.Tab elements
- tab_kwargs: with at minimum keys `(style, selected_style)` for dcc.Tab
- tabs_kwargs: to be passed to dcc.Tabs
- tabs_style: style for the dcc.Tabs HTML element
Initialize ids with `self.register_uniq_ids([...])` and other one-time actions.
Initialize ids with `self.register_uniq_ids([...])` and other one-time actions.
Return the HTML elements for the navigation menu.
Returns:
dict: Dash HTML object
Return Dash application layout.
Returns:
dict: Dash HTML object
Return Dash application layout.
Returns:
dict: Dash HTML object
Return Dash application layout.
Returns:
dict: Dash HTML object
Return the page name determined based on the pathname.
Should return str: page name
Args:
pathname: relative pathname from URL
Raises:
NotImplementedError: Child class must implement this method
Return the HTML elements for the tab menu.
Returns:
dict: Dash HTML object
Check that the app was properly initialized.
Raises:
RuntimeError: if child class has not called `self.register_uniq_ids`
Classes for more complex applications that have tabbed or paged navigation.
TODO: Try to see if I can resolve the interface differences or if I need make a subclass interface @implements(AppInterface) noqa: H601 pragma: no cover Initialize the lookup for each tab then configure each tab Store validation_layout that is later used for callback verification in base class Initialize parent application that handles navigation pragma: no cover pragma: no cover noqa: H601 App ids > PLANNED: Make the tabs and chart compact as well when the compact argument is set to True noqa: H601 pragma: no cover Unselected tab style Extend tab style for selected case noqa: ECE001 Configure for vertical case Configure for horizontal case noqa: H601 App ids Create brand icon and name where icon in optional Create links in navbar and dropdown. Both are optional Layout default navbar TODO: Demo how pages could use parameters from pathname pragma: no cover | 3,122 | en | 0.707001 |
from test.test_json import PyTest, CTest
# 2007-10-05
JSONDOCS = [
# http://json.org/JSON_checker/test/fail1.json
'"A JSON payload should be an object or array, not a string."',
# http://json.org/JSON_checker/test/fail2.json
'["Unclosed array"',
# http://json.org/JSON_checker/test/fail3.json
'{unquoted_key: "keys must be quoted"}',
# http://json.org/JSON_checker/test/fail4.json
'["extra comma",]',
# http://json.org/JSON_checker/test/fail5.json
'["double extra comma",,]',
# http://json.org/JSON_checker/test/fail6.json
'[ , "<-- missing value"]',
# http://json.org/JSON_checker/test/fail7.json
'["Comma after the close"],',
# http://json.org/JSON_checker/test/fail8.json
'["Extra close"]]',
# http://json.org/JSON_checker/test/fail9.json
'{"Extra comma": true,}',
# http://json.org/JSON_checker/test/fail10.json
'{"Extra value after close": true} "misplaced quoted value"',
# http://json.org/JSON_checker/test/fail11.json
'{"Illegal expression": 1 + 2}',
# http://json.org/JSON_checker/test/fail12.json
'{"Illegal invocation": alert()}',
# http://json.org/JSON_checker/test/fail13.json
'{"Numbers cannot have leading zeroes": 013}',
# http://json.org/JSON_checker/test/fail14.json
'{"Numbers cannot be hex": 0x14}',
# http://json.org/JSON_checker/test/fail15.json
'["Illegal backslash escape: \\x15"]',
# http://json.org/JSON_checker/test/fail16.json
'[\\naked]',
# http://json.org/JSON_checker/test/fail17.json
'["Illegal backslash escape: \\017"]',
# http://json.org/JSON_checker/test/fail18.json
'[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]',
# http://json.org/JSON_checker/test/fail19.json
'{"Missing colon" null}',
# http://json.org/JSON_checker/test/fail20.json
'{"Double colon":: null}',
# http://json.org/JSON_checker/test/fail21.json
'{"Comma instead of colon", null}',
# http://json.org/JSON_checker/test/fail22.json
'["Colon instead of comma": false]',
# http://json.org/JSON_checker/test/fail23.json
'["Bad value", truth]',
# http://json.org/JSON_checker/test/fail24.json
"['single quote']",
# http://json.org/JSON_checker/test/fail25.json
'["\ttab\tcharacter\tin\tstring\t"]',
# http://json.org/JSON_checker/test/fail26.json
'["tab\\ character\\ in\\ string\\ "]',
# http://json.org/JSON_checker/test/fail27.json
'["line\nbreak"]',
# http://json.org/JSON_checker/test/fail28.json
'["line\\\nbreak"]',
# http://json.org/JSON_checker/test/fail29.json
'[0e]',
# http://json.org/JSON_checker/test/fail30.json
'[0e+]',
# http://json.org/JSON_checker/test/fail31.json
'[0e+-1]',
# http://json.org/JSON_checker/test/fail32.json
'{"Comma instead if closing brace": true,',
# http://json.org/JSON_checker/test/fail33.json
'["mismatch"}',
# http://code.google.com/p/simplejson/issues/detail?id=3
'["A\u001FZ control characters in string"]',
]
# JSON_checker cases this implementation deliberately accepts.
# Keys are 1-based indices into JSONDOCS; values explain the deviation.
SKIPS = {
    1: "why not have a string payload?",
    18: "spec doesn't specify any nesting limitations",
}
class TestFail:
    """Decoder rejection tests shared by the pure-Python and C scanners.

    Mixed into concrete classes (TestPyFail/TestCFail) that provide
    ``self.loads``, ``self.dumps``, ``self.JSONDecodeError`` and the
    unittest assertion methods.
    """

    def test_failures(self):
        """Every JSON_checker document must be rejected, except SKIPS."""
        for idx, doc in enumerate(JSONDOCS):
            idx = idx + 1
            if idx in SKIPS:
                # Documented deviation: these must parse without error.
                self.loads(doc)
                continue
            try:
                self.loads(doc)
            except self.JSONDecodeError:
                pass
            else:
                self.fail("Expected failure for fail{0}.json: {1!r}".format(idx, doc))

    def test_non_string_keys_dict(self):
        """Dicts with non-string (tuple) keys are rejected by both encoders."""
        data = {'a' : 1, (1, 2) : 2}
        # This is for c encoder
        self.assertRaises(TypeError, self.dumps, data)
        # This is for python encoder (indent forces the pure-Python path)
        self.assertRaises(TypeError, self.dumps, data, indent=True)

    def _assert_decode_errors(self, test_cases):
        """Shared checker: each (data, msg, idx) case must raise JSONDecodeError.

        The error must carry message *msg* at character *idx* on line 1, with
        a column of idx + 1, and str(err) must embed all three consistently.
        """
        for data, msg, idx in test_cases:
            with self.assertRaises(self.JSONDecodeError) as cm:
                self.loads(data)
            err = cm.exception
            self.assertEqual(err.msg, msg)
            self.assertEqual(err.pos, idx)
            self.assertEqual(err.lineno, 1)
            self.assertEqual(err.colno, idx + 1)
            self.assertEqual(str(err),
                             '%s: line 1 column %d (char %d)' %
                             (msg, idx + 1, idx))

    def test_truncated_input(self):
        """Documents cut off mid-token report the right message and position."""
        test_cases = [
            ('', 'Expecting value', 0),
            ('[', 'Expecting value', 1),
            ('[42', "Expecting ',' delimiter", 3),
            ('[42,', 'Expecting value', 4),
            ('["', 'Unterminated string starting at', 1),
            ('["spam', 'Unterminated string starting at', 1),
            ('["spam"', "Expecting ',' delimiter", 7),
            ('["spam",', 'Expecting value', 8),
            ('{', 'Expecting property name enclosed in double quotes', 1),
            ('{"', 'Unterminated string starting at', 1),
            ('{"spam', 'Unterminated string starting at', 1),
            ('{"spam"', "Expecting ':' delimiter", 7),
            ('{"spam":', 'Expecting value', 8),
            ('{"spam":42', "Expecting ',' delimiter", 10),
            ('{"spam":42,', 'Expecting property name enclosed in double quotes', 11),
        ]
        test_cases += [
            ('"', 'Unterminated string starting at', 0),
            ('"spam', 'Unterminated string starting at', 0),
        ]
        self._assert_decode_errors(test_cases)

    def test_unexpected_data(self):
        """Stray tokens inside containers are reported at the bad character."""
        test_cases = [
            ('[,', 'Expecting value', 1),
            ('{"spam":[}', 'Expecting value', 9),
            ('[42:', "Expecting ',' delimiter", 3),
            ('[42 "spam"', "Expecting ',' delimiter", 4),
            ('[42,]', 'Expecting value', 4),
            ('{"spam":[42}', "Expecting ',' delimiter", 11),
            ('["]', 'Unterminated string starting at', 1),
            ('["spam":', "Expecting ',' delimiter", 7),
            ('["spam",]', 'Expecting value', 8),
            ('{:', 'Expecting property name enclosed in double quotes', 1),
            ('{,', 'Expecting property name enclosed in double quotes', 1),
            ('{42', 'Expecting property name enclosed in double quotes', 1),
            ('[{]', 'Expecting property name enclosed in double quotes', 2),
            ('{"spam",', "Expecting ':' delimiter", 7),
            ('{"spam"}', "Expecting ':' delimiter", 7),
            ('[{"spam"]', "Expecting ':' delimiter", 8),
            ('{"spam":}', 'Expecting value', 8),
            ('[{"spam":]', 'Expecting value', 9),
            ('{"spam":42 "ham"', "Expecting ',' delimiter", 11),
            ('[{"spam":42]', "Expecting ',' delimiter", 11),
            ('{"spam":42,}', 'Expecting property name enclosed in double quotes', 11),
        ]
        self._assert_decode_errors(test_cases)

    def test_extra_data(self):
        """Trailing content after a complete document raises 'Extra data'."""
        test_cases = [
            ('[]]', 'Extra data', 2),
            ('{}}', 'Extra data', 2),
            ('[],[]', 'Extra data', 2),
            ('{},{}', 'Extra data', 2),
        ]
        test_cases += [
            ('42,"spam"', 'Extra data', 2),
            ('"spam",42', 'Extra data', 6),
        ]
        self._assert_decode_errors(test_cases)

    def test_linecol(self):
        """Errors past line 1 report the correct line, column and offset."""
        test_cases = [
            ('!', 1, 1, 0),
            (' !', 1, 2, 1),
            ('\n!', 2, 1, 1),
            ('\n \n\n !', 4, 6, 10),
        ]
        for data, line, col, idx in test_cases:
            with self.assertRaises(self.JSONDecodeError) as cm:
                self.loads(data)
            err = cm.exception
            self.assertEqual(err.msg, 'Expecting value')
            self.assertEqual(err.pos, idx)
            self.assertEqual(err.lineno, line)
            self.assertEqual(err.colno, col)
            self.assertEqual(str(err),
                             'Expecting value: line %s column %d (char %d)' %
                             (line, col, idx))
# Concrete parameterizations: run TestFail against the pure-Python and C decoders.
class TestPyFail(TestFail, PyTest): pass
class TestCFail(TestFail, CTest): pass
| Mark_attandance_py_selenium/py/App/Python/Lib/test/test_json/test_fail.py | 8,884 | 2007-10-05 http://json.org/JSON_checker/test/fail1.json http://json.org/JSON_checker/test/fail2.json http://json.org/JSON_checker/test/fail3.json http://json.org/JSON_checker/test/fail4.json http://json.org/JSON_checker/test/fail5.json http://json.org/JSON_checker/test/fail6.json http://json.org/JSON_checker/test/fail7.json http://json.org/JSON_checker/test/fail8.json http://json.org/JSON_checker/test/fail9.json http://json.org/JSON_checker/test/fail10.json http://json.org/JSON_checker/test/fail11.json http://json.org/JSON_checker/test/fail12.json http://json.org/JSON_checker/test/fail13.json http://json.org/JSON_checker/test/fail14.json http://json.org/JSON_checker/test/fail15.json http://json.org/JSON_checker/test/fail16.json http://json.org/JSON_checker/test/fail17.json http://json.org/JSON_checker/test/fail18.json http://json.org/JSON_checker/test/fail19.json http://json.org/JSON_checker/test/fail20.json http://json.org/JSON_checker/test/fail21.json http://json.org/JSON_checker/test/fail22.json http://json.org/JSON_checker/test/fail23.json http://json.org/JSON_checker/test/fail24.json http://json.org/JSON_checker/test/fail25.json http://json.org/JSON_checker/test/fail26.json http://json.org/JSON_checker/test/fail27.json http://json.org/JSON_checker/test/fail28.json http://json.org/JSON_checker/test/fail29.json http://json.org/JSON_checker/test/fail30.json http://json.org/JSON_checker/test/fail31.json http://json.org/JSON_checker/test/fail32.json http://json.org/JSON_checker/test/fail33.json http://code.google.com/p/simplejson/issues/detail?id=3This is for c encoderThis is for python encoder | 1,621 | en | 0.353131 |
import pytest
from sanic import Sanic
from sanic.response import json
from sanic_jwt import Authentication, exceptions, Initialize
class WrongAuthentication(Authentication):
    """Authentication whose payload lacks the required ``user_id`` key."""

    async def build_payload(self, user, *args, **kwargs):
        # Deliberately wrong key -- downstream must reject this payload.
        return dict(not_user_id=1)
class AnotherWrongAuthentication(Authentication):
    """Authentication whose payload is not a dict at all."""

    async def build_payload(self, user, *args, **kwargs):
        # A list is an invalid payload type -- same values as list(range(5)).
        return [0, 1, 2, 3, 4]
class AuthenticationWithNoMethod(Authentication):
    # ``authenticate`` is a string, not a callable -- initialization must fail.
    authenticate = "foobar"
class AuthenticationInClassBody(Authentication):
    # Valid variant: authenticate defined directly on the subclass body.
    async def authenticate(self, request, *args, **kwargs):
        return {"user_id": 1}
async def authenticate(request, *args, **kwargs):
    """Minimal standalone auth backend for tests: accept anyone as user 1."""
    payload = dict(user_id=1)
    return payload
def test_authentication_subclass_without_authenticate_parameter():
    """A subclass with no authenticate (and none passed in) must be rejected."""
    app = Sanic()
    with pytest.raises(exceptions.AuthenticateNotImplemented):
        Initialize(app, authentication_class=WrongAuthentication)
def test_authentication_subclass_with_autenticate_not_as_method():
    """An authenticate attribute that is not callable must be rejected."""
    app = Sanic()
    with pytest.raises(exceptions.AuthenticateNotImplemented):
        Initialize(app, authentication_class=AuthenticationWithNoMethod)
def test_authentication_subbclass_with_method_in_class():
    """authenticate defined on the subclass body is sufficient to log in."""
    app = Sanic()
    sanicjwt = Initialize(app, authentication_class=AuthenticationInClassBody)
    _, response = app.test_client.post(
        "/auth", json={"username": "user1", "password": "abcxyz"}
    )
    # Login succeeds and an access token is present in the response body.
    assert response.status == 200
    assert sanicjwt.config.access_token_name() in response.json
def test_payload_without_correct_key():
    """A payload missing ``user_id`` surfaces as InvalidPayload (HTTP 500)."""
    app = Sanic()
    Initialize(app, authenticate=authenticate, authentication_class=WrongAuthentication)
    _, response = app.test_client.post(
        "/auth", json={"username": "user1", "password": "abcxyz"}
    )
    assert response.status == 500
    assert response.json.get("exception") == "InvalidPayload"
def test_payload_not_a_dict():
    """A non-dict payload (a list) surfaces as InvalidPayload (HTTP 500)."""
    app = Sanic()
    Initialize(
        app, authenticate=authenticate, authentication_class=AnotherWrongAuthentication
    )
    _, response = app.test_client.post(
        "/auth", json={"username": "user1", "password": "abcxyz"}
    )
    assert response.status == 500
    assert response.json.get("exception") == "InvalidPayload"
def test_wrong_header(app):
    """A valid token under a non-Bearer scheme must be rejected with 401."""
    sanic_app, sanic_jwt = app
    # Obtain a real access token first.
    _, response = sanic_app.test_client.post(
        "/auth", json={"username": "user1", "password": "abcxyz"}
    )
    access_token = response.json.get(sanic_jwt.config.access_token_name(), None)
    assert response.status == 200
    assert access_token is not None
    # Present it with the wrong authorization scheme ("Foobar", not "Bearer").
    _, response = sanic_app.test_client.get(
        "/protected", headers={"Authorization": "Foobar {}".format(access_token)}
    )
    assert response.status == 401
    assert response.json.get("exception") == "Unauthorized"
    assert "Authorization header is invalid." in response.json.get("reasons")
    # assert "Auth required." in response.json.get('reasons')
def test_tricky_debug_option_true(app):
    """With a per-route debug=True override, auth failures return 400.

    Compare with test_tricky_debug_option_false, where the same failures
    return 401.
    """
    sanic_app, sanic_jwt = app
    @sanic_app.route("/another_protected")
    @sanic_jwt.protected(debug=lambda: True)
    def another_protected(request):
        return json({"protected": True, "is_debug": request.app.auth.config.debug()})
    # @sanic_app.exception(Exception)
    # def in_case_of_exception(request, exception):
    #     exc_name = exception.args[0].__class__.__name__
    #     status_code = exception.args[0].status_code
    #     return json({"exception": exc_name}, status=status_code)
    _, response = sanic_app.test_client.post(
        "/auth", json={"username": "user1", "password": "abcxyz"}
    )
    access_token = response.json.get(sanic_jwt.config.access_token_name(), None)
    assert response.status == 200
    assert access_token is not None
    # A proper Bearer token still passes.
    _, response = sanic_app.test_client.get(
        "/protected", headers={"Authorization": "Bearer {}".format(access_token)}
    )
    assert response.status == 200
    # Missing header: 400 under the debug override.
    _, response = sanic_app.test_client.get("/another_protected")
    assert response.json.get("exception") == "Unauthorized"
    assert response.status == 400
    assert "Authorization header not present." in response.json.get("reasons")
    # Wrong scheme: also 400 under the debug override.
    _, response = sanic_app.test_client.get(
        "/another_protected",
        headers={"Authorization": "Foobar {}".format(access_token)},
    )
    assert response.json.get("exception") == "Unauthorized"
    assert response.status == 400
    assert "Authorization header is invalid." in response.json.get("reasons")
def test_tricky_debug_option_false(app):
    """With a per-route debug=False override, auth failures return 401.

    Mirror of test_tricky_debug_option_true (which expects 400).
    """
    sanic_app, sanic_jwt = app
    @sanic_app.route("/another_protected")
    @sanic_jwt.protected(debug=lambda: False)
    def another_protected(request):
        return json({"protected": True, "is_debug": request.app.auth.config.debug()})
    # @sanic_app.exception(Exception)
    # def in_case_of_exception(request, exception):
    #     exc_name = exception.args[0].__class__.__name__
    #     status_code = exception.args[0].status_code
    #     return json({"exception": exc_name}, status=status_code)
    _, response = sanic_app.test_client.post(
        "/auth", json={"username": "user1", "password": "abcxyz"}
    )
    access_token = response.json.get(sanic_jwt.config.access_token_name(), None)
    assert response.status == 200
    assert access_token is not None
    # A proper Bearer token still passes.
    _, response = sanic_app.test_client.get(
        "/protected", headers={"Authorization": "Bearer {}".format(access_token)}
    )
    assert response.status == 200
    # Missing header: plain 401 without the debug override.
    _, response = sanic_app.test_client.get("/another_protected")
    assert response.json.get("exception") == "Unauthorized"
    assert response.status == 401
    assert "Authorization header not present." in response.json.get("reasons")
    # Wrong scheme: also 401.
    _, response = sanic_app.test_client.get(
        "/another_protected",
        headers={"Authorization": "Foobar {}".format(access_token)},
    )
    assert response.json.get("exception") == "Unauthorized"
    assert response.status == 401
    assert "Authorization header is invalid." in response.json.get("reasons")
| tests/test_authentication.py | 6,080 | assert "Auth required." in response.json.get('reasons') @sanic_app.exception(Exception) def in_case_of_exception(request, exception): exc_name = exception.args[0].__class__.__name__ status_code = exception.args[0].status_code return json({"exception": exc_name}, status=status_code) @sanic_app.exception(Exception) def in_case_of_exception(request, exception): exc_name = exception.args[0].__class__.__name__ status_code = exception.args[0].status_code return json({"exception": exc_name}, status=status_code) | 533 | en | 0.277494 |
import os
import copy
import pytest
import time
import shutil
import tempfile
import logging
from _pytest.logging import caplog as _caplog
from contextlib import suppress
from panoptes.utils.logging import logger
from panoptes.utils.database import PanDB
from panoptes.utils.config.client import get_config
from panoptes.utils.config.client import set_config
from panoptes.utils.config.server import config_server
# Doctest modules
import numpy as np
from matplotlib import pyplot as plt
_all_databases = ['file', 'memory']
logger.enable('panoptes')
logger.level("testing", no=15, icon="🤖", color="<YELLOW><black>")
log_file_path = os.path.join(
os.getenv('PANLOG', '/var/panoptes/logs'),
'panoptes-testing.log'
)
log_fmt = "<lvl>{level:.1s}</lvl> " \
"<light-blue>{time:MM-DD HH:mm:ss.ss!UTC}</>" \
"<blue>({time:HH:mm:ss.ss})</> " \
"| <c>{name} {function}:{line}</c> | " \
"<lvl>{message}</lvl>\n"
startup_message = ' STARTING NEW PYTEST RUN '
logger.add(log_file_path,
enqueue=True, # multiprocessing
format=log_fmt,
colorize=True,
backtrace=True,
diagnose=True,
catch=True,
# Start new log file for each testing run.
rotation=lambda msg, _: startup_message in msg,
level='TRACE')
logger.log('testing', '*' * 25 + startup_message + '*' * 25)
def pytest_addoption(parser):
    """Register PANOPTES-specific command line options with pytest."""
    db_names = ",".join(_all_databases) + ' (or all for all databases)'
    group = parser.getgroup("PANOPTES pytest options")
    group.addoption(
        "--astrometry",
        action="store_true",
        default=False,
        help="If tests that require solving should be run")
    group.addoption(
        "--theskyx",
        action="store_true",
        default=False,
        help="If running tests alongside a running TheSkyX program.")
    group.addoption(
        "--test-databases",
        nargs="+",
        default=['file'],
        help=("Test databases in the list. List items can include: " + db_names +
              ". Note that travis-ci will test all of them by default."))
@pytest.fixture(scope='session')
def db_name():
    """Database name used by every test database for the session."""
    return 'panoptes_testing'
@pytest.fixture(scope='session')
def images_dir(tmpdir_factory):
    """Session-scoped temporary images directory, returned as a str path."""
    directory = tmpdir_factory.mktemp('images')
    return str(directory)
@pytest.fixture(scope='session')
def config_path():
    """Path to the testing config file, with ``$PANDIR`` expanded."""
    return os.path.expandvars('${PANDIR}/panoptes-utils/tests/panoptes_utils_testing.yaml')
@pytest.fixture(scope='session', autouse=True)
def static_config_server(config_path, images_dir, db_name):
    """Run a config server for the whole session, then terminate it.

    Starts the server process, blocks until it answers, overrides a few
    config items for testing (unit id, db name, fields file, images dir),
    yields for the session, and terminates the process on teardown.
    """
    logger.log('testing', f'Starting static_config_server for testing session')
    proc = config_server(
        config_file=config_path,
        ignore_local=True,
        auto_save=False
    )
    logger.log('testing', f'static_config_server started with {proc.pid=}')
    # Give server time to start
    while get_config('name') is None: # pragma: no cover
        logger.log('testing', f'Waiting for static_config_server {proc.pid=}, sleeping 1 second.')
        time.sleep(1)
    logger.log('testing', f'Startup config_server name=[{get_config("name")}]')
    # Adjust various config items for testing
    unit_id = 'PAN000'
    logger.log('testing', f'Setting testing name and unit_id to {unit_id}')
    set_config('pan_id', unit_id)
    logger.log('testing', f'Setting testing database to {db_name}')
    set_config('db.name', db_name)
    fields_file = 'simulator.yaml'
    logger.log('testing', f'Setting testing scheduler fields_file to {fields_file}')
    set_config('scheduler.fields_file', fields_file)
    logger.log('testing', f'Setting temporary image directory for testing')
    set_config('directories.images', images_dir)
    yield
    logger.log('testing', f'Killing static_config_server started with PID={proc.pid}')
    proc.terminate()
@pytest.fixture(scope='function', params=_all_databases)
def db_type(request):
    """Parametrize over database types, erasing leftover test data first.

    Skips the param unless it (or 'all') was selected via --test-databases.
    """
    db_list = request.config.option.test_databases
    if request.param not in db_list and 'all' not in db_list: # pragma: no cover
        pytest.skip(f"Skipping {request.param} DB, set --test-all-databases=True")
    # Wipe any residue from previous runs before handing out the type.
    PanDB.permanently_erase_database(
        request.param, 'panoptes_testing', really='Yes', dangerous='Totally')
    return request.param
@pytest.fixture(scope='function')
def db(db_type):
    """Connected PanDB instance for the current parametrized db_type."""
    return PanDB(db_type=db_type, db_name='panoptes_testing', connect=True)
@pytest.fixture(scope='function')
def save_environ():
    """Snapshot ``os.environ`` before the test and restore it afterwards.

    Restores in place with ``clear()`` + ``update()`` rather than rebinding
    ``os.environ`` to the saved copy: rebinding replaces the special
    ``os._Environ`` mapping with a plain dict, which silently breaks the
    putenv/unsetenv synchronization for the rest of the process.
    """
    old_env = copy.deepcopy(os.environ)
    yield
    os.environ.clear()
    os.environ.update(old_env)
@pytest.fixture(scope='session')
def data_dir():
    """Absolute path to the repository's test data directory."""
    return os.path.expandvars('/var/panoptes/panoptes-utils/tests/data')
@pytest.fixture(scope='function')
def unsolved_fits_file(data_dir):
    """Yield a disposable temp copy of unsolved.fits that tests may mutate."""
    orig_file = os.path.join(data_dir, 'unsolved.fits')
    with tempfile.TemporaryDirectory() as tmpdirname:
        copy_file = shutil.copy2(orig_file, tmpdirname)
        yield copy_file
@pytest.fixture(scope='function')
def solved_fits_file(data_dir):
    """Yield a disposable temp copy of solved.fits.fz that tests may mutate."""
    orig_file = os.path.join(data_dir, 'solved.fits.fz')
    with tempfile.TemporaryDirectory() as tmpdirname:
        copy_file = shutil.copy2(orig_file, tmpdirname)
        yield copy_file
@pytest.fixture(scope='function')
def tiny_fits_file(data_dir):
    """Yield a disposable temp copy of tiny.fits that tests may mutate."""
    orig_file = os.path.join(data_dir, 'tiny.fits')
    with tempfile.TemporaryDirectory() as tmpdirname:
        copy_file = shutil.copy2(orig_file, tmpdirname)
        yield copy_file
@pytest.fixture(scope='function')
def noheader_fits_file(data_dir):
    """Yield a disposable temp copy of noheader.fits that tests may mutate."""
    orig_file = os.path.join(data_dir, 'noheader.fits')
    with tempfile.TemporaryDirectory() as tmpdirname:
        copy_file = shutil.copy2(orig_file, tmpdirname)
        yield copy_file
@pytest.fixture(scope='function')
def cr2_file(data_dir):
    """Yield a temp copy of canon.cr2, skipping if the file is absent.

    The CR2 sample is large and may not be checked out everywhere, so its
    absence skips dependent tests rather than failing them.
    """
    cr2_path = os.path.join(data_dir, 'canon.cr2')
    if not os.path.exists(cr2_path):
        pytest.skip("No CR2 file found, skipping test.")
    with tempfile.TemporaryDirectory() as tmpdirname:
        copy_file = shutil.copy2(cr2_path, tmpdirname)
        yield copy_file
@pytest.fixture(autouse=True)
def add_doctest_dependencies(doctest_namespace):
    """Expose ``np`` and ``plt`` inside every collected doctest."""
    doctest_namespace['np'] = np
    doctest_namespace['plt'] = plt
@pytest.fixture
def caplog(_caplog):
    """caplog override that bridges loguru records into std logging.

    Installs a loguru sink that re-emits each record through the standard
    ``logging`` module so pytest's caplog capture can see it; the sink is
    removed on teardown.
    """
    class PropogateHandler(logging.Handler):
        def emit(self, record):
            # Re-dispatch into std logging so pytest's capture sees it.
            logging.getLogger(record.name).handle(record)
    logger.enable('panoptes')
    handler_id = logger.add(PropogateHandler(), format="{message}")
    yield _caplog
    with suppress(ValueError):
        logger.remove(handler_id)
| conftest.py | 6,582 | Doctest modules multiprocessing Start new log file for each testing run. Give server time to start pragma: no cover Adjust various config items for testing pragma: no cover | 172 | en | 0.718363 |
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
# Main script to be bundled into the .app by py2app.
APP = ['Patient Discharge System v2.0.py']
# Non-code resources copied into the application bundle.
DATA_FILES = ['model.docx', 'logo.gif']
# py2app-specific options (none customized).
OPTIONS = {}
setup(
    app=APP,
    data_files=DATA_FILES,
    options={'py2app': OPTIONS},
    setup_requires=['py2app'],
)
| setup.py | 333 | This is a setup.py script generated by py2applet
Usage:
python setup.py py2app | 83 | en | 0.867131 |
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
from test import LisaTest
TESTS_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
TESTS_CONF = os.path.join(TESTS_DIRECTORY, "rfc.config")
class RFC(LisaTest):
    """Tests for the Energy-Aware Scheduler"""
    # The same config file drives both test setup and the experiment matrix.
    test_conf = TESTS_CONF
    experiments_conf = TESTS_CONF
    @classmethod
    def setUpClass(cls, *args, **kwargs):
        # NOTE(review): args/kwargs are forwarded unstarred, so runExperiments
        # receives a tuple and a dict as two positional values -- presumably
        # intentional for the LisaTest API; confirm against LisaTest.
        super(RFC, cls).runExperiments(args, kwargs)
    def test_run(self):
        """A dummy test just to run configured workloads"""
        pass
# vim :set tabstop=4 shiftwidth=4 expandtab
| tests/eas/rfc.py | 1,196 | Tests for the Energy-Aware Scheduler
A dummy test just to run configured workloads
SPDX-License-Identifier: Apache-2.0 Copyright (C) 2015, ARM Limited and contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. vim :set tabstop=4 shiftwidth=4 expandtab | 730 | en | 0.810831 |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
get_categorical_limit,
ModelLayer,
IdList
)
import numpy as np
class MergeIdLists(ModelLayer):
    """Merge multiple ID_LISTs into a single ID_LIST

    Arguments:
        model: A layer model instance
        input_record: Tuple (Struct) of ID_LIST features to be
        merged

    Returns:
        the merged ID_LIST feature
    """
    def __init__(self, model, input_record, name='merged'):
        super(MergeIdLists, self).__init__(model, name, input_record)
        # All inputs must be IdLists with metadata; the merged feature's
        # categorical limit must bound every input's limit.
        assert all(schema.equal_schemas(x, IdList) for x in input_record), \
            "Inputs to MergeIdLists should all be IdLists."
        assert all(record.items.metadata is not None
                   for record in self.input_record), \
            "Features without metadata are not supported"
        # Widest categorical limit among the inputs becomes the output limit.
        merge_dim = max(get_categorical_limit(record)
                        for record in self.input_record)
        assert merge_dim is not None, "Unbounded features are not supported"
        # Output schema: a single int64 ID list carrying the merged limit.
        self.output_schema = schema.NewRecord(
            model.net, schema.List(
                schema.Scalar(
                    np.int64,
                    blob=model.net.NextBlob(name),
                    metadata=schema.Metadata(categorical_limit=merge_dim)
                )))
    def add_ops(self, net):
        # Single MergeIdLists op maps all input blobs onto the output blobs.
        return net.MergeIdLists(self.input_record.field_blobs(),
                                self.output_schema.field_blobs())
| caffe2/python/layers/merge_id_lists.py | 2,321 | Merge multiple ID_LISTs into a single ID_LIST
Arguments:
model: A layer model instance
input_record: Tuple (Struct) of ID_LIST features to be
merged
Returns:
the merged ID_LIST feature
Copyright (c) 2016-present, Facebook, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 769 | en | 0.836883 |
"""Test project for line_round_dot_setting.
Command examples:
$ python test_projects/line_round_dot_setting/main.py
"""
import sys
sys.path.append('./')
import os
from types import ModuleType
import apysc as ap
from apysc._file import file_util
this_module: ModuleType = sys.modules[__name__]
_DEST_DIR_PATH: str = os.path.join(
file_util.get_abs_module_dir_path(module=this_module),
'test_output/'
)
def main() -> None:
    """
    Entry point of this test project.
    """
    ap.Stage(
        background_color='#333',
        stage_width=1000, stage_height=500)
    sprite: ap.Sprite = ap.Sprite()
    # Row 1: round dots with equal dot and gap sizes.
    sprite.graphics.line_style(
        color='#0af',
        round_dot_setting=ap.LineRoundDotSetting(
            round_size=10, space_size=10))
    sprite.graphics.move_to(x=50, y=30)
    sprite.graphics.line_to(x=450, y=30)
    # Row 2: same dot size, doubled gap.
    sprite.graphics.line_style(
        color='#0af',
        round_dot_setting=ap.LineRoundDotSetting(
            round_size=10, space_size=20))
    sprite.graphics.move_to(x=50, y=60)
    sprite.graphics.line_to(x=450, y=60)
    # Row 3: zero gap -- dots touch each other.
    sprite.graphics.line_style(
        color='#0af',
        round_dot_setting=ap.LineRoundDotSetting(
            round_size=20, space_size=0))
    sprite.graphics.move_to(x=50, y=90)
    sprite.graphics.line_to(x=450, y=90)
    # Row 4: plain solid line for visual comparison.
    sprite.graphics.line_style(
        color='#0af', thickness=3)
    sprite.graphics.move_to(x=40, y=120)
    sprite.graphics.line_to(x=460, y=120)
    # Row 5: dotted polyline; clicking it clears the round-dot setting.
    sprite.graphics.line_style(
        color='#0af',
        round_dot_setting=ap.LineRoundDotSetting(
            round_size=10, space_size=10))
    polyline: ap.Polyline = sprite.graphics.move_to(x=50, y=150)
    sprite.graphics.line_to(x=450, y=150)
    sprite.graphics.line_to(x=700, y=250)
    sprite.graphics.line_to(x=700, y=150)
    polyline.click(on_polyline_click)
    ap.save_overall_html(dest_dir_path=_DEST_DIR_PATH)
def on_polyline_click(
        e: ap.MouseEvent[ap.Polyline], options: dict) -> None:
    """
    Handler that is called when the polyline is clicked; it removes
    the polyline's round-dot setting.

    Parameters
    ----------
    e : MouseEvent
        Created MouseEvent instance.
    options : dict
        Optional parameters.
    """
    e.this.line_round_dot_setting = None
if __name__ == '__main__':
    # Build the test output when run directly (see module docstring for the command).
    main()
| test_projects/line_round_dot_setting/main.py | 2,386 | Entry point of this test project.
Handler that called when polyline is clicked.
Parameters
----------
e : MouseEvent
Created MouseEvent instance.
options : dict
Optional parameters.
Test project for line_round_dot_setting.
Command examples:
$ python test_projects/line_round_dot_setting/main.py | 304 | en | 0.633595 |
"""
This module descibes how to split a dataset into two parts A and B: A is for
tuning the algorithm parameters, and B is for having an unbiased estimation of
its performances. The tuning is done by Grid Search.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import random
from surprise import SVD
from surprise import Dataset
from surprise import accuracy
from surprise import GridSearch
# Load the full dataset.
data = Dataset.load_builtin('ml-100k')
raw_ratings = data.raw_ratings
# shuffle ratings if you want
random.shuffle(raw_ratings)
# A = 90% of the data, B = 10% of the data
threshold = int(.9 * len(raw_ratings))
A_raw_ratings = raw_ratings[:threshold]
B_raw_ratings = raw_ratings[threshold:]
data.raw_ratings = A_raw_ratings # data is now the set A
data.split(n_folds=3)
# Select your best algo with grid search.
print('Grid Search...')
param_grid = {'n_epochs': [5, 10], 'lr_all': [0.002, 0.005]}
grid_search = GridSearch(SVD, param_grid, measures=['RMSE'], verbose=0)
grid_search.evaluate(data)
algo = grid_search.best_estimator['RMSE']
# retrain on the whole set A
trainset = data.build_full_trainset()
algo.train(trainset)
# Compute biased accuracy on A
predictions = algo.test(trainset.build_testset())
print('Biased accuracy on A,', end=' ')
accuracy.rmse(predictions)
# Compute unbiased accuracy on B
testset = data.construct_testset(B_raw_ratings) # testset is now the set B
predictions = algo.test(testset)
print('Unbiased accuracy on B,', end=' ')
accuracy.rmse(predictions)
 | examples/split_data_for_unbiased_estimation.py | 1,581 | This module describes how to split a dataset into two parts A and B: A is for
tuning the algorithm parameters, and B is for having an unbiased estimation of
its performances. The tuning is done by Grid Search.
Load the full dataset. shuffle ratings if you want A = 90% of the data, B = 10% of the data data is now the set A Select your best algo with grid search. retrain on the whole set A Compute biased accuracy on A Compute unbiased accuracy on B testset is now the set B | 476 | en | 0.912124 |
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# sla.slaprofile application
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.lib.app.extdocapplication import ExtDocApplication
from noc.sla.models.slaprofile import SLAProfile
from noc.core.translation import ugettext as _
class SLAProfileApplication(ExtDocApplication):
    """
    ExtDoc application exposing the SLA Profile setup screens.
    """

    title = "SLA Profile"
    menu = [_("Setup"), _("SLA Profiles")]
    model = SLAProfile

    def field_row_class(self, o):
        # Rows are colored via the profile's optional style.
        if o.style:
            return o.style.css_class_name
        return ""
| services/web/apps/sla/slaprofile/views.py | 786 | SLAProfile application
-*- coding: utf-8 -*- --------------------------------------------------------------------- sla.slaprofile application --------------------------------------------------------------------- Copyright (C) 2007-2018 The NOC Project See LICENSE for details --------------------------------------------------------------------- NOC modules | 359 | en | 0.263746 |
from flask import Blueprint, request, jsonify
import subprocess
import json
import yamale
import yaml
import app_conf
import logging.handlers
import mydb
# Flask blueprint for the image-info CRUD endpoints.
imageinfo = Blueprint('imageinfo', __name__)
# set logger
# Per-module rotating file log under ./logs/, sized via app_conf.Log.
logger = logging.getLogger(__name__)
path = f'./logs/{__name__}.log'
fileHandler = logging.handlers.RotatingFileHandler(path,
                                                   maxBytes=app_conf.Log.log_max_size,
                                                   backupCount=app_conf.Log.log_backup_count)
fileHandler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(filename)s:%(lineno)s %(message)s'))
logger.addHandler(fileHandler)
logger.setLevel(app_conf.Log.log_level)
# temp
# NOTE(review): console echo marked "temp" -- confirm it should stay enabled.
logger.addHandler(logging.StreamHandler())
# Key/value store backing all endpoints; keys are "<namespace>/<name>".
db_path = "data/imageinfo.db"
mydb.init(db_path)
# Request-body schema for /create (validation currently disabled; see create()).
schema_create = yamale.make_schema(content="""
name: str(required=True)
namespace: str(required=True)
""")
@imageinfo.route('/create', methods=['post'])
def create():
    """Create or overwrite the record keyed by "<namespace>/<name>".

    The YAML/JSON request body is stored verbatim (JSON-encoded) in the
    key/value store. Always answers with {'err': ..., 'res': ...}.
    """
    msg = {'err': None, 'res': None}
    try:
        # NOTE(review): schema validation is disabled here although delete()
        # validates its body -- confirm this is intentional.
        # yamale.validate(schema_create, yamale.make_data(content=request.data.decode('utf-8')))
        body = yaml.load(request.data, Loader=yaml.Loader)
        record_key = f"{body['namespace']}/{body['name']}"
        record_value = json.dumps(body).encode()
        mydb.upsert(db_path, record_key, record_value)
    except Exception as exc:
        logger.error(str(exc))
        msg['err'] = str(exc)
    return jsonify(msg)
# Request-body schema for /delete: both fields are mandatory.
schema_delete = yamale.make_schema(content="""
name: str(required=True)
namespace: str(required=True)
""")
@imageinfo.route('/delete', methods=['delete'])
def delete():
    """Delete the record for "<namespace>/<name>" named in the request body."""
    msg = {'err': None, 'res': None}
    try:
        # Reject bodies that lack the mandatory name/namespace fields.
        yamale.validate(schema_delete,
                        yamale.make_data(content=request.data.decode('utf-8')))
        body = yaml.load(request.data, Loader=yaml.Loader)
        mydb.delete(db_path, f"{body['namespace']}/{body['name']}")
    except Exception as exc:
        logger.error(str(exc))
        msg['err'] = str(exc)
    return jsonify(msg)
@imageinfo.route('/list', methods=['get'])
def list_():
    """List the names of all records stored under the ?namespace= query arg.

    Returns a JSON envelope {'err': ..., 'res': [name, ...]}; 'err' carries
    the exception text on failure.
    """
    msg = {
        'err': None,
        'res': []
    }
    try:
        namespace = request.args.get('namespace')
        for key in mydb.keys(db_path):
            # Keys are stored as "<namespace>/<name>". Split only on the
            # FIRST '/' so a name that itself contains '/' is not truncated
            # (the original split('/') returned only its first segment).
            ns, _, name = key.partition('/')
            if ns == namespace:
                msg['res'].append(name)
    except Exception as e:
        logger.error(str(e))
        msg['err'] = str(e)
    return jsonify(msg)
@imageinfo.route('/get', methods=['get'])
def get():
    """Fetch one record selected by the ?name= and ?namespace= query args."""
    msg = {'err': None, 'res': None}
    try:
        namespace = request.args.get('namespace')
        name = request.args.get('name')
        stored = mydb.get(db_path, f"{namespace}/{name}")
        if stored is not None:
            msg['res'] = json.loads(stored.decode())
    except Exception as exc:
        logger.error(str(exc))
        msg['err'] = str(exc)
    return jsonify(msg)
| gs-engine/gse_api_server/imageinfo.py | 3,016 | set logger temp schema validation yamale.validate(schema_create, yamale.make_data(content=request.data.decode('utf-8'))) name schema validation | 143 | en | 0.155367 |
def is_sha1(maybe_sha):
    """Return True if *maybe_sha* looks like a SHA-1 hex digest.

    A valid value is exactly 40 characters of hexadecimal (either case).
    The original bound the parsed int to an unused local; the parse is kept
    purely for its ValueError side effect.
    """
    if len(maybe_sha) != 40:
        return False
    try:
        int(maybe_sha, 16)
    except ValueError:
        return False
    return True
def validate(date_text):
    """Return True when *date_text* matches the 'DD-MM-YYYY:SS-MM-HH' format."""
    try:
        datetime.datetime.strptime(date_text, '%d-%m-%Y:%S-%M-%H')
    except ValueError:
        return False
    return True
from flask_cors import CORS
from flask import Flask, render_template, Response, request, jsonify
import pandas as pd
import os
import json
import shutil
import datetime
import base64
import binascii
import datetime
import requests as r
# Local CSV of users (bootstrapped by upload_act; usernames are actually
# fetched from the remote users service -- see upload_act).
LOGIN_FILE_NAME = "login.csv"
# Root directory of the image store; one sub-directory per category.
DB = "templates/images"
# CSV index name: at top level it maps act_id -> category; inside each
# category directory it holds the full act records.
GLOBAL_LIST = "acts.csv"
# host:port of the users service.
IP = "3.208.6.174:80"
# Public IP of this instance, sent as the Origin header to the users service.
INSTANCE_IP = "34.226.230.93"
# In-memory counter served/reset by /api/v1/_count.
count_requests = 0
#IP = "127.0.0.1:5000"
app = Flask(__name__)
CORS(app)
@app.errorhandler(405)
def method_not_allowed(e):
    """Global 405 handler; counts the hit and returns a JSON error body."""
    global count_requests
    # Rejected requests still count toward the /_count metric.
    count_requests += 1
    return jsonify({'error': 405}), 405
@app.route("/")
def index():
return render_template('index.html')
@app.route("/api/v1/categories", methods = ["GET", "POST"])
def list_categories():
global count_requests
count_requests += 1
if not os.path.exists(DB):
os.makedirs(DB, exist_ok = True)
if request.method == 'GET':
categories = os.listdir(DB)
if not categories:
return Response('{}', status=204, mimetype='application/json')
response_data = {}
for category in categories:
response_data[category] = len(os.listdir(DB + "/" + category))
return jsonify(response_data)
elif request.method == "POST":
category = json.loads(request.data)[0]
if category in os.listdir(DB):
return Response('{}', status=400, mimetype='application/json')
os.makedirs(DB + "/" + category, exist_ok = True)
return Response('{}', status=201, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
@app.route("/api/v1/categories/<category>", methods = ["DELETE"])
def delete_category(category = None):
global count_requests
count_requests += 1
if request.method == 'DELETE':
categories = os.listdir(DB)
if category in categories:
if GLOBAL_LIST in os.listdir():
data = pd.read_csv(GLOBAL_LIST)
data = data[data.category != category]
data.to_csv(GLOBAL_LIST, index = False)
shutil.rmtree(DB + "/" + category)
return Response('{}', status=200, mimetype='application/json')
else:
return Response('{}', status=400, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
@app.route("/api/v1/categories/<category>/acts", methods = ["GET"])
def list_acts(category = None):
global count_requests
count_requests += 1
if request.method == 'GET':
temp_path = DB + "/" + category + "/" + GLOBAL_LIST
if category not in os.listdir(DB):
return Response('[]', status=400, mimetype='application/json')
start = request.args.get('start')
end = request.args.get("end")
if start == None and end == None:
#print("This part")
if os.path.exists(temp_path):
data = pd.read_csv(temp_path)
rows = data.shape[0]
if rows == 0:
return Response('[]', status=204, mimetype='application/json')
elif rows >= 100:
return Response('[]', status=413, mimetype='application/json')
else:
response_data = data.to_json(orient = "records")
return Response(response_data, status=200, mimetype='application/json')
else:
return Response('[]', status=204, mimetype='application/json')
else:
start = int(start)
end = int(end)
temp_path = DB + "/" + category + "/" + GLOBAL_LIST
if category not in os.listdir(DB):
return Response('[]', status=400, mimetype='application/json')
if os.path.exists(temp_path):
data = pd.read_csv(temp_path)
data["timestamp"] = pd.to_datetime(data["timestamp"], format = '%d-%m-%Y:%S-%M-%H')
data["actId"] = data["actId"].astype(int)
sorted_data = data.sort_values(["timestamp", "actId"], ascending = [False, False], axis = 0)
#print(data)
#print(sorted_data)
rows = data.shape[0]
if start < 1 or end > rows:
return Response('[]', status=400, mimetype='application/json')
if rows == 0:
return Response('[]', status=204, mimetype='application/json')
else:
required_data = pd.DataFrame(sorted_data.iloc[start-1: end, :])
#print(required_data)
if required_data.shape[0] > 100:
return Response("[]", status=413, mimetype='application/json')
required_data["timestamp"] = pd.to_datetime(required_data["timestamp"], format = '%d-%m-%Y:%S-%M-%H')
required_data["timestamp"] = required_data["timestamp"].astype(str)
response_data = required_data.to_json(orient = "records")
return Response(response_data, status=200, mimetype='application/json')
else:
return Response('[]', status=204, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
@app.route("/api/v1/categories/<category>/acts/size", methods = ["GET"])
def count_acts(category = None):
global count_requests
count_requests += 1
if request.method == 'GET':
temp_path = DB + "/" + category
if category not in os.listdir(DB):
return Response('[]', status=400, mimetype='application/json')
if os.path.exists(temp_path):
data = pd.read_csv(GLOBAL_LIST)
count = data[data.category == category].shape[0]
return Response('[{0}]'.format(str(count)), status=200, mimetype='application/json')
else:
return Response('[]', status=204, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
@app.route("/api/v1/acts/upvote", methods = ["POST"])
def upvote():
global count_requests
count_requests += 1
if request.method == 'POST':
act_id = int(json.loads(request.data)[0])
data_id = pd.read_csv(GLOBAL_LIST)
if act_id not in data_id["act_id"].tolist():
return Response('[]', status=400, mimetype='application/json')
category = data_id[data_id["act_id"] == act_id]["category"].iloc[0]
temp_path = DB + "/" + category + "/" + GLOBAL_LIST
data = pd.read_csv(temp_path)
data.set_index("actId", inplace = True)
data.at[act_id, "upvotes"] += 1
data.reset_index(inplace = True)
data.to_csv(temp_path,index = False)
return Response("{}", status=200, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
@app.route("/api/v1/acts/<actId>", methods = ["DELETE"])
def delete_act(actId = None):
global count_requests
count_requests += 1
if request.method == 'DELETE':
act_id = int(actId)
data_id = pd.read_csv(GLOBAL_LIST)
if act_id not in data_id["act_id"].tolist():
return Response('[]', status=400, mimetype='application/json')
category = data_id[data_id["act_id"] == act_id]["category"].iloc[0]
temp_path = DB + "/" + category + "/" + GLOBAL_LIST
data_id = data_id[data_id["act_id"] != act_id]
data_id.to_csv(GLOBAL_LIST, index = False)
data = pd.read_csv(temp_path)
data = data[data["actId"] != act_id]
data.to_csv(temp_path, index = False)
os.remove(DB + "/" + category + "/" + str(act_id) + ".png")
return Response("{}", status=200, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
# @app.route("/api/v1/categories/<category>/acts?start=<startrange>&end=<endrange>", methods = ["GET"])
# def range_acts(category = None, startrange = 0, endrange = 0):
# if request.method == 'GET':
# temp_path = DB + "/" + category + "/" + GLOBAL_LIST
# if category not in os.listdir(DB):
# return Response('[]', status=400, mimetype='application/json')
# if os.path.exists(temp_path):
# data = pd.read_csv(temp_path)
# sorted_data = data.sort(columns = ["timestamp"], ascending = False)
# rows = data.shape[0]
# if startrange < 1 or endrange > rows:
# return Response('[]', status=400, mimetype='application/json')
# if rows == 0:
# return Response('[]', status=204, mimetype='application/json')
# else:
# required_data = sorted_data.ix[startrange-1: endrange-1, :]
# print(required_data)
# if required_data.shape[0] > 100:
# return Response("[]", status=413, mimetype='application/json')
# response_data = required_data.to_json(orient = "records")
# return Response(response_data, status=200, mimetype='application/json')
# else:
# return Response('[]', status=204, mimetype='application/json')
# else:
# return Response('{}', status=405, mimetype='application/json')
@app.route("/api/v1/acts", methods = ["POST"])
def upload_act():
global count_requests
count_requests += 1
if request.method == 'POST':
if not os.path.exists(DB):
os.makedirs(DB, exist_ok = True)
request_data = json.loads(request.data.decode('utf-8'))
if not GLOBAL_LIST in os.listdir():
data = pd.DataFrame(columns = ['act_id', "category"])
data.to_csv(GLOBAL_LIST, index = False)
if not LOGIN_FILE_NAME in os.listdir():
data = pd.DataFrame(columns = ['username', 'password'])
data.to_csv(LOGIN_FILE_NAME, index = False)
data_acts = pd.read_csv(GLOBAL_LIST)
#data_users = pd.read_csv(LOGIN_FILE_NAME)
# Username and actId
header = {"origin": INSTANCE_IP}
resp = r.get( "http://"+ IP + "/api/v1/users", "{}", headers = header)
print("=============")
print(resp.text)
print("=============")
data_users = eval(resp.text)
if request_data['username'] not in data_users or request_data["actId"] in data_acts["act_id"].tolist():
return Response('{}', status=400, mimetype='application/json')
# Upvotes field
if "upvotes" in request_data.keys():
return Response('{}', status=400, mimetype='application/json')
request_data['upvotes'] = 0
# category name
if request_data["categoryName"] not in os.listdir(DB):
return Response('{}', status=400, mimetype='application/json')
# Date Validity
if not validate(request_data["timestamp"]):
return Response('{}', status=400, mimetype='application/json')
# Base64 validity
try:
base64.b64decode(request_data["imgB64"])
except binascii.Error:
return Response('{}', status=400, mimetype='application/json')
data_acts = data_acts.append({"act_id": int(request_data["actId"]), "category": request_data["categoryName"] }, ignore_index = True)
data_acts.to_csv(GLOBAL_LIST, index = False)
with open(DB + "/" + request_data["categoryName"] + "/" +str(request_data["actId"]) + ".png", "wb") as fp:
fp.write(base64.decodebytes(request_data["imgB64"].encode()))
temp_path = DB + "/" + request_data["categoryName"] + "/" + GLOBAL_LIST
if not GLOBAL_LIST in os.listdir(DB + "/" + request_data["categoryName"]):
data = pd.DataFrame(columns = list(request_data.keys()))
data.to_csv(temp_path, index = False)
data = pd.read_csv(temp_path)
data = data.append(request_data, ignore_index = True)
data.to_csv(temp_path, index = False)
return Response('{}', status=201, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
@app.route("/api/v1/acts/count", methods = ["GET"])
def count_act():
global count_requests
count_requests += 1
if request.method == 'GET':
if not GLOBAL_LIST in os.listdir():
return Response('[0]', status=200, mimetype='application/json')
else:
data_acts = pd.read_csv(GLOBAL_LIST)
count_acts = data_acts.shape[0]
return Response('['+ str(count_acts) +']', status=200, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
@app.route("/api/v1/_count", methods = ["GET", "DELETE"])
def count_request():
global count_requests
if request.method == 'GET':
return Response('['+ str(count_requests) +']', status=200, mimetype='application/json')
elif request.method == 'DELETE':
count_requests = 0
return Response('{}', status=200, mimetype='application/json')
else:
return Response('{}', status=405, mimetype='application/json')
if __name__ == '__main__':
    # Serve on all interfaces, port 80; threaded to overlap blocking I/O.
    app.run(host = '0.0.0.0', port = 80, threaded=True)
    #app.run(threaded = True, debug = True, port = 2000)
| load_balancer/docker_acts/app.py | 13,846 | IP = "127.0.0.1:5000"print("This part")print(data)print(sorted_data)print(required_data) @app.route("/api/v1/categories/<category>/acts?start=<startrange>&end=<endrange>", methods = ["GET"]) def range_acts(category = None, startrange = 0, endrange = 0): if request.method == 'GET': temp_path = DB + "/" + category + "/" + GLOBAL_LIST if category not in os.listdir(DB): return Response('[]', status=400, mimetype='application/json') if os.path.exists(temp_path): data = pd.read_csv(temp_path) sorted_data = data.sort(columns = ["timestamp"], ascending = False) rows = data.shape[0] if startrange < 1 or endrange > rows: return Response('[]', status=400, mimetype='application/json') if rows == 0: return Response('[]', status=204, mimetype='application/json') else: required_data = sorted_data.ix[startrange-1: endrange-1, :] print(required_data) if required_data.shape[0] > 100: return Response("[]", status=413, mimetype='application/json') response_data = required_data.to_json(orient = "records") return Response(response_data, status=200, mimetype='application/json') else: return Response('[]', status=204, mimetype='application/json') else: return Response('{}', status=405, mimetype='application/json')data_users = pd.read_csv(LOGIN_FILE_NAME) Username and actId Upvotes field category name Date Validity Base64 validityapp.run(threaded = True, debug = True, port = 2000) | 1,654 | en | 0.292062 |
"""Generated message classes for cloudasset version v1p2beta1.
The cloud asset API manages the history and inventory of cloud resources.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'cloudasset'
class Asset(_messages.Message):
r"""Cloud asset. This includes all Google Cloud Platform resources, Cloud
IAM policies, and other non-GCP assets.
Fields:
ancestors: Asset's ancestry path in Cloud Resource Manager (CRM)
hierarchy, represented as a list of relative resource names. Ancestry
path starts with the closest CRM ancestor and ending at a visible root.
If the asset is a CRM project/ folder/organization, this starts from the
asset itself. Example: ["projects/123456789", "folders/5432",
"organizations/1234"]
assetType: Type of the asset. Example: "compute.googleapis.com/Disk".
iamPolicy: Representation of the actual Cloud IAM policy set on a cloud
resource. For each resource, there must be at most one Cloud IAM policy
set on it.
name: The full name of the asset. For example: `//compute.googleapis.com/p
rojects/my_project_123/zones/zone1/instances/instance1`. See [Resource N
ames](https://cloud.google.com/apis/design/resource_names#full_resource_
name) for more information.
resource: Representation of the resource.
"""
ancestors = _messages.StringField(1, repeated=True)
assetType = _messages.StringField(2)
iamPolicy = _messages.MessageField('Policy', 3)
name = _messages.StringField(4)
resource = _messages.MessageField('Resource', 5)
class AuditConfig(_messages.Message):
r"""Specifies the audit configuration for a service. The configuration
determines which permission types are logged, and what identities, if any,
are exempted from logging. An AuditConfig must have one or more
AuditLogConfigs. If there are AuditConfigs for both `allServices` and a
specific service, the union of the two AuditConfigs is used for that
service: the log_types specified in each AuditConfig are enabled, and the
exempted_members in each AuditLogConfig are exempted. Example Policy with
multiple AuditConfigs: { "audit_configs": [ {
"service": "allServices" "audit_log_configs": [ {
"log_type": "DATA_READ", "exempted_members": [
"user:jose@example.com" ] }, {
"log_type": "DATA_WRITE", }, {
"log_type": "ADMIN_READ", } ] }, {
"service": "sampleservice.googleapis.com" "audit_log_configs": [
{ "log_type": "DATA_READ", }, {
"log_type": "DATA_WRITE", "exempted_members": [
"user:aliya@example.com" ] } ] }
] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and
ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging,
and aliya@example.com from DATA_WRITE logging.
Fields:
auditLogConfigs: The configuration for logging of each type of permission.
service: Specifies a service that will be enabled for audit logging. For
example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
`allServices` is a special value that covers all services.
"""
auditLogConfigs = _messages.MessageField('AuditLogConfig', 1, repeated=True)
service = _messages.StringField(2)
class AuditLogConfig(_messages.Message):
r"""Provides the configuration for logging a type of permissions. Example:
{ "audit_log_configs": [ { "log_type": "DATA_READ",
"exempted_members": [ "user:jose@example.com" ]
}, { "log_type": "DATA_WRITE", } ] }
This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
jose@example.com from DATA_READ logging.
Enums:
LogTypeValueValuesEnum: The log type that this config enables.
Fields:
exemptedMembers: Specifies the identities that do not cause logging for
this type of permission. Follows the same format of Binding.members.
ignoreChildExemptions: Specifies whether principals can be exempted for
the same LogType in lower-level resource policies. If true, any lower-
level exemptions will be ignored.
logType: The log type that this config enables.
"""
class LogTypeValueValuesEnum(_messages.Enum):
r"""The log type that this config enables.
Values:
LOG_TYPE_UNSPECIFIED: Default case. Should never be this.
ADMIN_READ: Admin reads. Example: CloudIAM getIamPolicy
DATA_WRITE: Data writes. Example: CloudSQL Users create
DATA_READ: Data reads. Example: CloudSQL Users list
"""
LOG_TYPE_UNSPECIFIED = 0
ADMIN_READ = 1
DATA_WRITE = 2
DATA_READ = 3
exemptedMembers = _messages.StringField(1, repeated=True)
ignoreChildExemptions = _messages.BooleanField(2)
logType = _messages.EnumField('LogTypeValueValuesEnum', 3)
class BatchGetAssetsHistoryResponse(_messages.Message):
r"""Batch get assets history response.
Fields:
assets: A list of assets with valid time windows.
"""
assets = _messages.MessageField('TemporalAsset', 1, repeated=True)
class Binding(_messages.Message):
r"""Associates `members` with a `role`.
Fields:
condition: The condition that is associated with this binding. NOTE: An
unsatisfied condition will not allow user access via current binding.
Different bindings, including their conditions, are examined
independently.
members: Specifies the identities requesting access for a Cloud Platform
resource. `members` can have the following values: * `allUsers`: A
special identifier that represents anyone who is on the internet;
with or without a Google account. * `allAuthenticatedUsers`: A special
identifier that represents anyone who is authenticated with a Google
account or a service account. * `user:{emailid}`: An email address that
represents a specific Google account. For example,
`alice@example.com` . * `serviceAccount:{emailid}`: An email address
that represents a service account. For example, `my-other-
app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address
that represents a Google group. For example, `admins@example.com`.
* `domain:{domain}`: The G Suite domain (primary) that represents all
the users of that domain. For example, `google.com` or `example.com`.
role: Role that is assigned to `members`. For example, `roles/viewer`,
`roles/editor`, or `roles/owner`.
"""
condition = _messages.MessageField('Expr', 1)
members = _messages.StringField(2, repeated=True)
role = _messages.StringField(3)
class CloudassetBatchGetAssetsHistoryRequest(_messages.Message):
r"""A CloudassetBatchGetAssetsHistoryRequest object.
Enums:
ContentTypeValueValuesEnum: Required. The content type.
Fields:
assetNames: A list of the full names of the assets. For example: `//comput
e.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1
`. See [Resource Names](https://cloud.google.com/apis/design/resource_na
mes#full_resource_name) and [Resource Name
Format](https://cloud.google.com/resource-manager/docs/cloud-asset-
inventory/resource-name-format) for more info. The request becomes a
no-op if the asset name list is empty, and the max size of the asset
name list is 100 in one request.
contentType: Required. The content type.
parent: Required. The relative name of the root asset. It can only be an
organization number (such as "organizations/123"), a project ID (such as
"projects/my-project-id")", or a project number (such as
"projects/12345").
readTimeWindow_endTime: End time of the time window (inclusive). Current
timestamp if not specified.
readTimeWindow_startTime: Start time of the time window (exclusive).
"""
class ContentTypeValueValuesEnum(_messages.Enum):
r"""Required. The content type.
Values:
CONTENT_TYPE_UNSPECIFIED: <no description>
RESOURCE: <no description>
IAM_POLICY: <no description>
"""
CONTENT_TYPE_UNSPECIFIED = 0
RESOURCE = 1
IAM_POLICY = 2
assetNames = _messages.StringField(1, repeated=True)
contentType = _messages.EnumField('ContentTypeValueValuesEnum', 2)
parent = _messages.StringField(3, required=True)
readTimeWindow_endTime = _messages.StringField(4)
readTimeWindow_startTime = _messages.StringField(5)
class CloudassetExportAssetsRequest(_messages.Message):
r"""A CloudassetExportAssetsRequest object.
Fields:
exportAssetsRequest: A ExportAssetsRequest resource to be passed as the
request body.
parent: Required. The relative name of the root asset. This can only be an
organization number (such as "organizations/123"), a project ID (such as
"projects/my-project-id"), or a project number (such as
"projects/12345").
"""
exportAssetsRequest = _messages.MessageField('ExportAssetsRequest', 1)
parent = _messages.StringField(2, required=True)
class CloudassetFeedsCreateRequest(_messages.Message):
r"""A CloudassetFeedsCreateRequest object.
Fields:
createFeedRequest: A CreateFeedRequest resource to be passed as the
request body.
parent: Required. The name of the project/folder/organization where this
feed should be created in. It can only be an organization number (such
as "organizations/123"), a folder number (such as "folders/123"), a
project ID (such as "projects/my-project-id")", or a project number
(such as "projects/12345").
"""
createFeedRequest = _messages.MessageField('CreateFeedRequest', 1)
parent = _messages.StringField(2, required=True)
class CloudassetFeedsDeleteRequest(_messages.Message):
r"""A CloudassetFeedsDeleteRequest object.
Fields:
name: The name of the feed and it must be in the format of:
projects/project_number/feeds/feed_id
folders/folder_number/feeds/feed_id
organizations/organization_number/feeds/feed_id
"""
name = _messages.StringField(1, required=True)
class CloudassetFeedsGetRequest(_messages.Message):
r"""A CloudassetFeedsGetRequest object.
Fields:
name: The name of the Feed and it must be in the format of:
projects/project_number/feeds/feed_id
folders/folder_number/feeds/feed_id
organizations/organization_number/feeds/feed_id
"""
name = _messages.StringField(1, required=True)
class CloudassetFeedsListRequest(_messages.Message):
r"""A CloudassetFeedsListRequest object.
Fields:
parent: Required. The parent project/folder/organization whose feeds are
to be listed. It can only be using project/folder/organization number
(such as "folders/12345")", or a project ID (such as "projects/my-
project-id").
"""
parent = _messages.StringField(1, required=True)
class CloudassetFeedsPatchRequest(_messages.Message):
r"""A CloudassetFeedsPatchRequest object.
Fields:
name: Required. The format will be
projects/{project_number}/feeds/{client-assigned_feed_identifier} or
folders/{folder_number}/feeds/{client-assigned_feed_identifier} or
organizations/{organization_number}/feeds/{client-
assigned_feed_identifier} The client-assigned feed identifier must be
unique within the parent project/folder/organization.
updateFeedRequest: A UpdateFeedRequest resource to be passed as the
request body.
"""
name = _messages.StringField(1, required=True)
updateFeedRequest = _messages.MessageField('UpdateFeedRequest', 2)
class CreateFeedRequest(_messages.Message):
r"""Create asset feed request.
Fields:
feed: The feed details. The field `name` must be empty and it will be
generated in the format of: projects/project_number/feeds/feed_id
folders/folder_number/feeds/feed_id
organizations/organization_number/feeds/feed_id
feedId: Required. This is the client-assigned asset feed identifier and it
needs to be unique under a specific parent project/folder/organization.
"""
feed = _messages.MessageField('Feed', 1)
feedId = _messages.StringField(2)
class Empty(_messages.Message):
r"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo {
rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The
JSON representation for `Empty` is empty JSON object `{}`.
"""
class ExportAssetsRequest(_messages.Message):
  r"""Export asset request.
  Enums:
    ContentTypeValueValuesEnum: Asset content type. If not specified, no
      content but the asset name will be returned.
  Fields:
    assetTypes: A list of asset types of which to take a snapshot for. For
      example: "compute.googleapis.com/Disk". If specified, only matching
      assets will be returned. See [Introduction to Cloud Asset
      Inventory](https://cloud.google.com/resource-manager/docs/cloud-asset-
      inventory/overview) for all supported asset types.
    contentType: Asset content type. If not specified, no content but the
      asset name will be returned.
    outputConfig: Required. Output configuration indicating where the results
      will be output to. All results will be in newline delimited JSON format.
    readTime: Timestamp to take an asset snapshot. This can only be set to a
      timestamp between 2018-10-02 UTC (inclusive) and the current time. If
      not specified, the current time will be used. Due to delays in resource
      data collection and indexing, there is a volatile window during which
      running the same query may get different results.
  """
  class ContentTypeValueValuesEnum(_messages.Enum):
    r"""Asset content type. If not specified, no content but the asset name
    will be returned.
    Values:
      CONTENT_TYPE_UNSPECIFIED: Unspecified content type.
      RESOURCE: Resource metadata.
      IAM_POLICY: The actual IAM policy set on a resource.
    """
    CONTENT_TYPE_UNSPECIFIED = 0
    RESOURCE = 1
    IAM_POLICY = 2
  # Field numbers fix the wire/JSON mapping; keep values and order stable.
  assetTypes = _messages.StringField(1, repeated=True)
  contentType = _messages.EnumField('ContentTypeValueValuesEnum', 2)
  outputConfig = _messages.MessageField('OutputConfig', 3)
  # readTime is carried as an RFC-3339 timestamp string, not a datetime.
  readTime = _messages.StringField(4)
class Expr(_messages.Message):
  r"""Represents an expression text. Example: title: "User account
  presence" description: "Determines whether the request has a user
  account" expression: "size(request.user) > 0"
  Fields:
    description: An optional description of the expression. This is a longer
      text which describes the expression, e.g. when hovered over it in a UI.
    expression: Textual representation of an expression in Common Expression
      Language syntax. The application context of the containing message
      determines which well-known feature set of CEL is supported.
    location: An optional string indicating the location of the expression for
      error reporting, e.g. a file name and a position in the file.
    title: An optional title for the expression, i.e. a short string
      describing its purpose. This can be used e.g. in UIs which allow to
      enter the expression.
  """
  # All four fields are plain strings; the CEL expression is not parsed here.
  description = _messages.StringField(1)
  expression = _messages.StringField(2)
  location = _messages.StringField(3)
  title = _messages.StringField(4)
class Feed(_messages.Message):
  r"""An asset feed used to export asset updates to a destinations. An asset
  feed filter controls what updates are exported. The asset feed must be
  created within a project, organization, or folder. Supported destinations
  are: Cloud Pub/Sub topics.
  Enums:
    ContentTypeValueValuesEnum: Asset content type. If not specified, no
      content but the asset name and type will be returned.
  Fields:
    assetNames: A list of the full names of the assets to receive updates. You
      must specify either or both of asset_names and asset_types. Only asset
      updates matching specified asset_names and asset_types are exported to
      the feed. For example: `//compute.googleapis.com/projects/my_project_123
      /zones/zone1/instances/instance1`. See [Resource Names](https://cloud.go
      ogle.com/apis/design/resource_names#full_resource_name) for more info.
    assetTypes: A list of types of the assets to receive updates. You must
      specify either or both of asset_names and asset_types. Only asset
      updates matching specified asset_names and asset_types are exported to
      the feed. For example: "compute.googleapis.com/Disk" See [Introduction
      to Cloud Asset Inventory](https://cloud.google.com/resource-
      manager/docs/cloud-asset-inventory/overview) for all supported asset
      types.
    contentType: Asset content type. If not specified, no content but the
      asset name and type will be returned.
    feedOutputConfig: Required. Feed output configuration defining where the
      asset updates are published to.
    name: Required. The format will be
      projects/{project_number}/feeds/{client-assigned_feed_identifier} or
      folders/{folder_number}/feeds/{client-assigned_feed_identifier} or
      organizations/{organization_number}/feeds/{client-
      assigned_feed_identifier} The client-assigned feed identifier must be
      unique within the parent project/folder/organization.
  """
  class ContentTypeValueValuesEnum(_messages.Enum):
    r"""Asset content type. If not specified, no content but the asset name
    and type will be returned.
    Values:
      CONTENT_TYPE_UNSPECIFIED: Unspecified content type.
      RESOURCE: Resource metadata.
      IAM_POLICY: The actual IAM policy set on a resource.
    """
    CONTENT_TYPE_UNSPECIFIED = 0
    RESOURCE = 1
    IAM_POLICY = 2
  # Field numbers fix the wire/JSON mapping; keep values and order stable.
  assetNames = _messages.StringField(1, repeated=True)
  assetTypes = _messages.StringField(2, repeated=True)
  contentType = _messages.EnumField('ContentTypeValueValuesEnum', 3)
  feedOutputConfig = _messages.MessageField('FeedOutputConfig', 4)
  name = _messages.StringField(5)
class FeedOutputConfig(_messages.Message):
  r"""Output configuration for asset feed destination.
  Fields:
    pubsubDestination: Destination on Cloud Pubsub.
  """
  # Single destination option; referenced by Feed.feedOutputConfig.
  pubsubDestination = _messages.MessageField('PubsubDestination', 1)
class GcsDestination(_messages.Message):
  r"""A Cloud Storage location.
  Fields:
    uri: The uri of the Cloud Storage object. It's the same uri that is used
      by gsutil. For example: "gs://bucket_name/object_name". See [Viewing and
      Editing Object Metadata](https://cloud.google.com/storage/docs/viewing-
      editing-metadata) for more information.
  """
  # The gs:// URI is passed through verbatim; no validation happens here.
  uri = _messages.StringField(1)
class ListFeedsResponse(_messages.Message):
  r"""A ListFeedsResponse object.
  Fields:
    feeds: A list of feeds.
  """
  # repeated=True makes this a list-valued field of Feed messages.
  feeds = _messages.MessageField('Feed', 1, repeated=True)
class Operation(_messages.Message):
  r"""This resource represents a long-running operation that is the result of
  a network API call.
  Messages:
    MetadataValue: Service-specific metadata associated with the operation.
      It typically contains progress information and common metadata such as
      create time. Some services might not provide such metadata. Any method
      that returns a long-running operation should document the metadata type,
      if any.
    ResponseValue: The normal response of the operation in case of success.
      If the original method returns no data on success, such as `Delete`, the
      response is `google.protobuf.Empty`. If the original method is standard
      `Get`/`Create`/`Update`, the response should be the resource. For other
      methods, the response should have the type `XxxResponse`, where `Xxx` is
      the original method name. For example, if the original method name is
      `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
  Fields:
    done: If the value is `false`, it means the operation is still in
      progress. If `true`, the operation is completed, and either `error` or
      `response` is available.
    error: The error result of the operation in case of failure or
      cancellation.
    metadata: Service-specific metadata associated with the operation. It
      typically contains progress information and common metadata such as
      create time. Some services might not provide such metadata. Any method
      that returns a long-running operation should document the metadata type,
      if any.
    name: The server-assigned name, which is only unique within the same
      service that originally returns it. If you use the default HTTP mapping,
      the `name` should be a resource name ending with
      `operations/{unique_id}`.
    response: The normal response of the operation in case of success. If the
      original method returns no data on success, such as `Delete`, the
      response is `google.protobuf.Empty`. If the original method is standard
      `Get`/`Create`/`Update`, the response should be the resource. For other
      methods, the response should have the type `XxxResponse`, where `Xxx` is
      the original method name. For example, if the original method name is
      `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
  """
  # The decorator routes JSON keys without a declared field into
  # `additionalProperties`, modelling an open-ended (schema-less) object.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    r"""Service-specific metadata associated with the operation. It typically
    contains progress information and common metadata such as create time.
    Some services might not provide such metadata. Any method that returns a
    long-running operation should document the metadata type, if any.
    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.
    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetadataValue object.
      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ResponseValue(_messages.Message):
    r"""The normal response of the operation in case of success. If the
    original method returns no data on success, such as `Delete`, the response
    is `google.protobuf.Empty`. If the original method is standard
    `Get`/`Create`/`Update`, the response should be the resource. For other
    methods, the response should have the type `XxxResponse`, where `Xxx` is
    the original method name. For example, if the original method name is
    `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
    Messages:
      AdditionalProperty: An additional property for a ResponseValue object.
    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a ResponseValue object.
      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  done = _messages.BooleanField(1)
  error = _messages.MessageField('Status', 2)
  metadata = _messages.MessageField('MetadataValue', 3)
  name = _messages.StringField(4)
  response = _messages.MessageField('ResponseValue', 5)
class OutputConfig(_messages.Message):
  r"""Output configuration for export assets destination.
  Fields:
    gcsDestination: Destination on Cloud Storage.
  """
  # Single destination option; referenced by ExportAssetsRequest.outputConfig.
  gcsDestination = _messages.MessageField('GcsDestination', 1)
class Policy(_messages.Message):
  r"""Defines an Identity and Access Management (IAM) policy. It is used to
  specify access control policies for Cloud Platform resources. A `Policy`
  consists of a list of `bindings`. A `binding` binds a list of `members` to a
  `role`, where the members can be user accounts, Google groups, Google
  domains, and service accounts. A `role` is a named list of permissions
  defined by IAM. **JSON Example** { "bindings": [ {
  "role": "roles/owner", "members": [
  "user:mike@example.com", "group:admins@example.com",
  "domain:google.com", "serviceAccount:my-other-
  app@appspot.gserviceaccount.com" ] }, {
  "role": "roles/viewer", "members": ["user:sean@example.com"]
  } ] } **YAML Example** bindings: - members: -
  user:mike@example.com - group:admins@example.com -
  domain:google.com - serviceAccount:my-other-
  app@appspot.gserviceaccount.com role: roles/owner - members:
  - user:sean@example.com role: roles/viewer For a description of IAM
  and its features, see the [IAM developer's
  guide](https://cloud.google.com/iam/docs).
  Fields:
    auditConfigs: Specifies cloud audit logging configuration for this policy.
    bindings: Associates a list of `members` to a `role`. `bindings` with no
      members will result in an error.
    etag: `etag` is used for optimistic concurrency control as a way to help
      prevent simultaneous updates of a policy from overwriting each other. It
      is strongly suggested that systems make use of the `etag` in the read-
      modify-write cycle to perform policy updates in order to avoid race
      conditions: An `etag` is returned in the response to `getIamPolicy`, and
      systems are expected to put that etag in the request to `setIamPolicy`
      to ensure that their change will be applied to the same version of the
      policy. If no `etag` is provided in the call to `setIamPolicy`, then
      the existing policy is overwritten.
    version: Deprecated.
  """
  auditConfigs = _messages.MessageField('AuditConfig', 1, repeated=True)
  bindings = _messages.MessageField('Binding', 2, repeated=True)
  # etag is opaque binary data (base64 on the wire), hence BytesField.
  etag = _messages.BytesField(3)
  # Kept only for wire compatibility; documented as deprecated above.
  version = _messages.IntegerField(4, variant=_messages.Variant.INT32)
class PubsubDestination(_messages.Message):
  r"""A Cloud Pubsub destination.
  Fields:
    topic: The name of the Cloud Pub/Sub topic to publish to. For example:
      `projects/PROJECT_ID/topics/TOPIC_ID`.
  """
  # Full topic resource name; passed through verbatim.
  topic = _messages.StringField(1)
class Resource(_messages.Message):
  r"""Representation of a cloud resource.
  Messages:
    DataValue: The content of the resource, in which some sensitive fields are
      scrubbed away and may not be present.
  Fields:
    data: The content of the resource, in which some sensitive fields are
      scrubbed away and may not be present.
    discoveryDocumentUri: The URL of the discovery document containing the
      resource's JSON schema. For example:
      `"https://www.googleapis.com/discovery/v1/apis/compute/v1/rest"`. It
      will be left unspecified for resources without a discovery-based API,
      such as Cloud Bigtable.
    discoveryName: The JSON schema name listed in the discovery document.
      Example: "Project". It will be left unspecified for resources (such as
      Cloud Bigtable) without a discovery-based API.
    parent: The full name of the immediate parent of this resource. See
      [Resource Names](https://cloud.google.com/apis/design/resource_names#ful
      l_resource_name) for more information. For GCP assets, it is the parent
      resource defined in the [Cloud IAM policy
      hierarchy](https://cloud.google.com/iam/docs/overview#policy_hierarchy).
      For example:
      `"//cloudresourcemanager.googleapis.com/projects/my_project_123"`. For
      third-party assets, it is up to the users to define.
    resourceUrl: The REST URL for accessing the resource. An HTTP GET
      operation using this URL returns the resource itself. Example:
      `https://cloudresourcemanager.googleapis.com/v1/projects/my-
      project-123`. It will be left unspecified for resources without a REST
      API.
    version: The API version. Example: "v1".
  """
  # The decorator routes JSON keys without a declared field into
  # `additionalProperties`, modelling an open-ended (schema-less) object.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class DataValue(_messages.Message):
    r"""The content of the resource, in which some sensitive fields are
    scrubbed away and may not be present.
    Messages:
      AdditionalProperty: An additional property for a DataValue object.
    Fields:
      additionalProperties: Properties of the object.
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a DataValue object.
      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  data = _messages.MessageField('DataValue', 1)
  discoveryDocumentUri = _messages.StringField(2)
  discoveryName = _messages.StringField(3)
  parent = _messages.StringField(4)
  resourceUrl = _messages.StringField(5)
  version = _messages.StringField(6)
class StandardQueryParameters(_messages.Message):
  r"""Query parameters accepted by all methods.
  Enums:
    FXgafvValueValuesEnum: V1 error format.
    AltValueValuesEnum: Data format for response.
  Fields:
    f__xgafv: V1 error format.
    access_token: OAuth access token.
    alt: Data format for response.
    callback: JSONP
    fields: Selector specifying which fields to include in a partial response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth 2.0
      token.
    oauth_token: OAuth 2.0 token for the current user.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but should
      not exceed 40 characters.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
    upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
  """
  class AltValueValuesEnum(_messages.Enum):
    r"""Data format for response.
    Values:
      json: Responses with Content-Type of application/json
      media: Media download with context-dependent Content-Type
      proto: Responses with Content-Type of application/x-protobuf
    """
    json = 0
    media = 1
    proto = 2
  class FXgafvValueValuesEnum(_messages.Enum):
    r"""V1 error format.
    Values:
      _1: v1 error format
      _2: v2 error format
    """
    # Leading underscores because Python identifiers cannot start with a
    # digit; remapped to "1"/"2" by AddCustomJsonEnumMapping at module level.
    _1 = 0
    _2 = 1
  # f__xgafv is serialized as "$.xgafv" via AddCustomJsonFieldMapping below.
  f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
  access_token = _messages.StringField(2)
  alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
  callback = _messages.StringField(4)
  fields = _messages.StringField(5)
  key = _messages.StringField(6)
  oauth_token = _messages.StringField(7)
  prettyPrint = _messages.BooleanField(8, default=True)
  quotaUser = _messages.StringField(9)
  trace = _messages.StringField(10)
  uploadType = _messages.StringField(11)
  upload_protocol = _messages.StringField(12)
class Status(_messages.Message):
  r"""The `Status` type defines a logical error model that is suitable for
  different programming environments, including REST APIs and RPC APIs. It is
  used by [gRPC](https://github.com/grpc). Each `Status` message contains
  three pieces of data: error code, error message, and error details. You can
  find out more about this error model and how to work with it in the [API
  Design Guide](https://cloud.google.com/apis/design/errors).
  Messages:
    DetailsValueListEntry: A DetailsValueListEntry object.
  Fields:
    code: The status code, which should be an enum value of google.rpc.Code.
    details: A list of messages that carry the error details. There is a
      common set of message types for APIs to use.
    message: A developer-facing error message, which should be in English. Any
      user-facing error message should be localized and sent in the
      google.rpc.Status.details field, or localized by the client.
  """
  # The decorator routes JSON keys without a declared field into
  # `additionalProperties`, modelling an open-ended (schema-less) object.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class DetailsValueListEntry(_messages.Message):
    r"""A DetailsValueListEntry object.
    Messages:
      AdditionalProperty: An additional property for a DetailsValueListEntry
        object.
    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      r"""An additional property for a DetailsValueListEntry object.
      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
  message = _messages.StringField(3)
class TemporalAsset(_messages.Message):
  r"""Temporal asset. In addition to the asset, the temporal asset includes
  the status of the asset and valid from and to time of it.
  Fields:
    asset: Asset.
    deleted: If the asset is deleted or not.
    window: The time window when the asset data and state was observed.
  """
  asset = _messages.MessageField('Asset', 1)
  deleted = _messages.BooleanField(2)
  window = _messages.MessageField('TimeWindow', 3)
class TimeWindow(_messages.Message):
  r"""A time window of (start_time, end_time].
  Fields:
    endTime: End time of the time window (inclusive). Current timestamp if not
      specified.
    startTime: Start time of the time window (exclusive).
  """
  # Both endpoints travel as RFC-3339 timestamp strings, not datetimes.
  endTime = _messages.StringField(1)
  startTime = _messages.StringField(2)
class UpdateFeedRequest(_messages.Message):
  r"""Update asset feed request.
  Fields:
    feed: The new values of feed details. It must match an existing feed and
      the field `name` must be in the format of:
      projects/project_number/feeds/feed_id or
      folders/folder_number/feeds/feed_id or
      organizations/organization_number/feeds/feed_id.
    updateMask: Only updates the `feed` fields indicated by this mask. The
      field mask must not be empty, and it must not contain fields that are
      immutable or only set by the server.
  """
  feed = _messages.MessageField('Feed', 1)
  # Comma-separated FieldMask carried as a plain string.
  updateMask = _messages.StringField(2)
# Remap names that are not valid Python identifiers to their JSON wire forms:
# the field `f__xgafv` serializes as "$.xgafv", and the enum values `_1`/`_2`
# serialize as "1"/"2".
encoding.AddCustomJsonFieldMapping(
    StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| lib/googlecloudsdk/third_party/apis/cloudasset/v1p2beta1/cloudasset_v1p2beta1_messages.py | 35,781 | An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
An additional property for a ResponseValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
An additional property for a DataValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
An additional property for a DetailsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
Cloud asset. This includes all Google Cloud Platform resources, Cloud
IAM policies, and other non-GCP assets.
Fields:
ancestors: Asset's ancestry path in Cloud Resource Manager (CRM)
hierarchy, represented as a list of relative resource names. Ancestry
path starts with the closest CRM ancestor and ending at a visible root.
If the asset is a CRM project/ folder/organization, this starts from the
asset itself. Example: ["projects/123456789", "folders/5432",
"organizations/1234"]
assetType: Type of the asset. Example: "compute.googleapis.com/Disk".
iamPolicy: Representation of the actual Cloud IAM policy set on a cloud
resource. For each resource, there must be at most one Cloud IAM policy
set on it.
name: The full name of the asset. For example: `//compute.googleapis.com/p
rojects/my_project_123/zones/zone1/instances/instance1`. See [Resource N
ames](https://cloud.google.com/apis/design/resource_names#full_resource_
name) for more information.
resource: Representation of the resource.
Specifies the audit configuration for a service. The configuration
determines which permission types are logged, and what identities, if any,
are exempted from logging. An AuditConfig must have one or more
AuditLogConfigs. If there are AuditConfigs for both `allServices` and a
specific service, the union of the two AuditConfigs is used for that
service: the log_types specified in each AuditConfig are enabled, and the
exempted_members in each AuditLogConfig are exempted. Example Policy with
multiple AuditConfigs: { "audit_configs": [ {
"service": "allServices" "audit_log_configs": [ {
"log_type": "DATA_READ", "exempted_members": [
"user:jose@example.com" ] }, {
"log_type": "DATA_WRITE", }, {
"log_type": "ADMIN_READ", } ] }, {
"service": "sampleservice.googleapis.com" "audit_log_configs": [
{ "log_type": "DATA_READ", }, {
"log_type": "DATA_WRITE", "exempted_members": [
"user:aliya@example.com" ] } ] }
] } For sampleservice, this policy enables DATA_READ, DATA_WRITE and
ADMIN_READ logging. It also exempts jose@example.com from DATA_READ logging,
and aliya@example.com from DATA_WRITE logging.
Fields:
auditLogConfigs: The configuration for logging of each type of permission.
service: Specifies a service that will be enabled for audit logging. For
example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
`allServices` is a special value that covers all services.
Provides the configuration for logging a type of permissions. Example:
{ "audit_log_configs": [ { "log_type": "DATA_READ",
"exempted_members": [ "user:jose@example.com" ]
}, { "log_type": "DATA_WRITE", } ] }
This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
jose@example.com from DATA_READ logging.
Enums:
LogTypeValueValuesEnum: The log type that this config enables.
Fields:
exemptedMembers: Specifies the identities that do not cause logging for
this type of permission. Follows the same format of Binding.members.
ignoreChildExemptions: Specifies whether principals can be exempted for
the same LogType in lower-level resource policies. If true, any lower-
level exemptions will be ignored.
logType: The log type that this config enables.
Batch get assets history response.
Fields:
assets: A list of assets with valid time windows.
Associates `members` with a `role`.
Fields:
condition: The condition that is associated with this binding. NOTE: An
unsatisfied condition will not allow user access via current binding.
Different bindings, including their conditions, are examined
independently.
members: Specifies the identities requesting access for a Cloud Platform
resource. `members` can have the following values: * `allUsers`: A
special identifier that represents anyone who is on the internet;
with or without a Google account. * `allAuthenticatedUsers`: A special
identifier that represents anyone who is authenticated with a Google
account or a service account. * `user:{emailid}`: An email address that
represents a specific Google account. For example,
`alice@example.com` . * `serviceAccount:{emailid}`: An email address
that represents a service account. For example, `my-other-
app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address
that represents a Google group. For example, `admins@example.com`.
* `domain:{domain}`: The G Suite domain (primary) that represents all
the users of that domain. For example, `google.com` or `example.com`.
role: Role that is assigned to `members`. For example, `roles/viewer`,
`roles/editor`, or `roles/owner`.
A CloudassetBatchGetAssetsHistoryRequest object.
Enums:
ContentTypeValueValuesEnum: Required. The content type.
Fields:
assetNames: A list of the full names of the assets. For example: `//comput
e.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1
`. See [Resource Names](https://cloud.google.com/apis/design/resource_na
mes#full_resource_name) and [Resource Name
Format](https://cloud.google.com/resource-manager/docs/cloud-asset-
inventory/resource-name-format) for more info. The request becomes a
no-op if the asset name list is empty, and the max size of the asset
name list is 100 in one request.
contentType: Required. The content type.
parent: Required. The relative name of the root asset. It can only be an
organization number (such as "organizations/123"), a project ID (such as
"projects/my-project-id")", or a project number (such as
"projects/12345").
readTimeWindow_endTime: End time of the time window (inclusive). Current
timestamp if not specified.
readTimeWindow_startTime: Start time of the time window (exclusive).
A CloudassetExportAssetsRequest object.
Fields:
exportAssetsRequest: A ExportAssetsRequest resource to be passed as the
request body.
parent: Required. The relative name of the root asset. This can only be an
organization number (such as "organizations/123"), a project ID (such as
"projects/my-project-id"), or a project number (such as
"projects/12345").
A CloudassetFeedsCreateRequest object.
Fields:
createFeedRequest: A CreateFeedRequest resource to be passed as the
request body.
parent: Required. The name of the project/folder/organization where this
feed should be created in. It can only be an organization number (such
as "organizations/123"), a folder number (such as "folders/123"), a
project ID (such as "projects/my-project-id")", or a project number
(such as "projects/12345").
A CloudassetFeedsDeleteRequest object.
Fields:
name: The name of the feed and it must be in the format of:
projects/project_number/feeds/feed_id
folders/folder_number/feeds/feed_id
organizations/organization_number/feeds/feed_id
A CloudassetFeedsGetRequest object.
Fields:
name: The name of the Feed and it must be in the format of:
projects/project_number/feeds/feed_id
folders/folder_number/feeds/feed_id
organizations/organization_number/feeds/feed_id
A CloudassetFeedsListRequest object.
Fields:
parent: Required. The parent project/folder/organization whose feeds are
to be listed. It can only be using project/folder/organization number
(such as "folders/12345")", or a project ID (such as "projects/my-
project-id").
A CloudassetFeedsPatchRequest object.
Fields:
name: Required. The format will be
projects/{project_number}/feeds/{client-assigned_feed_identifier} or
folders/{folder_number}/feeds/{client-assigned_feed_identifier} or
organizations/{organization_number}/feeds/{client-
assigned_feed_identifier} The client-assigned feed identifier must be
unique within the parent project/folder/organization.
updateFeedRequest: A UpdateFeedRequest resource to be passed as the
request body.
Required. The content type.
Values:
CONTENT_TYPE_UNSPECIFIED: <no description>
RESOURCE: <no description>
IAM_POLICY: <no description>
Asset content type. If not specified, no content but the asset name
will be returned.
Values:
CONTENT_TYPE_UNSPECIFIED: Unspecified content type.
RESOURCE: Resource metadata.
IAM_POLICY: The actual IAM policy set on a resource.
Asset content type. If not specified, no content but the asset name
and type will be returned.
Values:
CONTENT_TYPE_UNSPECIFIED: Unspecified content type.
RESOURCE: Resource metadata.
IAM_POLICY: The actual IAM policy set on a resource.
Create asset feed request.
Fields:
feed: The feed details. The field `name` must be empty and it will be
generated in the format of: projects/project_number/feeds/feed_id
folders/folder_number/feeds/feed_id
organizations/organization_number/feeds/feed_id
feedId: Required. This is the client-assigned asset feed identifier and it
needs to be unique under a specific parent project/folder/organization.
The content of the resource, in which some sensitive fields are
scrubbed away and may not be present.
Messages:
AdditionalProperty: An additional property for a DataValue object.
Fields:
additionalProperties: Properties of the object.
A DetailsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a DetailsValueListEntry
object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo {
rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The
JSON representation for `Empty` is empty JSON object `{}`.
Export asset request.
Enums:
ContentTypeValueValuesEnum: Asset content type. If not specified, no
content but the asset name will be returned.
Fields:
assetTypes: A list of asset types of which to take a snapshot for. For
example: "compute.googleapis.com/Disk". If specified, only matching
assets will be returned. See [Introduction to Cloud Asset
Inventory](https://cloud.google.com/resource-manager/docs/cloud-asset-
inventory/overview) for all supported asset types.
contentType: Asset content type. If not specified, no content but the
asset name will be returned.
outputConfig: Required. Output configuration indicating where the results
will be output to. All results will be in newline delimited JSON format.
readTime: Timestamp to take an asset snapshot. This can only be set to a
timestamp between 2018-10-02 UTC (inclusive) and the current time. If
not specified, the current time will be used. Due to delays in resource
data collection and indexing, there is a volatile window during which
running the same query may get different results.
Represents an expression text. Example: title: "User account
presence" description: "Determines whether the request has a user
account" expression: "size(request.user) > 0"
Fields:
description: An optional description of the expression. This is a longer
text which describes the expression, e.g. when hovered over it in a UI.
expression: Textual representation of an expression in Common Expression
Language syntax. The application context of the containing message
determines which well-known feature set of CEL is supported.
location: An optional string indicating the location of the expression for
error reporting, e.g. a file name and a position in the file.
title: An optional title for the expression, i.e. a short string
describing its purpose. This can be used e.g. in UIs which allow to
enter the expression.
V1 error format.
Values:
_1: v1 error format
_2: v2 error format
An asset feed used to export asset updates to a destinations. An asset
feed filter controls what updates are exported. The asset feed must be
created within a project, organization, or folder. Supported destinations
are: Cloud Pub/Sub topics.
Enums:
ContentTypeValueValuesEnum: Asset content type. If not specified, no
content but the asset name and type will be returned.
Fields:
assetNames: A list of the full names of the assets to receive updates. You
must specify either or both of asset_names and asset_types. Only asset
updates matching specified asset_names and asset_types are exported to
the feed. For example: `//compute.googleapis.com/projects/my_project_123
/zones/zone1/instances/instance1`. See [Resource Names](https://cloud.go
ogle.com/apis/design/resource_names#full_resource_name) for more info.
assetTypes: A list of types of the assets to receive updates. You must
specify either or both of asset_names and asset_types. Only asset
updates matching specified asset_names and asset_types are exported to
the feed. For example: "compute.googleapis.com/Disk" See [Introduction
to Cloud Asset Inventory](https://cloud.google.com/resource-
manager/docs/cloud-asset-inventory/overview) for all supported asset
types.
contentType: Asset content type. If not specified, no content but the
asset name and type will be returned.
feedOutputConfig: Required. Feed output configuration defining where the
asset updates are published to.
name: Required. The format will be
projects/{project_number}/feeds/{client-assigned_feed_identifier} or
folders/{folder_number}/feeds/{client-assigned_feed_identifier} or
organizations/{organization_number}/feeds/{client-
assigned_feed_identifier} The client-assigned feed identifier must be
unique within the parent project/folder/organization.
Output configuration for asset feed destination.
Fields:
pubsubDestination: Destination on Cloud Pubsub.
A Cloud Storage location.
Fields:
uri: The uri of the Cloud Storage object. It's the same uri that is used
by gsutil. For example: "gs://bucket_name/object_name". See [Viewing and
Editing Object Metadata](https://cloud.google.com/storage/docs/viewing-
editing-metadata) for more information.
A ListFeedsResponse object.
Fields:
feeds: A list of feeds.
The log type that this config enables.
Values:
LOG_TYPE_UNSPECIFIED: Default case. Should never be this.
ADMIN_READ: Admin reads. Example: CloudIAM getIamPolicy
DATA_WRITE: Data writes. Example: CloudSQL Users create
DATA_READ: Data reads. Example: CloudSQL Users list
Service-specific metadata associated with the operation. It typically
contains progress information and common metadata such as create time.
Some services might not provide such metadata. Any method that returns a
long-running operation should document the metadata type, if any.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
This resource represents a long-running operation that is the result of
a network API call.
Messages:
MetadataValue: Service-specific metadata associated with the operation.
It typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
ResponseValue: The normal response of the operation in case of success.
If the original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Fields:
done: If the value is `false`, it means the operation is still in
progress. If `true`, the operation is completed, and either `error` or
`response` is available.
error: The error result of the operation in case of failure or
cancellation.
metadata: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
name: The server-assigned name, which is only unique within the same
service that originally returns it. If you use the default HTTP mapping,
the `name` should be a resource name ending with
`operations/{unique_id}`.
response: The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Output configuration for export assets destination.
Fields:
gcsDestination: Destination on Cloud Storage.
Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform resources. A `Policy`
consists of a list of `bindings`. A `binding` binds a list of `members` to a
`role`, where the members can be user accounts, Google groups, Google
domains, and service accounts. A `role` is a named list of permissions
defined by IAM. **JSON Example** { "bindings": [ {
"role": "roles/owner", "members": [
"user:mike@example.com", "group:admins@example.com",
"domain:google.com", "serviceAccount:my-other-
app@appspot.gserviceaccount.com" ] }, {
"role": "roles/viewer", "members": ["user:sean@example.com"]
} ] } **YAML Example** bindings: - members: -
user:mike@example.com - group:admins@example.com -
domain:google.com - serviceAccount:my-other-
app@appspot.gserviceaccount.com role: roles/owner - members:
- user:sean@example.com role: roles/viewer For a description of IAM
and its features, see the [IAM developer's
guide](https://cloud.google.com/iam/docs).
Fields:
auditConfigs: Specifies cloud audit logging configuration for this policy.
bindings: Associates a list of `members` to a `role`. `bindings` with no
members will result in an error.
etag: `etag` is used for optimistic concurrency control as a way to help
prevent simultaneous updates of a policy from overwriting each other. It
is strongly suggested that systems make use of the `etag` in the read-
modify-write cycle to perform policy updates in order to avoid race
conditions: An `etag` is returned in the response to `getIamPolicy`, and
systems are expected to put that etag in the request to `setIamPolicy`
to ensure that their change will be applied to the same version of the
policy. If no `etag` is provided in the call to `setIamPolicy`, then
the existing policy is overwritten.
version: Deprecated.
A Cloud Pubsub destination.
Fields:
topic: The name of the Cloud Pub/Sub topic to publish to. For example:
`projects/PROJECT_ID/topics/TOPIC_ID`.
Representation of a cloud resource.
Messages:
DataValue: The content of the resource, in which some sensitive fields are
scrubbed away and may not be present.
Fields:
data: The content of the resource, in which some sensitive fields are
scrubbed away and may not be present.
discoveryDocumentUri: The URL of the discovery document containing the
resource's JSON schema. For example:
`"https://www.googleapis.com/discovery/v1/apis/compute/v1/rest"`. It
will be left unspecified for resources without a discovery-based API,
such as Cloud Bigtable.
discoveryName: The JSON schema name listed in the discovery document.
Example: "Project". It will be left unspecified for resources (such as
Cloud Bigtable) without a discovery-based API.
parent: The full name of the immediate parent of this resource. See
[Resource Names](https://cloud.google.com/apis/design/resource_names#ful
l_resource_name) for more information. For GCP assets, it is the parent
resource defined in the [Cloud IAM policy
hierarchy](https://cloud.google.com/iam/docs/overview#policy_hierarchy).
For example:
`"//cloudresourcemanager.googleapis.com/projects/my_project_123"`. For
third-party assets, it is up to the users to define.
resourceUrl: The REST URL for accessing the resource. An HTTP GET
operation using this URL returns the resource itself. Example:
`https://cloudresourcemanager.googleapis.com/v1/projects/my-
project-123`. It will be left unspecified for resources without a REST
API.
version: The API version. Example: "v1".
The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the response
is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Messages:
AdditionalProperty: An additional property for a ResponseValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
The `Status` type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by [gRPC](https://github.com/grpc). Each `Status` message contains
three pieces of data: error code, error message, and error details. You can
find out more about this error model and how to work with it in the [API
Design Guide](https://cloud.google.com/apis/design/errors).
Messages:
DetailsValueListEntry: A DetailsValueListEntry object.
Fields:
code: The status code, which should be an enum value of google.rpc.Code.
details: A list of messages that carry the error details. There is a
common set of message types for APIs to use.
message: A developer-facing error message, which should be in English. Any
user-facing error message should be localized and sent in the
google.rpc.Status.details field, or localized by the client.
Temporal asset. In addition to the asset, the temporal asset includes
the status of the asset and valid from and to time of it.
Fields:
asset: Asset.
deleted: If the asset is deleted or not.
window: The time window when the asset data and state was observed.
A time window of (start_time, end_time].
Fields:
endTime: End time of the time window (inclusive). Current timestamp if not
specified.
startTime: Start time of the time window (exclusive).
Update asset feed request.
Fields:
feed: The new values of feed details. It must match an existing feed and
the field `name` must be in the format of:
projects/project_number/feeds/feed_id or
folders/folder_number/feeds/feed_id or
organizations/organization_number/feeds/feed_id.
updateMask: Only updates the `feed` fields indicated by this mask. The
field mask must not be empty, and it must not contain fields that are
immutable or only set by the server.
Generated message classes for cloudasset version v1p2beta1.
The cloud asset API manages the history and inventory of cloud resources.
NOTE: This file is autogenerated and should not be edited by hand. | 25,982 | en | 0.771063 |
# -*- coding: utf-8 -*-
"""Django page CMS test suite module for page links"""
from pages.tests.testcase import TestCase
from pages.models import Content
class LinkTestCase(TestCase):
    """Link-handling tests for Django page CMS."""

    def test_01_set_body_pagelink(self):
        """Test the get_body_pagelink_ids and set_body_pagelink functions."""
        self.set_setting("PAGE_LINK_FILTER", True)
        target_page = self.create_new_page()
        linking_page = self.create_new_page()
        # linking_page carries an anchor that points at target_page
        link_markup = 'test <a href="%s" class="page_%d">hello</a>'
        body_content = Content(
            page=linking_page,
            language='en-us',
            type='body',
            body=link_markup % ('#', target_page.id))
        body_content.save()
        # the stored '#' href should be rewritten to the target's URL path
        rendered = Content.objects.get_content(linking_page, 'en-us', 'body')
        self.assertEqual(
            rendered,
            link_markup % (target_page.get_url_path(), target_page.id))
        self.assertFalse(linking_page.has_broken_link())
        # deleting the target must mark the link as broken on re-render
        target_page.delete()
        self.assertEqual(
            Content.objects.get_content(linking_page, 'en-us', 'body'),
            'test <a href="#" class="pagelink_broken">hello</a>')
        self.assertTrue(linking_page.has_broken_link())
Test the get_body_pagelink_ids and set_body_pagelink functions.
Django page CMS test suite module for page links
-*- coding: utf-8 -*- page2 has a link on page1 | 200 | en | 0.523523 |
from ParseHandler import ParseHandler
from PathHandler import PathHandler
import paths
parser = ParseHandler()
pather = PathHandler()

# mirror the raw/ subdirectory layout into clean/
pather.build_matching_subdir(paths.TOY_RAW, paths.TOY_CLEAN)

# collect matching directory listings from both trees
dir_names = pather.get_dir_names(paths.TOY_RAW)  # NOTE(review): unused below
raw_dirs = pather.get_dir_paths(paths.TOY_RAW)
clean_dirs = pather.get_dir_paths(paths.TOY_CLEAN)

# walk each raw subdirectory and parse every file into its clean twin
for raw_dir, clean_dir in zip(raw_dirs, clean_dirs):
    names = pather.get_file_names(raw_dir)
    raw_files = pather.get_file_paths(raw_dir)
    # assumes clean_dir already ends with a path separator -- TODO confirm
    clean_files = [clean_dir + name for name in names]
    for src, dst in zip(raw_files, clean_files):
        parser.parse(src, dst)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class MybankCreditLoanapplyInsturlQueryResponse(AlipayResponse):
    """Response wrapper for mybank.credit.loanapply.insturl.query.

    Exposes the single ``target_url`` field returned by the gateway.
    """

    def __init__(self):
        super(MybankCreditLoanapplyInsturlQueryResponse, self).__init__()
        self._target_url = None

    @property
    def target_url(self):
        # institution URL returned by the query, or None before parsing
        return self._target_url

    @target_url.setter
    def target_url(self, value):
        self._target_url = value

    def parse_response_content(self, response_content):
        """Populate fields from the raw gateway payload."""
        parsed = super(MybankCreditLoanapplyInsturlQueryResponse, self).parse_response_content(response_content)
        if 'target_url' in parsed:
            self.target_url = parsed['target_url']
| alipay/aop/api/response/MybankCreditLoanapplyInsturlQueryResponse.py | 748 | !/usr/bin/env python -*- coding: utf-8 -*- | 42 | en | 0.34282 |
"""
This file defines the database models
"""
from .common import db, Field, auth
from py4web import URL
from pydal.validators import IS_NOT_EMPTY, IS_FILE, IS_EMPTY_OR
import datetime
from . import settings
def get_time():
    """Return the current UTC time as a naive datetime."""
    current_utc = datetime.datetime.utcnow()
    return current_utc
def get_download_url(picture):
    """Build the public URL path under images/ for an uploaded file name."""
    return "images/{}".format(picture)
def get_user():
    """Return the id of the signed-in user, or None for anonymous visitors."""
    user = auth.current_user
    return user.get("id") if user else None
# Blog post table: title/content are user-entered; timestamp and author are
# filled in server-side and therefore hidden from forms.
db.define_table(
    "post",
    Field("title", "string", requires=IS_NOT_EMPTY()),
    Field("content", "text", requires=IS_NOT_EMPTY()),
    Field("date_posted", "datetime", default=get_time, readable=False, writable=False),
    Field(
        "author",
        "reference auth_user",
        default=get_user,
        readable=False,
        writable=False,
    ),
)
# Per-user profile holding an optional picture uploaded to UPLOAD_PATH;
# download links are built by get_download_url above.
db.define_table(
    "profile",
    Field("user", "reference auth_user", readable=False, writable=False),
    Field(
        "image",
        "upload",
        requires = IS_EMPTY_OR(IS_FILE()),
        default="",
        uploadfolder=settings.UPLOAD_PATH,
        download_url=get_download_url, label="Profile Picture",
    ),
)
# We do not want these fields to appear in forms by default.
db.post.id.readable = False
db.post.id.writable = False
db.profile.id.readable = False
db.profile.id.writable = False
db.commit()
| models.py | 1,314 | This file defines the database models
We do not want these fields to appear in forms by default. | 98 | en | 0.915227 |
import imageio
import tensorflow as tf
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.drivers import dynamic_step_driver
from tf_agents.environments import tf_py_environment
def load_policy(path):
    """Load a SavedModel policy previously exported during training."""
    saved_policy = tf.compat.v2.saved_model.load(path)
    return saved_policy
def visualize_policy(environment, policy, output_filename, num_episodes=1, fps=5):
    """Roll out `policy` in `environment` and record the frames to a video.

    environment: environment to step; a TFPyEnvironment is unwrapped for rendering.
    policy: object exposing `action(time_step)` (e.g. a tf_agents policy).
    output_filename: output path understood by imageio (e.g. .mp4/.gif).
    num_episodes: number of episodes recorded back to back.
    fps: playback frame rate of the written video.
    """
    rendering_environment = environment
    if isinstance(environment, tf_py_environment.TFPyEnvironment):
        # The inner env should be used for rendering
        rendering_environment = environment.pyenv.envs[0]
    with imageio.get_writer(output_filename, fps=fps) as video:
        font = ImageFont.load_default()
        # Rebound at the start of each episode below; the closure reads the
        # current value when it draws the overlay.
        total_reward = None
        def _add_environment_frame():
            # Render the env, overlay the running total reward, append frame.
            rendered_env = rendering_environment.render()
            # assumes render() returns an HxWx3 RGB array -- TODO confirm
            image = Image.fromarray(rendered_env.astype(np.uint8), mode='RGB')
            draw = ImageDraw.Draw(image)
            draw.text((5, 5), 'TR: %.1f' % total_reward, font=font)
            image_as_numpy = np.array(image.getdata()).reshape(rendered_env.shape).astype(np.uint8)
            video.append_data(image_as_numpy)
        for _ in range(num_episodes):
            total_reward = 0.0
            time_step = environment.reset()
            _add_environment_frame()
            while not time_step.is_last():
                action_step = policy.action(time_step)
                time_step = environment.step(action_step.action)
                # assumes a batched scalar reward tensor -- TODO confirm
                total_reward += time_step.reward.numpy()[0]
                _add_environment_frame()
def evaluate_policy(env, policy, num_episodes):
    """Run `policy` for `num_episodes`; return (avg return, avg episode length)."""
    return_sum = 0.0
    step_sum = 0.0
    for _ in range(num_episodes):
        step = env.reset()
        ep_return = 0.0
        ep_steps = 0.0
        # step until the environment signals a terminal time step
        while not step.is_last():
            act = policy.action(step)
            step = env.step(act.action)
            ep_return += step.reward
            ep_steps += 1
        return_sum += ep_return
        step_sum += ep_steps
    # reward accumulates as a batched tensor; unwrap the scalar
    return (return_sum / num_episodes).numpy()[0], step_sum / num_episodes
def as_tf_env(env):
    """Wrap a python environment in a TensorFlow-facing TFPyEnvironment."""
    wrapped = tf_py_environment.TFPyEnvironment(env)
    return wrapped
def create_replay_buffer(agent, train_env, replay_buffer_size):
    """Build a uniform replay buffer matching the agent's collect spec."""
    spec = agent.collect_data_spec
    buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
        data_spec=spec,
        batch_size=train_env.batch_size,
        max_length=replay_buffer_size)
    return buffer
def create_collect_driver(train_env, agent, replay_buffer, collect_steps):
    """Build a step driver that records collected experience into the buffer."""
    observers = [replay_buffer.add_batch]
    driver = dynamic_step_driver.DynamicStepDriver(
        train_env,
        agent.collect_policy,
        observers=observers,
        num_steps=collect_steps)
    return driver
def cudnn_workaround():
    """Enable GPU memory growth so TensorFlow does not pre-allocate all VRAM.

    Grabbing the whole GPU up front can leave cuDNN unable to initialize;
    turning on memory growth avoids that. Must be called before the first
    GPU op runs.
    """
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        for gpu in gpus:
            try:
                tf.config.experimental.set_memory_growth(gpu, True)
            except RuntimeError as err:
                # Memory growth can only be changed before the GPUs have been
                # initialized; report and continue instead of crashing callers.
                print('cudnn_workaround: failed to set memory growth:', err)
| trackdays/training/utils.py | 3,014 | The inner env should be used for rendering | 42 | en | 0.583987 |
#!/usr/bin/env python
import argparse
import open3d as o3d
import numpy as np
import os
import time
from os.path import join, dirname, basename, splitext, exists, isdir, isfile
from os import listdir
from numpy import linalg as LA
import math
import cv2
from pathlib import Path
def pcd_to_bin(pcd_path, outdir=None):
    """Convert a .pcd point cloud to a raw .bin dump of its point array.

    Writes next to the input unless `outdir` is given. Returns the output
    path, or None when the cloud is empty.
    """
    cloud = o3d.io.read_point_cloud(pcd_path, format="pcd")
    points = np.asarray(cloud.points)
    if len(points) == 0:
        return None
    target_dir = Path(pcd_path).parent if outdir is None else outdir
    outpath = join(target_dir, splitext(basename(pcd_path))[0] + ".bin")
    # binarize array and save to the same file path with .bin extension
    points.tofile(outpath)
    return outpath
def pcd_to_sphproj(pcd_path, nr_scans, width, outdir=None):
    """Project a .pcd point cloud into a (nr_scans x width) spherical range image.

    The image is written as a .jpg next to the input (or into `outdir`).
    Returns None for an empty cloud, otherwise (min, max) of the range
    values -- NOTE(review): taken after the 0-255 rescaling below, not in
    metres; confirm this is what callers expect.
    """
    pcd = o3d.io.read_point_cloud(pcd_path, format="pcd")
    pcd_arr = np.asarray(pcd.points)
    if len(pcd_arr) == 0:
        return None
    # https://towardsdatascience.com/spherical-projection-for-point-clouds-56a2fc258e6c
    # print(pcd_arr.shape)
    # print(pcd_arr[:, :3].shape)
    # Range: Euclidean distance of each point from the sensor origin.
    R = LA.norm(pcd_arr[:, :3], axis=1)
    print("R {} | {} -- {}".format(R.shape, np.amin(R), np.amax(R)))
    # Azimuth in (-pi, pi], measured in the x-y plane.
    yaw = np.arctan2(pcd_arr[:, 1], pcd_arr[:, 0])
    # print("yaw {} | {} -- {}".format(yaw.shape, np.amin(yaw), np.amax(yaw)))
    # print("y {} | {} -- {}".format(pcd_arr[:, 1].shape, np.amin(pcd_arr[:, 1]), np.amax(pcd_arr[:, 1])))
    # Elevation angle above the x-y plane.
    pitch = np.arcsin(np.divide(pcd_arr[:, 2], R))
    # print("pitch {} | {} -- {}".format(pitch.shape, np.amin(pitch), np.amax(pitch)))
    # import matplotlib.pyplot as plt
    # plt.plot(yaw, pitch, 'b.')
    # plt.xlabel('yaw [rad]')
    # plt.ylabel('pitch [rad]')
    # plt.axis('equal')
    # plt.show()
    # Vertical field of view spanned by the data itself.
    FOV_Down = np.amin(pitch)
    FOV_Up = np.amax(pitch)
    # NOTE(review): assumes FOV_Down <= 0; if every point sits above the
    # horizon this overstates the span (FOV_Up - FOV_Down would not) -- confirm.
    FOV = FOV_Up + abs(FOV_Down)
    # Row index: highest pitch maps to row 0, lowest to row nr_scans-1.
    u = np.around((nr_scans-1) * (1.0-(pitch-FOV_Down)/FOV)).astype(np.int16)
    # print("u {} | {} -- {} | {}".format(u.shape, np.amin(u), np.amax(u), u.dtype))
    # Column index: yaw mapped linearly from (-pi, pi] onto [0, width-1].
    v = np.around((width-1) * (0.5 * ((yaw/math.pi) + 1))).astype(np.int16)
    # print("v {} | {} -- {} | {}".format(v.shape, np.amin(v), np.amax(v), v.dtype))
    sph_proj = np.zeros((nr_scans, width))
    R[R > 100.0] = 100.0 # cut off all values above 100m
    R = np.round((R / 100.0) * 255.0) # convert 0.0-100.0m into 0.0-255.0 for saving as byte8 image
    # Points mapping to the same pixel overwrite each other (last one wins).
    sph_proj[u, v] = R
    # print("sph_proj {} | {} -- {} | {}".format(sph_proj.shape, np.amin(sph_proj), np.amax(sph_proj), sph_proj.dtype))
    outpath = join(Path(pcd_path).parent if outdir is None else outdir, splitext(basename(pcd_path))[0] + ".jpg")
    cv2.imwrite(outpath, sph_proj)
    print(outpath)
    return np.amin(R), np.amax(R)
def bin_to_pcd(bin_path, outdir=None):
    """Convert a KITTI-style .bin scan (x, y, z, intensity) into a .pcd file.

    Writes next to the input unless `outdir` is given. Returns the output
    path, or None when the scan contains no points.
    """
    print(bin_path)
    raw = np.fromfile(bin_path, dtype=np.float32)
    raw = raw.reshape((-1, 4))  # kitti has 4 values per point
    if len(raw) == 0:
        return None
    target_dir = Path(bin_path).parent if outdir is None else outdir
    outpath = join(target_dir, splitext(basename(bin_path))[0] + ".pcd")
    print(outpath)
    # save array as .pcd, keeping only the xyz columns (intensity dropped)
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(raw[:, :3])
    o3d.io.write_point_cloud(outpath, cloud)
    return outpath
def bin_to_sphproj(bin_path, outdir=None):
    # TODO: not implemented; stub kept so the CLI dispatch below can call it.
    pass
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Convert between .pcd and .bin point cloud formats')
    parser.add_argument("-t", type=str, required=True,
                        help="Conversion to run (pcd2bin, pcd2sphproj, bin2pcd, bin2sphproj)")
    parser.add_argument("-p", type=str, required=True, help="Path to directory or file with point cloud")
    parser.add_argument("-nr_scans", type=int, help="Number of lidar scans (default 16)", default=16)
    parser.add_argument("-width", type=int, help="Spherical projection width (default 1024)", default=1024)
    args = parser.parse_args()

    if not exists(args.p):
        exit("{} does not exist".format(args.p))

    if isfile(args.p):
        # single-file mode: dispatch on requested conversion + file extension
        ext = splitext(args.p)[-1].lower()
        if args.t == "pcd2bin" and ext == ".pcd":
            pcd_to_bin(args.p)
        elif args.t == "bin2pcd" and ext == ".bin":
            bin_to_pcd(args.p)
        elif args.t == "pcd2sphproj" and ext == ".pcd":
            pcd_to_sphproj(args.p, args.nr_scans, args.width)
        elif args.t == "bin2sphproj" and ext == ".bin":
            bin_to_sphproj(args.p)
        else:
            print("Wrong conversion or extension incompatible with conversion")
    elif isdir(args.p):
        # batch mode: convert every matching file into a timestamped sibling dir
        timestamp = time.strftime("%Y%m%d-%H%M%S")
        outdir = join(Path(args.p).parent, str(args.t) + "_" + timestamp)
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        range_min = float('inf')
        range_max = float('-inf')
        for f in listdir(args.p):
            ext = splitext(f)[-1].lower()
            if args.t == "pcd2bin" and ext == ".pcd":
                pcd_to_bin(join(args.p, f), outdir)
            elif args.t == "bin2pcd" and ext == ".bin":
                bin_to_pcd(join(args.p, f), outdir)
            elif args.t == "pcd2sphproj" and ext == ".pcd":
                # pcd_to_sphproj returns None for an empty cloud; the previous
                # unconditional tuple-unpack crashed on such files
                ranges = pcd_to_sphproj(join(args.p, f), args.nr_scans, args.width, outdir)
                if ranges is not None:
                    range_min = min(range_min, ranges[0])
                    range_max = max(range_max, ranges[1])
            elif args.t == "bin2sphproj" and ext == ".bin":
                bin_to_sphproj(join(args.p, f), outdir)
            else:
                print("Wrong conversion or extension incompatible with conversion")
        # stays at +/-inf when no sphproj conversion produced a range
        print("range: {} - {}".format(range_min, range_max))
import os
import traceback
import datetime
class DuplicatePlotException(Exception):
    """Raised when a plot is defined with an already-registered title."""
class ModelLogUtils():
    '''
    Collection of utility methods for logging and plotting of messages & metrics during training.
    '''
    def __init__(self):
        # Fall back to a stdout-backed logger so local debugging still works
        self._logger = ModelLogUtilsLogger()

    def set_logger(self, logger):
        '''
        Replaces the backing logger; it must subclass ``ModelLogUtilsLogger``.
        '''
        if not isinstance(logger, ModelLogUtilsLogger):
            raise Exception('`logger` should subclass `ModelLogUtilsLogger`')
        self._logger = logger

    def log(self, message):
        '''
        Logs a message for analysis of model training.
        '''
        self._logger.log(message)

    def define_loss_plot(self):
        '''
        Shorthand that registers a ``loss``-vs-``epoch`` plot.
        Pairs with ``log_loss_metric()``.
        '''
        self.define_plot('Loss Over Epochs', ['loss'], x_axis='epoch')

    def log_loss_metric(self, loss, epoch):
        '''
        Shorthand that records one ``loss`` value at a given ``epoch``.
        Pairs with ``define_loss_plot()``.
        '''
        self.log_metrics(loss=loss, epoch=epoch)

    def define_plot(self, title, metrics, x_axis=None):
        '''
        Registers a plot of the given metrics; plotted against time when
        no ``x_axis`` is given.
        '''
        self._logger.define_plot(title, metrics, x_axis)

    def log_metrics(self, **kwargs):
        '''
        Records metric values { <metric>: <number> } for one point in time.
        <value> should be a number.
        '''
        self._logger.log_metrics(**kwargs)
class ModelLogUtilsLogger():
    """Default logger backend that echoes messages and metrics to stdout."""

    def __init__(self):
        # titles of plots registered so far, used to reject duplicates
        self._plots = set()

    def log(self, message):
        self._print(message)

    def define_plot(self, title, metrics, x_axis):
        if title in self._plots:
            raise DuplicatePlotException('Plot {} already defined'.format(title))
        self._plots.add(title)
        announcement = 'Plot with title `{}` of {} against {} will be registered when this model is being trained on Rafiki' \
            .format(title, ', '.join(metrics), x_axis or 'time')
        self._print(announcement)

    def log_metrics(self, **kwargs):
        pairs = ['{}={}'.format(metric, value) for (metric, value) in kwargs.items()]
        self._print(', '.join(pairs))

    def _print(self, message):
        print(message)
Convenience method of defining a plot of ``loss`` against ``epoch``.
To be used with ``log_loss_metric()``.
Defines a plot for a set of metrics for analysis of model training.
By default, metrics will be plotted against time.
Logs a message for analysis of model training.
Convenience method for logging `loss` against `epoch`.
To be used with ``define_loss_plot()``.
Logs metrics for a single point in time { <metric>: <value> }.
<value> should be a number.
Add logging to stdout for local debugging | 596 | en | 0.84352 |
# -*- coding: utf-8 -*-
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.storage import Storage
def config(settings):
"""
Template for WA-COP + CAD Cloud Integration
"""
T = current.T
# =========================================================================
# System Settings
#
settings.base.system_name = T("Sahana: Washington Common Operating Picture (WA-COP)")
settings.base.system_name_short = T("Sahana")
# Prepop options
settings.base.prepopulate_options = {"mandatory": "CAD",
"default": ("default/users",
"CAD/Demo",
),
}
# Prepop default
settings.base.prepopulate = "template:default"
# Theme (folder to use for views/layout.html)
#settings.base.theme = "default"
# -------------------------------------------------------------------------
# Self-Registration and User Profile
#
# Users can self-register
#settings.security.self_registration = False
# Users need to verify their email
settings.auth.registration_requires_verification = True
# Users need to be approved
settings.auth.registration_requires_approval = True
settings.auth.registration_requests_organisation = True
settings.auth.registration_organisation_required = True
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
settings.auth.registration_link_user_to = {"staff": T("Staff")}
settings.auth.registration_link_user_to_default = ["staff"]
settings.auth.registration_roles = {"organisation_id": ["USER"],
}
settings.auth.show_utc_offset = False
settings.auth.show_link = False
# -------------------------------------------------------------------------
# Security Policy
#
settings.security.policy = 7 # Apply Controller, Function and Table ACLs
settings.security.map = True
# -------------------------------------------------------------------------
# L10n (Localization) settings
#
settings.L10n.languages = OrderedDict([
    ("en", "English"),
])
# Default Language
settings.L10n.default_language = "en"
# Default timezone for users
settings.L10n.utc_offset = "-0800"
# Unsortable 'pretty' date format
settings.L10n.date_format = "%b %d %Y"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Default Country Code for telephone numbers
settings.L10n.default_country_code = 1
# Enable this to change the label for 'Mobile Phone'
settings.ui.label_mobile_phone = "Cell Phone"
# Enable this to change the label for 'Postcode'
settings.ui.label_postcode = "ZIP Code"
settings.msg.require_international_phone_numbers = False
# PDF to Letter
settings.base.paper_size = T("Letter")
# Uncomment this to Translate CMS Series Names
# - we want this on when running s3translate but off in normal usage as we use the English names to lookup icons in render_posts
#settings.L10n.translate_cms_series = True
# Uncomment this to Translate Location Names
#settings.L10n.translate_gis_location = True
# -------------------------------------------------------------------------
# GIS settings
#
# Restrict the Location Selector to just certain countries
settings.gis.countries = ("US",)
# Levels for the LocationSelector
# NOTE(review): ``levels`` is not consumed in this section -- presumably
# referenced later in this template; confirm before removing.
levels = ("L1", "L2", "L3")
# Uncomment to pass Addresses imported from CSV to a Geocoder to try and automate Lat/Lon
#settings.gis.geocode_imported_addresses = "google"
# Until we add support to S3LocationSelector to set dropdowns from LatLons
settings.gis.check_within_parent_boundaries = False
# GeoNames username
settings.gis.geonames_username = "mcop"
# Uncomment to hide Layer Properties tool
#settings.gis.layer_properties = False
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# Uncomment to prevent showing LatLon in Location Represents
settings.gis.location_represent_address_only = "icon"
# Resources which can be directly added to the main map
settings.gis.poi_create_resources = None
# -------------------------------------------------------------------------
# Event Management Settings
#
settings.event.incident_teams_tab = "Units"
# -------------------------------------------------------------------------
# Modules
#
# Enabled modules; module_type controls menu placement (None = no menu entry).
settings.modules = OrderedDict([
    # Core modules which shouldn't be disabled
    ("default", Storage(
        name_nice = "Home",
        restricted = False, # Use ACLs to control access to this module
        access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
        module_type = None # This item is not shown in the menu
    )),
    ("admin", Storage(
        name_nice = "Administration",
        #description = "Site Administration",
        restricted = True,
        access = "|1|", # Only Administrators can see this module in the default menu & access the controller
        module_type = None # This item is handled separately for the menu
    )),
    ("appadmin", Storage(
        name_nice = "Administration",
        #description = "Site Administration",
        restricted = True,
        module_type = None # No Menu
    )),
    # ("errors", Storage(
    #     name_nice = "Ticket Viewer",
    #     #description = "Needed for Breadcrumbs",
    #     restricted = False,
    #     module_type = None # No Menu
    # )),
    ("sync", Storage(
        name_nice = "Synchronization",
        #description = "Synchronization",
        restricted = True,
        access = "|1|", # Only Administrators can see this module in the default menu & access the controller
        module_type = None # This item is handled separately for the menu
    )),
    ("translate", Storage(
        name_nice = "Translation Functionality",
        #description = "Selective translation of strings based on module.",
        module_type = None,
    )),
    ("gis", Storage(
        name_nice = "Map",
        #description = "Situation Awareness & Geospatial Analysis",
        restricted = True,
        module_type = 1, # 1st item in the menu
    )),
    ("pr", Storage(
        name_nice = "Persons",
        description = "Central point to record details on People",
        restricted = True,
        access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
        module_type = None
    )),
    ("org", Storage(
        name_nice = "Organizations",
        #description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
        restricted = True,
        module_type = 10
    )),
    # All modules below here should be possible to disable safely
    ("hrm", Storage(
        name_nice = "Contacts",
        #description = "Human Resources Management",
        restricted = True,
        module_type = None,
    )),
    ("cms", Storage(
        name_nice = "Content Management",
        restricted = True,
        module_type = 10,
    )),
    ("event", Storage(
        name_nice = "Event Management",
        restricted = True,
        module_type = 2,
    )),
    ("project", Storage(
        name_nice = "Project Management",
        restricted = True,
        module_type = None,
    )),
    ("doc", Storage(
        name_nice = "Documents",
        #description = "A library of digital resources, such as photos, documents and reports",
        restricted = True,
        module_type = None,
    )),
    ("stats", Storage(
        name_nice = "Statistics",
        restricted = True,
        module_type = None
    )),
])
# END ========================================================================= | modules/templates/CAD/config.py | 8,740 | Template for WA-COP + CAD Cloud Integration
-*- coding: utf-8 -*- Python 2.7 Python 2.6 ========================================================================= System Settings Prepop options Prepop default Theme (folder to use for views/layout.html)settings.base.theme = "default" ------------------------------------------------------------------------- Self-Registration and User Profile Users can self-registersettings.security.self_registration = False Users need to verify their email Users need to be approved Approval emails get sent to all admins ------------------------------------------------------------------------- Security Policy Apply Controller, Function and Table ACLs ------------------------------------------------------------------------- L10n (Localization) settings Default Language Default timezone for users Unsortable 'pretty' date format Number formats (defaults to ISO 31-0) Decimal separator for numbers (defaults to ,) Thousands separator for numbers (defaults to space) Default Country Code for telephone numbers Enable this to change the label for 'Mobile Phone' Enable this to change the label for 'Postcode' PDF to Letter Uncomment this to Translate CMS Series Names - we want this on when running s3translate but off in normal usage as we use the English names to lookup icons in render_postssettings.L10n.translate_cms_series = True Uncomment this to Translate Location Namessettings.L10n.translate_gis_location = True ------------------------------------------------------------------------- GIS settings Restrict the Location Selector to just certain countries Levels for the LocationSelector Uncomment to pass Addresses imported from CSV to a Geocoder to try and automate Lat/Lonsettings.gis.geocode_imported_addresses = "google" Until we add support to S3LocationSelector to set dropdowns from LatLons GeoNames username Uncomment to hide Layer Properties toolsettings.gis.layer_properties = False Uncomment to display the Map Legend as a floating DIV Uncomment to prevent showing LatLon in Location 
Represents Resources which can be directly added to the main map ------------------------------------------------------------------------- Event Management Settings ------------------------------------------------------------------------- Modules Core modules which shouldn't be disabled Use ACLs to control access to this module All Users (inc Anonymous) can see this module in the default menu & access the controller This item is not shown in the menudescription = "Site Administration", Only Administrators can see this module in the default menu & access the controller This item is handled separately for the menudescription = "Site Administration", No Menu ("errors", Storage( name_nice = "Ticket Viewer", description = "Needed for Breadcrumbs", restricted = False, module_type = None No Menu )),description = "Synchronization", Only Administrators can see this module in the default menu & access the controller This item is handled separately for the menudescription = "Selective translation of strings based on module.",description = "Situation Awareness & Geospatial Analysis", 1st item in the menu Only Administrators can see this module in the default menu (access to controller is possible to all still)description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities', All modules below here should be possible to disable safelydescription = "Human Resources Management",description = "A library of digital resources, such as photos, documents and reports", END ========================================================================= | 3,669 | en | 0.634979 |
from urllib.parse import quote
import re
def parse_equation(match):
    """Turn one ``$...$`` LaTeX match into a markdown image tag for the tex API."""
    eq = match.group(0)
    # Curly brackets need to be escaped for the renderer.  Raw strings keep
    # the backslash literal (the original '\{' relied on an invalid escape
    # sequence, deprecated in modern Python).
    eq = eq.replace('{', r'\{')
    eq = eq.replace('}', r'\}')
    # Create the url using the quote method which converts special characters
    url = 'https://tex.s2cms.ru/svg/%s' % quote(eq)
    # Return the markdown image tag referencing the rendered SVG.  The
    # original `return '' % url` lost the image syntax and raised TypeError.
    return '![](%s)' % url
def parse_markdown(md):
    """Replace every ``$...$`` LaTeX span in *md* via :func:`parse_equation`."""
    # Non-greedy so adjacent equations are matched separately.
    equation_re = re.compile(r'(\$.+?\$)')
    return equation_re.sub(parse_equation, md)
def markdown_texify(file_in, file_out):
    """Render the markdown file *file_in* and write the result to *file_out*.

    Prints how many characters were written.  Fixed to close both files via
    context managers (the original leaked both file objects).
    """
    with open(file_in) as fin:
        markdown = fin.read()
    # Parse markdown, take care of equations
    latex = parse_markdown(markdown)
    with open(file_out, 'w') as fout:
        result = fout.write(latex)
    print('Finished, %i characters written to %s' % (result, file_out))
| venv/lib/python3.7/site-packages/github_markdown.py | 1,021 | Converts a latex expression into something the tex API can understand Curly brackets need to be escaped Create the url using the quote method which converts special characters Return the markdown SVG tag Define a pattern for catching latex equations delimited by dollar signs Substitute any latex equations found Read input file Parse markdown, take care of equations Write to out-file | 385 | en | 0.788359 |
import pandas as pd
from oss_hugo.API_Hugo_OSS import API_Hugo_OSS
class OSS_Schedule:
    """Builds pandas views of the summit schedule loaded from the Hugo site.

    Relies on ``API_Hugo_OSS`` (project-local) for the raw session and
    participant pages.
    """

    def __init__(self):
        self.hugo = API_Hugo_OSS()

    def sessions_mapped_by_size(self):
        """Return a DataFrame of non-track sessions sorted by content length.

        Columns: title, track, page_type, content (character count),
        organizers, participants.
        """
        mapping = []
        for path, session in self.hugo.sessions().items():
            content = session.get('content')
            metadata = session.get('metadata')
            page_type = metadata.get('type')
            title = metadata.get('title')
            track = metadata.get('track')
            organizers = metadata.get('organizers')
            participants = metadata.get('participants')
            # Normalise missing or comma-separated values into lists.
            if not organizers:
                organizers = []
            if not participants:
                participants = []
            if isinstance(organizers, str):      # was `type(...) is str`
                organizers = organizers.split(',')
            if isinstance(participants, str):
                participants = participants.split(',')
            # Drop placeholder entries that are not real people.
            if 'TBD' in organizers:
                organizers.remove('TBD')
            if 'Pending' in organizers:
                organizers.remove('Pending')
            if 'you ?' in participants:
                participants.remove('you ?')
            if title and page_type:
                mapping.append({
                    'title': title,
                    'track': track,
                    'page_type': page_type,
                    'organizers': organizers,
                    'participants': participants,
                    'content': len(content),
                    'path': path,
                })
        df_mappings = pd.DataFrame(mapping)
        df_mappings = df_mappings[['title', 'track', 'page_type', 'content', 'organizers', 'participants']]
        # Track pages are containers, not sessions, so exclude them.
        df_sessions = df_mappings[df_mappings['page_type'] != 'track']
        df_sessions = df_sessions.sort_values(['content'], ascending=False).reset_index(drop=True)
        return df_sessions

    # TODO: derive this mapping with a pandas group-by instead of manual dicts.
    def df_sessions_registered_participants(self):
        """Return a DataFrame mapping each session to its registered participants."""
        results = {}
        for key, value in self.hugo.df_participants().to_dict(orient='index').items():
            title = value.get('title')
            sessions = value.get('sessions')
            for session in sessions:
                results.setdefault(session, []).append(title)
        mappings = []
        for key, value in results.items():
            mappings.append({'title': key, 'participants': value, 'participants_count': len(value)})
        df_mappings = pd.DataFrame(mappings)
        df_mappings = df_mappings[['title', 'participants_count', 'participants']].sort_values(['participants_count'], ascending=False)
        return df_mappings
# --------------
##File path for the file
# NOTE(review): the assignment's right-hand side appears to be missing --
# ``file_path`` should be bound to an actual path (presumably supplied by
# the grading environment); confirm before running.
file_path
def read_file(path):
    """Return the first line of the file at ``path``.

    Fixed to read from the ``path`` argument (the original ignored it and
    read the global ``file_path``) and to close the file via a context
    manager.  Callers already pass the matching global, so behaviour is
    unchanged for them.
    """
    with open(path, 'r') as file:
        return file.readline()
sample_message = read_file(file_path)
print(sample_message)
#Code starts here
# --------------
#Code starts here
# NOTE(review): assignments for these names appear truncated -- each should
# be bound to a file path; confirm against the original exercise setup.
file_path_1
file_path_2
def read_file(path):
    """Return the first line of the file at ``path``.

    Fixed to honour the ``path`` argument (the original read the global
    ``file_path_1`` instead) and to close the file via a context manager.
    """
    with open(path, 'r') as file:
        return str(file.readline())
# First numeric message used to derive secret 1.
message_1 = read_file(file_path_1)
print(message_1)
def read_file(path):
    """Return the first line of the file at ``path``.

    Fixed to honour the ``path`` argument (the original read the global
    ``file_path_2`` instead) and to close the file via a context manager.
    """
    with open(path, 'r') as file:
        return str(file.readline())
# Second numeric message used to derive secret 1.
message_2 = read_file(file_path_2)
print(message_2)
def fuse_msg(message_a, message_b):
    """Integer-divide *message_b* by *message_a*; return the quotient as text."""
    numerator = int(message_b)
    denominator = int(message_a)
    return str(numerator // denominator)
secret_msg_1 = fuse_msg(message_1 , message_2)
print(secret_msg_1)
# --------------
#Code starts here
# NOTE(review): assignment right-hand side appears truncated -- should be a path.
file_path_3
def read_file(path):
    """Return the first line of the file at ``path``.

    Fixed to honour the ``path`` argument (the original read the global
    ``file_path_3`` instead) and to close the file via a context manager.
    """
    with open(path, 'r') as file:
        return str(file.readline())
# Colour code word used to derive secret 2.
message_3 = read_file(file_path_3)
print(message_3)
def substitute_msg(message_c):
    """Map a colour code word to its secret occupation.

    Raises ValueError for unknown colours; the original crashed with an
    UnboundLocalError because ``sub`` was only assigned for known inputs.
    """
    substitutions = {
        'Red': 'Army General',
        'Green': 'Data Scientist',
        'Blue': 'Marine Biologist',
    }
    try:
        return substitutions[message_c]
    except KeyError:
        raise ValueError("unknown colour code: %r" % message_c)
secret_msg_2 = substitute_msg(message_3)
print(secret_msg_2)
# --------------
# File path for message 4 and message 5
# NOTE(review): assignment right-hand sides appear truncated -- should be paths.
file_path_4
file_path_5
#Code starts here
def read_file(path):
    """Return the first line of the file at ``path``.

    Fixed to honour the ``path`` argument (the original read the global
    ``file_path_4`` instead) and to close the file via a context manager.
    """
    with open(path, 'r') as file:
        return file.readline()
# Longer word list; words also present in message_5 are removed below.
message_4 = read_file(file_path_4)
print(message_4)
def read_file(path):
    """Return the first line of the file at ``path``.

    Fixed to honour the ``path`` argument (the original read the global
    ``file_path_5`` instead) and to close the file via a context manager.
    """
    with open(path, 'r') as file:
        return file.readline()
# Words to subtract from message_4.
message_5 = read_file(file_path_5)
print(message_5)
def compare_msg(message_d, message_e):
    """Return the words of *message_d* that never occur in *message_e*."""
    words_d = message_d.split()
    words_e = set(message_e.split())   # set: membership only, order irrelevant
    kept = []
    for word in words_d:
        if word not in words_e:
            kept.append(word)
    return " ".join(kept)
secret_msg_3 = compare_msg(message_4 , message_5)
print(secret_msg_3)
# --------------
#Code starts here
# NOTE(review): assignment right-hand side appears truncated -- should be a path.
file_path_6
def read_file(path):
    """Return the first line of the file at ``path``.

    Fixed to honour the ``path`` argument (the original read the global
    ``file_path_6`` instead) and to close the file via a context manager.
    """
    with open(path, 'r') as file:
        return file.readline()
# Fix: read message 6 from this section's file.  The original passed the
# unrelated global ``file_path`` from the very first section.
message_6 = read_file(file_path_6)
print(message_6)
def extract_msg(message_f):
    """Return only the even-length words of *message_f*, space-joined."""
    even_words = [word for word in message_f.split() if len(word) % 2 == 0]
    return " ".join(even_words)
secret_msg_4 = extract_msg(message_6)
print(secret_msg_4)
# --------------
#Secret message parts in the correct order
message_parts=[secret_msg_3, secret_msg_1, secret_msg_4, secret_msg_2]
# NOTE(review): ``user_data_dir`` is not defined anywhere in this file --
# presumably injected by the grading environment; confirm before running.
final_path= user_data_dir + '/secret_message.txt'
#Code starts here
secret_msg = secret_msg_3 + ' '+ secret_msg_1 + ' ' + secret_msg_4 + ' '+ secret_msg_2
def write_file(secret_msg, path):
    """Append ``secret_msg`` to the file at ``path``; return chars written.

    Fixed to honour the ``path`` argument (the original ignored it and
    opened the global ``final_path``) and to close the file via a context
    manager.  The caller already passes ``final_path``, so behaviour is
    unchanged for it.
    """
    with open(path, 'a+') as file:
        return file.write(secret_msg)
# Persist the assembled secret; write_file returns the character count.
sample_message = write_file(secret_msg , final_path)
print(sample_message)
| Spy-Game/code.py | 3,307 | --------------File path for the file Code starts here --------------Code starts here --------------Code starts here -------------- File path for message 4 and message 5Code starts here --------------Code starts here --------------Secret message parts in the correct orderCode starts here | 288 | en | 0.396057 |
# signals are for when a user modifies something in the db, example, creates a post
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from .models import Profile
# Signal hook: provision a Profile row for every freshly created User.
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
    """Create a Profile whenever a brand-new User is saved."""
    if not created:
        return
    Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_profile(sender, instance, **kwargs):
    """Persist the related Profile whenever its User is saved."""
    # Fix: the original final line had dataset metadata fused onto it,
    # making the statement syntactically invalid.
    instance.profile.save()
import os
import numpy as np
import torch
import time
import sys
from collections import OrderedDict
from torch.autograd import Variable
from pathlib import Path
import warnings
warnings.filterwarnings('ignore')
# Make the vendored pix2pixHD package importable before its modules load.
mainpath = os.getcwd()
pix2pixhd_dir = Path(mainpath+'/src/pix2pixHD/')
sys.path.append(str(pix2pixhd_dir))
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
import src.config.train_opt as opt
# Pin training to GPU 0; enable cuDNN autotuning for fixed-size inputs.
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
torch.multiprocessing.set_sharing_strategy('file_system')
torch.backends.cudnn.benchmark = True
def main():
    """Train the pix2pixHD pose-to-video model using settings from ``opt``.

    Resumes step bookkeeping from iter.txt, runs the GAN training loop, and
    periodically logs errors, displays images, and checkpoints the model.
    """
    iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')
    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    dataset_size = len(data_loader)
    print('#training images = %d' % dataset_size)
    start_epoch, epoch_iter = 1, 0
    total_steps = (start_epoch - 1) * dataset_size + epoch_iter
    # Phase offsets so the periodic actions line up with the resumed step count.
    display_delta = total_steps % opt.display_freq
    print_delta = total_steps % opt.print_freq
    save_delta = total_steps % opt.save_latest_freq
    model = create_model(opt)
    model = model.cuda()
    visualizer = Visualizer(opt)
    for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        if epoch != start_epoch:
            epoch_iter = epoch_iter % dataset_size
        for i, data in enumerate(dataset, start=epoch_iter):
            iter_start_time = time.time()
            total_steps += opt.batchSize
            epoch_iter += opt.batchSize
            # whether to collect output images
            save_fake = total_steps % opt.display_freq == display_delta
            ############## Forward Pass ######################
            losses, generated = model(Variable(data['label']), Variable(data['inst']),
                                      Variable(data['image']), Variable(data['feat']), infer=save_fake)
            # sum per device losses
            losses = [torch.mean(x) if not isinstance(x, int) else x for x in losses]
            loss_dict = dict(zip(model.loss_names, losses))
            # calculate final loss scalar
            loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
            loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat', 0) + loss_dict.get('G_VGG', 0)
            ############### Backward Pass ####################
            # update generator weights
            model.optimizer_G.zero_grad()
            loss_G.backward()
            model.optimizer_G.step()
            # update discriminator weights
            model.optimizer_D.zero_grad()
            loss_D.backward()
            model.optimizer_D.step()
            ############## Display results and errors ##########
            ### print out errors
            if total_steps % opt.print_freq == print_delta:
                errors = {k: v.data if not isinstance(v, int) else v for k, v in loss_dict.items()} # CHANGE: removed [0] after v.data
                t = (time.time() - iter_start_time) / opt.batchSize
                visualizer.print_current_errors(epoch, epoch_iter, errors, t)
                visualizer.plot_current_errors(errors, total_steps)
            ### display output images
            if save_fake:
                visuals = OrderedDict([('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
                                       ('synthesized_image', util.tensor2im(generated.data[0])),
                                       ('real_image', util.tensor2im(data['image'][0]))])
                visualizer.display_current_results(visuals, epoch, total_steps)
            ### save latest model
            if total_steps % opt.save_latest_freq == save_delta:
                print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
                model.save('latest')
                np.savetxt(iter_path, (epoch, epoch_iter), delimiter=',', fmt='%d')
            if epoch_iter >= dataset_size:
                break
        # end of epoch
        print('End of epoch %d / %d \t Time Taken: %d sec' %
              (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
        ### save model for this epoch
        if epoch % opt.save_epoch_freq == 0:
            print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
            model.save('latest')
            model.save(epoch)
            np.savetxt(iter_path, (epoch + 1, 0), delimiter=',', fmt='%d')
        ### instead of only training the local enhancer, train the entire network after certain iterations
        if (opt.niter_fix_global != 0) and (epoch == opt.niter_fix_global):
            model.update_fixed_params()
        ### linearly decay learning rate after certain iterations
        if epoch > opt.niter:
            model.update_learning_rate()
    torch.cuda.empty_cache()
# Script entry point.
if __name__ == '__main__':
    main()
| train_pose2vid.py | 5,001 | whether to collect output images Forward Pass sum per device losses calculate final loss scalar Backward Pass update generator weights update discriminator weights Display results and errors print out errors CHANGE: removed [0] after v.data display output images save latest model end of epoch save model for this epoch instead of only training the local enhancer, train the entire network after certain iterations linearly decay learning rate after certain iterations | 471 | en | 0.740716 |
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements classes to evaluate the performance of poison detection methods.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
from typing import Tuple, Union, List
import numpy as np
logger = logging.getLogger(__name__)


class GroundTruthEvaluator:
    """
    Class to evaluate the performance of the poison detection method.
    """

    # Cluster assignment / ground-truth labels: 0 = poison, 1 = clean.
    _POISON = 0
    _CLEAN = 1

    def __init__(self):
        """
        Evaluates ground truth constructor
        """

    def analyze_correctness(
        self, assigned_clean_by_class: Union[np.ndarray, List[np.ndarray]], is_clean_by_class: list
    ) -> Tuple[np.ndarray, str]:
        """
        For each training sample, determine whether the activation clustering method was correct.

        :param assigned_clean_by_class: Result of clustering.
        :param is_clean_by_class: is clean separated by class.
        :return: Two variables are returned:
                 1) all_errors_by_class[i]: an array indicating the correctness of each assignment
                 in the ith class. Such that:
                 all_errors_by_class[i] = 0 if marked poison, is poison
                 all_errors_by_class[i] = 1 if marked clean, is clean
                 all_errors_by_class[i] = 2 if marked poison, is clean
                 all_errors_by_class[i] = 3 marked clean, is poison
                 2) Json object with confusion matrix per-class.
        :raises ValueError: If an (assignment, ground truth) pair is not a 0/1 combination.
        """
        # (assignment, ground truth) -> error code; replaces the original
        # if/elif chain.
        outcome_codes = {
            (self._POISON, self._POISON): 0,  # true positive
            (self._CLEAN, self._CLEAN): 1,  # true negative
            (self._POISON, self._CLEAN): 2,  # false positive
            (self._CLEAN, self._POISON): 3,  # false negative
        }
        all_errors_by_class = []
        dic_json = {}
        logger.debug("Error rates per class:")
        for class_i, (assigned_clean, is_clean) in enumerate(zip(assigned_clean_by_class, is_clean_by_class)):
            errors = []
            for assignment, bl_var in zip(assigned_clean, is_clean):
                try:
                    errors.append(outcome_codes[(assignment, int(bl_var))])
                except KeyError:
                    # ValueError subclasses Exception, so existing callers
                    # catching the original generic Exception still work.
                    raise ValueError("Analyze_correctness entered wrong class")
            errors = np.asarray(errors)
            logger.debug("-------------------%d---------------", class_i)
            key_i = "class_" + str(class_i)
            dic_json[key_i] = self.get_confusion_matrix(errors)
            all_errors_by_class.append(errors)
        all_errors_by_class = np.asarray(all_errors_by_class)
        conf_matrix_json = json.dumps(dic_json)
        return all_errors_by_class, conf_matrix_json

    def get_confusion_matrix(self, values: np.ndarray) -> dict:
        """
        Computes and returns a json object that contains the confusion matrix for each class.

        :param values: Array indicating the correctness of each assignment in the ith class.
        :return: Json object with confusion matrix per-class.
        """
        # int(...) keeps the counts JSON-serializable (plain ints, not numpy).
        true_positive = int(np.count_nonzero(values == 0))
        true_negative = int(np.count_nonzero(values == 1))
        false_positive = int(np.count_nonzero(values == 2))
        false_negative = int(np.count_nonzero(values == 3))
        # calculate_and_print also emits the debug log lines, so call it for
        # every cell exactly as the original did.
        tp_rate = self.calculate_and_print(true_positive, true_positive + false_negative, "true-positive rate")
        tn_rate = self.calculate_and_print(true_negative, false_positive + true_negative, "true-negative rate")
        fp_rate = self.calculate_and_print(false_positive, false_positive + true_negative, "false-positive rate")
        fn_rate = self.calculate_and_print(false_negative, true_positive + false_negative, "false-negative rate")
        return {
            "TruePositive": self._matrix_entry(tp_rate, true_positive, true_positive + false_negative),
            "TrueNegative": self._matrix_entry(tn_rate, true_negative, false_positive + true_negative),
            "FalsePositive": self._matrix_entry(fp_rate, false_positive, false_positive + true_negative),
            "FalseNegative": self._matrix_entry(fn_rate, false_negative, true_positive + false_negative),
        }

    @staticmethod
    def _matrix_entry(rate: float, numerator: int, denominator: int) -> dict:
        """Build one confusion-matrix cell; the rate is "N/A" when undefined.

        Replaces the original's four copy-pasted dict constructions.
        """
        return dict(
            rate="N/A" if denominator == 0 else round(rate, 2),
            numerator=numerator,
            denominator=denominator,
        )

    @staticmethod
    def calculate_and_print(numerator: int, denominator: int, name: str) -> float:
        """
        Computes and prints the rates based on the denominator provided.

        :param numerator: number used to compute the rate.
        :param denominator: number used to compute the rate.
        :param name: Rate name being computed e.g., false-positive rate.
        :return: Computed rate
        """
        try:
            res = 100 * (numerator / float(denominator))
            logger.debug("%s: %d/%d=%.3g", name, numerator, denominator, res)
            return res
        except ZeroDivisionError:
            logger.debug("%s: couldn't calculate %d/%d", name, numerator, denominator)
            return 0.0
| art/defences/detector/poison/ground_truth_evaluator.py | 7,842 | Class to evaluate the performance of the poison detection method.
Evaluates ground truth constructor
For each training sample, determine whether the activation clustering method was correct.
:param assigned_clean_by_class: Result of clustering.
:param is_clean_by_class: is clean separated by class.
:return: Two variables are returned:
1) all_errors_by_class[i]: an array indicating the correctness of each assignment
in the ith class. Such that:
all_errors_by_class[i] = 0 if marked poison, is poison
all_errors_by_class[i] = 1 if marked clean, is clean
all_errors_by_class[i] = 2 if marked poison, is clean
all_errors_by_class[i] = 3 marked clean, is poison
2) Json object with confusion matrix per-class.
Computes and prints the rates based on the denominator provided.
:param numerator: number used to compute the rate.
:param denominator: number used to compute the rate.
:param name: Rate name being computed e.g., false-positive rate.
:return: Computed rate
Computes and returns a json object that contains the confusion matrix for each class.
:param values: Array indicating the correctness of each assignment in the ith class.
:return: Json object with confusion matrix per-class.
This module implements classes to evaluate the performance of poison detection methods.
MIT License Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. marked poison, is poison = 0 true positive marked clean, is clean = 1 true negative marked poison, is clean = 2 false positive marked clean, is poison = 3 false negative | 2,617 | en | 0.850704 |
import logging; log = logging.getLogger(__name__)
from .Menu import Menu
class HitboxMenu(Menu):
    """A menu listing the current model's hitboxes for inspection."""

    _HEADER = "Box Bone 02 14 1617 Radius X Y Z"
    _ROW_FMT = "%3d %04X %04X %04X %02X%02X %+7.2f %+7.2f %+7.2f %+7.2f"

    def __init__(self, parent):
        super().__init__(parent)
        self.title = "Hitboxes"
        self.refresh()

    def refresh(self):
        """Rebuild the menu entries from the parent's current model."""
        model = self.parent.model
        if model is None:
            self.items = ["No Model"]
            self.cursorPos = 0
            return
        rows = [self._HEADER]
        for idx, hbox in enumerate(model.hitboxes):
            rows.append(self._ROW_FMT % (
                idx, hbox.bone,
                hbox.unk02, hbox.unk14, hbox.unk16, hbox.unk17, hbox.radius,
                hbox.pos[0], hbox.pos[1], hbox.pos[2],
            ))
        self.items = rows
        self.cursorPos = 0

    def render(self):
        super().render()

    def _onChange(self):
        # Tell the parent which hitbox the cursor is on (-1 = the header row).
        self.parent.highlightedHitbox = self.cursorPos - 1
| modelviewer/programs/SfaModel/Menu/HitboxMenu.py | 1,614 | A menu for examining a hitbox.
def activate(self): selPoly = self.cursorPos - 1 if selPoly >= 0: poly = self.dlist.polys[selPoly] menu = PolyMenu(self.parent, poly, "Display List %d Poly %d: %s" % (poly['list'], selPoly, self.drawModes[poly['mode']], )) self.parent.enterMenu(menu)selPoly = self.cursorPos - 1if selPoly >= 0: poly = self.dlist.polys[selPoly] log.dprint("\x1B[16,400HPoly %d: %s, %d vtxs", selPoly, self.drawModes[poly['mode']], len(poly['vtxs'])) | 540 | en | 0.375051 |
# -*- coding: utf-8 -*-
# Copyright 2017 Openstack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
# Filesystem layout options; $pybasedir etc. are interpolated by oslo.config.
# Fix: the adjacent-string help texts were missing separating spaces
# ("shadowfiendpython", "maintainingsshadowfiend's") and "maintainings" was
# a typo for "maintaining".
PATH_OPTS = [
    cfg.StrOpt('pybasedir',
               default=os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                    '../')),
               help='Directory where the shadowfiend '
                    'python module is installed.'),
    cfg.StrOpt('bindir',
               default='$pybasedir/bin',
               help='Directory where shadowfiend '
                    'binaries are installed.'),
    cfg.StrOpt('state_path',
               default='$pybasedir',
               help="Top-level directory for maintaining "
                    "shadowfiend's state."),
]
CONF = cfg.CONF
CONF.register_opts(PATH_OPTS)
def basedir_def(*args):
    """Return an uninterpolated path relative to $pybasedir."""
    segments = ('$pybasedir',) + args
    return os.path.join(*segments)
def bindir_def(*args):
    """Return an uninterpolated path relative to $bindir."""
    segments = ('$bindir',) + args
    return os.path.join(*segments)
def state_path_def(*args):
    """Return an uninterpolated path relative to $state_path."""
    segments = ('$state_path',) + args
    return os.path.join(*segments)
def basedir_rel(*args):
    """Return a path relative to $pybasedir."""
    segments = (CONF.pybasedir,) + args
    return os.path.join(*segments)
def bindir_rel(*args):
    """Return a path relative to $bindir."""
    segments = (CONF.bindir,) + args
    return os.path.join(*segments)
def state_path_rel(*args):
    """Return a path relative to the configured $state_path value."""
    segments = (CONF.state_path,) + args
    return os.path.join(*segments)
| shadowfiend/common/paths.py | 2,113 | Return an uninterpolated path relative to $pybasedir.
Return a path relative to $pybasedir.
Return an uninterpolated path relative to $bindir.
Return a path relative to $bindir.
Return an uninterpolated path relative to $state_path.
Return a path relative to $state_path.
-*- coding: utf-8 -*- Copyright 2017 Openstack Foundation. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 882 | en | 0.836584 |
import csv
def save2csv(dst_fh, row):
    """Append a single row to the CSV file at ``dst_fh``.

    Args:
        dst_fh: str, path to the output CSV file (created if missing)
        row: list, values to write as one CSV row

    Notes:
        The file is opened with ``newline=""`` as the csv module requires,
        so the writer's ``lineterminator="\\n"`` is not translated a second
        time by the text layer (previously this doubled line endings to
        ``\\r\\n`` on Windows). Rows that cannot be encoded are skipped
        silently — deliberate best-effort behavior.
    """
    with open(dst_fh, "a", encoding="utf-8", newline="") as csvfile:
        out = csv.writer(
            csvfile,
            delimiter=",",
            lineterminator="\n",
            quotechar='"',
            quoting=csv.QUOTE_MINIMAL,
        )
        try:
            out.writerow(row)
        except UnicodeEncodeError:
            # best-effort: drop the offending row rather than abort the run
            pass
| src/utils.py | 532 | Appends a list with data to a dst_fh csv
args:
dst_fh: str, output file
row: list, list of values to write in a row | 123 | en | 0.563405 |
#!/usr/bin/env python3
# Print out all the codons for the sequence below in reading frame 1
# Use a 'for' loop
dna = 'ATAGCGAATATCTCTCATGAGAGGGAA'
# walk the sequence three bases at a time (reading frame 1) and print each codon
for start in range(0, len(dna) - 2, 3):
    codon = dna[start:start + 3]
    print(codon)
"""
ATA
GCG
AAT
ATC
TCT
CAT
GAG
AGG
GAA
"""
| codons.py | 256 | !/usr/bin/env python3 Print out all the codons for the sequence below in reading frame 1 Use a 'for' loop | 105 | en | 0.550913 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\design.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated UI for the pose-estimation main window.

    Regenerate from ``design.ui`` rather than editing widget code here;
    only these comments are hand-written.
    """
    def setupUi(self, MainWindow):
        """Build the widget tree and layouts on ``MainWindow``."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(632, 318)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        # top grid: row 0 = input video path, row 1 = CSV output path
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        self.csv_output_button = QtWidgets.QPushButton(self.centralwidget)
        self.csv_output_button.setObjectName("csv_output_button")
        self.gridLayout.addWidget(self.csv_output_button, 1, 4, 1, 1)
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setObjectName("label_2")
        self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
        self.csv_output_line_edit = QtWidgets.QLineEdit(self.centralwidget)
        # NOTE(review): this stylesheet string looks like pasted code, not CSS —
        # Qt will not apply it; confirm intent in Designer.
        self.csv_output_line_edit.setStyleSheet("output_line_edit.setStyleSheet(\"QLineEdit { border-radius: 5px; }\")")
        self.csv_output_line_edit.setObjectName("csv_output_line_edit")
        self.gridLayout.addWidget(self.csv_output_line_edit, 1, 1, 1, 3)
        self.input_button = QtWidgets.QPushButton(self.centralwidget)
        self.input_button.setObjectName("input_button")
        self.gridLayout.addWidget(self.input_button, 0, 4, 1, 1)
        self.video_line_edit = QtWidgets.QLineEdit(self.centralwidget)
        # NOTE(review): same issue — stylesheet string contains code, not CSS.
        self.video_line_edit.setStyleSheet("video_line_edit.setStyleSheet(\"QLineEdit { border: 2px solid gray; border-radius: 5px;}\")")
        self.video_line_edit.setObjectName("video_line_edit")
        self.gridLayout.addWidget(self.video_line_edit, 0, 1, 1, 3)
        self.gridLayout.setColumnStretch(1, 7)
        self.gridLayout.setColumnStretch(2, 1)
        self.gridLayout.setColumnStretch(3, 1)
        self.gridLayout.setColumnStretch(4, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        # model selection row (combo box + spacer)
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setContentsMargins(-1, 0, -1, 0)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setObjectName("label_3")
        self.horizontalLayout_2.addWidget(self.label_3)
        self.model_combo_box = QtWidgets.QComboBox(self.centralwidget)
        self.model_combo_box.setEditable(False)
        self.model_combo_box.setObjectName("model_combo_box")
        self.model_combo_box.addItem("")
        self.model_combo_box.addItem("")
        self.model_combo_box.addItem("")
        self.model_combo_box.addItem("")
        self.horizontalLayout_2.addWidget(self.model_combo_box)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        # checkbox row: overlay keypoints / dark background
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setContentsMargins(-1, 24, -1, -1)
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.save_video_check_box = QtWidgets.QCheckBox(self.centralwidget)
        self.save_video_check_box.setChecked(True)
        self.save_video_check_box.setObjectName("save_video_check_box")
        self.horizontalLayout_3.addWidget(self.save_video_check_box)
        self.dark_bg_check_box = QtWidgets.QCheckBox(self.centralwidget)
        self.dark_bg_check_box.setObjectName("dark_bg_check_box")
        self.horizontalLayout_3.addWidget(self.dark_bg_check_box)
        self.verticalLayout.addLayout(self.horizontalLayout_3)
        # output video path row
        self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_4.setContentsMargins(-1, -1, -1, 36)
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        self.label_4 = QtWidgets.QLabel(self.centralwidget)
        self.label_4.setObjectName("label_4")
        self.horizontalLayout_4.addWidget(self.label_4)
        self.video_output_line_edit = QtWidgets.QLineEdit(self.centralwidget)
        self.video_output_line_edit.setObjectName("video_output_line_edit")
        self.horizontalLayout_4.addWidget(self.video_output_line_edit)
        self.video_output_button = QtWidgets.QPushButton(self.centralwidget)
        self.video_output_button.setObjectName("video_output_button")
        self.horizontalLayout_4.addWidget(self.video_output_button)
        self.verticalLayout.addLayout(self.horizontalLayout_4)
        # progress bar starts at 0 with its text hidden
        self.progress_bar = QtWidgets.QProgressBar(self.centralwidget)
        self.progress_bar.setProperty("value", 0)
        self.progress_bar.setTextVisible(False)
        self.progress_bar.setObjectName("progress_bar")
        self.verticalLayout.addWidget(self.progress_bar)
        # centered Cancel / OK button row; Cancel disabled until a run starts
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setContentsMargins(-1, 24, -1, -1)
        self.horizontalLayout.setObjectName("horizontalLayout")
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.cancel_button = QtWidgets.QPushButton(self.centralwidget)
        self.cancel_button.setEnabled(False)
        self.cancel_button.setCheckable(True)
        self.cancel_button.setChecked(False)
        self.cancel_button.setObjectName("cancel_button")
        self.horizontalLayout.addWidget(self.cancel_button)
        self.ok_button = QtWidgets.QPushButton(self.centralwidget)
        self.ok_button.setObjectName("ok_button")
        self.horizontalLayout.addWidget(self.ok_button)
        spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem2)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.verticalLayout.setStretch(0, 1)
        self.verticalLayout_2.addLayout(self.verticalLayout)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 632, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Assign the translatable UI strings (Russian labels)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label.setText(_translate("MainWindow", "Путь к видео:"))
        self.csv_output_button.setText(_translate("MainWindow", "Выбрать"))
        self.label_2.setText(_translate("MainWindow", "CSV для точек:"))
        self.input_button.setText(_translate("MainWindow", "Выбрать"))
        self.label_3.setText(_translate("MainWindow", "Модель:"))
        self.model_combo_box.setCurrentText(_translate("MainWindow", "cmu"))
        self.model_combo_box.setItemText(0, _translate("MainWindow", "cmu"))
        self.model_combo_box.setItemText(1, _translate("MainWindow", "mobilenet_thin"))
        self.model_combo_box.setItemText(2, _translate("MainWindow", "mobilenet_v2_large"))
        self.model_combo_box.setItemText(3, _translate("MainWindow", "mobilenet_v2_small"))
        self.save_video_check_box.setText(_translate("MainWindow", "Показать ключевые точки в видео"))
        self.dark_bg_check_box.setText(_translate("MainWindow", "Темный фон"))
        self.label_4.setText(_translate("MainWindow", "Сохранить видео как:"))
        self.video_output_button.setText(_translate("MainWindow", "Выбрать"))
        self.progress_bar.setFormat(_translate("MainWindow", "%p%"))
        self.cancel_button.setText(_translate("MainWindow", "Прервать"))
        self.ok_button.setText(_translate("MainWindow", "ОК"))
| design.py | 8,521 | -*- coding: utf-8 -*- Form implementation generated from reading ui file '.\design.ui' Created by: PyQt5 UI code generator 5.11.3 WARNING! All changes made in this file will be lost! | 182 | en | 0.870108 |
from django.contrib import admin
from django.contrib.auth.models import User
from django.test.testcases import TestCase
from django.urls import reverse
from pagetools.menus.admin import MenuAdmin, make_entrieable_admin
from pagetools.menus.apps import MenusConfig
from pagetools.menus.models import Link, Menu, MenuEntry
from pagetools.tests.test_models import ConcretePublishableLangModel
from pagetools.utils import get_adminedit_url
from pagetools.widgets.settings import TEMPLATETAG_WIDGETS
class CPMAdmin(admin.ModelAdmin):
    """Plain ModelAdmin for ConcretePublishableLangModel; mutated in place by
    test_mk_entriableadmin via make_entrieable_admin."""
    model = ConcretePublishableLangModel
# register so admin add/change views exist for the model under test
admin.site.register(ConcretePublishableLangModel, CPMAdmin)
class MenuAdminTests(TestCase):
    """Integration tests for MenuAdmin: menu CRUD, entry reordering, and
    turning an arbitrary ModelAdmin into a menu-entrieable one."""
    def setUp(self):
        """Create and log in a superuser; capture the default admin site."""
        self.admin = User.objects.create_superuser("admin", "q@w.de", "password")
        self.client.login(username="admin", password="password")
        self.site = admin.site
    def _data_from_menu(self, menu):
        """Extract the menu's model fields as POST data for the change form."""
        return {
            key: menu.__dict__[key]
            for key in (
                "id",
                "lang",
                "title",
                "slug",
                "content_type_id",
                "object_id",
                "enabled",
                "lft",
                "rght",
                "tree_id",
                "level",
            )
        }
    def test_admin_index(self):
        """Test the index because a custom dashboard with MenuModule may be used."""
        adminindex = reverse("admin:index")
        response = self.client.get(adminindex, follow=True, extra={"app_label": "admin"})
        self.assertIn(response.status_code, (200, 302))
    def test_add(self):
        """Adding a menu through the admin creates it with no children."""
        adminurl = reverse("admin:menus_menu_add", args=[])
        self.client.post(adminurl, {"title": "Menu1"})
        menu = Menu.objects.get(title="Menu1")
        self.assertEqual(len(menu.children.all()), 0)
        return menu
    def test_update(self):
        """Posting entry-* form fields updates the matching child entry."""
        menu = Menu.objects.add_root(title="Menu1")
        entries = []
        for i in range(1, 3):
            entries.append(
                MenuEntry.objects.add_child(
                    parent=menu,
                    title="e%s" % i,
                    content_object=Link.objects.create(
                        url="#%s" % i,
                    ),
                    enabled=True,
                )
            )
        adminurl = reverse("admin:menus_menu_change", args=[menu.pk])
        self.client.get(adminurl, {"pk": menu.pk})
        data = self._data_from_menu(menu)
        data["entry-order-id-0"] = entries[0].pk
        data["entry-text-0"] = "changed"
        data["entry-published-0"] = 1
        self.client.post(adminurl, data)
        children = menu.children_list()
        self.assertEqual(children[0]["entry_title"], "changed")
    def test_reorder(self):
        """The 'entry-order' POST field reorders the menu's children."""
        menu = Menu.objects.add_root(title="Menu1")
        entries = []
        for i in range(1, 3):
            entries.append(
                MenuEntry.objects.add_child(
                    parent=menu,
                    title="e%s" % i,
                    content_object=Link.objects.create(
                        url="#%s" % i,
                    ),
                    enabled=True,
                )
            )
        adminurl = reverse("admin:menus_menu_change", args=[menu.pk])
        data = self._data_from_menu(menu)
        self.client.post(adminurl, data)
        self.assertEqual([entry["entry_title"] for entry in menu.children_list()], ["e1", "e2"])
        data.update(
            {
                "entry-order": "[%s]=null&[%s]=null" % (entries[1].pk, entries[0].pk),
            }
        )
        self.client.post(adminurl, data)
        self.assertEqual([e["entry_title"] for e in menu.children_list()], ["e2", "e1"])
    def test_addentry(self):
        """Posting 'addentry' with an app#model token redirects (302)."""
        menu = Menu.objects.add_root(title="Menu1", enabled=True)
        entries = []
        for i in range(1, 3):
            entries.append(
                MenuEntry.objects.add_child(
                    parent=menu,
                    title="e%s" % i,
                    content_object=Link.objects.create(
                        url="#%s" % i,
                    ),
                    enabled=True,
                )
            )
        adminurl = reverse("admin:menus_menu_change", args=[menu.pk])
        data = self._data_from_menu(menu)
        data["addentry"] = "menus#link"
        result = self.client.post(adminurl, data)
        self.assertEqual(result.status_code, 302)
    def test_addableentries(self):
        """addable_entries lists one <li> per entrieable model (minus the
        widget entry when TEMPLATETAG_WIDGETS is disabled)."""
        admininstance = MenuAdmin(model=Menu, admin_site=self.site)
        menu = Menu.objects.add_root(title="Menu1")
        entries = admininstance.addable_entries(obj=menu)
        len_e = len(MenusConfig.entrieable_models)
        if not TEMPLATETAG_WIDGETS:
            len_e -= 1
        self.assertEqual(entries.count("<li>"), len_e)
    def test_mk_entriableadmin(self):
        """make_entrieable_admin injects a 'menus' form field and saving the
        form creates the corresponding MenuEntry objects."""
        admincls = CPMAdmin
        make_entrieable_admin(admincls)
        self.assertTrue(admincls.is_menu_entrieable)
        instance = ConcretePublishableLangModel.objects.create(foo="x")
        data = instance.__dict__
        menu = Menu.objects.add_root(title="Menu1")
        admininstance = admincls(model=ConcretePublishableLangModel, admin_site=self.site)
        self.assertTrue(admininstance.get_fields({}, instance), [])
        self.assertTrue(admininstance.get_fieldsets({}, instance), [])
        formcls = admincls.form
        formcls._meta.model = ConcretePublishableLangModel
        form = formcls(instance.__dict__)
        self.assertTrue("menus" in form.fields.keys())
        valid = form.is_valid()
        self.assertTrue(valid)
        data["menus"] = [menu.pk]
        form = formcls(data, instance=instance)
        self.assertTrue("menus" in form.fields.keys())
        valid = form.is_valid()
        self.assertTrue(valid)
        data["status_changed_0"] = "2016-01-01"
        data["status_changed_1"] = "23:00"
        adminurl = get_adminedit_url(instance)
        response = self.client.post(adminurl, data)
        self.assertIn(response.status_code, (200, 302))
        self.assertEqual(MenuEntry.objects.count(), 2)
        response = self.client.get(adminurl)
        content = str(response.content)
        start = content.find('<input type="checkbox" name="menus"')
        end = content[start:].find(">")
        tag = content[start : start + end + 1]
        self.assertTrue(" checked" in tag)
| pagetools/menus/tests/test_admin.py | 6,441 | test index because customdashboard with MenuModule is may used | 62 | en | 0.791006 |
# coding: utf8
from __future__ import absolute_import
import datetime
from celery import shared_task
from celery.utils.log import get_task_logger
from django.utils.translation import ugettext as _
from django.core.mail import send_mail
from django.contrib.auth import get_user_model
from django.conf import settings
from django.template import loader, Context
from common.helpers import send_email
from .models import Challenge
log = get_task_logger(__name__)
def send_challenge_reminder(user_id):
    """Email one runner about their active challenges that end today."""
    runner = get_user_model().objects.get(id=user_id)
    ending_challenges = runner.challenges_recieved.filter(
        status=Challenge.ACTIVE,
        end_date=datetime.date.today(),
    )
    send_email(
        [runner.email],
        _('Challenge ends today!'),
        'challenges/emails/challenges_reminder.html',
        {'ending_challenges': ending_challenges},
    )
@shared_task(ignore_result=True)
def send_challenge_reminders():
    """Send a reminder to every active runner with a challenge ending today."""
    runners = get_user_model().objects.filter(
        is_active=True,
        challenges_recieved__end_date=datetime.date.today(),
    )
    for runner in runners:
        send_challenge_reminder(runner.id)
| challenges/tasks.py | 1,414 | coding: utf8 Fetch runners that has challenges ending today. | 60 | en | 0.976006 |
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.goal.task_registrar import TaskRegistrar as task
from pants.contrib.codeanalysis.tasks.bundle_entries import BundleEntries
from pants.contrib.codeanalysis.tasks.extract_java import ExtractJava
from pants.contrib.codeanalysis.tasks.index_java import IndexJava
def register_goals():
    """Install the Kythe code-analysis tasks on the ``index`` goal."""
    for task_name, action in (
        ("kythe-java-extract", ExtractJava),
        ("kythe-java-index", IndexJava),
        ("bundle-entries", BundleEntries),
    ):
        task(name=task_name, action=action).install("index")
| contrib/codeanalysis/src/python/pants/contrib/codeanalysis/register.py | 640 | Copyright 2017 Pants project contributors (see CONTRIBUTORS.md). Licensed under the Apache License, Version 2.0 (see LICENSE). | 126 | en | 0.673559 |
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class OslusiadasextractItem(scrapy.Item):
    """Scrapy item for the oslusiadasextract spiders (no fields defined yet)."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
| root_package/oslusiadasextract/items.py | 273 | Define here the models for your scraped items See documentation in: https://docs.scrapy.org/en/latest/topics/items.html define the fields for your item here like: name = scrapy.Field() | 184 | en | 0.676575 |
import numpy as np
import math
# number will be decreased by a small amount when some deletions happen
# target vertex count; the effective count shrinks slightly when random
# removal commands delete vertices
number_of_vertices = 5_000
# probability of an arc between any two instances
probability_of_an_arc = 0.001
# number of reads in the read-heavy test
read_test_operations = 20_000
# probability of emitting a random vertex/arc removal after processing each vertex
removal_probability = 0.04
# probability of adding a lookup command after each add arc command in write-heavy test
random_lookup_probability = 0.1
# probability of adding an add command after each lookup command in read-heavy test
random_add_probability = 0.1
# used in the write-heavy test. probability of removing a vertex. Removing an arc has a 1-x probability
probability_of_removing_a_vertex = 0.5
# used in the read-heavy test. probability of looking up a vertex. Looking up an arc has a 1-x probability
probability_of_looking_up_a_vertex = 0.5
# binomial degree model: mean n*p, std dev sqrt((n-1)*p*(1-p))
# NOTE(review): mean uses n while the std dev uses n-1 — confirm which is intended
avg_degree = number_of_vertices * probability_of_an_arc
std_deviation = math.sqrt((number_of_vertices-1)*probability_of_an_arc*(1-probability_of_an_arc))
write_heavy_test_name = "operations1.txt"
read_heavy_test_name = "operations2.txt"
# Emit the write-heavy workload: av/aa commands with occasional lookups and removals.
with open(write_heavy_test_name, "w") as file:
    # write the vertices first so you don't get errors in neo4j
    for i in range(0, number_of_vertices):
        file.write(f"av {i}\n")
    print("Written vertices")
    # start adding the arcs
    for current_vertex in range(0, number_of_vertices):
        # get the degree of the vertex using the normal distribution
        # NOTE(review): np.random.ranf is a deprecated alias of random_sample —
        # consider updating the calls below
        degree = np.random.normal(avg_degree, std_deviation)
        for j in range(0, int(degree)):
            # select a target and write the operation to the instruction set
            target = np.random.randint(0, number_of_vertices)
            while target == current_vertex:
                target = np.random.randint(0, number_of_vertices)
            file.write(f"aa {current_vertex} {target}\n")
            # add rare random lookups during the write-heavy test
            if(np.random.ranf()<random_lookup_probability):
                if(np.random.ranf()<probability_of_looking_up_a_vertex):
                    vertex_to_look = np.random.randint(0, number_of_vertices)
                    file.write(f"lv {vertex_to_look}\n")
                else:
                    source_arc_to_look = np.random.randint(0, number_of_vertices)
                    target_arc_to_look = np.random.randint(0, number_of_vertices)
                    file.write(f"la {source_arc_to_look} {target_arc_to_look}\n")
        if(current_vertex % 1000 == 0):
            print(f"Written arcs for {current_vertex} vertices")
        # after processing the arcs of a vertex add a rare random removal command
        if(np.random.ranf()<removal_probability):
            if(np.random.ranf()<probability_of_removing_a_vertex):
                vertex_to_remove = np.random.randint(0, number_of_vertices)
                file.write(f"rv {vertex_to_remove}\n")
            else:
                source_arc_to_rmv = np.random.randint(0, number_of_vertices)
                target_arc_to_rmv = np.random.randint(0, number_of_vertices)
                file.write(f"ra {source_arc_to_rmv} {target_arc_to_rmv}\n")
    print("Written arcs")
# Emit the read-heavy workload: lv/la lookups with occasional vertex adds.
with open(read_heavy_test_name, "w") as file:
    # write the read_test_operations read operations
    for i in range(0, read_test_operations):
        # before each read operation add a rare random write command
        # (the "x" prefix keeps these ids distinct from the generated vertices)
        if(np.random.ranf()<random_add_probability):
            file.write(f"av x{i}\n")
        if(np.random.ranf()<probability_of_looking_up_a_vertex):
            vertex_to_look = np.random.randint(0, number_of_vertices)
            file.write(f"lv {vertex_to_look}\n")
        else:
            source_arc_to_look = np.random.randint(0, number_of_vertices)
            target_arc_to_look = np.random.randint(0, number_of_vertices)
            file.write(f"la {source_arc_to_look} {target_arc_to_look}\n")
        if(i % 10_000 == 0):
            print(f"Written {i} lookups")
    print("Written lookups")
| Conflict-free_Replicated_Data_Types/experiments/benchmarking/OperationTestsGenerator.py | 4,235 | number will be decreased by a small amount when some deletions happen probability of an arc between any two instances number of reads in the read-heavy test probability of removing a random vertex in each after processing each vertex probability of adding a lookup command after each add arc command in write-heavy test probability of adding an add command after each lookup command in read-heavy test used in the write-heavy test. prabability of removing a vertex. Removing an arc has a 1-x probability used in the read-heavy test. prabability of looking up a vertex. Looking up an arc has a 1-x probability write the vertices first so you dont get errors in neo4j start adding the arcs get the degree of the vertex using the normal distribution select a target and write the operation to the instruction set add rare random lookups durring the write-heavy test after processing the arcs of an vertex add a rare random removal command write the read_test_operations read operations before each read operation add a rare random write command | 1,045 | en | 0.867408 |
"""Tests related to inheritance from interface."""
from datetime import datetime
import pytest
from generics import defended
from generics import delegated
from generics import private
pytestmark = pytest.mark.parametrize("f", [private, delegated, defended])
def test_allow_inheritance_from_interface(f, s):
    """Allow inheritance from interface."""
    wrapped = f(s.User)
    instance = wrapped(last_login=datetime(1999, 12, 31))
    assert not instance.is_active()
| tests/test_subtyping.py | 474 | Allow inheritance from interface.
Tests related to inheritance from interface. | 78 | en | 0.906998 |
import math
import collections
import numpy as np
def __CheckEvaluationInput(y, yPredicted):
    """Validate that both sequences match in length and contain only 0/1 labels."""
    # Check sizes
    if len(y) != len(yPredicted):
        raise UserWarning("Attempting to evaluate between the true labels and predictions.\n Arrays contained different numbers of samples. Check your work and try again.")
    # Check values
    bad = any(v not in [0, 1] for v in y) or any(v not in [0, 1] for v in yPredicted)
    if bad:
        raise UserWarning("Attempting to evaluate between the true labels and predictions.\n Arrays contained unexpected value. Must be 0 or 1.")
def __CheckEvaluationCount(y, yPredicted):
    """Validate that both sequences contain the same number of samples."""
    if len(y) != len(yPredicted):
        raise UserWarning("Attempting to evaluate between the true labels and predictions.\n Arrays contained different numbers of samples. Check your work and try again.")
def Accuracy(y, yPredicted):
    """Fraction of samples where the prediction matches the true label."""
    __CheckEvaluationInput(y, yPredicted)
    flags = [int(y[i] == yPredicted[i]) for i in range(len(y))]
    # note: raises ZeroDivisionError on empty input, same as the original
    return sum(flags) / len(flags)
def CountCorrect(y, yPredicted):
    """Number of samples where the prediction matches the true label."""
    __CheckEvaluationInput(y, yPredicted)
    return sum(int(y[i] == yPredicted[i]) for i in range(len(y)))
def PredictionDiff(xTestRaw, y, yPredicted):
    """Map each raw test sample to (true label - predicted label)."""
    __CheckEvaluationCount(y, yPredicted)
    __CheckEvaluationCount(xTestRaw, y)
    return {xTestRaw[i]: y[i] - yPredicted[i] for i in range(len(y))}
def Precision(y, yPredicted):
    """TP / (TP + FP); 0.0 when nothing was predicted positive."""
    tp = TPCount(y, yPredicted)
    predicted_positive = tp + FPCount(y, yPredicted)
    return tp / predicted_positive if predicted_positive else 0.0
def Recall(y, yPredicted):
    """TP / (TP + FN); 0.0 when there are no actual positives."""
    tp = TPCount(y, yPredicted)
    actual_positive = tp + FNCount(y, yPredicted)
    return tp / actual_positive if actual_positive else 0.0
def FalseNegativeRate(y, yPredicted):
    """FN / (FN + TP); 0.0 when there are no actual positives."""
    fn = FNCount(y, yPredicted)
    actual_positive = fn + TPCount(y, yPredicted)
    return fn / actual_positive if actual_positive else 0.0
def FalsePositiveRate(y, yPredicted):
    """FP / (FP + TN); 0.0 when there are no actual negatives."""
    fp = FPCount(y, yPredicted)
    actual_negative = fp + TNCount(y, yPredicted)
    return fp / actual_negative if actual_negative else 0.0
def FNCount(y, yPredicted):
    """Count false negatives: true label 1 predicted as 0."""
    return sum(1 for i in range(len(y)) if y[i] == 1 and yPredicted[i] == 0)
def FPCount(y, yPredicted):
    """Count false positives: true label 0 predicted as 1."""
    return sum(1 for i in range(len(y)) if y[i] == 0 and yPredicted[i] == 1)
def TNCount(y, yPredicted):
    """Count true negatives: true label 0 predicted as 0."""
    return sum(1 for i in range(len(y)) if y[i] == 0 and yPredicted[i] == 0)
def TPCount(y, yPredicted):
    """Count true positives: true label 1 predicted as 1."""
    return sum(1 for i in range(len(y)) if y[i] == 1 and yPredicted[i] == 1)
def UpperAccRange(Accuracy, n):
    """Upper bound of the 95% normal-approximation confidence interval for an accuracy over n samples."""
    half_width = 1.96 * math.sqrt(Accuracy * (1 - Accuracy) / n)
    return Accuracy + half_width
def LowerAccRange(Accuracy, n):
    """Lower bound of the 95% normal-approximation confidence interval for an accuracy over n samples."""
    half_width = 1.96 * math.sqrt(Accuracy * (1 - Accuracy) / n)
    return Accuracy - half_width
def ConfusionMatrix(y, yPredicted):
    """Print a 2x2 confusion matrix to stdout; returns None."""
    tn, fp = TNCount(y, yPredicted), FPCount(y, yPredicted)
    fn, tp = FNCount(y, yPredicted), TPCount(y, yPredicted)
    print(" Predicted Negative | Predicted Positive")
    print("Actual Negative | TN: " + str(tn) + " | FP: " + str(fp))
    print("Actual Positive | FN: " + str(fn) + " | TP: " + str(tp))
def ExecuteAll(y, yPredicted):
    """Print the full evaluation report: confusion matrix, headline metrics, and a 95% CI."""
    acc = Accuracy(y, yPredicted)
    # ConfusionMatrix returns None, so this also prints a trailing "None"
    # (kept to preserve the original output exactly)
    print(ConfusionMatrix(y, yPredicted))
    print("Accuracy:", acc)
    print("Precision:", Precision(y, yPredicted))
    print("Recall:", Recall(y, yPredicted))
    print("FPR:", FalsePositiveRate(y, yPredicted))
    print("FNR:", FalseNegativeRate(y, yPredicted))
    print("95% confidence range:", LowerAccRange(acc, len(y)), "to", UpperAccRange(acc, len(y)))
| Code/EvaluationsStub.py | 4,217 | Check sizes Check values Check sizes | 36 | en | 0.119323 |
"""
module housing core library functionality
"""
import numpy as np
from typing import Optional, Tuple
import humba.jits as jits
def histogram(
    x: np.ndarray,
    bins: int = 10,
    range: Tuple[float, float] = (0, 10),
    weights: Optional[np.ndarray] = None,
    flow: bool = False,
) -> Tuple[np.ndarray, Optional[np.ndarray], np.ndarray]:
    """Calculate the histogram for the data ``x``.
    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        data to histogram
    bins : int
        number of bins
    range : (float, float)
        axis range
    weights : :obj:`numpy.ndarray`, optional
        array of weights for ``x``
    flow : bool
        include over and underflow content in first and last bins
    Returns
    -------
    count : :obj:`numpy.ndarray`
        The values of the histogram
    error : :obj:`numpy.ndarray`, optional
        The Poisson uncertainty on the bin heights (``None`` when
        ``weights`` is not given)
    edges : :obj:`numpy.ndarray`
        The bin edges
    Notes
    -----
    If the dtype of the ``weights`` is not the same as ``x``, then it
    is converted to the dtype of ``x``.
    Examples
    --------
    >>> import numpy as np
    >>> from humba import histogram
    >>> x = np.random.randn(100000)
    >>> w = np.random.uniform(0.4, 0.5, x.shape[0])
    >>> hist1, _, edges = histogram(x, bins=50, range=(-5, 5))
    >>> hist2, _, edges = histogram(x, bins=50, range=(-5, 5), flow=True)
    >>> hist3, error, edges = histogram(x, bins=50, range=(-5, 5), weights=w)
    >>> hist4, error, edges = histogram(x, bins=50, range=(-3, 3), weights=w, flow=True)
    """
    edges = np.linspace(range[0], range[1], bins + 1)
    if weights is not None:
        assert x.shape == weights.shape, "x and weights must have identical shape"
        # select the jit kernel matching x's dtype
        if x.dtype == np.float64:
            hfunc = jits._hfloat64_weighted
        elif x.dtype == np.float32:
            hfunc = jits._hfloat32_weighted
        else:
            raise TypeError("dtype of input must be float32 or float64")
        res, err = hfunc(x, weights.astype(x.dtype), bins, range[0], range[1], flow)
        return (res, err, edges)
    else:
        # unweighted path: the kernels return counts only, so error is None
        if x.dtype == np.float64:
            hfunc = jits._hfloat64
        elif x.dtype == np.float32:
            hfunc = jits._hfloat32
        else:
            raise TypeError("dtype of input must be float32 or float64")
        res = hfunc(x, bins, range[0], range[1], flow)
        return (res, None, edges)
def mwv_histogram(
    x: np.ndarray,
    weights: np.ndarray,
    bins: int = 10,
    range: Tuple[float, float] = (0, 10),
    flow: bool = False,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Histogram the same data but with multiple weight variations.
    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        data to histogram
    weights : :obj:`numpy.ndarray`, optional
        multidimensional array of weights for ``x`` the first element
        of the ``shape`` attribute must be equal to the length of ``x``.
    bins : int
        number of bins
    range : (float, float)
        axis range
    flow : bool
        include over and underflow content in first and last bins
    Returns
    -------
    count : :obj:`numpy.ndarray`
        The values of the histograms calculated from the weights
        Shape will be (bins, ``weights.shape[0]``)
    error : :obj:`numpy.ndarray`
        The Poisson uncertainty on the bin heights (shape will be
        the same as ``count``.
    edges : :obj:`numpy.ndarray`
        The bin edges
    Notes
    -----
    If ``x`` is not the same dtype as ``weights``, then it is converted
    to the dtype of ``weights`` (for multi weight histograms we expect
    the weights array to be larger than the data array so we prefer to
    cast the smaller chunk of data).
    """
    edges = np.linspace(range[0], range[1], bins + 1)
    assert x.shape[0] == weights.shape[0], "weights shape is not compatible with x"
    # select the jit kernel matching the weights' dtype (x is cast to it below)
    if weights.dtype == np.float64:
        hfunc = jits._hfloat64_multiweights
    elif weights.dtype == np.float32:
        hfunc = jits._hfloat32_multiweights
    else:
        raise TypeError("dtype of input must be float32 or float64")
    res, err = hfunc(x.astype(weights.dtype), weights, bins, range[0], range[1], flow)
    return (res, err, edges)
| humba/core.py | 4,312 | Calculate the histogram for the data ``x``.
Parameters
----------
x : :obj:`numpy.ndarray`
data to histogram
bins : int
number of bins
range : (float, float)
axis range
weights : :obj:`numpy.ndarray`, optional
array of weights for ``x``
flow : bool
include over and underflow content in first and last bins
Returns
-------
count : :obj:`numpy.ndarray`
The values of the histogram
error : :obj:`numpy.ndarray`, optional
The poission uncertainty on the bin heights
edges : :obj:`numpy.ndarray`
The bin edges
Notes
-----
If the dtype of the ``weights`` is not the same as ``x``, then it
is converted to the dtype of ``x``.
Examples
--------
>>> import numpy as np
>>> from humba import histogram
>>> x = np.random.randn(100000)
>>> w = np.random.uniform(0.4, 0.5, x.shape[0])
>>> hist1, _, edges = humba.histogram(x, bins=50, range=(-5, 5))
>>> hist2, _, edges = humba.histogram(x, bins=50, range=(-5, 5), flow=True)
>>> hist3, error, edges = histogram(x, bins=50, range=(-5, 5), weights=w)
>>> hist4, error, edges = histogram(x, bins=50, range=(-3, 3), weights=w, flow=True)
Histogram the same data but with multiple weight variations.
Parameters
----------
x : :obj:`numpy.ndarray`
data to histogram
weights : :obj:`numpy.ndarray`, optional
multidimensional array of weights for ``x`` the first element
of the ``shape`` attribute must be equal to the length of ``x``.
bins : int
number of bins
range : (float, float)
axis range
flow : bool
include over and underflow content in first and last bins
Returns
-------
count : :obj:`numpy.ndarray`
The values of the histograms calculated from the weights
Shape will be (bins, ``weights.shape[0]``)
error : :obj:`numpy.ndarray`
The poission uncertainty on the bin heights (shape will be
the same as ``count``.
edges : :obj:`numpy.ndarray`
The bin edges
Notes
-----
If ``x`` is not the same dtype as ``weights``, then it is converted
to the dtype of ``weights`` (for multi weight histograms we expect
the weights array to be larger than the data array so we prefer to
cast the smaller chunk of data).
module housing core library functionality | 2,167 | en | 0.58586 |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Template 2a_1:
.. parsed-literal::
┌───┐┌───┐
q_0: ┤ X ├┤ X ├
└───┘└───┘
"""
from qiskit.circuit.quantumcircuit import QuantumCircuit
def template_2a_1():
    """Build the two-X cancellation template on a single qubit.

    Returns:
        QuantumCircuit: template as a quantum circuit.
    """
    circuit = QuantumCircuit(1)
    # Two consecutive X gates on the same qubit cancel to the identity,
    # which is what makes this circuit usable as an optimization template.
    for _ in range(2):
        circuit.x(0)
    return circuit
| qiskit/circuit/library/template_circuits/toffoli/template_2a_1.py | 896 | Returns:
QuantumCircuit: template as a quantum circuit.
Template 2a_1:
.. parsed-literal::
┌───┐┌───┐
q_0: ┤ X ├┤ X ├
└───┘└───┘
-*- coding: utf-8 -*- This code is part of Qiskit. (C) Copyright IBM 2020. This code is licensed under the Apache License, Version 2.0. You may obtain a copy of this license in the LICENSE.txt file in the root directory of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. Any modifications or derivative works of this code must retain this copyright notice, and modified files need to carry a notice indicating that they have been altered from the originals. | 645 | en | 0.835209 |
import os
import csv
import logging
import itertools
import pandas as pd
import psutil as ps
from _pytest.monkeypatch import MonkeyPatch
from dataactcore.interfaces.db import GlobalDB
from dataactcore.config import CONFIG_SERVICES
from dataactcore.models.domainModels import concat_tas_dict
from dataactcore.models.lookups import (FILE_TYPE_DICT, JOB_TYPE_DICT, JOB_STATUS_DICT, RULE_SEVERITY_DICT)
from dataactcore.models.jobModels import Submission, Job, FileType
from dataactcore.models.userModel import User
from dataactcore.models.errorModels import ErrorMetadata
from dataactcore.models.stagingModels import (
Appropriation, ObjectClassProgramActivity, AwardFinancial, FlexField, TotalObligations)
from dataactvalidator.health_check import create_app
import dataactvalidator.validation_handlers.validationManager
from dataactvalidator.validation_handlers.validationManager import (
ValidationManager, FileColumn, CsvReader, parse_fields
)
import dataactvalidator.validation_handlers.validator
from dataactbroker.handlers.fileHandler import report_file_name
from tests.unit.dataactcore.factories.domain import SF133Factory, TASFactory
from tests.integration.baseTestValidator import BaseTestValidator
from tests.integration.integration_test_helper import insert_submission, insert_job
# Directory holding the CSV fixture files used by these integration tests
FILES_DIR = os.path.join('tests', 'integration', 'data')
# Valid Files
APPROP_FILE = os.path.join(FILES_DIR, 'appropValid.csv')
AFINANCIAL_FILE = os.path.join(FILES_DIR, 'awardFinancialValid.csv')
CROSS_FILE_A = os.path.join(FILES_DIR, 'cross_file_A.csv')
CROSS_FILE_B = os.path.join(FILES_DIR, 'cross_file_B.csv')
# Invalid Files - each fixture is crafted to trigger one specific class of validation failure
HEADER_ERROR = os.path.join(FILES_DIR, 'appropHeaderError.csv')
READ_ERROR = os.path.join(FILES_DIR, 'appropReadError.csv')
LENGTH_ERROR = os.path.join(FILES_DIR, 'appropLengthError.csv')
TYPE_ERROR = os.path.join(FILES_DIR, 'appropTypeError.csv')
REQUIRED_ERROR = os.path.join(FILES_DIR, 'appropRequiredError.csv')
RULE_FAILED_WARNING = os.path.join(FILES_DIR, 'appropInvalidWarning.csv')
RULE_FAILED_ERROR = os.path.join(FILES_DIR, 'appropInvalidError.csv')
INVALID_CROSS_A = os.path.join(FILES_DIR, 'invalid_cross_file_A.csv')
INVALID_CROSS_B = os.path.join(FILES_DIR, 'invalid_cross_file_B.csv')
BLANK_C = os.path.join(FILES_DIR, 'awardFinancialBlank.csv')
class ErrorWarningTests(BaseTestValidator):
    """ Overall integration tests for error/warning reports.
    For each file type (single-file, cross-file, errors, warnings), test if each has
    - the correct structure
    - each column's content is correct after testing each possible type of error:
        - formatting
        - length
        - types
        - required/optional
        - SQL validation
    Attributes:
        session: the database session connection
        validator: validator instance to be used for the tests
        submission_id: the id of the submission foundation
        submission: the submission foundation to be used for all the tests
        val_job: the validation job to be used for all the tests
    """
    # Configuration axes for the validation manager; each test_* wrapper
    # monkeypatches these and re-runs its scenario once per combination.
    CHUNK_SIZES = [4]
    PARALLEL_OPTIONS = [True, False]
    BATCH_SQL_OPTIONS = [True, False]
    # Full cross-product of (chunk_size, parallel, batch_sql) settings
    CONFIGS = list(itertools.product(CHUNK_SIZES, PARALLEL_OPTIONS, BATCH_SQL_OPTIONS))
    @classmethod
    def setUpClass(cls):
        """ Set up class-wide resources (test data).

        Creates a single valid submission plus the TAS/GTAS reference rows the
        fixture files depend on; individual tests reuse this submission and
        only reload the input files.
        """
        super(ErrorWarningTests, cls).setUpClass()
        # Quiet app loggers so report-content assertions aren't buried in log output
        logging.getLogger('dataactcore').setLevel(logging.ERROR)
        logging.getLogger('dataactvalidator').setLevel(logging.ERROR)
        with create_app().app_context():
            # MonkeyPatch instance used by test_* wrappers to vary CHUNK_SIZE etc.
            cls.monkeypatch = MonkeyPatch()
            # get the submission test users
            sess = GlobalDB.db().session
            cls.session = sess
            # set up default e-mails for tests
            admin_user = sess.query(User).filter(User.email == cls.test_users['admin_user']).one()
            cls.validator = ValidationManager(directory=CONFIG_SERVICES['error_report_path'])
            # Just have one valid submission and then keep on reloading files
            cls.submission_id = insert_submission(sess, admin_user.user_id, cgac_code='SYS', start_date='01/2001',
                                                  end_date='03/2001', is_quarter=True)
            cls.submission = sess.query(Submission).filter_by(submission_id=cls.submission_id).one()
            cls.val_job = insert_job(cls.session, FILE_TYPE_DICT['appropriations'], JOB_STATUS_DICT['ready'],
                                     JOB_TYPE_DICT['csv_record_validation'], cls.submission_id,
                                     filename=JOB_TYPE_DICT['csv_record_validation'])
            # Snapshot pre-existing reports so cleanup() only removes files created by tests
            cls.original_reports = set(os.listdir(CONFIG_SERVICES['error_report_path']))
            # adding TAS to ensure valid file is valid
            tas1 = TASFactory(account_num=1, allocation_transfer_agency='019', agency_identifier='072',
                              beginning_period_of_availa=None, ending_period_of_availabil=None,
                              availability_type_code='X', main_account_code='0306', sub_account_code='000',
                              internal_start_date='01-01-2000', financial_indicator2='F')
            tas2 = TASFactory(account_num=2, allocation_transfer_agency=None, agency_identifier='019',
                              beginning_period_of_availa='2016', ending_period_of_availabil='2016',
                              availability_type_code=None, main_account_code='0113', sub_account_code='000',
                              internal_start_date='01-01-2000', financial_indicator2='F')
            tas3 = TASFactory(account_num=3, allocation_transfer_agency=None, agency_identifier='028',
                              beginning_period_of_availa=None, ending_period_of_availabil=None,
                              availability_type_code='X', main_account_code='0406', sub_account_code='000',
                              internal_start_date='01-01-2000', financial_indicator2='F')
            tas4 = TASFactory(account_num=4, allocation_transfer_agency=None, agency_identifier='028',
                              beginning_period_of_availa='2010', ending_period_of_availabil='2011',
                              availability_type_code=None, main_account_code='0406', sub_account_code='000',
                              internal_start_date='01-01-2000', financial_indicator2='F')
            tas5 = TASFactory(account_num=5, allocation_transfer_agency='069', agency_identifier='013',
                              beginning_period_of_availa=None, ending_period_of_availabil=None,
                              availability_type_code='X', main_account_code='2050', sub_account_code='005',
                              internal_start_date='01-01-2000', financial_indicator2='F')
            tas6 = TASFactory(account_num=6, allocation_transfer_agency='028', agency_identifier='028',
                              beginning_period_of_availa=None, ending_period_of_availabil=None,
                              availability_type_code='X', main_account_code='8007', sub_account_code='000',
                              internal_start_date='01-01-2000', financial_indicator2='F')
            tas7 = TASFactory(account_num=7, allocation_transfer_agency=None, agency_identifier='049',
                              beginning_period_of_availa=None, ending_period_of_availabil=None,
                              availability_type_code='X', main_account_code='0100', sub_account_code='000',
                              internal_start_date='01-01-2000', financial_indicator2='F')
            tas8 = TASFactory(account_num=8, allocation_transfer_agency=None, agency_identifier='049',
                              beginning_period_of_availa='2010', ending_period_of_availabil='2011',
                              availability_type_code=None, main_account_code='0100', sub_account_code='000',
                              internal_start_date='01-01-2000', financial_indicator2='F')
            tas9 = TASFactory(account_num=9, allocation_transfer_agency=None, agency_identifier='049',
                              beginning_period_of_availa='2014', ending_period_of_availabil='2015',
                              availability_type_code=None, main_account_code='0100', sub_account_code='000',
                              internal_start_date='01-01-2000', financial_indicator2='F')
            # note: tas10 omits financial_indicator2, unlike tas1-tas9
            tas10 = TASFactory(account_num=10, allocation_transfer_agency=None, agency_identifier='049',
                               beginning_period_of_availa='2015', ending_period_of_availabil='2016',
                               availability_type_code=None, main_account_code='0100', sub_account_code='000',
                               internal_start_date='01-01-2000')
            sess.add_all([tas1, tas2, tas3, tas4, tas5, tas6, tas7, tas8, tas9, tas10])
            # adding GTAS to ensure valid file is valid
            # Each SF133 row mirrors one TAS above (same TAS string via concat_tas_dict)
            gtas1 = SF133Factory(tas=concat_tas_dict(tas1.component_dict()), allocation_transfer_agency='019',
                                 agency_identifier='072', beginning_period_of_availa=None, line=1009,
                                 ending_period_of_availabil=None, availability_type_code='X',
                                 main_account_code='0306', sub_account_code='000', period=6, fiscal_year=2001)
            gtas2 = SF133Factory(tas=concat_tas_dict(tas2.component_dict()), allocation_transfer_agency=None,
                                 agency_identifier='019', beginning_period_of_availa='2016', line=1009,
                                 ending_period_of_availabil='2016', availability_type_code=None,
                                 main_account_code='0113', sub_account_code='000', period=6, fiscal_year=2001)
            gtas3 = SF133Factory(tas=concat_tas_dict(tas3.component_dict()), allocation_transfer_agency=None,
                                 agency_identifier='028', beginning_period_of_availa=None, line=1009,
                                 ending_period_of_availabil=None, availability_type_code='X',
                                 main_account_code='0406', sub_account_code='000', period=6, fiscal_year=2001)
            gtas4 = SF133Factory(tas=concat_tas_dict(tas4.component_dict()), allocation_transfer_agency=None,
                                 agency_identifier='028', beginning_period_of_availa='2010', line=1009,
                                 ending_period_of_availabil='2011', availability_type_code=None,
                                 main_account_code='0406', sub_account_code='000', period=6, fiscal_year=2001)
            gtas5 = SF133Factory(tas=concat_tas_dict(tas5.component_dict()), allocation_transfer_agency='069',
                                 agency_identifier='013', beginning_period_of_availa=None, line=1009,
                                 ending_period_of_availabil=None, availability_type_code='X',
                                 main_account_code='2050', sub_account_code='005', period=6, fiscal_year=2001)
            gtas6 = SF133Factory(tas=concat_tas_dict(tas6.component_dict()), allocation_transfer_agency='028',
                                 agency_identifier='028', beginning_period_of_availa=None, line=1009,
                                 ending_period_of_availabil=None, availability_type_code='X',
                                 main_account_code='8007', sub_account_code='000', period=6, fiscal_year=2001)
            gtas7 = SF133Factory(tas=concat_tas_dict(tas7.component_dict()), allocation_transfer_agency=None,
                                 agency_identifier='049', beginning_period_of_availa=None, line=1009,
                                 ending_period_of_availabil=None, availability_type_code='X',
                                 main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)
            gtas8 = SF133Factory(tas=concat_tas_dict(tas8.component_dict()), allocation_transfer_agency=None,
                                 agency_identifier='049', beginning_period_of_availa='2010', line=1009,
                                 ending_period_of_availabil='2011', availability_type_code=None,
                                 main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)
            gtas9 = SF133Factory(tas=concat_tas_dict(tas9.component_dict()), allocation_transfer_agency=None,
                                 agency_identifier='049', beginning_period_of_availa='2014', line=1009,
                                 ending_period_of_availabil='2015', availability_type_code=None,
                                 main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)
            gtas10 = SF133Factory(tas=concat_tas_dict(tas10.component_dict()), allocation_transfer_agency=None,
                                  agency_identifier='049', beginning_period_of_availa='2015', line=1009,
                                  ending_period_of_availabil='2016', availability_type_code=None,
                                  main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)
            sess.add_all([gtas1, gtas2, gtas3, gtas4, gtas5, gtas6, gtas7, gtas8, gtas9, gtas10])
            sess.commit()
def setUp(self):
"""Test set-up."""
super(ErrorWarningTests, self).setUp()
def get_report_path(self, file_type, warning=False, cross_type=None):
filename = report_file_name(self.submission_id, warning, file_type, cross_type)
return os.path.join(CONFIG_SERVICES['error_report_path'], filename)
def setup_csv_record_validation(self, file, file_type):
self.session.query(Job).delete(synchronize_session='fetch')
self.val_job = insert_job(self.session, FILE_TYPE_DICT[file_type], JOB_STATUS_DICT['ready'],
JOB_TYPE_DICT['csv_record_validation'], self.submission_id,
filename=file)
def setup_validation(self):
self.session.query(Job).delete(synchronize_session='fetch')
self.val_job = insert_job(self.session, None, JOB_STATUS_DICT['ready'],
JOB_TYPE_DICT['validation'], self.submission_id,
filename=None)
def get_report_content(self, report_path, cross_file=False):
report_content = []
report_headers = None
with open(report_path, 'r') as report_csv:
reader = csv.DictReader(report_csv)
for row in reader:
report_content.append(row)
report_headers = reader.fieldnames
row_number_col = 'Row Number' if not cross_file else 'Source Row Number'
if row_number_col in report_headers:
report_content = list(sorted(report_content, key=lambda x: int(x[row_number_col] or 0)))
return report_headers, report_content
def generate_file_report(self, file, file_type, warning=False, ignore_error=False):
self.setup_csv_record_validation(file, file_type)
if ignore_error:
try:
self.validator.validate_job(self.val_job.job_id)
except Exception:
pass
else:
self.validator.validate_job(self.val_job.job_id)
report_path = self.get_report_path(file_type, warning=warning)
report_content = self.get_report_content(report_path, cross_file=False)
return report_content
def generate_cross_file_report(self, cross_files, warning=False, ignore_error=False):
cross_types = []
for cross_file in cross_files:
cross_types.append(cross_file[1])
self.generate_file_report(cross_file[0], cross_file[1], warning=warning, ignore_error=ignore_error)
self.setup_validation()
if ignore_error:
try:
self.validator.validate_job(self.val_job.job_id)
except Exception:
pass
else:
self.validator.validate_job(self.val_job.job_id)
report_path = self.get_report_path(cross_types[0], cross_type=cross_types[1], warning=warning)
report_content = self.get_report_content(report_path, cross_file=True)
return report_content
def cleanup(self):
new_reports = set(os.listdir(CONFIG_SERVICES['error_report_path'])) - self.original_reports
for new_report in new_reports:
os.remove(os.path.join(CONFIG_SERVICES['error_report_path'], new_report))
self.session.query(Appropriation).delete(synchronize_session='fetch')
self.session.query(ObjectClassProgramActivity).delete(synchronize_session='fetch')
self.session.query(AwardFinancial).delete(synchronize_session='fetch')
self.session.query(ErrorMetadata).delete(synchronize_session='fetch')
self.session.query(FlexField).delete(synchronize_session='fetch')
self.session.commit()
def test_single_file_warnings(self):
for chunk_size, parallel, batch_sql in self.CONFIGS:
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'CHUNK_SIZE', chunk_size)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'PARALLEL', parallel)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'BATCH_SQL_VAL_RESULTS',
batch_sql)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validator, 'SQL_VALIDATION_BATCH_SIZE',
chunk_size)
self.single_file_warnings()
    def single_file_warnings(self):
        """ Check single-file warning-report content under the currently patched config.

        Covers three scenarios: a fully valid file (empty report), a blank
        file C (DABSBLANK warning), and a file failing SQL rule A16.1.
        """
        self.cleanup()
        # Valid
        report_headers, report_content = self.generate_file_report(APPROP_FILE, 'appropriations', warning=True)
        appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
        assert appro_count == 10
        flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
        assert flex_count == 20
        error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
                                                                  severity_id=RULE_SEVERITY_DICT['warning']).count()
        assert self.validator.job.number_of_rows == 11
        assert self.validator.job.number_of_rows_valid == 10
        assert error_count == 0
        assert report_headers == self.validator.report_headers
        assert len(report_content) == 0
        self.cleanup()
        # Blank File
        report_headers, report_content = self.generate_file_report(BLANK_C, 'award_financial', warning=True)
        awfin_count = self.session.query(AwardFinancial).filter_by(submission_id=self.submission_id).count()
        assert awfin_count == 0
        flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
        assert flex_count == 0
        error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
                                                                  severity_id=RULE_SEVERITY_DICT['warning']).count()
        assert self.validator.job.number_of_rows == 1
        assert self.validator.job.number_of_rows_valid == 2
        assert error_count == 1
        assert report_headers == self.validator.report_headers
        expected_values = [
            {
                'Unique ID': '',
                'Field Name': 'Blank File',
                'Rule Message': 'File does not contain data. For files A and B, this must be addressed prior to'
                                ' publication/certification. Blank file C does not prevent publication/certification.',
                'Value Provided': '',
                'Expected Value': '',
                'Difference': '',
                'Flex Field': '',
                'Row Number': '',
                'Rule Label': 'DABSBLANK'
            }
        ]
        assert report_content == expected_values
        self.cleanup()
        # SQL Validation
        report_headers, report_content = self.generate_file_report(RULE_FAILED_WARNING, 'appropriations', warning=True)
        appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
        assert appro_count == 10
        flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
        assert flex_count == 20
        error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
                                                                  severity_id=RULE_SEVERITY_DICT['warning']).count()
        assert self.validator.job.number_of_rows == 11
        assert self.validator.job.number_of_rows_valid == 10
        assert error_count == 1
        assert report_headers == self.validator.report_headers
        expected_values = [
            {
                'Unique ID': 'TAS: 028-2010/2011-0406-000',
                'Field Name': 'budgetauthorityunobligatedbalancebroughtforward_fyb',
                'Rule Message': 'All the elements that have FYB in file A are expected in the first submission'
                                ' for a fiscal year',
                'Value Provided': 'budgetauthorityunobligatedbalancebroughtforward_fyb: ',
                'Expected Value': 'If the reporting period is Quarter 1, a non-null amount should be submitted for the'
                                  ' following elements: BudgetAuthorityUnobligatedBalanceBroughtForward_FYB',
                'Difference': '',
                'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
                'Row Number': '5',
                'Rule Label': 'A16.1'
            }
        ]
        assert report_content == expected_values
        self.cleanup()
def test_single_file_errors(self):
for chunk_size, parallel, batch_sql in self.CONFIGS:
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'CHUNK_SIZE', chunk_size)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'PARALLEL', parallel)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'BATCH_SQL_VAL_RESULTS',
batch_sql)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validator, 'SQL_VALIDATION_BATCH_SIZE',
chunk_size)
self.single_file_errors()
    def single_file_errors(self):
        """ Check single-file error-report content under the currently patched config.

        Covers: valid file (empty report), header errors, unparseable rows,
        type errors, length errors, required-field + SQL rule errors, a SQL
        rule with a computed difference, and total-obligation aggregation.
        """
        self.cleanup()
        # Valid
        report_headers, report_content = self.generate_file_report(APPROP_FILE, 'appropriations', warning=False)
        appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
        assert appro_count == 10
        flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
        assert flex_count == 20
        error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
                                                                  severity_id=RULE_SEVERITY_DICT['fatal']).count()
        assert self.validator.job.number_of_rows == 11
        assert self.validator.job.number_of_rows_valid == 10
        assert error_count == 0
        assert report_headers == self.validator.report_headers
        assert len(report_content) == 0
        self.cleanup()
        # Header Error
        report_headers, report_content = self.generate_file_report(HEADER_ERROR, 'appropriations', warning=False,
                                                                   ignore_error=True)
        appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
        assert appro_count == 0
        flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
        assert flex_count == 0
        error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
                                                                  severity_id=RULE_SEVERITY_DICT['fatal']).count()
        assert self.validator.job.number_of_rows is None
        assert self.validator.job.number_of_rows_valid == 0
        # Header errors do not get saved to the database
        assert error_count == 0
        assert report_headers == ['Error type', 'Header name']
        expected_values = [
            {
                'Error type': 'Duplicated header',
                'Header name': 'AllocationTransferAgencyIdentifier'
            },
            {
                'Error type': 'Missing header',
                'Header name': 'AdjustmentsToUnobligatedBalanceBroughtForward_CPE'
            },
            {
                'Error type': 'Missing header',
                'Header name': 'AgencyIdentifier'
            },
            {
                'Error type': 'Missing header',
                'Header name': 'BudgetAuthorityUnobligatedBalanceBroughtForward_FYB'
            },
            {
                'Error type': 'Missing header',
                'Header name': 'DeobligationsRecoveriesRefundsByTAS_CPE'
            },
            {
                'Error type': 'Missing header',
                'Header name': 'GrossOutlayAmountByTAS_CPE'
            },
            {
                'Error type': 'Missing header',
                'Header name': 'ObligationsIncurredTotalByTAS_CPE'
            },
            {
                'Error type': 'Missing header',
                'Header name': 'StatusOfBudgetaryResourcesTotal_CPE'
            }
        ]
        assert report_content == expected_values
        self.cleanup()
        # Read Error
        report_headers, report_content = self.generate_file_report(READ_ERROR, 'appropriations', warning=False)
        appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
        assert appro_count == 6
        flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
        assert flex_count == 12
        assert self.validator.job.number_of_rows == 11
        assert self.validator.job.number_of_rows_valid == 6
        # All formatting failures collapse into one ErrorMetadata row with an occurrence count
        format_errors = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
                                                                    severity_id=RULE_SEVERITY_DICT['fatal']).one()
        format_error_count = format_errors.occurrences
        assert format_error_count == 4
        assert report_headers == self.validator.report_headers
        expected_values = [
            {
                'Unique ID': '',
                'Field Name': 'Formatting Error',
                'Rule Message': 'Could not parse this record correctly.',
                'Value Provided': '',
                'Expected Value': '',
                'Difference': '',
                'Flex Field': '',
                'Row Number': '2',
                'Rule Label': ''
            },
            {
                'Unique ID': '',
                'Field Name': 'Formatting Error',
                'Rule Message': 'Could not parse this record correctly.',
                'Value Provided': '',
                'Expected Value': '',
                'Difference': '',
                'Flex Field': '',
                'Row Number': '3',
                'Rule Label': ''
            },
            {
                'Unique ID': '',
                'Field Name': 'Formatting Error',
                'Rule Message': 'Could not parse this record correctly.',
                'Value Provided': '',
                'Expected Value': '',
                'Difference': '',
                'Flex Field': '',
                'Row Number': '5',
                'Rule Label': ''
            },
            {
                'Unique ID': '',
                'Field Name': 'Formatting Error',
                'Rule Message': 'Could not parse this record correctly.',
                'Value Provided': '',
                'Expected Value': '',
                'Difference': '',
                'Flex Field': '',
                'Row Number': '7',
                'Rule Label': ''
            }
        ]
        assert report_content == expected_values
        self.cleanup()
        # Type Error
        report_headers, report_content = self.generate_file_report(TYPE_ERROR, 'appropriations', warning=False)
        appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
        assert appro_count == 9
        flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
        assert flex_count == 18
        assert self.validator.job.number_of_rows == 11
        assert self.validator.job.number_of_rows_valid == 9
        error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
                                                                  severity_id=RULE_SEVERITY_DICT['fatal']).count()
        assert error_count == 1
        assert report_headers == self.validator.report_headers
        expected_values = [
            {
                'Unique ID': 'TAS: 069-013-X-2050-005',
                'Field Name': 'statusofbudgetaryresourcestotal_cpe',
                'Rule Message': 'The value provided was of the wrong type. Note that all type errors in a line must be'
                                ' fixed before the rest of the validation logic is applied to that line.',
                'Value Provided': 'statusofbudgetaryresourcestotal_cpe: A',
                'Expected Value': 'This field must be a decimal',
                'Difference': '',
                'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
                'Row Number': '6',
                'Rule Label': ''
            }
        ]
        assert report_content == expected_values
        self.cleanup()
        # Length Error
        report_headers, report_content = self.generate_file_report(LENGTH_ERROR, 'appropriations', warning=False)
        appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
        assert appro_count == 10
        flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
        assert flex_count == 20
        assert self.validator.job.number_of_rows == 11
        assert self.validator.job.number_of_rows_valid == 9
        error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
                                                                  severity_id=RULE_SEVERITY_DICT['fatal']).count()
        assert error_count == 1
        assert report_headers == self.validator.report_headers
        expected_values = [
            {
                'Unique ID': 'TAS: 069-013-X-2050-005',
                'Field Name': 'grossoutlayamountbytas_cpe',
                'Rule Message': 'Value was longer than maximum length for this field.',
                'Value Provided': 'grossoutlayamountbytas_cpe: 35000000000000000000000000',
                'Expected Value': 'Max length: 21',
                'Difference': '',
                'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
                'Row Number': '6',
                'Rule Label': ''
            }
        ]
        assert report_content == expected_values
        self.cleanup()
        # Required Error + SQL Validation
        report_headers, report_content = self.generate_file_report(REQUIRED_ERROR, 'appropriations', warning=False)
        appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
        assert appro_count == 10
        flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
        assert flex_count == 20
        assert self.validator.job.number_of_rows == 11
        assert self.validator.job.number_of_rows_valid == 9
        error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
                                                                  severity_id=RULE_SEVERITY_DICT['fatal']).count()
        assert error_count == 3
        assert report_headers == self.validator.report_headers
        expected_values = [
            {
                'Unique ID': 'TAS: 019-2016/2016-0113-000',
                'Field Name': 'statusofbudgetaryresourcestotal_cpe',
                'Rule Message': 'This field is required for all submissions but was not provided in this row.',
                'Value Provided': '',
                'Expected Value': '(not blank)',
                'Difference': '',
                'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
                'Row Number': '3',
                'Rule Label': ''
            },
            {
                'Unique ID': 'TAS: 019-2016/2016-0113-000',
                'Field Name': 'statusofbudgetaryresourcestotal_cpe, obligationsincurredtotalbytas_cpe,'
                              ' unobligatedbalance_cpe',
                'Rule Message': 'StatusOfBudgetaryResourcesTotal_CPE= ObligationsIncurredTotalByTAS_CPE'
                                ' + UnobligatedBalance_CPE',
                'Value Provided': 'statusofbudgetaryresourcestotal_cpe: , obligationsincurredtotalbytas_cpe: 8.08,'
                                  ' unobligatedbalance_cpe: 2.02',
                'Expected Value': 'StatusOfBudgetaryResourcesTotal_CPE must equal the sum of these elements:'
                                  ' ObligationsIncurredTotalByTAS_CPE + UnobligatedBalance_CPE. The Broker cannot'
                                  ' distinguish which item is incorrect for this rule. Refer to related rule errors'
                                  ' and warnings in this report (rules A15, A22, A23) to distinguish which elements'
                                  ' may be incorrect.',
                'Difference': '-10.10',
                'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
                'Row Number': '3',
                'Rule Label': 'A4'
            },
            {
                'Unique ID': 'TAS: 019-2016/2016-0113-000',
                'Field Name': 'statusofbudgetaryresourcestotal_cpe, totalbudgetaryresources_cpe',
                'Rule Message': 'StatusOfBudgetaryResourcesTotal_CPE = TotalBudgetaryResources_CPE',
                'Value Provided': 'statusofbudgetaryresourcestotal_cpe: , totalbudgetaryresources_cpe: 10.1',
                'Expected Value': 'StatusOfBudgetaryResourcesTotal_CPE must equal TotalBudgetaryResources_CPE. The'
                                  ' Broker cannot distinguish which side of the equation is correct for this rule.'
                                  ' Refer to related rule errors and warnings in this report (rules A6, A23) to'
                                  ' distinguish which elements may be incorrect.',
                'Difference': '-10.1',
                'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
                'Row Number': '3',
                'Rule Label': 'A24'
            }
        ]
        assert report_content == expected_values
        self.cleanup()
        # SQL Validation (with difference)
        report_headers, report_content = self.generate_file_report(RULE_FAILED_ERROR, 'appropriations', warning=False)
        appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
        assert appro_count == 10
        flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
        assert flex_count == 20
        assert self.validator.job.number_of_rows == 11
        assert self.validator.job.number_of_rows_valid == 10
        error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
                                                                  severity_id=RULE_SEVERITY_DICT['fatal']).count()
        assert error_count == 0
        assert report_headers == self.validator.report_headers
        # TODO put this back when we put A2 back
        # expected_values = [
        #     {
        #         'Unique ID': 'TAS: 049-2014/2015-0100-000',
        #         'Field Name': 'totalbudgetaryresources_cpe, budgetauthorityappropriatedamount_cpe,'
        #                       ' budgetauthorityunobligatedbalancebroughtforward_fyb,'
        #                       ' adjustmentstounobligatedbalancebroughtforward_cpe, otherbudgetaryresourcesamount_cpe',
        #         'Rule Message': 'TotalBudgetaryResources_CPE = BudgetAuthorityAppropriatedAmount_CPE +'
        #                         ' BudgetAuthorityUnobligatedBalanceBroughtForward_FYB +'
        #                         ' AdjustmentsToUnobligatedBalanceBroughtForward_CPE +'
        #                         ' OtherBudgetaryResourcesAmount_CPE',
        #         'Value Provided': 'totalbudgetaryresources_cpe: 10.1, budgetauthorityappropriatedamount_cpe: 0.01,'
        #                           ' budgetauthorityunobligatedbalancebroughtforward_fyb: 3.03,'
        #                           ' adjustmentstounobligatedbalancebroughtforward_cpe: 2.02,'
        #                           ' otherbudgetaryresourcesamount_cpe: 4.04',
        #         'Expected Value': 'TotalBudgetaryResources_CPE must equal the sum of these elements:'
        #                           ' BudgetAuthorityAppropriatedAmount_CPE +'
        #                           ' BudgetAuthorityUnobligatedBalanceBroughtForward_FYB +'
        #                           ' AdjustmentsToUnobligatedBalanceBroughtForward_CPE +'
        #                           ' OtherBudgetaryResourcesAmount_CPE. The Broker cannot distinguish which item is'
        #                           ' incorrect for this rule. Refer to related rule errors and warnings in this report'
        #                           ' (rules A3, A6, A7, A8, A12) to distinguish which elements may be incorrect.',
        #         'Difference': '1.00',
        #         'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
        #         'Row Number': '10',
        #         'Rule Label': 'A2'
        #     }
        # ]
        # assert report_content == expected_values
        self.cleanup()
        # Ensure total_obligations are being calculated correctly
        self.generate_file_report(AFINANCIAL_FILE, 'award_financial', warning=False)
        totals = self.session.query(TotalObligations).filter_by(submission_id=self.submission_id).one()
        assert totals.total_obligations == 12000.00
        assert totals.total_proc_obligations == 8000.00
        assert totals.total_asst_obligations == 4000.00
        self.cleanup()
def test_cross_file_warnings(self):
for chunk_size, parallel, batch_sql in self.CONFIGS:
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'CHUNK_SIZE', chunk_size)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'PARALLEL', parallel)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'BATCH_SQL_VAL_RESULTS',
batch_sql)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validator, 'SQL_VALIDATION_BATCH_SIZE',
chunk_size)
self.cross_file_warnings()
def cross_file_warnings(self):
    """Check cross-file (file A vs file B) warning rules and the report contents.

    A valid file pair must produce zero warnings; an invalid pair must trip
    warning rules A18, A19 and A35 on three source rows each (rows 5, 10, 15),
    and every column of the generated warning report is compared against the
    expected fixture below.
    """
    self.cleanup()

    # Valid
    report_headers, report_content = self.generate_cross_file_report([(CROSS_FILE_A, 'appropriations'),
                                                                      (CROSS_FILE_B, 'program_activity')],
                                                                     warning=True)
    error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
                                                              severity_id=RULE_SEVERITY_DICT['warning']).count()

    assert error_count == 0
    assert report_headers == self.validator.cross_file_report_headers
    assert len(report_content) == 0
    self.cleanup()

    # SQL Validation
    report_headers, report_content = self.generate_cross_file_report([(INVALID_CROSS_A, 'appropriations'),
                                                                      (INVALID_CROSS_B, 'program_activity')],
                                                                     warning=True)
    warnings = list(self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
                                                                severity_id=RULE_SEVERITY_DICT['warning']).all())

    # Three warning rules fired, each on three rows of the source file
    assert len(warnings) == 3
    assert warnings[0].occurrences == 3
    assert warnings[1].occurrences == 3
    assert warnings[2].occurrences == 3
    assert report_headers == self.validator.cross_file_report_headers

    # Expected report rows: rules A18/A19/A35 for source rows 5, 10 and 15
    expected_values = [
        {
            'Unique ID': 'TAS: 019-2016/2016-0113-000',
            'Source File': 'appropriations',
            'Source Field Name': 'grossoutlayamountbytas_cpe',
            'Target File': 'program_activity',
            'Target Field Name': 'gross_outlay_amount_by_pro_cpe_sum',
            'Rule Message': 'The GrossOutlayAmountByTAS_CPE amount in the appropriation file (A) does not equal the'
                            ' sum of the corresponding GrossOutlayAmountByProgramObjectClass_CPE values in the'
                            ' award financial file (B). {This value is the sum of all Gross Outlay Amounts reported'
                            ' in file B, to indicate year-to-date activity by TAS/Subaccount.}',
            'Source Value Provided': 'grossoutlayamountbytas_cpe: 10000',
            'Target Value Provided': 'gross_outlay_amount_by_pro_cpe_sum: 6000',
            'Difference': '4000',
            'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
            'Source Row Number': '5',
            'Rule Label': 'A18'
        },
        {
            'Unique ID': 'TAS: 019-2016/2016-0113-000',
            'Source File': 'appropriations',
            'Source Field Name': 'obligationsincurredtotalbytas_cpe',
            'Target File': 'program_activity',
            'Target Field Name': 'obligations_incurred_by_pr_cpe_sum',
            'Rule Message': 'The ObligationsIncurredTotalByTAS_CPE amount in the appropriation file (A) does not'
                            ' equal the negative sum of the corresponding'
                            ' ObligationsIncurredByProgramObjectClass_CPE values in the award financial file (B).',
            'Source Value Provided': 'obligationsincurredtotalbytas_cpe: 12000',
            'Target Value Provided': 'obligations_incurred_by_pr_cpe_sum: 6000',
            'Difference': '18000',
            'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
            'Source Row Number': '5',
            'Rule Label': 'A19'
        },
        {
            'Unique ID': 'TAS: 019-2016/2016-0113-000',
            'Source File': 'appropriations',
            'Source Field Name': 'deobligationsrecoveriesrefundsbytas_cpe',
            'Target File': 'program_activity',
            'Target Field Name': 'ussgl487100_downward_adjus_cpe_sum, ussgl497100_downward_adjus_cpe_sum,'
                                 ' ussgl487200_downward_adjus_cpe_sum, ussgl497200_downward_adjus_cpe_sum',
            'Rule Message': 'DeobligationsRecoveriesRefundsByTAS_CPE in File A should equal USSGL'
                            ' (4871_CPE+ 4971_CPE+ 4872_CPE+ 4972_CPE) for the TAS in File B.',
            'Source Value Provided': 'deobligationsrecoveriesrefundsbytas_cpe: 16000',
            'Target Value Provided': 'ussgl487100_downward_adjus_cpe_sum: 2000,'
                                     ' ussgl497100_downward_adjus_cpe_sum: 2000,'
                                     ' ussgl487200_downward_adjus_cpe_sum: 400,'
                                     ' ussgl497200_downward_adjus_cpe_sum: 2000',
            'Difference': '9600',
            'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
            'Source Row Number': '5',
            'Rule Label': 'A35'
        },
        {
            'Unique ID': 'TAS: 019-2016/2016-0113-000',
            'Source File': 'appropriations',
            'Source Field Name': 'grossoutlayamountbytas_cpe',
            'Target File': 'program_activity',
            'Target Field Name': 'gross_outlay_amount_by_pro_cpe_sum',
            'Rule Message': 'The GrossOutlayAmountByTAS_CPE amount in the appropriation file (A) does not equal the'
                            ' sum of the corresponding GrossOutlayAmountByProgramObjectClass_CPE values in the'
                            ' award financial file (B). {This value is the sum of all Gross Outlay Amounts reported'
                            ' in file B, to indicate year-to-date activity by TAS/Subaccount.}',
            'Source Value Provided': 'grossoutlayamountbytas_cpe: 10000',
            'Target Value Provided': 'gross_outlay_amount_by_pro_cpe_sum: 6000',
            'Difference': '4000',
            'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
            'Source Row Number': '10',
            'Rule Label': 'A18'
        },
        {
            'Unique ID': 'TAS: 019-2016/2016-0113-000',
            'Source File': 'appropriations',
            'Source Field Name': 'obligationsincurredtotalbytas_cpe',
            'Target File': 'program_activity',
            'Target Field Name': 'obligations_incurred_by_pr_cpe_sum',
            'Rule Message': 'The ObligationsIncurredTotalByTAS_CPE amount in the appropriation file (A) does not'
                            ' equal the negative sum of the corresponding'
                            ' ObligationsIncurredByProgramObjectClass_CPE values in the award financial file (B).',
            'Source Value Provided': 'obligationsincurredtotalbytas_cpe: 12000',
            'Target Value Provided': 'obligations_incurred_by_pr_cpe_sum: 6000',
            'Difference': '18000',
            'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
            'Source Row Number': '10',
            'Rule Label': 'A19'
        },
        {
            'Unique ID': 'TAS: 019-2016/2016-0113-000',
            'Source File': 'appropriations',
            'Source Field Name': 'deobligationsrecoveriesrefundsbytas_cpe',
            'Target File': 'program_activity',
            'Target Field Name': 'ussgl487100_downward_adjus_cpe_sum, ussgl497100_downward_adjus_cpe_sum,'
                                 ' ussgl487200_downward_adjus_cpe_sum, ussgl497200_downward_adjus_cpe_sum',
            'Rule Message': 'DeobligationsRecoveriesRefundsByTAS_CPE in File A should equal USSGL'
                            ' (4871_CPE+ 4971_CPE+ 4872_CPE+ 4972_CPE) for the TAS in File B.',
            'Source Value Provided': 'deobligationsrecoveriesrefundsbytas_cpe: 16000',
            'Target Value Provided': 'ussgl487100_downward_adjus_cpe_sum: 2000,'
                                     ' ussgl497100_downward_adjus_cpe_sum: 2000,'
                                     ' ussgl487200_downward_adjus_cpe_sum: 400,'
                                     ' ussgl497200_downward_adjus_cpe_sum: 2000',
            'Difference': '9600',
            'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
            'Source Row Number': '10',
            'Rule Label': 'A35'
        },
        {
            'Unique ID': 'TAS: 019-2016/2016-0113-000',
            'Source File': 'appropriations',
            'Source Field Name': 'grossoutlayamountbytas_cpe',
            'Target File': 'program_activity',
            'Target Field Name': 'gross_outlay_amount_by_pro_cpe_sum',
            'Rule Message': 'The GrossOutlayAmountByTAS_CPE amount in the appropriation file (A) does not equal the'
                            ' sum of the corresponding GrossOutlayAmountByProgramObjectClass_CPE values in the'
                            ' award financial file (B). {This value is the sum of all Gross Outlay Amounts reported'
                            ' in file B, to indicate year-to-date activity by TAS/Subaccount.}',
            'Source Value Provided': 'grossoutlayamountbytas_cpe: 10000',
            'Target Value Provided': 'gross_outlay_amount_by_pro_cpe_sum: 6000',
            'Difference': '4000',
            'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
            'Source Row Number': '15',
            'Rule Label': 'A18'
        },
        {
            'Unique ID': 'TAS: 019-2016/2016-0113-000',
            'Source File': 'appropriations',
            'Source Field Name': 'obligationsincurredtotalbytas_cpe',
            'Target File': 'program_activity',
            'Target Field Name': 'obligations_incurred_by_pr_cpe_sum',
            'Rule Message': 'The ObligationsIncurredTotalByTAS_CPE amount in the appropriation file (A) does not'
                            ' equal the negative sum of the corresponding'
                            ' ObligationsIncurredByProgramObjectClass_CPE values in the award financial file (B).',
            'Source Value Provided': 'obligationsincurredtotalbytas_cpe: 12000',
            'Target Value Provided': 'obligations_incurred_by_pr_cpe_sum: 6000',
            'Difference': '18000',
            'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
            'Source Row Number': '15',
            'Rule Label': 'A19'
        },
        {
            'Unique ID': 'TAS: 019-2016/2016-0113-000',
            'Source File': 'appropriations',
            'Source Field Name': 'deobligationsrecoveriesrefundsbytas_cpe',
            'Target File': 'program_activity',
            'Target Field Name': 'ussgl487100_downward_adjus_cpe_sum, ussgl497100_downward_adjus_cpe_sum,'
                                 ' ussgl487200_downward_adjus_cpe_sum, ussgl497200_downward_adjus_cpe_sum',
            'Rule Message': 'DeobligationsRecoveriesRefundsByTAS_CPE in File A should equal USSGL'
                            ' (4871_CPE+ 4971_CPE+ 4872_CPE+ 4972_CPE) for the TAS in File B.',
            'Source Value Provided': 'deobligationsrecoveriesrefundsbytas_cpe: 16000',
            'Target Value Provided': 'ussgl487100_downward_adjus_cpe_sum: 2000,'
                                     ' ussgl497100_downward_adjus_cpe_sum: 2000,'
                                     ' ussgl487200_downward_adjus_cpe_sum: 400,'
                                     ' ussgl497200_downward_adjus_cpe_sum: 2000',
            'Difference': '9600',
            'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
            'Source Row Number': '15',
            'Rule Label': 'A35'
        }
    ]
    assert report_content == expected_values
    self.cleanup()
def test_cross_file_errors(self):
    """Run the cross-file fatal-error checks once per chunking/parallelism config.

    Mirrors `test_cross_file_warnings`: patches the validation manager's
    chunking and parallelism settings, then reruns the shared
    `cross_file_errors` scenario under each configuration.
    """
    for size, use_parallel, batch in self.CONFIGS:
        manager_mod = dataactvalidator.validation_handlers.validationManager
        self.monkeypatch.setattr(manager_mod, 'CHUNK_SIZE', size)
        self.monkeypatch.setattr(manager_mod, 'PARALLEL', use_parallel)
        self.monkeypatch.setattr(manager_mod, 'BATCH_SQL_VAL_RESULTS', batch)
        self.monkeypatch.setattr(dataactvalidator.validation_handlers.validator,
                                 'SQL_VALIDATION_BATCH_SIZE', size)
        self.cross_file_errors()
def cross_file_errors(self):
    """Check cross-file (file A vs file B) fatal-error rules and the report contents.

    A valid file pair must produce zero fatal errors; an invalid pair must
    trip rule A30.1 (TAS in file A missing from file B) on three source rows
    (rows 2, 7, 12), and the generated error report is compared column by
    column against the expected fixture below.
    """
    self.cleanup()

    # Valid
    report_headers, report_content = self.generate_cross_file_report([(CROSS_FILE_A, 'appropriations'),
                                                                      (CROSS_FILE_B, 'program_activity')],
                                                                     warning=False)
    error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
                                                              severity_id=RULE_SEVERITY_DICT['fatal']).count()

    assert error_count == 0
    assert report_headers == self.validator.cross_file_report_headers
    assert len(report_content) == 0
    self.cleanup()

    # SQL Validation
    report_headers, report_content = self.generate_cross_file_report([(INVALID_CROSS_A, 'appropriations'),
                                                                      (INVALID_CROSS_B, 'program_activity')],
                                                                     warning=False)
    warnings = list(self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
                                                                severity_id=RULE_SEVERITY_DICT['fatal']).all())

    # One fatal rule (A30.1) fired on three rows of the source file
    assert len(warnings) == 1
    assert warnings[0].occurrences == 3
    assert report_headers == self.validator.cross_file_report_headers

    expected_values = [
        {
            'Unique ID': 'TAS: 019-072-X-0306-000',
            'Source File': 'appropriations',
            'Source Field Name': 'allocationtransferagencyidentifier, agencyidentifier,'
                                 ' beginningperiodofavailability, endingperiodofavailability,'
                                 ' availabilitytypecode, mainaccountcode, subaccountcode',
            'Target File': 'program_activity',
            'Target Field Name': '',
            'Rule Message': 'All TAS values in File A (appropriations) should exist in File B'
                            ' (object class program activity)',
            'Source Value Provided': 'allocationtransferagencyidentifier: 019, agencyidentifier: 072,'
                                     ' beginningperiodofavailability: , endingperiodofavailability: ,'
                                     ' availabilitytypecode: X, mainaccountcode: 0306, subaccountcode: 000',
            'Target Value Provided': '',
            'Difference': '',
            'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
            'Source Row Number': '2',
            'Rule Label': 'A30.1'
        },
        {
            'Unique ID': 'TAS: 019-072-X-0306-000',
            'Source File': 'appropriations',
            'Source Field Name': 'allocationtransferagencyidentifier, agencyidentifier,'
                                 ' beginningperiodofavailability, endingperiodofavailability,'
                                 ' availabilitytypecode, mainaccountcode, subaccountcode',
            'Target File': 'program_activity',
            'Target Field Name': '',
            'Rule Message': 'All TAS values in File A (appropriations) should exist in File B'
                            ' (object class program activity)',
            'Source Value Provided': 'allocationtransferagencyidentifier: 019, agencyidentifier: 072,'
                                     ' beginningperiodofavailability: , endingperiodofavailability: ,'
                                     ' availabilitytypecode: X, mainaccountcode: 0306, subaccountcode: 000',
            'Target Value Provided': '',
            'Difference': '',
            'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
            'Source Row Number': '7',
            'Rule Label': 'A30.1'
        },
        {
            'Unique ID': 'TAS: 019-072-X-0306-000',
            'Source File': 'appropriations',
            'Source Field Name': 'allocationtransferagencyidentifier, agencyidentifier,'
                                 ' beginningperiodofavailability, endingperiodofavailability,'
                                 ' availabilitytypecode, mainaccountcode, subaccountcode',
            'Target File': 'program_activity',
            'Target Field Name': '',
            'Rule Message': 'All TAS values in File A (appropriations) should exist in File B'
                            ' (object class program activity)',
            'Source Value Provided': 'allocationtransferagencyidentifier: 019, agencyidentifier: 072,'
                                     ' beginningperiodofavailability: , endingperiodofavailability: ,'
                                     ' availabilitytypecode: X, mainaccountcode: 0306, subaccountcode: 000',
            'Target Value Provided': '',
            'Difference': '',
            'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
            'Source Row Number': '12',
            'Rule Label': 'A30.1'
        }
    ]
    assert report_content == expected_values
    self.cleanup()
def test_validation_parallelize_error(self):
    """Parallel validation with a poisoned chunk must fail fast and clean up workers.

    Builds the validator state by hand to mirror the integration setup,
    splits the appropriations file into pandas chunks, injects a bad chunk
    (the string 'BREAK'), and checks that the expected AttributeError
    surfaces and that no child processes are left running afterwards.
    """
    # Test the parallelize function with a broken call to see if the process is properly cleaned up
    self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'MULTIPROCESSING_POOLS', 2)

    # Setting up all the other elements of the validator to simulate the integration test
    self.validator.submission_id = 1
    self.validator.file_type = self.session.query(FileType).filter_by(
        file_type_id=FILE_TYPE_DICT['appropriations']).one()
    self.validator.file_name = APPROP_FILE
    self.setup_csv_record_validation(APPROP_FILE, 'appropriations')
    self.validator.is_fabs = False
    self.validator.reader = CsvReader()
    self.validator.error_list = {}
    self.validator.error_rows = []
    self.validator.total_rows = 1
    self.validator.total_data_rows = 0
    self.validator.short_rows = []
    self.validator.long_rows = []
    self.validator.has_data = False
    self.validator.model = Appropriation

    # Report file names/paths for both the error and warning CSVs
    self.validator.error_file_name = report_file_name(self.validator.submission_id, False,
                                                      self.validator.file_type.name)
    self.validator.error_file_path = ''.join([CONFIG_SERVICES['error_report_path'],
                                              self.validator.error_file_name])
    self.validator.warning_file_name = report_file_name(self.validator.submission_id, True,
                                                        self.validator.file_type.name)
    self.validator.warning_file_path = ''.join([CONFIG_SERVICES['error_report_path'],
                                                self.validator.warning_file_name])

    # Field metadata for the appropriations file type
    self.validator.fields = self.session.query(FileColumn) \
        .filter(FileColumn.file_id == FILE_TYPE_DICT[self.validator.file_type.name]) \
        .order_by(FileColumn.daims_name.asc()).all()
    self.validator.expected_headers, self.validator.parsed_fields = parse_fields(self.session,
                                                                                 self.validator.fields)
    self.validator.csv_schema = {row.name_short: row for row in self.validator.fields}

    # Write the report headers so both report CSVs exist before processing
    with open(self.validator.error_file_path, 'w', newline='') as error_file, \
            open(self.validator.warning_file_path, 'w', newline='') as warning_file:
        error_csv = csv.writer(error_file, delimiter=',', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
        warning_csv = csv.writer(warning_file, delimiter=',', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
        error_csv.writerow(self.validator.report_headers)
        warning_csv.writerow(self.validator.report_headers)

    # Finally open the file for loading into the database with baseline validations
    self.validator.filename = self.validator.reader.get_filename(None, None, self.validator.file_name)
    self.validator.reader.open_file(None, None, self.validator.file_name, self.validator.fields, None,
                                    self.validator.get_file_name(self.validator.error_file_name),
                                    self.validator.daims_to_short_dict[self.validator.file_type.file_type_id],
                                    self.validator.short_to_daims_dict[self.validator.file_type.file_type_id],
                                    is_local=self.validator.is_local)

    # Going back to reprocess the header row
    self.validator.reader.file.seek(0)
    reader_obj = pd.read_csv(self.validator.reader.file, dtype=str, delimiter=',', error_bad_lines=False,
                             na_filter=False, chunksize=2, warn_bad_lines=False)

    # Setting this outside of reader/file type objects which may not be used during processing
    self.validator.flex_fields = ['flex_field_a', 'flex_field_b']
    self.validator.header_dict = self.validator.reader.header_dict
    self.validator.file_type_name = self.validator.file_type.name
    self.validator.file_type_id = self.validator.file_type.file_type_id
    self.validator.job_id = 2

    # Making a broken list of chunks (one that should process fine, another with an error, another fine)
    # This way we can tell that the latter chunks processed later are ignored due to the error
    normal_chunks = list(reader_obj)
    broken_chunks = [normal_chunks[0], 'BREAK', normal_chunks[1], normal_chunks[2]]
    with self.assertRaises(Exception) as val_except:
        # making the reader object a list of strings instead, causing the inner function to break
        self.validator.parallel_data_loading(self.session, broken_chunks)

    # The string chunk has no `.empty` attribute, so DataFrame-style access fails
    self.assertTrue(type(val_except.exception) == AttributeError)
    self.assertTrue(str(val_except.exception) == "'str' object has no attribute 'empty'")

    # Check to see the processes are killed
    job = ps.Process(os.getpid())
    assert len(job.children(recursive=True)) == 0
| tests/integration/error_warning_file_tests.py | 62,907 | Overall integration tests for error/warning reports.
For each file type (single-file, cross-file, errors, warnings), test if each has
- the correct structure
- each column's content is correct after testing each possible type of error:
- formatting
- length
- types
- required/optional
- SQL validation
Attributes:
session: the database session connection
validator: validator instance to be used for the tests
submission_id: the id of the submission foundation
submission: the submission foundation to be used for all the tests
val_job: the validation job to be used for all the tests
Test set-up.
Set up class-wide resources (test data)
Valid Files Invalid Files get the submission test users set up default e-mails for tests Just have one valid submission and then keep on reloading files adding TAS to ensure valid file is valid adding GTAS to ensure valid file is valid Valid Blank File SQL Validation Valid Header Error Header errors do not get saved to the database Read Error Type Error Length Error Required Error + SQL Validation SQL Validation (with difference) TODO put this back when we put A2 back expected_values = [ { 'Unique ID': 'TAS: 049-2014/2015-0100-000', 'Field Name': 'totalbudgetaryresources_cpe, budgetauthorityappropriatedamount_cpe,' ' budgetauthorityunobligatedbalancebroughtforward_fyb,' ' adjustmentstounobligatedbalancebroughtforward_cpe, otherbudgetaryresourcesamount_cpe', 'Rule Message': 'TotalBudgetaryResources_CPE = BudgetAuthorityAppropriatedAmount_CPE +' ' BudgetAuthorityUnobligatedBalanceBroughtForward_FYB +' ' AdjustmentsToUnobligatedBalanceBroughtForward_CPE +' ' OtherBudgetaryResourcesAmount_CPE', 'Value Provided': 'totalbudgetaryresources_cpe: 10.1, budgetauthorityappropriatedamount_cpe: 0.01,' ' budgetauthorityunobligatedbalancebroughtforward_fyb: 3.03,' ' adjustmentstounobligatedbalancebroughtforward_cpe: 2.02,' ' otherbudgetaryresourcesamount_cpe: 4.04', 'Expected Value': 'TotalBudgetaryResources_CPE must equal the sum of these elements:' ' BudgetAuthorityAppropriatedAmount_CPE +' ' BudgetAuthorityUnobligatedBalanceBroughtForward_FYB +' ' AdjustmentsToUnobligatedBalanceBroughtForward_CPE +' ' OtherBudgetaryResourcesAmount_CPE. The Broker cannot distinguish which item is' ' incorrect for this rule. 
Refer to related rule errors and warnings in this report' ' (rules A3, A6, A7, A8, A12) to distinguish which elements may be incorrect.', 'Difference': '1.00', 'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B', 'Row Number': '10', 'Rule Label': 'A2' } ] assert report_content == expected_values Ensure total_obligations are being calculated correctly Valid SQL Validation Valid SQL Validation Test the parallelize function with a broken call to see if the process is properly cleaned up Setting up all the other elements of the validator to simulate the integration test Finally open the file for loading into the database with baseline validations Going back to reprocess the header row Setting this outside of reader/file type objects which may not be used during processing Making a broken list of chunks (one that should process fine, another with an error, another fine) This way we can tell that the latter chunks processed later are ignored due to the error making the reader object a list of strings instead, causing the inner function to break Check to see the processes are killed | 3,835 | en | 0.699792 |
# -*- coding: utf-8 -*-
import os
import sys
import csv
# -----------------------------------------------------------------------------
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
# -----------------------------------------------------------------------------
def retry_fetch_ohlcv(exchange, max_retries, symbol, timeframe, since, limit):
    """Fetch one batch of OHLCV candles, retrying on transient exchange errors.

    The original body had no loop, so a single failed fetch silently returned
    None (crashing the caller on `ohlcv[0][0]`); this version keeps retrying
    and re-raises the last exception once more than `max_retries` attempts
    have failed.

    :param exchange: a ccxt exchange instance (needs `fetch_ohlcv`)
    :param max_retries: number of additional attempts allowed after the first
    :param symbol: market symbol, e.g. 'BTC/USDT'
    :param timeframe: candle timeframe, e.g. '1h'
    :param since: start timestamp in milliseconds
    :param limit: maximum number of candles to request
    :return: list of OHLCV rows as returned by the exchange
    """
    num_retries = 0
    while True:
        try:
            num_retries += 1
            ohlcv = exchange.fetch_ohlcv(symbol, timeframe, since, limit)
            # print('Fetched', len(ohlcv), symbol, 'candles from', exchange.iso8601(ohlcv[0][0]), 'to', exchange.iso8601(ohlcv[-1][0]))
            return ohlcv
        except Exception:
            if num_retries > max_retries:
                raise  # Exception('Failed to fetch', timeframe, symbol, 'OHLCV in', max_retries, 'attempts')
def scrape_ohlcv(exchange, max_retries, symbol, timeframe, since, limit):
    """Walk backwards through exchange history collecting OHLCV candles.

    Repeatedly fetches `limit`-sized batches ending at the earliest timestamp
    seen so far, prepending each batch, until either the exchange stops
    returning older data or the `since` checkpoint has been passed.
    """
    cursor = exchange.milliseconds()
    window_ms = exchange.parse_timeframe(timeframe) * 1000 * limit
    collected = []
    while True:
        fetch_since = cursor - window_ms
        batch = retry_fetch_ohlcv(exchange, max_retries, symbol, timeframe, fetch_since, limit)
        # stop when the exchange returns nothing older than what we already have
        if batch[0][0] >= cursor:
            break
        cursor = batch[0][0]
        collected = batch + collected
        print(len(collected), 'candles in total from', exchange.iso8601(collected[0][0]), 'to', exchange.iso8601(collected[-1][0]))
        # stop once we have walked past the requested checkpoint
        if fetch_since < since:
            break
    return collected
def write_to_csv(filename, data):
    """Write the given rows to `filename` as comma-separated values."""
    with open(filename, mode='w', newline='') as handle:
        writer = csv.writer(handle, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        writer.writerows(data)
def scrape_candles_to_csv(filename, exchange_id, max_retries, symbol, timeframe, since, limit):
    """Instantiate an exchange, download candles back to `since`, and dump them to CSV."""
    # instantiate the exchange by id
    exchange_class = getattr(ccxt, exchange_id)
    exchange = exchange_class({
        'enableRateLimit': True,  # required by the Manual
    })
    # accept ISO-8601 strings as well as millisecond integers
    if isinstance(since, str):
        since = exchange.parse8601(since)
    # preload all markets from the exchange
    exchange.load_markets()
    # fetch all candles, then save them to the csv file
    ohlcv = scrape_ohlcv(exchange, max_retries, symbol, timeframe, since, limit)
    write_to_csv(filename, ohlcv)
    print('Saved', len(ohlcv), 'candles from', exchange.iso8601(ohlcv[0][0]), 'to', exchange.iso8601(ohlcv[-1][0]), 'to', filename)
# -----------------------------------------------------------------------------

# Guard the scrape behind __main__ so importing this module does not
# immediately hit the exchange.
if __name__ == '__main__':
    # 100 hourly BTC/USDT candles per request from Binance, back to 2019-05-01
    scrape_candles_to_csv('binance_3.csv', 'binance', 3, 'BTC/USDT', '1h', '2019-05-01T00:00:00Z', 100)
| binance-fetch-ohlcv-to-csv.py | 3,008 | -*- coding: utf-8 -*- ----------------------------------------------------------------------------- noqa: E402 ----------------------------------------------------------------------------- print('Fetched', len(ohlcv), symbol, 'candles from', exchange.iso8601 (ohlcv[0][0]), 'to', exchange.iso8601 (ohlcv[-1][0])) Exception('Failed to fetch', timeframe, symbol, 'OHLCV in', max_retries, 'attempts') if we have reached the beginning of history if we have reached the checkpoint instantiate the exchange by id required by the Manual convert since from string to milliseconds integer if needed preload all markets from the exchange fetch all candles save them to csv file ----------------------------------------------------------------------------- | 745 | en | 0.553811 |
##########################################
# File: refine_multiple_shards_joint.py #
# Copyright Richard Stebbing 2014. #
# Distributed under the MIT License. #
# (See accompany file LICENSE or copy at #
# http://opensource.org/licenses/MIT) #
##########################################
# Imports
import argparse
import os
from functools import partial
from operator import itemgetter
from time import time

import matplotlib.pyplot as plt
import numpy as np

import visualise_progress as vis
from solve import fit_and_colour_shards

# Requires `rscommon`.
from rscommon.pickle_ import dump
# main
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input_all_iterations_Xy_path')
parser.add_argument('output_dir')
parser.add_argument('--visualise-progress',
action='store_true',
default=False)
parser.add_argument('--ftol', type=float, default=1e-8)
parser.add_argument('--xtol', type=float, default=1e-8)
parser.add_argument('--maxfev', type=int, default=0)
parser.add_argument('--epsilon', type=float, default=1e-6)
args = parser.parse_args()
ensure_output_path = partial(vis.ensure_path, args.output_dir)
all_iterations_Xy, orig_args = np.load(args.input_all_iterations_Xy_path)
print '<-', orig_args['input_path']
I = plt.imread(orig_args['input_path']).astype(np.float64)[..., :3]
if orig_args['base'] == 'white':
J0 = np.ones_like(I)
elif orig_args['base'] == 'black':
J0 = np.zeros_like(I)
else:
head, tail = os.path.split(orig_args['base'])
root, ext = os.path.splitext(tail)
if ext == '.dat':
J0 = np.load(orig_args['base'])
else:
J0 = plt.imread(orig_args['base']).astype(np.float64)[..., :3]
Xs0, ys0 = zip(*map(itemgetter(-1), all_iterations_Xy))
print 'Solving with `fit_and_colour_shards` ...'
np.seterr(over='ignore')
t0 = time()
(Xs, ys, all_Xs_ys), (exit_code, E0, E1, J, J1) = fit_and_colour_shards(
I, J0, orig_args['alpha'],
Xs0, ys0,
k=orig_args['k'],
epsilon=args.epsilon,
ftol=args.ftol,
xtol=args.xtol,
maxfev=args.maxfev,
return_info=True,
verbose=True)
t1 = time()
np.seterr(over='warn')
print 'E0:', E0
print 'E1:', E1
print 'Exit code: %d' % exit_code
print 'Time taken: %.3fs' % (t1 - t0)
output_path = ensure_output_path('all_Xs_ys.dat')
print '->', output_path
dump(output_path, (all_Xs_ys, args.__dict__), raise_on_failure=False)
output_path = ensure_output_path('J.dat')
print '->', output_path
dump(output_path, (J, args.__dict__), raise_on_failure=False)
output_path = ensure_output_path('J1.dat')
print '->', output_path
dump(output_path, (J1, args.__dict__), raise_on_failure=False)
if args.visualise_progress:
output_path = ensure_output_path('J.png')
print '->', output_path
f, ax = vis.make_image_figure(J)
vis.save_image_figure(output_path, f, J.shape)
output_path = ensure_output_path('J1.png')
print '->', output_path
f, ax = vis.make_image_figure(J1)
vis.save_image_figure(output_path, f, J1.shape)
# Script entry point.
if __name__ == '__main__':
    main()
| refine_multiple_shards_joint.py | 3,417 | File: refine_multiple_shards_joint.py Copyright Richard Stebbing 2014. Distributed under the MIT License. (See accompany file LICENSE or copy at http://opensource.org/licenses/MIT) Imports Requires `rscommon`. main | 233 | en | 0.487308 |
import numpy as np
import math
from arena import Arena
from agent import HAgent, AAgent
import random
# np.random.seed(1234)
# place the humans on the arena
def place_soldiers(n, arena, agents):
    """Place n human soldiers along row 0, two columns apart.

    Soldiers are stored in `agents` under 1-based keys and marked on the
    arena grid with the value 1.
    """
    for idx in range(n):
        col = 2 * idx
        agents[idx + 1] = HAgent([0, col])
        arena.arena[0, col] = 1
    return arena, agents
# place the alien agents on the arena
def place_targets(n, arena, targets, pos=None):
    """Place n alien agents on the arena, at given positions or randomly.

    With `pos` given, each targets[i + 1] is created at pos[i] and marked on
    the grid with value 2. Without it, positions are sampled uniformly and
    rejected until at least one coordinate exceeds 7 (keeps aliens out of the
    top-left 8x8 region where soldiers start).
    Returns (arena, targets, list_of_positions).
    """
    if pos is not None:
        orig_pos = pos
        for i in range(n):
            targets[i + 1] = AAgent(pos[i])
            arena.arena[pos[i][0], pos[i][1]] = 2
    else:
        orig_pos = []
        for i in range(n):
            # rejection-sample a cell until it lies outside the spawn region
            while True:
                x = np.rint(np.array([(arena.size - 1) * np.random.rand(1),
                                      (arena.size - 1) * np.random.rand(1)]))
                if x[0] > 7 or x[1] > 7:
                    break
            # NOTE: the comprehension variable shadows the outer loop's `i`
            x = [int(i) for i in x]
            # x = [19, 19]
            targets[i + 1] = AAgent(x)
            arena.arena[x[0], x[1]] = 2
            orig_pos.append([x[0], x[1]])
    return arena, targets, orig_pos
# adds half-cover tiles in random locations in the arena
# At most n cover tiles added, though potentially fewer
def place_half_cover(n, arena):
for i in range(n):
x = np.random.randint(0, (arena.size - 1))
y = np.random.randint(0, (arena.size - 1))
if arena.arena[x, y] == 0:
arena.arena[x, y] = 3
return arena
# movement for agents
def move(agent, arena, loc):
    """Move `agent` to grid cell `loc` if it has moves left and `loc` is in range.

    On success: the old cell is cleared, the new cell is marked 1, the agent
    loses one move, and the arena clock advances. On failure (no moves, or
    destination out of range) nothing changes.

    Bug fix: the range check previously used abs(dx + dy), which let moves
    like (-3, +3) pass as "distance 0"; it now uses the Manhattan distance
    abs(dx) + abs(dy).
    """
    # Check that agent has movement, if not, do nothing
    if agent.moves <= 0:
        return agent, arena
    # Check if in movement range (Manhattan distance)
    if abs(loc[0] - agent.pos[0]) + abs(loc[1] - agent.pos[1]) <= agent.move_range:
        # update the arena matrix
        arena.arena[agent.pos[0], agent.pos[1]] = 0
        arena.arena[loc[0], loc[1]] = 1
        # update agent location, number of moves
        agent.moves -= 1
        agent.pos = loc
        arena.time += 1
        return agent, arena
    # if not in movement range, do nothing
    return agent, arena
# reload action
def reload(agent):
    """Spend one move to refill the agent's ammo to 5; no-op without moves."""
    if agent.moves > 0:
        agent.moves -= 1
        agent.ammo = 5
    return agent
def fire(agent, arena, target):
    """Resolve a ranged attack from `agent` on `target`.

    Spends all of the agent's moves and one ammo, rolls to-hit (aim minus a
    flat 20 cover penalty plus a distance bonus), and on a hit rolls for a
    critical (guaranteed flanking bonus when the target has no cover) before
    applying damage and checking for death. Advances the arena clock either
    way. Returns (agent, arena, target).
    """
    # for the moment, assume anything can be fired on
    # set firing agent's moves to zero
    agent.moves = 0
    agent.ammo -= 1
    cover = 0
    # check if target is in (half) cover: a cover tile (value 3) adjacent to
    # the target on the side facing the shooter grants a flat 20 penalty
    if agent.pos[0] + 1 > target.pos[0]:
        if arena.arena[target.pos[0] - 1, target.pos[1]] == 3:
            cover = 20
    if agent.pos[0] - 1 < target.pos[0]:
        if arena.arena[target.pos[0] + 1, target.pos[1]] == 3:
            cover = 20
    if agent.pos[1] + 1 > target.pos[1]:
        if arena.arena[target.pos[0], target.pos[1] - 1] == 3:
            cover = 20
    if agent.pos[1] - 1 < target.pos[1]:
        if arena.arena[target.pos[0], target.pos[1] + 1] == 3:
            cover = 20
    # for distance equation, see
    # https://www.ufopaedia.org/index.php/Chance_to_Hit_(EU2012)
    diff = [agent.pos[0] - target.pos[0], agent.pos[1] - target.pos[1]]
    distance_chance = 42 - 4.5 * (np.linalg.norm(diff))
    # Hit chance is base aim, less cover, plus distance modifier
    to_hit = agent.aim - cover + distance_chance
    if np.random.randint(100) >= to_hit:
        # miss, so no change
        arena.time += 1
        return agent, arena, target
    else:
        flanking = 0
        crit_modifier = 1
        # check if critical: an uncovered target counts as flanked
        if cover == 0:
            flanking = 50
        crit_chance = agent.base_crit + flanking
        # crit modifier in xcom is 1.5x damage
        if np.random.randint(100) < crit_chance:
            crit_modifier = 1.5
        # slight random variance from base damage, +1 to -1
        damage = math.floor(crit_modifier * (np.random.randint(-1, 2) + agent.damage))
        # apply damage and return
        target.health -= damage
        # check if damage causes death
        arena, target = check_death_enemy(arena, target)
        arena.time += 1
        return agent, arena, target
# check to see if character is dead, update arena information if so
def check_death_enemy(arena, target):
    """If `target` is dead, remove it from the arena and decrement the count.

    Bug fix: the grid cell was cleared with `arena.arena[target.pos] = 0`;
    because `target.pos` is a two-element list, numpy fancy indexing zeroed
    two entire rows instead of the single cell. Only the target's own cell
    is cleared now, matching how the rest of the module indexes the grid.
    """
    if target.health <= 0:
        target.moves = 0
        arena.arena[target.pos[0], target.pos[1]] = 0
        arena.targets -= 1
        # clamp so the live-target count never goes negative
        if arena.targets <= 0:
            arena.targets = 0
    return arena, target
# refresh movement for non-dead characters
def new_turn(arena, agents, targets):
    """Refresh movement (2 moves) for every living character at turn start.

    Bug fix: `agents`/`targets` are built as dicts elsewhere in this module
    (e.g. `place_soldiers`, `place_targets`), and iterating a dict yields its
    integer keys, so `i.health` raised AttributeError. Dicts are now iterated
    over their values; plain iterables of agents still work as before.
    """
    members = agents.values() if isinstance(agents, dict) else agents
    for member in members:
        if member.health > 0:
            member.moves = 2
    foes = targets.values() if isinstance(targets, dict) else targets
    for foe in foes:
        if foe.health > 0:
            foe.moves = 2
    return arena, agents, targets
# get a valid move
def get_valid_move(agent):
    """Pick a random destination within +/-3 of the agent, forced onto the board.

    Negative coordinates are mirrored to positive ones and anything past 19
    is clamped, so the result always lies on the 20x20 grid.
    """
    base_x, base_y = agent.pos[0], agent.pos[1]
    x = int(random.randint(base_x - 3, base_x + 3))
    y = int(random.randint(base_y - 3, base_y + 3))
    # mirror negative coordinates back onto the board
    if x < 0:
        x = -x
    if y < 0:
        y = -y
    # clamp to the board edge
    x = min(x, 19)
    y = min(y, 19)
    return x, y
| helper.py | 5,194 | np.random.seed(1234) place the humans on the arena place the alien agents on the arena x = [19, 19] adds half-cover tiles in random locations in the arena At most n cover tiles added, though potentially fewer movement for agents Check that agent has movement, if not, do nothing print('unsuccessful move') Check if in movement range print('successful move') update the arena matrix update agent location, number of moves if not in movement range, do nothing print('unsuccessful move') reload action for the moment, assume anything can be fired on set firing agent's moves to zero check if target is in (half) cover for distance equation, see https://www.ufopaedia.org/index.php/Chance_to_Hit_(EU2012) Hit chance is base aim, less cover, plus distance modifier miss, so no change check if critical crit modifier in xcom is 1.5x damage slight random variance from base damage, +1 to -1 apply damage and return check if damage causes death check to see if character is dead, update arena information if so refresh movement for non-dead characters get a valid move print(x_old, y_old) print(x, y) | 1,092 | en | 0.78169 |
# -*- coding: utf-8 -*-
# This file was generated
import ctypes
import nidigital.errors as errors
import threading
from nidigital._visatype import * # noqa: F403,H303
import nidigital.history_ram_cycle_information as history_ram_cycle_information # noqa: F401
class Library(object):
'''Library
Wrapper around driver library.
Class will setup the correct ctypes information for every function on first call.
'''
def __init__(self, ctypes_library):
self._func_lock = threading.Lock()
self._library = ctypes_library
# We cache the cfunc object from the ctypes.CDLL object
self.niDigital_Abort_cfunc = None
self.niDigital_AbortKeepAlive_cfunc = None
self.niDigital_ApplyLevelsAndTiming_cfunc = None
self.niDigital_ApplyTDROffsets_cfunc = None
self.niDigital_BurstPattern_cfunc = None
self.niDigital_ClockGenerator_Abort_cfunc = None
self.niDigital_ClockGenerator_GenerateClock_cfunc = None
self.niDigital_Commit_cfunc = None
self.niDigital_ConfigureActiveLoadLevels_cfunc = None
self.niDigital_ConfigurePatternBurstSites_cfunc = None
self.niDigital_ConfigureTimeSetCompareEdgesStrobe_cfunc = None
self.niDigital_ConfigureTimeSetCompareEdgesStrobe2x_cfunc = None
self.niDigital_ConfigureTimeSetDriveEdges_cfunc = None
self.niDigital_ConfigureTimeSetDriveEdges2x_cfunc = None
self.niDigital_ConfigureTimeSetDriveFormat_cfunc = None
self.niDigital_ConfigureTimeSetEdge_cfunc = None
self.niDigital_ConfigureTimeSetEdgeMultiplier_cfunc = None
self.niDigital_ConfigureTimeSetPeriod_cfunc = None
self.niDigital_ConfigureVoltageLevels_cfunc = None
self.niDigital_CreateCaptureWaveformFromFileDigicapture_cfunc = None
self.niDigital_CreateCaptureWaveformParallel_cfunc = None
self.niDigital_CreateCaptureWaveformSerial_cfunc = None
self.niDigital_CreateSourceWaveformFromFileTDMS_cfunc = None
self.niDigital_CreateSourceWaveformParallel_cfunc = None
self.niDigital_CreateSourceWaveformSerial_cfunc = None
self.niDigital_CreateTimeSet_cfunc = None
self.niDigital_DeleteAllTimeSets_cfunc = None
self.niDigital_DisableSites_cfunc = None
self.niDigital_EnableSites_cfunc = None
self.niDigital_FetchCaptureWaveformU32_cfunc = None
self.niDigital_FetchHistoryRAMCycleInformation_cfunc = None
self.niDigital_FetchHistoryRAMCyclePinData_cfunc = None
self.niDigital_FetchHistoryRAMScanCycleNumber_cfunc = None
self.niDigital_FrequencyCounter_MeasureFrequency_cfunc = None
self.niDigital_GetAttributeViBoolean_cfunc = None
self.niDigital_GetAttributeViInt32_cfunc = None
self.niDigital_GetAttributeViInt64_cfunc = None
self.niDigital_GetAttributeViReal64_cfunc = None
self.niDigital_GetAttributeViString_cfunc = None
self.niDigital_GetChannelNameFromString_cfunc = None
self.niDigital_GetError_cfunc = None
self.niDigital_GetFailCount_cfunc = None
self.niDigital_GetHistoryRAMSampleCount_cfunc = None
self.niDigital_GetPatternName_cfunc = None
self.niDigital_GetPatternPinList_cfunc = None
self.niDigital_GetPinName_cfunc = None
self.niDigital_GetPinResultsPinInformation_cfunc = None
self.niDigital_GetSitePassFail_cfunc = None
self.niDigital_GetSiteResultsSiteNumbers_cfunc = None
self.niDigital_GetTimeSetDriveFormat_cfunc = None
self.niDigital_GetTimeSetEdge_cfunc = None
self.niDigital_GetTimeSetEdgeMultiplier_cfunc = None
self.niDigital_GetTimeSetName_cfunc = None
self.niDigital_GetTimeSetPeriod_cfunc = None
self.niDigital_InitWithOptions_cfunc = None
self.niDigital_Initiate_cfunc = None
self.niDigital_IsDone_cfunc = None
self.niDigital_IsSiteEnabled_cfunc = None
self.niDigital_LoadLevels_cfunc = None
self.niDigital_LoadPattern_cfunc = None
self.niDigital_LoadPinMap_cfunc = None
self.niDigital_LoadSpecifications_cfunc = None
self.niDigital_LoadTiming_cfunc = None
self.niDigital_LockSession_cfunc = None
self.niDigital_PPMU_Measure_cfunc = None
self.niDigital_PPMU_Source_cfunc = None
self.niDigital_ReadSequencerFlag_cfunc = None
self.niDigital_ReadSequencerRegister_cfunc = None
self.niDigital_ReadStatic_cfunc = None
self.niDigital_ResetDevice_cfunc = None
self.niDigital_SelfCalibrate_cfunc = None
self.niDigital_SendSoftwareEdgeTrigger_cfunc = None
self.niDigital_SetAttributeViBoolean_cfunc = None
self.niDigital_SetAttributeViInt32_cfunc = None
self.niDigital_SetAttributeViInt64_cfunc = None
self.niDigital_SetAttributeViReal64_cfunc = None
self.niDigital_SetAttributeViString_cfunc = None
self.niDigital_TDR_cfunc = None
self.niDigital_UnloadAllPatterns_cfunc = None
self.niDigital_UnloadSpecifications_cfunc = None
self.niDigital_UnlockSession_cfunc = None
self.niDigital_WaitUntilDone_cfunc = None
self.niDigital_WriteSequencerFlag_cfunc = None
self.niDigital_WriteSequencerRegister_cfunc = None
self.niDigital_WriteSourceWaveformBroadcastU32_cfunc = None
self.niDigital_WriteSourceWaveformDataFromFileTDMS_cfunc = None
self.niDigital_WriteSourceWaveformSiteUniqueU32_cfunc = None
self.niDigital_WriteStatic_cfunc = None
self.niDigital_close_cfunc = None
self.niDigital_error_message_cfunc = None
self.niDigital_reset_cfunc = None
self.niDigital_self_test_cfunc = None
def _get_library_function(self, name):
try:
function = getattr(self._library, name)
except AttributeError as e:
raise errors.DriverTooOldError() from e
return function
def niDigital_Abort(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_Abort_cfunc is None:
self.niDigital_Abort_cfunc = self._get_library_function('niDigital_Abort')
self.niDigital_Abort_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_Abort_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_Abort_cfunc(vi)
def niDigital_AbortKeepAlive(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_AbortKeepAlive_cfunc is None:
self.niDigital_AbortKeepAlive_cfunc = self._get_library_function('niDigital_AbortKeepAlive')
self.niDigital_AbortKeepAlive_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_AbortKeepAlive_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_AbortKeepAlive_cfunc(vi)
def niDigital_ApplyLevelsAndTiming(self, vi, site_list, levels_sheet, timing_sheet, initial_state_high_pins, initial_state_low_pins, initial_state_tristate_pins): # noqa: N802
with self._func_lock:
if self.niDigital_ApplyLevelsAndTiming_cfunc is None:
self.niDigital_ApplyLevelsAndTiming_cfunc = self._get_library_function('niDigital_ApplyLevelsAndTiming')
self.niDigital_ApplyLevelsAndTiming_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_ApplyLevelsAndTiming_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ApplyLevelsAndTiming_cfunc(vi, site_list, levels_sheet, timing_sheet, initial_state_high_pins, initial_state_low_pins, initial_state_tristate_pins)
def niDigital_ApplyTDROffsets(self, vi, channel_list, num_offsets, offsets): # noqa: N802
with self._func_lock:
if self.niDigital_ApplyTDROffsets_cfunc is None:
self.niDigital_ApplyTDROffsets_cfunc = self._get_library_function('niDigital_ApplyTDROffsets')
self.niDigital_ApplyTDROffsets_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViReal64)] # noqa: F405
self.niDigital_ApplyTDROffsets_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ApplyTDROffsets_cfunc(vi, channel_list, num_offsets, offsets)
def niDigital_BurstPattern(self, vi, site_list, start_label, select_digital_function, wait_until_done, timeout): # noqa: N802
with self._func_lock:
if self.niDigital_BurstPattern_cfunc is None:
self.niDigital_BurstPattern_cfunc = self._get_library_function('niDigital_BurstPattern')
self.niDigital_BurstPattern_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViBoolean, ViBoolean, ViReal64] # noqa: F405
self.niDigital_BurstPattern_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_BurstPattern_cfunc(vi, site_list, start_label, select_digital_function, wait_until_done, timeout)
def niDigital_ClockGenerator_Abort(self, vi, channel_list): # noqa: N802
with self._func_lock:
if self.niDigital_ClockGenerator_Abort_cfunc is None:
self.niDigital_ClockGenerator_Abort_cfunc = self._get_library_function('niDigital_ClockGenerator_Abort')
self.niDigital_ClockGenerator_Abort_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_ClockGenerator_Abort_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ClockGenerator_Abort_cfunc(vi, channel_list)
def niDigital_ClockGenerator_GenerateClock(self, vi, channel_list, frequency, select_digital_function): # noqa: N802
with self._func_lock:
if self.niDigital_ClockGenerator_GenerateClock_cfunc is None:
self.niDigital_ClockGenerator_GenerateClock_cfunc = self._get_library_function('niDigital_ClockGenerator_GenerateClock')
self.niDigital_ClockGenerator_GenerateClock_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViReal64, ViBoolean] # noqa: F405
self.niDigital_ClockGenerator_GenerateClock_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ClockGenerator_GenerateClock_cfunc(vi, channel_list, frequency, select_digital_function)
def niDigital_Commit(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_Commit_cfunc is None:
self.niDigital_Commit_cfunc = self._get_library_function('niDigital_Commit')
self.niDigital_Commit_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_Commit_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_Commit_cfunc(vi)
def niDigital_ConfigureActiveLoadLevels(self, vi, channel_list, iol, ioh, vcom): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureActiveLoadLevels_cfunc is None:
self.niDigital_ConfigureActiveLoadLevels_cfunc = self._get_library_function('niDigital_ConfigureActiveLoadLevels')
self.niDigital_ConfigureActiveLoadLevels_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViReal64, ViReal64, ViReal64] # noqa: F405
self.niDigital_ConfigureActiveLoadLevels_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureActiveLoadLevels_cfunc(vi, channel_list, iol, ioh, vcom)
def niDigital_ConfigurePatternBurstSites(self, vi, site_list): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigurePatternBurstSites_cfunc is None:
self.niDigital_ConfigurePatternBurstSites_cfunc = self._get_library_function('niDigital_ConfigurePatternBurstSites')
self.niDigital_ConfigurePatternBurstSites_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_ConfigurePatternBurstSites_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigurePatternBurstSites_cfunc(vi, site_list)
def niDigital_ConfigureTimeSetCompareEdgesStrobe(self, vi, pin_list, time_set_name, strobe_edge): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureTimeSetCompareEdgesStrobe_cfunc is None:
self.niDigital_ConfigureTimeSetCompareEdgesStrobe_cfunc = self._get_library_function('niDigital_ConfigureTimeSetCompareEdgesStrobe')
self.niDigital_ConfigureTimeSetCompareEdgesStrobe_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViReal64] # noqa: F405
self.niDigital_ConfigureTimeSetCompareEdgesStrobe_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureTimeSetCompareEdgesStrobe_cfunc(vi, pin_list, time_set_name, strobe_edge)
def niDigital_ConfigureTimeSetCompareEdgesStrobe2x(self, vi, pin_list, time_set_name, strobe_edge, strobe2_edge): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureTimeSetCompareEdgesStrobe2x_cfunc is None:
self.niDigital_ConfigureTimeSetCompareEdgesStrobe2x_cfunc = self._get_library_function('niDigital_ConfigureTimeSetCompareEdgesStrobe2x')
self.niDigital_ConfigureTimeSetCompareEdgesStrobe2x_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViReal64, ViReal64] # noqa: F405
self.niDigital_ConfigureTimeSetCompareEdgesStrobe2x_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureTimeSetCompareEdgesStrobe2x_cfunc(vi, pin_list, time_set_name, strobe_edge, strobe2_edge)
def niDigital_ConfigureTimeSetDriveEdges(self, vi, pin_list, time_set_name, format, drive_on_edge, drive_data_edge, drive_return_edge, drive_off_edge): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureTimeSetDriveEdges_cfunc is None:
self.niDigital_ConfigureTimeSetDriveEdges_cfunc = self._get_library_function('niDigital_ConfigureTimeSetDriveEdges')
self.niDigital_ConfigureTimeSetDriveEdges_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32, ViReal64, ViReal64, ViReal64, ViReal64] # noqa: F405
self.niDigital_ConfigureTimeSetDriveEdges_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureTimeSetDriveEdges_cfunc(vi, pin_list, time_set_name, format, drive_on_edge, drive_data_edge, drive_return_edge, drive_off_edge)
def niDigital_ConfigureTimeSetDriveEdges2x(self, vi, pin_list, time_set_name, format, drive_on_edge, drive_data_edge, drive_return_edge, drive_off_edge, drive_data2_edge, drive_return2_edge): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureTimeSetDriveEdges2x_cfunc is None:
self.niDigital_ConfigureTimeSetDriveEdges2x_cfunc = self._get_library_function('niDigital_ConfigureTimeSetDriveEdges2x')
self.niDigital_ConfigureTimeSetDriveEdges2x_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32, ViReal64, ViReal64, ViReal64, ViReal64, ViReal64, ViReal64] # noqa: F405
self.niDigital_ConfigureTimeSetDriveEdges2x_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureTimeSetDriveEdges2x_cfunc(vi, pin_list, time_set_name, format, drive_on_edge, drive_data_edge, drive_return_edge, drive_off_edge, drive_data2_edge, drive_return2_edge)
def niDigital_ConfigureTimeSetDriveFormat(self, vi, pin_list, time_set_name, drive_format): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureTimeSetDriveFormat_cfunc is None:
self.niDigital_ConfigureTimeSetDriveFormat_cfunc = self._get_library_function('niDigital_ConfigureTimeSetDriveFormat')
self.niDigital_ConfigureTimeSetDriveFormat_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32] # noqa: F405
self.niDigital_ConfigureTimeSetDriveFormat_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureTimeSetDriveFormat_cfunc(vi, pin_list, time_set_name, drive_format)
def niDigital_ConfigureTimeSetEdge(self, vi, pin_list, time_set_name, edge, time): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureTimeSetEdge_cfunc is None:
self.niDigital_ConfigureTimeSetEdge_cfunc = self._get_library_function('niDigital_ConfigureTimeSetEdge')
self.niDigital_ConfigureTimeSetEdge_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32, ViReal64] # noqa: F405
self.niDigital_ConfigureTimeSetEdge_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureTimeSetEdge_cfunc(vi, pin_list, time_set_name, edge, time)
def niDigital_ConfigureTimeSetEdgeMultiplier(self, vi, pin_list, time_set_name, edge_multiplier): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureTimeSetEdgeMultiplier_cfunc is None:
self.niDigital_ConfigureTimeSetEdgeMultiplier_cfunc = self._get_library_function('niDigital_ConfigureTimeSetEdgeMultiplier')
self.niDigital_ConfigureTimeSetEdgeMultiplier_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32] # noqa: F405
self.niDigital_ConfigureTimeSetEdgeMultiplier_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureTimeSetEdgeMultiplier_cfunc(vi, pin_list, time_set_name, edge_multiplier)
def niDigital_ConfigureTimeSetPeriod(self, vi, time_set_name, period): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureTimeSetPeriod_cfunc is None:
self.niDigital_ConfigureTimeSetPeriod_cfunc = self._get_library_function('niDigital_ConfigureTimeSetPeriod')
self.niDigital_ConfigureTimeSetPeriod_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViReal64] # noqa: F405
self.niDigital_ConfigureTimeSetPeriod_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureTimeSetPeriod_cfunc(vi, time_set_name, period)
def niDigital_ConfigureVoltageLevels(self, vi, channel_list, vil, vih, vol, voh, vterm): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureVoltageLevels_cfunc is None:
self.niDigital_ConfigureVoltageLevels_cfunc = self._get_library_function('niDigital_ConfigureVoltageLevels')
self.niDigital_ConfigureVoltageLevels_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViReal64, ViReal64, ViReal64, ViReal64, ViReal64] # noqa: F405
self.niDigital_ConfigureVoltageLevels_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureVoltageLevels_cfunc(vi, channel_list, vil, vih, vol, voh, vterm)
def niDigital_CreateCaptureWaveformFromFileDigicapture(self, vi, waveform_name, waveform_file_path): # noqa: N802
with self._func_lock:
if self.niDigital_CreateCaptureWaveformFromFileDigicapture_cfunc is None:
self.niDigital_CreateCaptureWaveformFromFileDigicapture_cfunc = self._get_library_function('niDigital_CreateCaptureWaveformFromFileDigicapture')
self.niDigital_CreateCaptureWaveformFromFileDigicapture_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_CreateCaptureWaveformFromFileDigicapture_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_CreateCaptureWaveformFromFileDigicapture_cfunc(vi, waveform_name, waveform_file_path)
def niDigital_CreateCaptureWaveformParallel(self, vi, pin_list, waveform_name): # noqa: N802
with self._func_lock:
if self.niDigital_CreateCaptureWaveformParallel_cfunc is None:
self.niDigital_CreateCaptureWaveformParallel_cfunc = self._get_library_function('niDigital_CreateCaptureWaveformParallel')
self.niDigital_CreateCaptureWaveformParallel_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_CreateCaptureWaveformParallel_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_CreateCaptureWaveformParallel_cfunc(vi, pin_list, waveform_name)
def niDigital_CreateCaptureWaveformSerial(self, vi, pin_list, waveform_name, sample_width, bit_order): # noqa: N802
with self._func_lock:
if self.niDigital_CreateCaptureWaveformSerial_cfunc is None:
self.niDigital_CreateCaptureWaveformSerial_cfunc = self._get_library_function('niDigital_CreateCaptureWaveformSerial')
self.niDigital_CreateCaptureWaveformSerial_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViUInt32, ViInt32] # noqa: F405
self.niDigital_CreateCaptureWaveformSerial_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_CreateCaptureWaveformSerial_cfunc(vi, pin_list, waveform_name, sample_width, bit_order)
def niDigital_CreateSourceWaveformFromFileTDMS(self, vi, waveform_name, waveform_file_path, write_waveform_data): # noqa: N802
with self._func_lock:
if self.niDigital_CreateSourceWaveformFromFileTDMS_cfunc is None:
self.niDigital_CreateSourceWaveformFromFileTDMS_cfunc = self._get_library_function('niDigital_CreateSourceWaveformFromFileTDMS')
self.niDigital_CreateSourceWaveformFromFileTDMS_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViBoolean] # noqa: F405
self.niDigital_CreateSourceWaveformFromFileTDMS_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_CreateSourceWaveformFromFileTDMS_cfunc(vi, waveform_name, waveform_file_path, write_waveform_data)
def niDigital_CreateSourceWaveformParallel(self, vi, pin_list, waveform_name, data_mapping): # noqa: N802
with self._func_lock:
if self.niDigital_CreateSourceWaveformParallel_cfunc is None:
self.niDigital_CreateSourceWaveformParallel_cfunc = self._get_library_function('niDigital_CreateSourceWaveformParallel')
self.niDigital_CreateSourceWaveformParallel_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32] # noqa: F405
self.niDigital_CreateSourceWaveformParallel_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_CreateSourceWaveformParallel_cfunc(vi, pin_list, waveform_name, data_mapping)
def niDigital_CreateSourceWaveformSerial(self, vi, pin_list, waveform_name, data_mapping, sample_width, bit_order): # noqa: N802
with self._func_lock:
if self.niDigital_CreateSourceWaveformSerial_cfunc is None:
self.niDigital_CreateSourceWaveformSerial_cfunc = self._get_library_function('niDigital_CreateSourceWaveformSerial')
self.niDigital_CreateSourceWaveformSerial_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32, ViUInt32, ViInt32] # noqa: F405
self.niDigital_CreateSourceWaveformSerial_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_CreateSourceWaveformSerial_cfunc(vi, pin_list, waveform_name, data_mapping, sample_width, bit_order)
def niDigital_CreateTimeSet(self, vi, name): # noqa: N802
with self._func_lock:
if self.niDigital_CreateTimeSet_cfunc is None:
self.niDigital_CreateTimeSet_cfunc = self._get_library_function('niDigital_CreateTimeSet')
self.niDigital_CreateTimeSet_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_CreateTimeSet_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_CreateTimeSet_cfunc(vi, name)
def niDigital_DeleteAllTimeSets(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_DeleteAllTimeSets_cfunc is None:
self.niDigital_DeleteAllTimeSets_cfunc = self._get_library_function('niDigital_DeleteAllTimeSets')
self.niDigital_DeleteAllTimeSets_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_DeleteAllTimeSets_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_DeleteAllTimeSets_cfunc(vi)
def niDigital_DisableSites(self, vi, site_list): # noqa: N802
with self._func_lock:
if self.niDigital_DisableSites_cfunc is None:
self.niDigital_DisableSites_cfunc = self._get_library_function('niDigital_DisableSites')
self.niDigital_DisableSites_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_DisableSites_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_DisableSites_cfunc(vi, site_list)
def niDigital_EnableSites(self, vi, site_list): # noqa: N802
with self._func_lock:
if self.niDigital_EnableSites_cfunc is None:
self.niDigital_EnableSites_cfunc = self._get_library_function('niDigital_EnableSites')
self.niDigital_EnableSites_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_EnableSites_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_EnableSites_cfunc(vi, site_list)
def niDigital_FetchCaptureWaveformU32(self, vi, site_list, waveform_name, samples_to_read, timeout, data_buffer_size, data, actual_num_waveforms, actual_samples_per_waveform): # noqa: N802
with self._func_lock:
if self.niDigital_FetchCaptureWaveformU32_cfunc is None:
self.niDigital_FetchCaptureWaveformU32_cfunc = self._get_library_function('niDigital_FetchCaptureWaveformU32')
self.niDigital_FetchCaptureWaveformU32_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32, ViReal64, ViInt32, ctypes.POINTER(ViUInt32), ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_FetchCaptureWaveformU32_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_FetchCaptureWaveformU32_cfunc(vi, site_list, waveform_name, samples_to_read, timeout, data_buffer_size, data, actual_num_waveforms, actual_samples_per_waveform)
def niDigital_FetchHistoryRAMCycleInformation(self, vi, site, sample_index, pattern_index, time_set_index, vector_number, cycle_number, num_dut_cycles): # noqa: N802
with self._func_lock:
if self.niDigital_FetchHistoryRAMCycleInformation_cfunc is None:
self.niDigital_FetchHistoryRAMCycleInformation_cfunc = self._get_library_function('niDigital_FetchHistoryRAMCycleInformation')
self.niDigital_FetchHistoryRAMCycleInformation_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt64, ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt64), ctypes.POINTER(ViInt64), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_FetchHistoryRAMCycleInformation_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_FetchHistoryRAMCycleInformation_cfunc(vi, site, sample_index, pattern_index, time_set_index, vector_number, cycle_number, num_dut_cycles)
def niDigital_FetchHistoryRAMCyclePinData(self, vi, site, pin_list, sample_index, dut_cycle_index, pin_data_buffer_size, expected_pin_states, actual_pin_states, per_pin_pass_fail, actual_num_pin_data): # noqa: N802
with self._func_lock:
if self.niDigital_FetchHistoryRAMCyclePinData_cfunc is None:
self.niDigital_FetchHistoryRAMCyclePinData_cfunc = self._get_library_function('niDigital_FetchHistoryRAMCyclePinData')
self.niDigital_FetchHistoryRAMCyclePinData_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt64, ViInt32, ViInt32, ctypes.POINTER(ViUInt8), ctypes.POINTER(ViUInt8), ctypes.POINTER(ViBoolean), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_FetchHistoryRAMCyclePinData_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_FetchHistoryRAMCyclePinData_cfunc(vi, site, pin_list, sample_index, dut_cycle_index, pin_data_buffer_size, expected_pin_states, actual_pin_states, per_pin_pass_fail, actual_num_pin_data)
def niDigital_FetchHistoryRAMScanCycleNumber(self, vi, site, sample_index, scan_cycle_number): # noqa: N802
with self._func_lock:
if self.niDigital_FetchHistoryRAMScanCycleNumber_cfunc is None:
self.niDigital_FetchHistoryRAMScanCycleNumber_cfunc = self._get_library_function('niDigital_FetchHistoryRAMScanCycleNumber')
self.niDigital_FetchHistoryRAMScanCycleNumber_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt64, ctypes.POINTER(ViInt64)] # noqa: F405
self.niDigital_FetchHistoryRAMScanCycleNumber_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_FetchHistoryRAMScanCycleNumber_cfunc(vi, site, sample_index, scan_cycle_number)
def niDigital_FrequencyCounter_MeasureFrequency(self, vi, channel_list, frequencies_buffer_size, frequencies, actual_num_frequencies): # noqa: N802
with self._func_lock:
if self.niDigital_FrequencyCounter_MeasureFrequency_cfunc is None:
self.niDigital_FrequencyCounter_MeasureFrequency_cfunc = self._get_library_function('niDigital_FrequencyCounter_MeasureFrequency')
self.niDigital_FrequencyCounter_MeasureFrequency_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViReal64), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_FrequencyCounter_MeasureFrequency_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_FrequencyCounter_MeasureFrequency_cfunc(vi, channel_list, frequencies_buffer_size, frequencies, actual_num_frequencies)
def niDigital_GetAttributeViBoolean(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_GetAttributeViBoolean_cfunc is None:
self.niDigital_GetAttributeViBoolean_cfunc = self._get_library_function('niDigital_GetAttributeViBoolean')
self.niDigital_GetAttributeViBoolean_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ctypes.POINTER(ViBoolean)] # noqa: F405
self.niDigital_GetAttributeViBoolean_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetAttributeViBoolean_cfunc(vi, channel_name, attribute, value)
def niDigital_GetAttributeViInt32(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_GetAttributeViInt32_cfunc is None:
self.niDigital_GetAttributeViInt32_cfunc = self._get_library_function('niDigital_GetAttributeViInt32')
self.niDigital_GetAttributeViInt32_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_GetAttributeViInt32_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetAttributeViInt32_cfunc(vi, channel_name, attribute, value)
def niDigital_GetAttributeViInt64(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_GetAttributeViInt64_cfunc is None:
self.niDigital_GetAttributeViInt64_cfunc = self._get_library_function('niDigital_GetAttributeViInt64')
self.niDigital_GetAttributeViInt64_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ctypes.POINTER(ViInt64)] # noqa: F405
self.niDigital_GetAttributeViInt64_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetAttributeViInt64_cfunc(vi, channel_name, attribute, value)
def niDigital_GetAttributeViReal64(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_GetAttributeViReal64_cfunc is None:
self.niDigital_GetAttributeViReal64_cfunc = self._get_library_function('niDigital_GetAttributeViReal64')
self.niDigital_GetAttributeViReal64_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ctypes.POINTER(ViReal64)] # noqa: F405
self.niDigital_GetAttributeViReal64_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetAttributeViReal64_cfunc(vi, channel_name, attribute, value)
def niDigital_GetAttributeViString(self, vi, channel_name, attribute, buffer_size, value): # noqa: N802
with self._func_lock:
if self.niDigital_GetAttributeViString_cfunc is None:
self.niDigital_GetAttributeViString_cfunc = self._get_library_function('niDigital_GetAttributeViString')
self.niDigital_GetAttributeViString_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ViInt32, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_GetAttributeViString_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetAttributeViString_cfunc(vi, channel_name, attribute, buffer_size, value)
def niDigital_GetChannelNameFromString(self, vi, indices, name_buffer_size, names): # noqa: N802
with self._func_lock:
if self.niDigital_GetChannelNameFromString_cfunc is None:
self.niDigital_GetChannelNameFromString_cfunc = self._get_library_function('niDigital_GetChannelNameFromString')
self.niDigital_GetChannelNameFromString_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_GetChannelNameFromString_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetChannelNameFromString_cfunc(vi, indices, name_buffer_size, names)
def niDigital_GetError(self, vi, error_code, error_description_buffer_size, error_description): # noqa: N802
with self._func_lock:
if self.niDigital_GetError_cfunc is None:
self.niDigital_GetError_cfunc = self._get_library_function('niDigital_GetError')
self.niDigital_GetError_cfunc.argtypes = [ViSession, ctypes.POINTER(ViStatus), ViInt32, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_GetError_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetError_cfunc(vi, error_code, error_description_buffer_size, error_description)
def niDigital_GetFailCount(self, vi, channel_list, buffer_size, failure_count, actual_num_read): # noqa: N802
with self._func_lock:
if self.niDigital_GetFailCount_cfunc is None:
self.niDigital_GetFailCount_cfunc = self._get_library_function('niDigital_GetFailCount')
self.niDigital_GetFailCount_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViInt64), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_GetFailCount_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetFailCount_cfunc(vi, channel_list, buffer_size, failure_count, actual_num_read)
def niDigital_GetHistoryRAMSampleCount(self, vi, site, sample_count): # noqa: N802
with self._func_lock:
if self.niDigital_GetHistoryRAMSampleCount_cfunc is None:
self.niDigital_GetHistoryRAMSampleCount_cfunc = self._get_library_function('niDigital_GetHistoryRAMSampleCount')
self.niDigital_GetHistoryRAMSampleCount_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViInt64)] # noqa: F405
self.niDigital_GetHistoryRAMSampleCount_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetHistoryRAMSampleCount_cfunc(vi, site, sample_count)
def niDigital_GetPatternName(self, vi, pattern_index, name_buffer_size, name): # noqa: N802
with self._func_lock:
if self.niDigital_GetPatternName_cfunc is None:
self.niDigital_GetPatternName_cfunc = self._get_library_function('niDigital_GetPatternName')
self.niDigital_GetPatternName_cfunc.argtypes = [ViSession, ViInt32, ViInt32, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_GetPatternName_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetPatternName_cfunc(vi, pattern_index, name_buffer_size, name)
def niDigital_GetPatternPinList(self, vi, start_label, pin_list_buffer_size, pin_list): # noqa: N802
with self._func_lock:
if self.niDigital_GetPatternPinList_cfunc is None:
self.niDigital_GetPatternPinList_cfunc = self._get_library_function('niDigital_GetPatternPinList')
self.niDigital_GetPatternPinList_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_GetPatternPinList_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetPatternPinList_cfunc(vi, start_label, pin_list_buffer_size, pin_list)
def niDigital_GetPinName(self, vi, pin_index, name_buffer_size, name): # noqa: N802
with self._func_lock:
if self.niDigital_GetPinName_cfunc is None:
self.niDigital_GetPinName_cfunc = self._get_library_function('niDigital_GetPinName')
self.niDigital_GetPinName_cfunc.argtypes = [ViSession, ViInt32, ViInt32, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_GetPinName_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetPinName_cfunc(vi, pin_index, name_buffer_size, name)
def niDigital_GetPinResultsPinInformation(self, vi, channel_list, buffer_size, pin_indexes, site_numbers, channel_indexes, actual_num_values): # noqa: N802
with self._func_lock:
if self.niDigital_GetPinResultsPinInformation_cfunc is None:
self.niDigital_GetPinResultsPinInformation_cfunc = self._get_library_function('niDigital_GetPinResultsPinInformation')
self.niDigital_GetPinResultsPinInformation_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_GetPinResultsPinInformation_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetPinResultsPinInformation_cfunc(vi, channel_list, buffer_size, pin_indexes, site_numbers, channel_indexes, actual_num_values)
def niDigital_GetSitePassFail(self, vi, site_list, pass_fail_buffer_size, pass_fail, actual_num_sites): # noqa: N802
with self._func_lock:
if self.niDigital_GetSitePassFail_cfunc is None:
self.niDigital_GetSitePassFail_cfunc = self._get_library_function('niDigital_GetSitePassFail')
self.niDigital_GetSitePassFail_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViBoolean), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_GetSitePassFail_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetSitePassFail_cfunc(vi, site_list, pass_fail_buffer_size, pass_fail, actual_num_sites)
def niDigital_GetSiteResultsSiteNumbers(self, vi, site_list, site_result_type, site_numbers_buffer_size, site_numbers, actual_num_site_numbers): # noqa: N802
with self._func_lock:
if self.niDigital_GetSiteResultsSiteNumbers_cfunc is None:
self.niDigital_GetSiteResultsSiteNumbers_cfunc = self._get_library_function('niDigital_GetSiteResultsSiteNumbers')
self.niDigital_GetSiteResultsSiteNumbers_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ViInt32, ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_GetSiteResultsSiteNumbers_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetSiteResultsSiteNumbers_cfunc(vi, site_list, site_result_type, site_numbers_buffer_size, site_numbers, actual_num_site_numbers)
def niDigital_GetTimeSetDriveFormat(self, vi, pin, time_set_name, format): # noqa: N802
with self._func_lock:
if self.niDigital_GetTimeSetDriveFormat_cfunc is None:
self.niDigital_GetTimeSetDriveFormat_cfunc = self._get_library_function('niDigital_GetTimeSetDriveFormat')
self.niDigital_GetTimeSetDriveFormat_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_GetTimeSetDriveFormat_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetTimeSetDriveFormat_cfunc(vi, pin, time_set_name, format)
def niDigital_GetTimeSetEdge(self, vi, pin, time_set_name, edge, time): # noqa: N802
with self._func_lock:
if self.niDigital_GetTimeSetEdge_cfunc is None:
self.niDigital_GetTimeSetEdge_cfunc = self._get_library_function('niDigital_GetTimeSetEdge')
self.niDigital_GetTimeSetEdge_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViReal64)] # noqa: F405
self.niDigital_GetTimeSetEdge_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetTimeSetEdge_cfunc(vi, pin, time_set_name, edge, time)
def niDigital_GetTimeSetEdgeMultiplier(self, vi, pin, time_set_name, edge_multiplier): # noqa: N802
with self._func_lock:
if self.niDigital_GetTimeSetEdgeMultiplier_cfunc is None:
self.niDigital_GetTimeSetEdgeMultiplier_cfunc = self._get_library_function('niDigital_GetTimeSetEdgeMultiplier')
self.niDigital_GetTimeSetEdgeMultiplier_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_GetTimeSetEdgeMultiplier_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetTimeSetEdgeMultiplier_cfunc(vi, pin, time_set_name, edge_multiplier)
def niDigital_GetTimeSetName(self, vi, time_set_index, name_buffer_size, name): # noqa: N802
with self._func_lock:
if self.niDigital_GetTimeSetName_cfunc is None:
self.niDigital_GetTimeSetName_cfunc = self._get_library_function('niDigital_GetTimeSetName')
self.niDigital_GetTimeSetName_cfunc.argtypes = [ViSession, ViInt32, ViInt32, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_GetTimeSetName_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetTimeSetName_cfunc(vi, time_set_index, name_buffer_size, name)
def niDigital_GetTimeSetPeriod(self, vi, time_set_name, period): # noqa: N802
with self._func_lock:
if self.niDigital_GetTimeSetPeriod_cfunc is None:
self.niDigital_GetTimeSetPeriod_cfunc = self._get_library_function('niDigital_GetTimeSetPeriod')
self.niDigital_GetTimeSetPeriod_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViReal64)] # noqa: F405
self.niDigital_GetTimeSetPeriod_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetTimeSetPeriod_cfunc(vi, time_set_name, period)
def niDigital_InitWithOptions(self, resource_name, id_query, reset_device, option_string, new_vi): # noqa: N802
with self._func_lock:
if self.niDigital_InitWithOptions_cfunc is None:
self.niDigital_InitWithOptions_cfunc = self._get_library_function('niDigital_InitWithOptions')
self.niDigital_InitWithOptions_cfunc.argtypes = [ctypes.POINTER(ViChar), ViBoolean, ViBoolean, ctypes.POINTER(ViChar), ctypes.POINTER(ViSession)] # noqa: F405
self.niDigital_InitWithOptions_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_InitWithOptions_cfunc(resource_name, id_query, reset_device, option_string, new_vi)
def niDigital_Initiate(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_Initiate_cfunc is None:
self.niDigital_Initiate_cfunc = self._get_library_function('niDigital_Initiate')
self.niDigital_Initiate_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_Initiate_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_Initiate_cfunc(vi)
def niDigital_IsDone(self, vi, done): # noqa: N802
with self._func_lock:
if self.niDigital_IsDone_cfunc is None:
self.niDigital_IsDone_cfunc = self._get_library_function('niDigital_IsDone')
self.niDigital_IsDone_cfunc.argtypes = [ViSession, ctypes.POINTER(ViBoolean)] # noqa: F405
self.niDigital_IsDone_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_IsDone_cfunc(vi, done)
def niDigital_IsSiteEnabled(self, vi, site, enable): # noqa: N802
with self._func_lock:
if self.niDigital_IsSiteEnabled_cfunc is None:
self.niDigital_IsSiteEnabled_cfunc = self._get_library_function('niDigital_IsSiteEnabled')
self.niDigital_IsSiteEnabled_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViBoolean)] # noqa: F405
self.niDigital_IsSiteEnabled_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_IsSiteEnabled_cfunc(vi, site, enable)
def niDigital_LoadLevels(self, vi, file_path): # noqa: N802
with self._func_lock:
if self.niDigital_LoadLevels_cfunc is None:
self.niDigital_LoadLevels_cfunc = self._get_library_function('niDigital_LoadLevels')
self.niDigital_LoadLevels_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_LoadLevels_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_LoadLevels_cfunc(vi, file_path)
def niDigital_LoadPattern(self, vi, file_path): # noqa: N802
with self._func_lock:
if self.niDigital_LoadPattern_cfunc is None:
self.niDigital_LoadPattern_cfunc = self._get_library_function('niDigital_LoadPattern')
self.niDigital_LoadPattern_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_LoadPattern_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_LoadPattern_cfunc(vi, file_path)
def niDigital_LoadPinMap(self, vi, file_path): # noqa: N802
with self._func_lock:
if self.niDigital_LoadPinMap_cfunc is None:
self.niDigital_LoadPinMap_cfunc = self._get_library_function('niDigital_LoadPinMap')
self.niDigital_LoadPinMap_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_LoadPinMap_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_LoadPinMap_cfunc(vi, file_path)
def niDigital_LoadSpecifications(self, vi, file_path): # noqa: N802
with self._func_lock:
if self.niDigital_LoadSpecifications_cfunc is None:
self.niDigital_LoadSpecifications_cfunc = self._get_library_function('niDigital_LoadSpecifications')
self.niDigital_LoadSpecifications_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_LoadSpecifications_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_LoadSpecifications_cfunc(vi, file_path)
def niDigital_LoadTiming(self, vi, file_path): # noqa: N802
with self._func_lock:
if self.niDigital_LoadTiming_cfunc is None:
self.niDigital_LoadTiming_cfunc = self._get_library_function('niDigital_LoadTiming')
self.niDigital_LoadTiming_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_LoadTiming_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_LoadTiming_cfunc(vi, file_path)
def niDigital_LockSession(self, vi, caller_has_lock): # noqa: N802
with self._func_lock:
if self.niDigital_LockSession_cfunc is None:
self.niDigital_LockSession_cfunc = self._get_library_function('niDigital_LockSession')
self.niDigital_LockSession_cfunc.argtypes = [ViSession, ctypes.POINTER(ViBoolean)] # noqa: F405
self.niDigital_LockSession_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_LockSession_cfunc(vi, caller_has_lock)
def niDigital_PPMU_Measure(self, vi, channel_list, measurement_type, buffer_size, measurements, actual_num_read): # noqa: N802
with self._func_lock:
if self.niDigital_PPMU_Measure_cfunc is None:
self.niDigital_PPMU_Measure_cfunc = self._get_library_function('niDigital_PPMU_Measure')
self.niDigital_PPMU_Measure_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ViInt32, ctypes.POINTER(ViReal64), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_PPMU_Measure_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_PPMU_Measure_cfunc(vi, channel_list, measurement_type, buffer_size, measurements, actual_num_read)
def niDigital_PPMU_Source(self, vi, channel_list): # noqa: N802
with self._func_lock:
if self.niDigital_PPMU_Source_cfunc is None:
self.niDigital_PPMU_Source_cfunc = self._get_library_function('niDigital_PPMU_Source')
self.niDigital_PPMU_Source_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_PPMU_Source_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_PPMU_Source_cfunc(vi, channel_list)
def niDigital_ReadSequencerFlag(self, vi, flag, value): # noqa: N802
with self._func_lock:
if self.niDigital_ReadSequencerFlag_cfunc is None:
self.niDigital_ReadSequencerFlag_cfunc = self._get_library_function('niDigital_ReadSequencerFlag')
self.niDigital_ReadSequencerFlag_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViBoolean)] # noqa: F405
self.niDigital_ReadSequencerFlag_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ReadSequencerFlag_cfunc(vi, flag, value)
def niDigital_ReadSequencerRegister(self, vi, reg, value): # noqa: N802
with self._func_lock:
if self.niDigital_ReadSequencerRegister_cfunc is None:
self.niDigital_ReadSequencerRegister_cfunc = self._get_library_function('niDigital_ReadSequencerRegister')
self.niDigital_ReadSequencerRegister_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_ReadSequencerRegister_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ReadSequencerRegister_cfunc(vi, reg, value)
def niDigital_ReadStatic(self, vi, channel_list, buffer_size, data, actual_num_read): # noqa: N802
with self._func_lock:
if self.niDigital_ReadStatic_cfunc is None:
self.niDigital_ReadStatic_cfunc = self._get_library_function('niDigital_ReadStatic')
self.niDigital_ReadStatic_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViUInt8), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_ReadStatic_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ReadStatic_cfunc(vi, channel_list, buffer_size, data, actual_num_read)
def niDigital_ResetDevice(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_ResetDevice_cfunc is None:
self.niDigital_ResetDevice_cfunc = self._get_library_function('niDigital_ResetDevice')
self.niDigital_ResetDevice_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_ResetDevice_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ResetDevice_cfunc(vi)
def niDigital_SelfCalibrate(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_SelfCalibrate_cfunc is None:
self.niDigital_SelfCalibrate_cfunc = self._get_library_function('niDigital_SelfCalibrate')
self.niDigital_SelfCalibrate_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_SelfCalibrate_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_SelfCalibrate_cfunc(vi)
def niDigital_SendSoftwareEdgeTrigger(self, vi, trigger, trigger_identifier): # noqa: N802
with self._func_lock:
if self.niDigital_SendSoftwareEdgeTrigger_cfunc is None:
self.niDigital_SendSoftwareEdgeTrigger_cfunc = self._get_library_function('niDigital_SendSoftwareEdgeTrigger')
self.niDigital_SendSoftwareEdgeTrigger_cfunc.argtypes = [ViSession, ViInt32, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_SendSoftwareEdgeTrigger_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_SendSoftwareEdgeTrigger_cfunc(vi, trigger, trigger_identifier)
def niDigital_SetAttributeViBoolean(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_SetAttributeViBoolean_cfunc is None:
self.niDigital_SetAttributeViBoolean_cfunc = self._get_library_function('niDigital_SetAttributeViBoolean')
self.niDigital_SetAttributeViBoolean_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ViBoolean] # noqa: F405
self.niDigital_SetAttributeViBoolean_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_SetAttributeViBoolean_cfunc(vi, channel_name, attribute, value)
def niDigital_SetAttributeViInt32(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_SetAttributeViInt32_cfunc is None:
self.niDigital_SetAttributeViInt32_cfunc = self._get_library_function('niDigital_SetAttributeViInt32')
self.niDigital_SetAttributeViInt32_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ViInt32] # noqa: F405
self.niDigital_SetAttributeViInt32_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_SetAttributeViInt32_cfunc(vi, channel_name, attribute, value)
def niDigital_SetAttributeViInt64(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_SetAttributeViInt64_cfunc is None:
self.niDigital_SetAttributeViInt64_cfunc = self._get_library_function('niDigital_SetAttributeViInt64')
self.niDigital_SetAttributeViInt64_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ViInt64] # noqa: F405
self.niDigital_SetAttributeViInt64_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_SetAttributeViInt64_cfunc(vi, channel_name, attribute, value)
def niDigital_SetAttributeViReal64(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_SetAttributeViReal64_cfunc is None:
self.niDigital_SetAttributeViReal64_cfunc = self._get_library_function('niDigital_SetAttributeViReal64')
self.niDigital_SetAttributeViReal64_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ViReal64] # noqa: F405
self.niDigital_SetAttributeViReal64_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_SetAttributeViReal64_cfunc(vi, channel_name, attribute, value)
def niDigital_SetAttributeViString(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_SetAttributeViString_cfunc is None:
self.niDigital_SetAttributeViString_cfunc = self._get_library_function('niDigital_SetAttributeViString')
self.niDigital_SetAttributeViString_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_SetAttributeViString_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_SetAttributeViString_cfunc(vi, channel_name, attribute, value)
def niDigital_TDR(self, vi, channel_list, apply_offsets, offsets_buffer_size, offsets, actual_num_offsets): # noqa: N802
with self._func_lock:
if self.niDigital_TDR_cfunc is None:
self.niDigital_TDR_cfunc = self._get_library_function('niDigital_TDR')
self.niDigital_TDR_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViBoolean, ViInt32, ctypes.POINTER(ViReal64), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_TDR_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_TDR_cfunc(vi, channel_list, apply_offsets, offsets_buffer_size, offsets, actual_num_offsets)
def niDigital_UnloadAllPatterns(self, vi, unload_keep_alive_pattern): # noqa: N802
with self._func_lock:
if self.niDigital_UnloadAllPatterns_cfunc is None:
self.niDigital_UnloadAllPatterns_cfunc = self._get_library_function('niDigital_UnloadAllPatterns')
self.niDigital_UnloadAllPatterns_cfunc.argtypes = [ViSession, ViBoolean] # noqa: F405
self.niDigital_UnloadAllPatterns_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_UnloadAllPatterns_cfunc(vi, unload_keep_alive_pattern)
def niDigital_UnloadSpecifications(self, vi, file_path): # noqa: N802
with self._func_lock:
if self.niDigital_UnloadSpecifications_cfunc is None:
self.niDigital_UnloadSpecifications_cfunc = self._get_library_function('niDigital_UnloadSpecifications')
self.niDigital_UnloadSpecifications_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_UnloadSpecifications_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_UnloadSpecifications_cfunc(vi, file_path)
def niDigital_UnlockSession(self, vi, caller_has_lock): # noqa: N802
with self._func_lock:
if self.niDigital_UnlockSession_cfunc is None:
self.niDigital_UnlockSession_cfunc = self._get_library_function('niDigital_UnlockSession')
self.niDigital_UnlockSession_cfunc.argtypes = [ViSession, ctypes.POINTER(ViBoolean)] # noqa: F405
self.niDigital_UnlockSession_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_UnlockSession_cfunc(vi, caller_has_lock)
def niDigital_WaitUntilDone(self, vi, timeout): # noqa: N802
with self._func_lock:
if self.niDigital_WaitUntilDone_cfunc is None:
self.niDigital_WaitUntilDone_cfunc = self._get_library_function('niDigital_WaitUntilDone')
self.niDigital_WaitUntilDone_cfunc.argtypes = [ViSession, ViReal64] # noqa: F405
self.niDigital_WaitUntilDone_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_WaitUntilDone_cfunc(vi, timeout)
def niDigital_WriteSequencerFlag(self, vi, flag, value): # noqa: N802
with self._func_lock:
if self.niDigital_WriteSequencerFlag_cfunc is None:
self.niDigital_WriteSequencerFlag_cfunc = self._get_library_function('niDigital_WriteSequencerFlag')
self.niDigital_WriteSequencerFlag_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViBoolean] # noqa: F405
self.niDigital_WriteSequencerFlag_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_WriteSequencerFlag_cfunc(vi, flag, value)
def niDigital_WriteSequencerRegister(self, vi, reg, value): # noqa: N802
with self._func_lock:
if self.niDigital_WriteSequencerRegister_cfunc is None:
self.niDigital_WriteSequencerRegister_cfunc = self._get_library_function('niDigital_WriteSequencerRegister')
self.niDigital_WriteSequencerRegister_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32] # noqa: F405
self.niDigital_WriteSequencerRegister_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_WriteSequencerRegister_cfunc(vi, reg, value)
def niDigital_WriteSourceWaveformBroadcastU32(self, vi, waveform_name, waveform_size, waveform_data): # noqa: N802
with self._func_lock:
if self.niDigital_WriteSourceWaveformBroadcastU32_cfunc is None:
self.niDigital_WriteSourceWaveformBroadcastU32_cfunc = self._get_library_function('niDigital_WriteSourceWaveformBroadcastU32')
self.niDigital_WriteSourceWaveformBroadcastU32_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViUInt32)] # noqa: F405
self.niDigital_WriteSourceWaveformBroadcastU32_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_WriteSourceWaveformBroadcastU32_cfunc(vi, waveform_name, waveform_size, waveform_data)
def niDigital_WriteSourceWaveformDataFromFileTDMS(self, vi, waveform_name, waveform_file_path): # noqa: N802
with self._func_lock:
if self.niDigital_WriteSourceWaveformDataFromFileTDMS_cfunc is None:
self.niDigital_WriteSourceWaveformDataFromFileTDMS_cfunc = self._get_library_function('niDigital_WriteSourceWaveformDataFromFileTDMS')
self.niDigital_WriteSourceWaveformDataFromFileTDMS_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_WriteSourceWaveformDataFromFileTDMS_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_WriteSourceWaveformDataFromFileTDMS_cfunc(vi, waveform_name, waveform_file_path)
def niDigital_WriteSourceWaveformSiteUniqueU32(self, vi, site_list, waveform_name, num_waveforms, samples_per_waveform, waveform_data): # noqa: N802
with self._func_lock:
if self.niDigital_WriteSourceWaveformSiteUniqueU32_cfunc is None:
self.niDigital_WriteSourceWaveformSiteUniqueU32_cfunc = self._get_library_function('niDigital_WriteSourceWaveformSiteUniqueU32')
self.niDigital_WriteSourceWaveformSiteUniqueU32_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32, ViInt32, ctypes.POINTER(ViUInt32)] # noqa: F405
self.niDigital_WriteSourceWaveformSiteUniqueU32_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_WriteSourceWaveformSiteUniqueU32_cfunc(vi, site_list, waveform_name, num_waveforms, samples_per_waveform, waveform_data)
def niDigital_WriteStatic(self, vi, channel_list, state): # noqa: N802
with self._func_lock:
if self.niDigital_WriteStatic_cfunc is None:
self.niDigital_WriteStatic_cfunc = self._get_library_function('niDigital_WriteStatic')
self.niDigital_WriteStatic_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViUInt8] # noqa: F405
self.niDigital_WriteStatic_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_WriteStatic_cfunc(vi, channel_list, state)
def niDigital_close(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_close_cfunc is None:
self.niDigital_close_cfunc = self._get_library_function('niDigital_close')
self.niDigital_close_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_close_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_close_cfunc(vi)
def niDigital_error_message(self, vi, error_code, error_message): # noqa: N802
with self._func_lock:
if self.niDigital_error_message_cfunc is None:
self.niDigital_error_message_cfunc = self._get_library_function('niDigital_error_message')
self.niDigital_error_message_cfunc.argtypes = [ViSession, ViStatus, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_error_message_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_error_message_cfunc(vi, error_code, error_message)
def niDigital_reset(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_reset_cfunc is None:
self.niDigital_reset_cfunc = self._get_library_function('niDigital_reset')
self.niDigital_reset_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_reset_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_reset_cfunc(vi)
def niDigital_self_test(self, vi, test_result, test_message): # noqa: N802
with self._func_lock:
if self.niDigital_self_test_cfunc is None:
self.niDigital_self_test_cfunc = self._get_library_function('niDigital_self_test')
self.niDigital_self_test_cfunc.argtypes = [ViSession, ctypes.POINTER(ViInt16), ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_self_test_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_self_test_cfunc(vi, test_result, test_message)
| generated/nidigital/nidigital/_library.py | 65,575 | Library
Wrapper around driver library.
Class will setup the correct ctypes information for every function on first call.
-*- coding: utf-8 -*- This file was generated noqa: F403,H303 noqa: F401 We cache the cfunc object from the ctypes.CDLL object noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 
noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 noqa: N802 noqa: F405 noqa: F405 | 3,286 | uz | 0.385906 |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from chirpstack_api.as_pb.external.api import organization_pb2 as chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class OrganizationServiceStub(object):
    """OrganizationService is the service managing the organization access.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Local aliases keep the unary-unary registrations readable.
        org_pb2 = chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2
        empty_pb2 = google_dot_protobuf_dot_empty__pb2
        self.List = channel.unary_unary(
                '/api.OrganizationService/List',
                request_serializer=org_pb2.ListOrganizationRequest.SerializeToString,
                response_deserializer=org_pb2.ListOrganizationResponse.FromString,
                )
        self.Get = channel.unary_unary(
                '/api.OrganizationService/Get',
                request_serializer=org_pb2.GetOrganizationRequest.SerializeToString,
                response_deserializer=org_pb2.GetOrganizationResponse.FromString,
                )
        self.Create = channel.unary_unary(
                '/api.OrganizationService/Create',
                request_serializer=org_pb2.CreateOrganizationRequest.SerializeToString,
                response_deserializer=org_pb2.CreateOrganizationResponse.FromString,
                )
        self.Update = channel.unary_unary(
                '/api.OrganizationService/Update',
                request_serializer=org_pb2.UpdateOrganizationRequest.SerializeToString,
                response_deserializer=empty_pb2.Empty.FromString,
                )
        self.Delete = channel.unary_unary(
                '/api.OrganizationService/Delete',
                request_serializer=org_pb2.DeleteOrganizationRequest.SerializeToString,
                response_deserializer=empty_pb2.Empty.FromString,
                )
        self.ListUsers = channel.unary_unary(
                '/api.OrganizationService/ListUsers',
                request_serializer=org_pb2.ListOrganizationUsersRequest.SerializeToString,
                response_deserializer=org_pb2.ListOrganizationUsersResponse.FromString,
                )
        self.GetUser = channel.unary_unary(
                '/api.OrganizationService/GetUser',
                request_serializer=org_pb2.GetOrganizationUserRequest.SerializeToString,
                response_deserializer=org_pb2.GetOrganizationUserResponse.FromString,
                )
        self.AddUser = channel.unary_unary(
                '/api.OrganizationService/AddUser',
                request_serializer=org_pb2.AddOrganizationUserRequest.SerializeToString,
                response_deserializer=empty_pb2.Empty.FromString,
                )
        self.UpdateUser = channel.unary_unary(
                '/api.OrganizationService/UpdateUser',
                request_serializer=org_pb2.UpdateOrganizationUserRequest.SerializeToString,
                response_deserializer=empty_pb2.Empty.FromString,
                )
        self.DeleteUser = channel.unary_unary(
                '/api.OrganizationService/DeleteUser',
                request_serializer=org_pb2.DeleteOrganizationUserRequest.SerializeToString,
                response_deserializer=empty_pb2.Empty.FromString,
                )
class OrganizationServiceServicer(object):
    """OrganizationService is the service managing the organization access.

    gRPC-generated servicer base class: subclass it, override the handler
    methods below, and register the instance with
    add_OrganizationServiceServicer_to_server().  Every default handler
    reports UNIMPLEMENTED to the client.
    NOTE: generated by the gRPC protocol compiler plugin -- do not hand-edit
    the logic; regenerate from the .proto instead.
    """

    def List(self, request, context):
        """Get organization list.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Get(self, request, context):
        """Get data for a particular organization.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Create(self, request, context):
        """Create a new organization.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Update(self, request, context):
        """Update an existing organization.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Delete(self, request, context):
        """Delete an organization.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListUsers(self, request, context):
        """Get organization's user list.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetUser(self, request, context):
        """Get data for a particular organization user.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def AddUser(self, request, context):
        """Add a new user to an organization.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def UpdateUser(self, request, context):
        """Update a user in an organization.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DeleteUser(self, request, context):
        """Delete a user from an organization.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_OrganizationServiceServicer_to_server(servicer, server):
    """Register *servicer*'s handlers for api.OrganizationService on *server*.

    servicer: an OrganizationServiceServicer implementation.
    server: a grpc.Server to attach the generic RPC handler to.
    NOTE: generated by the gRPC protocol compiler plugin -- do not hand-edit.
    """
    rpc_method_handlers = {
        'List': grpc.unary_unary_rpc_method_handler(
            servicer.List,
            request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationRequest.FromString,
            response_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationResponse.SerializeToString,
        ),
        'Get': grpc.unary_unary_rpc_method_handler(
            servicer.Get,
            request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationRequest.FromString,
            response_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationResponse.SerializeToString,
        ),
        'Create': grpc.unary_unary_rpc_method_handler(
            servicer.Create,
            request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.CreateOrganizationRequest.FromString,
            response_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.CreateOrganizationResponse.SerializeToString,
        ),
        'Update': grpc.unary_unary_rpc_method_handler(
            servicer.Update,
            request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.UpdateOrganizationRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'Delete': grpc.unary_unary_rpc_method_handler(
            servicer.Delete,
            request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.DeleteOrganizationRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'ListUsers': grpc.unary_unary_rpc_method_handler(
            servicer.ListUsers,
            request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationUsersRequest.FromString,
            response_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationUsersResponse.SerializeToString,
        ),
        'GetUser': grpc.unary_unary_rpc_method_handler(
            servicer.GetUser,
            request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationUserRequest.FromString,
            response_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationUserResponse.SerializeToString,
        ),
        'AddUser': grpc.unary_unary_rpc_method_handler(
            servicer.AddUser,
            request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.AddOrganizationUserRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'UpdateUser': grpc.unary_unary_rpc_method_handler(
            servicer.UpdateUser,
            request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.UpdateOrganizationUserRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'DeleteUser': grpc.unary_unary_rpc_method_handler(
            servicer.DeleteUser,
            request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.DeleteOrganizationUserRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'api.OrganizationService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class OrganizationService(object):
    """OrganizationService is the service managing the organization access.

    Experimental client-side convenience API: each static method performs a
    one-shot unary-unary call against *target* without building a channel/stub
    by hand.  NOTE: generated by the gRPC protocol compiler plugin -- do not
    hand-edit; regenerate from the .proto instead.
    """

    @staticmethod
    def List(request,
             target,
             options=(),
             channel_credentials=None,
             call_credentials=None,
             compression=None,
             wait_for_ready=None,
             timeout=None,
             metadata=None):
        """Get organization list."""
        return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/List',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationResponse.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Get(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Get data for a particular organization."""
        return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/Get',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationResponse.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Create(request,
               target,
               options=(),
               channel_credentials=None,
               call_credentials=None,
               compression=None,
               wait_for_ready=None,
               timeout=None,
               metadata=None):
        """Create a new organization."""
        return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/Create',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.CreateOrganizationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.CreateOrganizationResponse.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Update(request,
               target,
               options=(),
               channel_credentials=None,
               call_credentials=None,
               compression=None,
               wait_for_ready=None,
               timeout=None,
               metadata=None):
        """Update an existing organization (returns google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/Update',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.UpdateOrganizationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Delete(request,
               target,
               options=(),
               channel_credentials=None,
               call_credentials=None,
               compression=None,
               wait_for_ready=None,
               timeout=None,
               metadata=None):
        """Delete an organization (returns google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/Delete',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.DeleteOrganizationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ListUsers(request,
                  target,
                  options=(),
                  channel_credentials=None,
                  call_credentials=None,
                  compression=None,
                  wait_for_ready=None,
                  timeout=None,
                  metadata=None):
        """Get organization's user list."""
        return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/ListUsers',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationUsersRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationUsersResponse.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def GetUser(request,
                target,
                options=(),
                channel_credentials=None,
                call_credentials=None,
                compression=None,
                wait_for_ready=None,
                timeout=None,
                metadata=None):
        """Get data for a particular organization user."""
        return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/GetUser',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationUserRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationUserResponse.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def AddUser(request,
                target,
                options=(),
                channel_credentials=None,
                call_credentials=None,
                compression=None,
                wait_for_ready=None,
                timeout=None,
                metadata=None):
        """Add a new user to an organization (returns google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/AddUser',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.AddOrganizationUserRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def UpdateUser(request,
                   target,
                   options=(),
                   channel_credentials=None,
                   call_credentials=None,
                   compression=None,
                   wait_for_ready=None,
                   timeout=None,
                   metadata=None):
        """Update a user in an organization (returns google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/UpdateUser',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.UpdateOrganizationUserRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def DeleteUser(request,
                   target,
                   options=(),
                   channel_credentials=None,
                   call_credentials=None,
                   compression=None,
                   wait_for_ready=None,
                   timeout=None,
                   metadata=None):
        """Delete a user from an organization (returns google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/DeleteUser',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.DeleteOrganizationUserRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            call_credentials, compression, wait_for_ready, timeout, metadata)
| python/src/chirpstack_api/as_pb/external/api/organization_pb2_grpc.py | 18,785 | OrganizationService is the service managing the organization access.
OrganizationService is the service managing the organization access.
OrganizationService is the service managing the organization access.
Add a new user to an organization.
Create a new organization.
Delete an organization.
Delete a user from an organization.
Get data for a particular organization.
Get data for a particular organization user.
Get organization list.
Get organization's user list.
Update an existing organization.
Update a user in an organization.
Constructor.
Args:
channel: A grpc.Channel.
Client and server classes corresponding to protobuf-defined services.
Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! This class is part of an EXPERIMENTAL API. | 870 | en | 0.882985 |
import sys
import base64
import platform
import functools
from keyring.util import properties
from keyring.backend import KeyringBackend
from keyring.errors import PasswordDeleteError, ExceptionRaisedContext
from . import file_base
try:
# prefer pywin32-ctypes
from win32ctypes import pywintypes
from win32ctypes import win32cred
# force demand import to raise ImportError
win32cred.__name__
except ImportError:
# fallback to pywin32
try:
import pywintypes
import win32cred
except ImportError:
pass
try:
import winreg
except ImportError:
try:
# Python 2 compatibility
import _winreg as winreg
except ImportError:
pass
try:
from . import _win_crypto
except ImportError:
pass
def has_pywin32():
    """Report whether a usable pywin32 (win32cred) is present.

    Returns False even when Mercurial's Demand Import let the import of
    win32cred appear to succeed, by touching an attribute of the module.
    """
    with ExceptionRaisedContext() as missing:
        win32cred.__name__
    return not missing
def has_wincrypto():
    """Report whether the ctypes-based _win_crypto module is present.

    Returns False even when Mercurial's Demand Import let the import of
    _win_crypto appear to succeed, by touching an attribute of the module.
    """
    with ExceptionRaisedContext() as missing:
        _win_crypto.__name__
    return not missing
class EncryptedKeyring(file_base.Keyring):
    """File-backed keyring whose payload is secured by the Windows Crypto API."""

    filename = 'wincrypto_pass.cfg'

    @properties.ClassProperty
    @classmethod
    def priority(self):
        """
        Preferred over file.EncryptedKeyring but not other, more sophisticated
        Windows backends.
        """
        if platform.system() != 'Windows':
            raise RuntimeError("Requires Windows")
        return 0.8

    def encrypt(self, password):
        """Encrypt *password* through the _win_crypto CryptAPI wrapper."""
        return _win_crypto.encrypt(password)

    def decrypt(self, password_encrypted):
        """Decrypt *password_encrypted* through the _win_crypto CryptAPI wrapper."""
        return _win_crypto.decrypt(password_encrypted)
class RegistryKeyring(KeyringBackend):
    """
    RegistryKeyring is a keyring which uses Windows CryptAPI to encrypt
    the user's passwords and stores them under registry keys
    (HKEY_CURRENT_USER\\Software\\<service>\\Keyring).
    """

    @properties.ClassProperty
    @classmethod
    def priority(self):
        """
        Preferred on Windows when pywin32 isn't installed.
        """
        if platform.system() != 'Windows':
            raise RuntimeError("Requires Windows")
        if not has_wincrypto():
            raise RuntimeError("Requires ctypes")
        return 2

    def get_password(self, service, username):
        """Return the password of *username* for *service*, or None if absent.
        """
        try:
            # fetch the stored value (base64 text of the encrypted password)
            key = r'Software\%s\Keyring' % service
            hkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, key)
            password_saved = winreg.QueryValueEx(hkey, username)[0]
            password_base64 = password_saved.encode('ascii')
            # decode with base64; decodebytes replaces the decodestring
            # alias that was removed in Python 3.9
            password_encrypted = base64.decodebytes(password_base64)
            # decrypt the password
            password = _win_crypto.decrypt(password_encrypted).decode('utf-8')
        except EnvironmentError:
            # missing key/value: report "no password stored"
            password = None
        return password

    def set_password(self, service, username, password):
        """Encrypt *password* and write it to the registry.
        """
        # encrypt the password
        password_encrypted = _win_crypto.encrypt(password.encode('utf-8'))
        # encode with base64 (encodebytes replaces the removed encodestring)
        password_base64 = base64.encodebytes(password_encrypted)
        # encode again to unicode for REG_SZ storage
        password_saved = password_base64.decode('ascii')
        # store the password
        key_name = r'Software\%s\Keyring' % service
        hkey = winreg.CreateKey(winreg.HKEY_CURRENT_USER, key_name)
        winreg.SetValueEx(hkey, username, 0, winreg.REG_SZ, password_saved)

    def delete_password(self, service, username):
        """Delete the password for the username of the service.

        Raises PasswordDeleteError when the value cannot be removed.
        """
        try:
            key_name = r'Software\%s\Keyring' % service
            hkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, key_name, 0,
                                  winreg.KEY_ALL_ACCESS)
            winreg.DeleteValue(hkey, username)
            winreg.CloseKey(hkey)
        except WindowsError:
            e = sys.exc_info()[1]
            raise PasswordDeleteError(e)
        self._delete_key_if_empty(service)

    def _delete_key_if_empty(self, service):
        """Remove the service's registry branch if it holds no more values."""
        key_name = r'Software\%s\Keyring' % service
        key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, key_name, 0,
                             winreg.KEY_ALL_ACCESS)
        try:
            winreg.EnumValue(key, 0)
            # at least one value remains; close the handle (the original
            # leaked it on this path) and keep the key
            winreg.CloseKey(key)
            return
        except WindowsError:
            pass
        winreg.CloseKey(key)

        # it's empty; delete the branch upward, stopping before 'Software'
        while key_name != 'Software':
            parent, sep, base = key_name.rpartition('\\')
            key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, parent, 0,
                                 winreg.KEY_ALL_ACCESS)
            winreg.DeleteKey(key, base)
            winreg.CloseKey(key)
            key_name = parent
class OldPywinError(object):
    """
    A compatibility wrapper for old PyWin32 errors, such as reported in
    https://bitbucket.org/kang/python-keyring-lib/issue/140/

    Old-style errors are indexable as (winerror, funcname, ...); this class
    re-exposes them through the modern attribute API.
    """

    def __init__(self, orig):
        self.orig = orig

    @property
    def funcname(self):
        # second element of the old-style error tuple
        return self.orig[1]

    @property
    def winerror(self):
        # first element of the old-style error tuple
        return self.orig[0]

    @classmethod
    def wrap(cls, orig_err):
        """Return *orig_err* as-is when it already has the modern attributes,
        otherwise wrap it in an OldPywinError."""
        has_modern_api = all(
            hasattr(orig_err, name) for name in ('funcname', 'winerror')
        )
        return orig_err if has_modern_api else cls(orig_err)
| src/site-packages/keyrings/alt/Windows.py | 5,833 | A File-based keyring secured by Windows Crypto API.
A compatibility wrapper for old PyWin32 errors, such as reported in
https://bitbucket.org/kang/python-keyring-lib/issue/140/
RegistryKeyring is a keyring which use Windows CryptAPI to encrypt
the user's passwords and store them under registry keys
Decrypt the password using the CryptAPI.
Delete the password for the username of the service.
Encrypt the password using the CryptAPI.
Get password of the username for the service
Does this environment have pywin32?
Should return False even when Mercurial's Demand Import allowed import of
win32cred.
Does this environment have wincrypto?
Should return False even when Mercurial's Demand Import allowed import of
_win_crypto, so accesses an attribute of the module.
Preferred over file.EncryptedKeyring but not other, more sophisticated
Windows backends.
Preferred on Windows when pywin32 isn't installed
Write the password to the registry
prefer pywin32-ctypes force demand import to raise ImportError fallback to pywin32 Python 2 compatibility fetch the password decode with base64 decrypted the password encrypt the password encode with base64 encode again to unicode store the password it's empty; delete everything | 1,266 | en | 0.804666 |
"""
This module is used to manage rules.
## Base format
A context and rule is written in JSON.
A context contains an identifier, AND one or three rules.
One of rules must specify the SCHC Compression/Decompression (CD).
Two specify SCHC Fragmentation/Reassembly (FR) if needed.
Therefore, a context has to be formed to either below structures.
{
"devL2Addr": ...,
"dstIID": ...,
"comp": { ... },
"fragSender": { ... },
"fragReceiver": { ... }
}
"comp": compression rule.
"fragSender": fragmentation rule for inbound.
"fragReceiver": fragmentation rule for outbound.
Or,
{
"devL2Addr": ...,
"dstIID": ...,
"profile": { ... },
"comp": { ... }
}
XXX Q. "profile" should be in the context ?
## Context
A context is uniquely identified by devL2Addr
specifying the L2 address of a SCHC device.
dstIID matches the IP address assigned
to the interface of the communication peer.
In the context of the SCHC device, dstIID indicates the IP address of
the interface at the SCHC Translator,
which is dedicated between the device and
the application.
In the context of the other side, dstIID indicates the IP address of
the SCHC device.
+--------+ +------------+ +-----+
| SCHC | | SCHC |---------| App |
| Device | | Translator | | |
+--------+ +------------+ +-----+
| D (IP addr) | T (IP addr)
| L (L2 addr) |
| |
+--// LPWAN //--| GW |------------+
In the above example, the context of each side is like below:
at the device:
{
"devL2Addr": "L",
"dstIID": "M"
}
at the translator:
{
"devL2Addr": "L",
"dstIID": "D"
}
"*" and "/" can be used for a wild-card match. (XXX should be implemented.)
## Rule
XXX is it true that both ruleID and ruleLength is unique key ?
XXX is the device L2 address the real key ?
A rule is uniquely identified by the rule ID of variable length.
Each rule must contain the following information:
{
"ruleID" : 2,
"ruleLength" : 3
}
where ruleID contains the rule ID value aligned on the right and ruleLength
gives
the size in bits of the ruleID. In the previous example, this corresponds to
the binary value 0b010.
if ruleLength is not specified the value is set to 1 byte.
The rule is either a compression/decompression rule
or a fragmentation/reassembly rule.
For C/D rules, the keyword "compression" must be defined.
For F/R rules, the keyword "fragmentation" and "fragmentation"
must be defined.
## Compression Rule
A compression rule is bidirectional.
## Fragmentation Rule
A fragmentation rule is unidirectional.
The "fragmentation" keyword is used to give fragmentation mode and profile:
- one fragmentation mode keyword "noAck", "ackAlways" or "ackOnError".
- FRModeProfile parameters. Default values are automatically added.
- dtagSize, WSize and FCNSize are used to define the SCHC fragmentation header
- windowSize can be added if not 2^FCNSize - 1
For "ackOnError" the following parameter is defined:
- "ackBehavior" defined the ack behavior, i.e. when the Ack must be spontaneously sent
by the receiver and therefore when the sender must listen for Ack.
- "afterAll0" means that the sender waits for ack after sending an All-0
- "afterAll1" means that the sender waits only after sending the last fragment
- other behaviors may be defined in the future.
## data model of DB
db = [
{
"devL2Addr": ..,
"dstIID": ..,
"comp": {
"ruleID": ..,
"ruleLength": ..,
"compression": { ...}
},
"fragSender": {
"ruleID": ..,
"ruleLength": ..,
"fragmentation": { ...}
}
"fragReceiver": {
"ruleID": ..,
"ruleLength": ..,
"fragmentation": { ...}
}
}, ...
]
## method
- add_context(context, comp=None, fragSender=None, fragReceiver=None)
It adds the context. If it exists, raise ValueError.
- add_rules(context, comp=None, fragSender=None, fragReceiver=None)
It adds the list of rules into the context specified.
If it exists, raise ValueError.
If context is not specified, the rule will be added into the default
context.
## Rule to add a new key
Each key must be unique through a rule.
For example, below the keys of "profile" are not allowed.
{
"profile": { ... },
"compression": { "profile": ... }
}
## Examples
Example 1:
{
"ruleID" : 14,
"ruleLength" : 4 # rule 0b1110
"compression": { ... }
}
Example 2:
{
"ruleID" : 15,
"ruleLength" : 4 # rule 0b1110
"fragmentationOut": {
"FRMode" : "noAck" # or "ackAlways", "ackOnError"
"FRModeProfile" : {
"dtagSize" : 1,
"WSize": 3,
"FCNSize" : 3,
"windowSize", 7,
"ackBehavior": "afterAll1"
}
}
}
"""
try:
import struct
except ImportError:
import ustruct as struct
from copy import deepcopy
# XXX to be checked whether they are needed.
DEFAULT_FRAGMENT_RID = 1     # default rule ID for fragmentation -- TODO confirm usage
DEFAULT_L2_SIZE = 8          # presumably L2 payload size in bytes -- verify against MAC layer
DEFAULT_RECV_BUFSIZE = 512   # receive buffer size, presumably bytes
# timer defaults T1..T5; units not stated here (presumably seconds) -- TODO confirm
DEFAULT_TIMER_T1 = 5
DEFAULT_TIMER_T2 = 10
DEFAULT_TIMER_T3 = 10
DEFAULT_TIMER_T4 = 12
DEFAULT_TIMER_T5 = 14
class DictToAttrDeep:
    """Wrap a dict so its keys are reachable as attributes, recursively.

    Nested dict values are converted into DictToAttrDeep instances so that
    ``obj.a.b`` works.  ``in``, ``[]`` and ``get()`` search the whole tree,
    not only the top level.
    """

    def __init__(self, **entries):
        self.__update(**entries)

    def __update(self, **entries):
        for k, v in entries.items():
            if isinstance(v, dict):
                # recurse so nested keys become attributes too
                self.__dict__[k] = DictToAttrDeep(**v)
            else:
                # assign only this key; the original code updated __dict__
                # with the whole *entries* mapping here, which clobbered
                # nested values already converted above with raw dicts
                self.__dict__[k] = v

    def __contains__(self, t):
        """ t in this (deep search) """
        for k, v in self.__dict__.items():
            if k == t:
                return True
            if isinstance(v, DictToAttrDeep) and t in v:
                return True
        return False  # the original fell through and returned None

    def __getitem__(self, t):
        """ this[t] (deep search); returns None when the key is absent """
        for k, v in self.__dict__.items():
            if k == t:
                return v
            if isinstance(v, DictToAttrDeep) and t in v:
                return v[t]

    def get(self, k, d=None):
        """ this.get(k): deep lookup returning *d* when the key is absent """
        if k not in self:
            return d
        return self.__getitem__(k)

    def __repr__(self):
        # JSON-like rendering of the attribute tree
        return "{{{}}}".format(str(", ".join(
            ['"{}": {}'.format(k, self.__reprx(v))
             for k, v in self.__dict__.items()])))

    def __reprx(self, t):
        # render a single value: strings quoted, dicts/lists recursed,
        # anything else via repr()
        if isinstance(t, str):
            return '"{}"'.format(t)
        elif isinstance(t, dict):
            return "{{{}}}".format(str(", ".join(
                ['"{}": {}'.format(k, self.__reprx(v))
                 for k, v in t.items()])))
        elif isinstance(t, list):
            return "[{}]".format(str(", ".join(
                ["{}".format(self.__reprx(i)) for i in t])))
        else:
            return repr(t)
class RuleManager:
"""RuleManager class is used to manage Compression/Decompression and Fragmentation/
Reassembly rules."""
def __init__(self):
#RM database
self._db = []
def _checkRuleValue(self, rule_id, rule_id_length):
"""this function looks if bits specified in ruleID are not outside of
rule_id_length"""
if rule_id_length > 32:
raise ValueError("Rule length should be less than 32")
r1 = rule_id
for k in range (32, rule_id_length, -1):
if (0x01 << k) & r1 !=0:
raise ValueError("rule ID too long")
def _ruleIncluded(self, r1ID, r1l, r2ID, r2l):
"""check if a conflict exists between to ruleID (i.e. same first bits equals) """
r1 = r1ID << (32-r1l)
r2 = r2ID << (32-r2l)
l = min(r1l, r2l)
for k in range (32-l, 32):
if ((r1 & (0x01 << k)) != (r2 & (0x01 << k))):
return False
return True
def _nameRule (self, r):
return "Rule {}/{}:".format(r["ruleID"], r["ruleLength"])
def find_rule_bypacket(self, context, packet_bbuf):
""" returns a compression rule or an fragmentation rule
in the context matching with the field value of rule id in the packet.
"""
for k in ["fragSender", "fragReceiver","fragSender2", "fragReceiver2", "comp"]:
r = context.get(k)
if r is not None:
rule_id = packet_bbuf.get_bits(r["ruleLength"],position=0)
if r["ruleID"] == rule_id:
print("--------------------RuleManage------------------")
print("ruleID ",rule_id)
print()
print("--------------------------------------------------")
return k, r
return None, None
def find_context_bydevL2addr(self, dev_L2addr):
""" find a context with dev_L2addr. """
# XXX needs to implement wildcard search or something like that.
for c in self._db:
if c["devL2Addr"] == dev_L2addr:
return c
if c["devL2Addr"] == "*":
return c
return None
def find_context_bydstiid(self, dst_iid):
""" find a context with dst_iid, which can be a wild card. """
# XXX needs to implement wildcard search or something like that.
for c in self._db:
if c["dstIID"] == dst_iid:
return c
if c["dstIID"] == "*":
return c
return None
def find_context_exact(self, dev_L2addr, dst_iid):
""" find a context by both devL2Addr and dstIID.
This is mainly for internal use. """
for c in self._db:
if c["devL2Addr"] == dev_L2addr and c["dstIID"] == dst_iid:
return c
return None
def add_context(self, context, comp=None, fragSender=None, fragReceiver=None, fragSender2=None, fragReceiver2=None):
""" add context into the db. """
if self.find_context_exact(context["devL2Addr"],context["dstIID"]) is not None:
raise ValueError("the context {}/{} exist.".format(
context["devL2Addr"], context["dstIID"]))
# add context
c = deepcopy(context)
self._db.append(c)
self.add_rules(c, comp, fragSender, fragReceiver, fragSender2, fragReceiver2)
def add_rules(self, context, comp=None, fragSender=None, fragReceiver=None, fragSender2=None, fragReceiver2=None):
""" add rules into the context specified. """
if comp is not None:
self.add_rule(context, "comp", comp)
if fragSender is not None:
self.add_rule(context, "fragSender", fragSender)
if fragReceiver is not None:
self.add_rule(context, "fragReceiver", fragReceiver)
if fragSender2 is not None:
self.add_rule(context, "fragSender2", fragSender2)
if fragReceiver2 is not None:
self.add_rule(context, "fragReceiver2", fragReceiver2)
def add_rule(self, context, key, rule):
""" Check rule integrity and uniqueless and add it to the db """
if not "ruleID" in rule:
raise ValueError ("Rule ID not defined.")
if not "ruleLength" in rule:
if rule["ruleID"] < 255:
rule["ruleLength"] = 8
else:
raise ValueError ("RuleID too large for default size on a byte")
# proceed to compression check (TBD)
if key == "comp":
self.check_rule_compression(rule)
elif key in ["fragSender", "fragReceiver","fragSender2", "fragReceiver2", "comp"]:
self.check_rule_fragmentation(rule)
else:
raise ValueError ("key must be either comp, fragSender, fragReceiver, fragSender2, fragReceiver2")
rule_id = rule["ruleID"]
rule_id_length = rule["ruleLength"]
self._checkRuleValue(rule_id, rule_id_length)
for k in ["fragSender", "fragReceiver","fragSender2", "fragReceiver2", "comp"]:
r = context.get(k)
if r is not None:
if rule_id_length == r.ruleLength and rule_id == r.ruleID:
raise ValueError ("Rule {}/{} exists".format(
rule_id, rule_id_length))
context[key] = DictToAttrDeep(**rule)
def check_rule_compression(self, rule):
""" compression rule check """
# XXX need more work.
if (not "compression" in rule or "fragmentation" in rule):
raise ValueError ("{} Invalid rule".format(self._nameRule(rule)))
canon_rule_set = []
for r in rule["compression"]["rule_set"]:
canon_r = {}
for k,v in r.items():
if isinstance(v, str):
canon_r[k.upper()] = v.upper()
else:
canon_r[k.upper()] = v
canon_rule_set.append(canon_r)
rule["compression"]["rule_set"] = canon_rule_set
def check_rule_fragmentation(self, rule):
    """ fragmentation rule check

    Validates the fragmentation mode and fills in the default
    FRModeProfile parameters for the chosen mode.
    """
    # a fragmentation rule must carry "fragmentation" and must not also
    # be a compression rule
    if "fragmentation" not in rule or "compression" in rule:
        raise ValueError ("{} Invalid rule".format(self._nameRule(rule)))
    frag_rule = rule["fragmentation"]
    if "FRMode" not in frag_rule:
        raise ValueError ("{} Fragmentation mode must be specified".format(self._nameRule(rule)))
    mode = frag_rule["FRMode"]
    if mode not in ("noAck", "ackAlways", "ackOnError"):
        raise ValueError ("{} Unknown fragmentation mode".format(self._nameRule(rule)))
    profile = frag_rule.setdefault("FRModeProfile", {})
    # per-mode defaults for the SCHC fragmentation header fields
    profile.setdefault("dtagSize", 0)
    profile.setdefault("WSize", {"noAck": 0, "ackAlways": 1, "ackOnError": 5}[mode])
    profile.setdefault("FCNSize", {"noAck": 1, "ackAlways": 3, "ackOnError": 3}[mode])
    # windowSize defaults to 2^FCNSize - 1 and must stay within that bound
    max_window = (0x01 << profile["FCNSize"]) - 1
    if "windowSize" in profile:
        if not (0 <= profile["windowSize"] <= max_window):
            raise ValueError ("{} illegal windowSize".format(self._nameRule(rule)))
    else:
        profile["windowSize"] = max_window
    if mode == "ackOnError":
        # the receiver's spontaneous-ack policy is mandatory for ackOnError
        if "ackBehavior" not in profile:
            raise ValueError ("Ack on error behavior must be specified (afterAll1 or afterAll0)")
        profile.setdefault("tileSize", 64)
| rulemanager.py | 15,497 | RuleManager class is used to manage Compression/Decompression and Fragmentation/
Reassembly rules.
t in this
this[k]
this function looks if bits specified in ruleID are not outside of
rule_id_length
check if a conflict exists between to ruleID (i.e. same first bits equals)
add context into the db.
Check rule integrity and uniqueness and add it to the db
add rules into the context specified.
compression rule check
fragmentation rule check
find a context with dev_L2addr.
find a context with dst_iid, which can be a wild card.
find a context by both devL2Addr and dstIID.
This is mainly for internal use.
returns a compression rule or an fragmentation rule
in the context matching with the field value of rule id in the packet.
this.get(k)
This module is used to manage rules.
## Base format
A context and rule is written in JSON.
A context contains an identifier, AND one or three rules.
One of rules must specify the SCHC Compression/Decompression (CD).
Two specify SCHC Fragmentation/Reassembly (FR) if needed.
Therefore, a context has to be formed to either below structures.
{
"devL2Addr": ...,
"dstIID": ...,
"comp": { ... },
"fragSender": { ... },
"fragReceiver": { ... }
}
"comp": compression rule.
"fragSender": fragmentation rule for inbound.
"fragReceiver": fragmentation rule for outbound.
Or,
{
"devL2Addr": ...,
"dstIID": ...,
"profile": { ... },
"comp": { ... }
}
XXX Q. "profile" should be in the context ?
## Context
A context is uniquely identified by devL2Addr
specifying the L2 address of a SCHC device.
dstIID matches the IP address assigned
to the interface of the communication peer.
In the context of the SCHC device, dstIID indicates the IP address of
the interface at the SCHC Translator,
which is dedicated between the device and
the application.
In the context of the other side, dstIID indicates the IP address of
the SCHC device.
+--------+ +------------+ +-----+
| SCHC | | SCHC |---------| App |
| Device | | Translator | | |
+--------+ +------------+ +-----+
| D (IP addr) | T (IP addr)
| L (L2 addr) |
| |
+--// LPWAN //--| GW |------------+
In the above example, the context of each side is like below:
at the device:
{
"devL2Addr": "L",
"dstIID": "M"
}
at the translator:
{
"devL2Addr": "L",
"dstIID": "D"
}
"*" and "/" can be used for a wild-card match. (XXX should be implemented.)
## Rule
XXX is it true that both ruleID and ruleLength is unique key ?
XXX is the deivce L2 address the real key ?
A rule is uniquely identified by the rule ID of variable length.
Each rule must contain the following information:
{
"ruleID" : 2,
"ruleLength" : 3
}
where ruleID contains the rule ID value aligned on the right and ruleLength
gives
the size in bits of the ruleID. In the previous example, this corresponds to
the binary value 0b010.
if ruleLength is not specified the value is set to 1 byte.
The rule is either a compression/decompression rule
or a fragmentation/reassembly rule.
For C/D rules, the keyword "compression" must be defined.
For F/R rules, the keyword "fragmentation"
must be defined.
## Compression Rule
A compression rule is bidirectional.
## Fragmentation Rule
A fragmentation rule is unidirectional.
The "fragmentation" keyword is used to give fragmentation mode and profile:
- one fragmentation mode keyword "noAck", "ackAlways" or "ackOnError".
- FRModeProfile parameters. Default values are automaticaly added.
- dtagSize, WSize and FCNSize are used to define the SCHC fragmentation header
- windowSize can be added if not 2^FCNSize - 1
For "ackOnError" the following parameter is defined:
- "ackBehavior" defined the ack behavior, i.e. when the Ack must be spontaneously sent
by the receiver and therefore when the sender must listen for Ack.
- "afterAll0" means that the sender waits for ack after sending an All-0
- "afterAll1" means that the sender waits only after sending the last fragment
- other behaviors may be defined in the future.
## data model of DB
db = [
{
"devL2Addr": ..,
"dstIID": ..,
"comp": {
"ruleID": ..,
"ruleLength": ..,
"compression": { ...}
},
"fragSender": {
"ruleID": ..,
"ruleLength": ..,
"fragmentation": { ...}
}
"fragReceiver": {
"ruleID": ..,
"ruleLength": ..,
"fragmentation": { ...}
}
}, ...
]
## method
- add_context(context, comp=None, fragSender=None, fragReceiver=None)
It adds the context. If it exists, raise ValueError.
- add_rules(context, comp=None, fragSender=None, fragReceiver=None)
It adds the list of rules into the context specified.
If it exists, raise ValueError.
If context is not specified, the rule will be added into the default
context.
## Rule to add a new key
Each key must be unique through a rule.
For example, below the keys of "profile" are not allowed.
{
"profile": { ... },
"compression": { "profile": ... }
}
## Examples
Example 1:
{
"ruleID" : 14,
"ruleLength" : 4 # rule 0b1110
"compression": { ... }
}
Example 2:
{
"ruleID" : 15,
"ruleLength" : 4 # rule 0b1110
"fragmentationOut": {
"FRMode" : "noAck" # or "ackAlways", "ackOnError"
"FRModeProfile" : {
"dtagSize" : 1,
"WSize": 3,
"FCNSize" : 3,
"windowSize", 7,
"ackBehavior": "afterAll1"
}
}
}
XXX to be checked whether they are needed.RM database XXX needs to implement wildcard search or something like that. XXX needs to implement wildcard search or something like that. add context proceed to compression check (TBD) XXX need more work. | 6,359 | en | 0.772817 |
"""
This module is to support *bbox_inches* option in savefig command.
"""
import warnings
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
def adjust_bbox(fig, format, bbox_inches):
    """
    Temporarily adjust the figure so that only the specified area
    (bbox_inches) is saved.
    It modifies fig.bbox, fig.bbox_inches,
    fig.transFigure._boxout, and fig.patch. While the figure size
    changes, the scale of the original figure is conserved. A
    function which restores the original values is returned.
    """
    # remember the original figure state so restore_bbox() can undo it
    origBbox = fig.bbox
    origBboxInches = fig.bbox_inches
    _boxout = fig.transFigure._boxout
    asp_list = []
    locator_list = []
    for ax in fig.axes:
        pos = ax.get_position(original=False).frozen()
        locator_list.append(ax.get_axes_locator())
        asp_list.append(ax.get_aspect())
        # freeze each axes at its current resolved position so the
        # figure-level bbox change below does not move it
        def _l(a, r, pos=pos):
            return pos
        ax.set_axes_locator(_l)
        ax.set_aspect("auto")
    def restore_bbox():
        # undo everything done above: per-axes locators/aspects and the
        # figure-level boxes
        for ax, asp, loc in zip(fig.axes, asp_list, locator_list):
            ax.set_aspect(asp)
            ax.set_axes_locator(loc)
        fig.bbox = origBbox
        fig.bbox_inches = origBboxInches
        fig.transFigure._boxout = _boxout
        fig.transFigure.invalidate()
        fig.patch.set_bounds(0, 0, 1, 1)
    adjust_bbox_handler = _adjust_bbox_handler_d.get(format)
    if adjust_bbox_handler is not None:
        adjust_bbox_handler(fig, bbox_inches)
        return restore_bbox
    else:
        # NOTE(review): the axes were already modified above, yet no
        # restore function is returned for an unsupported format, leaving
        # the figure altered -- confirm whether callers handle the None
        # return by restoring the figure themselves.
        warnings.warn("bbox_inches option for %s backend is not "
                      "implemented yet." % (format))
        return None
def adjust_bbox_png(fig, bbox_inches):
    """
    adjust_bbox for png (Agg) format

    Shifts and resizes the figure's bounding boxes so that only the
    *bbox_inches* region (a Bbox in inch coordinates) is rendered.
    """
    tr = fig.dpi_scale_trans
    # the requested region expressed in display (pixel) coordinates
    _bbox = TransformedBbox(bbox_inches,
                            tr)
    # NOTE: the original code assigned x0, y0 twice with identical
    # values; the dead first assignment has been removed.
    x0, y0 = _bbox.x0, _bbox.y0
    # new inch-space bbox anchored at the origin
    fig.bbox_inches = Bbox.from_bounds(0, 0,
                                       bbox_inches.width,
                                       bbox_inches.height)
    # original figure size in pixels (fig.bbox is replaced only below)
    w1, h1 = fig.bbox.width, fig.bbox.height
    # shift the figure transform so the requested region lands at (0, 0)
    fig.transFigure._boxout = Bbox.from_bounds(-x0, -y0,
                                               w1, h1)
    fig.transFigure.invalidate()
    fig.bbox = TransformedBbox(fig.bbox_inches, tr)
    # re-fit the background patch to the new (relative) figure extent
    fig.patch.set_bounds(x0 / w1, y0 / h1,
                         fig.bbox.width / w1, fig.bbox.height / h1)
def adjust_bbox_pdf(fig, bbox_inches):
    """
    adjust_bbox for pdf & eps format

    Remaps the figure's bounding boxes so that only the *bbox_inches*
    region (in inch coordinates) ends up in the vector output.
    """
    # the pgf renderer works in figure-dpi units; every other vector
    # backend uses 72 points per inch
    if fig._cachedRenderer.__class__.__name__ == "RendererPgf":
        trans = Affine2D().scale(fig.dpi)
        scale = 1.
    else:
        trans = Affine2D().scale(72)
        scale = 72. / fig.dpi
    # requested region in output (point) coordinates
    bbox_pts = TransformedBbox(bbox_inches, trans)
    x0 = bbox_pts.x0
    y0 = bbox_pts.y0
    # original figure size in output units (fig.bbox is replaced below)
    w1 = fig.bbox.width * scale
    h1 = fig.bbox.height * scale
    # anchor the inch-space bbox at the origin, then shift the figure
    # transform so the requested region lands at (0, 0)
    fig.bbox_inches = Bbox.from_bounds(0, 0,
                                       bbox_inches.width,
                                       bbox_inches.height)
    fig.transFigure._boxout = Bbox.from_bounds(-x0, -y0, w1, h1)
    fig.transFigure.invalidate()
    fig.bbox = TransformedBbox(fig.bbox_inches, trans)
    fig.patch.set_bounds(x0 / w1, y0 / h1,
                         fig.bbox.width / w1,
                         fig.bbox.height / h1)
def process_figure_for_rasterizing(figure,
                                   bbox_inches_restore, mode):
    """
    Recompute the bbox adjustment after the figure dpi changes during
    drawing (e.g. while rasterizing): restore the original layout, then
    re-apply the adjustment for *mode* at the new dpi.
    """
    bbox_inches, restore_bbox = bbox_inches_restore
    restore_bbox()
    return bbox_inches, adjust_bbox(figure, mode, bbox_inches)
# Dispatch table mapping a savefig output format to its bbox-adjustment
# handler: raster (Agg-based) formats share adjust_bbox_png, vector
# formats share adjust_bbox_pdf.
# NOTE: built with dict.fromkeys instead of module-level "for format"
# loops, which shadowed the builtin `format` and leaked the loop
# variable into the module namespace.
_adjust_bbox_handler_d = {}
_adjust_bbox_handler_d.update(
    dict.fromkeys(("png", "raw", "rgba", "jpg", "jpeg", "tiff"),
                  adjust_bbox_png))
_adjust_bbox_handler_d.update(
    dict.fromkeys(("pdf", "eps", "svg", "svgz"),
                  adjust_bbox_pdf))
| editing files/Portable Python 3.2.5.1/App/Lib/site-packages/matplotlib/tight_bbox.py | 4,026 | Temporarily adjust the figure so that only the specified area
(bbox_inches) is saved.
It modifies fig.bbox, fig.bbox_inches,
fig.transFigure._boxout, and fig.patch. While the figure size
changes, the scale of the original figure is conserved. A
function which restores the original values is returned.
adjust_bbox for pdf & eps format
adjust_bbox for png (Agg) format
This need to be called when figure dpi changes during the drawing
(e.g., rasterizing). It recovers the bbox and re-adjust it with
the new dpi.
This module is to support *bbox_inches* option in savefig command. | 581 | en | 0.838439 |
import logging
from authlib.common.urls import add_params_to_uri
from .base import BaseGrant, AuthorizationEndpointMixin
from ..errors import (
OAuth2Error,
UnauthorizedClientError,
AccessDeniedError,
)
log = logging.getLogger(__name__)
class ImplicitGrant(BaseGrant, AuthorizationEndpointMixin):
    """The implicit grant type is used to obtain access tokens (it does not
    support the issuance of refresh tokens) and is optimized for public
    clients known to operate a particular redirection URI. These clients
    are typically implemented in a browser using a scripting language
    such as JavaScript.
    Since this is a redirection-based flow, the client must be capable of
    interacting with the resource owner's user-agent (typically a web
    browser) and capable of receiving incoming requests (via redirection)
    from the authorization server.
    Unlike the authorization code grant type, in which the client makes
    separate requests for authorization and for an access token, the
    client receives the access token as the result of the authorization
    request.
    The implicit grant type does not include client authentication, and
    relies on the presence of the resource owner and the registration of
    the redirection URI. Because the access token is encoded into the
    redirection URI, it may be exposed to the resource owner and other
    applications residing on the same device::

        +----------+
        | Resource |
        |  Owner   |
        |          |
        +----------+
             ^
             |
            (B)
        +----|-----+          Client Identifier     +---------------+
        |         -+----(A)-- & Redirection URI --->|               |
        |  User-   |                                | Authorization |
        |  Agent  -|----(B)-- User authenticates -->|     Server    |
        |          |                                |               |
        |          |<---(C)--- Redirection URI ----<|               |
        |          |          with Access Token     +---------------+
        |          |            in Fragment
        |          |                                +---------------+
        |          |----(D)--- Redirection URI ---->|   Web-Hosted  |
        |          |          without Fragment      |     Client    |
        |          |                                |    Resource   |
        |     (F)  |<---(E)------- Script ---------<|               |
        |          |                                +---------------+
        +-|--------+
          |    |
         (A)  (G) Access Token
          |    |
          ^    v
        +---------+
        |         |
        |  Client |
        |         |
        +---------+
    """
    #: implicit grant type has authorization endpoint
    AUTHORIZATION_ENDPOINT = True
    #: Allowed client auth methods for token endpoint
    TOKEN_ENDPOINT_AUTH_METHODS = ['none']
    #: response types this grant handles at the authorization endpoint
    RESPONSE_TYPES = {'token'}
    GRANT_TYPE = 'implicit'
    #: errors are delivered in the URI fragment component, not the query
    ERROR_RESPONSE_FRAGMENT = True

    def validate_authorization_request(self):
        """The client constructs the request URI by adding the following
        parameters to the query component of the authorization endpoint URI
        using the "application/x-www-form-urlencoded" format.
        Per `Section 4.2.1`_.

        response_type
             REQUIRED.  Value MUST be set to "token".
        client_id
             REQUIRED.  The client identifier as described in Section 2.2.
        redirect_uri
             OPTIONAL.  As described in Section 3.1.2.
        scope
             OPTIONAL.  The scope of the access request as described by
             Section 3.3.
        state
             RECOMMENDED.  An opaque value used by the client to maintain
             state between the request and callback.  The authorization
             server includes this value when redirecting the user-agent back
             to the client.  The parameter SHOULD be used for preventing
             cross-site request forgery as described in Section 10.12.

        The client directs the resource owner to the constructed URI using an
        HTTP redirection response, or by other means available to it via the
        user-agent.
        For example, the client directs the user-agent to make the following
        HTTP request using TLS:

        .. code-block:: http

            GET /authorize?response_type=token&client_id=s6BhdRkqt3&state=xyz
                &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb HTTP/1.1
            Host: server.example.com

        .. _`Section 4.2.1`: https://tools.ietf.org/html/rfc6749#section-4.2.1
        """
        # ignore validate for response_type, since it is validated by
        # check_authorization_endpoint
        # The implicit grant type is optimized for public clients
        client = self.authenticate_token_endpoint_client()
        log.debug('Validate authorization request of %r', client)
        # resolve the redirect_uri first so that any later error can be
        # delivered to the client via redirection
        redirect_uri = self.validate_authorization_redirect_uri(
            self.request, client)
        response_type = self.request.response_type
        if not client.check_response_type(response_type):
            raise UnauthorizedClientError(
                'The client is not authorized to use '
                '"response_type={}"'.format(response_type),
                state=self.request.state,
                redirect_uri=redirect_uri,
                redirect_fragment=True,
            )
        try:
            self.request.client = client
            self.validate_requested_scope()
            self.execute_hook('after_validate_authorization_request')
        except OAuth2Error as error:
            # re-raise with redirect info so the error reaches the client
            # in the fragment component of the redirect URI
            error.redirect_uri = redirect_uri
            error.redirect_fragment = True
            raise error
        return redirect_uri

    def create_authorization_response(self, redirect_uri, grant_user):
        """If the resource owner grants the access request, the authorization
        server issues an access token and delivers it to the client by adding
        the following parameters to the fragment component of the redirection
        URI using the "application/x-www-form-urlencoded" format.
        Per `Section 4.2.2`_.

        access_token
             REQUIRED.  The access token issued by the authorization server.
        token_type
             REQUIRED.  The type of the token issued as described in
             Section 7.1.  Value is case insensitive.
        expires_in
             RECOMMENDED.  The lifetime in seconds of the access token.  For
             example, the value "3600" denotes that the access token will
             expire in one hour from the time the response was generated.
             If omitted, the authorization server SHOULD provide the
             expiration time via other means or document the default value.
        scope
             OPTIONAL, if identical to the scope requested by the client;
             otherwise, REQUIRED.  The scope of the access token as
             described by Section 3.3.
        state
             REQUIRED if the "state" parameter was present in the client
             authorization request.  The exact value received from the
             client.

        The authorization server MUST NOT issue a refresh token.
        For example, the authorization server redirects the user-agent by
        sending the following HTTP response:

        .. code-block:: http

            HTTP/1.1 302 Found
            Location: http://example.com/cb#access_token=2YotnFZFEjr1zCsicMWpAA
                &state=xyz&token_type=example&expires_in=3600

        Developers should note that some user-agents do not support the
        inclusion of a fragment component in the HTTP "Location" response
        header field.  Such clients will require using other methods for
        redirecting the client than a 3xx redirection response -- for
        example, returning an HTML page that includes a 'continue' button
        with an action linked to the redirection URI.

        .. _`Section 4.2.2`: https://tools.ietf.org/html/rfc6749#section-4.2.2

        :param redirect_uri: Redirect to the given URI for the authorization
        :param grant_user: if resource owner granted the request, pass this
            resource owner, otherwise pass None.
        :returns: (status_code, body, headers)
        """
        state = self.request.state
        if grant_user:
            self.request.user = grant_user
            client = self.request.client
            token = self.generate_token(
                client, self.GRANT_TYPE,
                user=grant_user,
                scope=client.get_allowed_scope(self.request.scope),
                # the implicit grant never issues a refresh token
                include_refresh_token=False
            )
            log.debug('Grant token %r to %r', token, client)
            self.save_token(token)
            self.execute_hook('process_token', token=token)
            # every token field (access_token, token_type, expires_in, ...)
            # is placed in the fragment component of the redirect URI
            params = [(k, token[k]) for k in token]
            if state:
                params.append(('state', state))
            uri = add_params_to_uri(redirect_uri, params, fragment=True)
            headers = [('Location', uri)]
            return 302, '', headers
        else:
            # resource owner denied the request
            raise AccessDeniedError(
                state=state,
                redirect_uri=redirect_uri,
                redirect_fragment=True
            )
| authlib/oauth2/rfc6749/grants/implicit.py | 9,391 | The implicit grant type is used to obtain access tokens (it does not
support the issuance of refresh tokens) and is optimized for public
clients known to operate a particular redirection URI. These clients
are typically implemented in a browser using a scripting language
such as JavaScript.
Since this is a redirection-based flow, the client must be capable of
interacting with the resource owner's user-agent (typically a web
browser) and capable of receiving incoming requests (via redirection)
from the authorization server.
Unlike the authorization code grant type, in which the client makes
separate requests for authorization and for an access token, the
client receives the access token as the result of the authorization
request.
The implicit grant type does not include client authentication, and
relies on the presence of the resource owner and the registration of
the redirection URI. Because the access token is encoded into the
redirection URI, it may be exposed to the resource owner and other
applications residing on the same device::
+----------+
| Resource |
| Owner |
| |
+----------+
^
|
(B)
+----|-----+ Client Identifier +---------------+
| -+----(A)-- & Redirection URI --->| |
| User- | | Authorization |
| Agent -|----(B)-- User authenticates -->| Server |
| | | |
| |<---(C)--- Redirection URI ----<| |
| | with Access Token +---------------+
| | in Fragment
| | +---------------+
| |----(D)--- Redirection URI ---->| Web-Hosted |
| | without Fragment | Client |
| | | Resource |
| (F) |<---(E)------- Script ---------<| |
| | +---------------+
+-|--------+
| |
(A) (G) Access Token
| |
^ v
+---------+
| |
| Client |
| |
+---------+
If the resource owner grants the access request, the authorization
server issues an access token and delivers it to the client by adding
the following parameters to the fragment component of the redirection
URI using the "application/x-www-form-urlencoded" format.
Per `Section 4.2.2`_.
access_token
REQUIRED. The access token issued by the authorization server.
token_type
REQUIRED. The type of the token issued as described in
Section 7.1. Value is case insensitive.
expires_in
RECOMMENDED. The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
scope
OPTIONAL, if identical to the scope requested by the client;
otherwise, REQUIRED. The scope of the access token as
described by Section 3.3.
state
REQUIRED if the "state" parameter was present in the client
authorization request. The exact value received from the
client.
The authorization server MUST NOT issue a refresh token.
For example, the authorization server redirects the user-agent by
sending the following HTTP response:
.. code-block:: http
HTTP/1.1 302 Found
Location: http://example.com/cb#access_token=2YotnFZFEjr1zCsicMWpAA
&state=xyz&token_type=example&expires_in=3600
Developers should note that some user-agents do not support the
inclusion of a fragment component in the HTTP "Location" response
header field. Such clients will require using other methods for
redirecting the client than a 3xx redirection response -- for
example, returning an HTML page that includes a 'continue' button
with an action linked to the redirection URI.
.. _`Section 4.2.2`: https://tools.ietf.org/html/rfc6749#section-4.2.2
:param redirect_uri: Redirect to the given URI for the authorization
:param grant_user: if resource owner granted the request, pass this
resource owner, otherwise pass None.
:returns: (status_code, body, headers)
The client constructs the request URI by adding the following
parameters to the query component of the authorization endpoint URI
using the "application/x-www-form-urlencoded" format.
Per `Section 4.2.1`_.
response_type
REQUIRED. Value MUST be set to "token".
client_id
REQUIRED. The client identifier as described in Section 2.2.
redirect_uri
OPTIONAL. As described in Section 3.1.2.
scope
OPTIONAL. The scope of the access request as described by
Section 3.3.
state
RECOMMENDED. An opaque value used by the client to maintain
state between the request and callback. The authorization
server includes this value when redirecting the user-agent back
to the client. The parameter SHOULD be used for preventing
cross-site request forgery as described in Section 10.12.
The client directs the resource owner to the constructed URI using an
HTTP redirection response, or by other means available to it via the
user-agent.
For example, the client directs the user-agent to make the following
HTTP request using TLS:
.. code-block:: http
GET /authorize?response_type=token&client_id=s6BhdRkqt3&state=xyz
&redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb HTTP/1.1
Host: server.example.com
.. _`Section 4.2.1`: https://tools.ietf.org/html/rfc6749#section-4.2.1
: authorization_code grant type has authorization endpoint: Allowed client auth methods for token endpoint ignore validate for response_type, since it is validated by check_authorization_endpoint The implicit grant type is optimized for public clients | 5,968 | en | 0.819414 |
import pandas as pd
import numpy as np
def load_from_tsfile_to_dataframe(full_file_path_and_name, replace_missing_vals_with='NaN'):
    """Load a .ts time-series file into a pandas DataFrame.

    Parameters
    ----------
    full_file_path_and_name : str
        Path of the .ts file to parse.
    replace_missing_vals_with : str, default 'NaN'
        Text substituted for every '?' (missing value marker) before parsing.

    Returns
    -------
    pandas.DataFrame
        One column per dimension ('dim_0', 'dim_1', ...). Each cell holds a
        pandas.Series containing that case's data for the dimension.
    numpy.ndarray
        Returned as a second value only when the file declares
        "@classLabel true": the class value of each case, in file order.

    Raises
    ------
    Exception
        For timestamped data (not supported yet), malformed header
        arguments, inconsistent dimension counts, or a file without data.
    """
    data_started = False
    instance_list = []
    class_val_list = []
    has_time_stamps = False
    has_class_labels = False
    uses_tuples = False
    is_first_case = True
    num_dimensions = None
    with open(full_file_path_and_name, 'r') as f:
        for line in f:
            if line.strip():
                if "@timestamps" in line.lower():
                    if "true" in line.lower():
                        has_time_stamps = True
                        # we don't have any data formatted to test with yet
                        raise Exception("Not supported yet")
                    elif "false" in line.lower():
                        has_time_stamps = False
                    else:
                        raise Exception("invalid timestamp argument")
                if "@classlabel" in line.lower():
                    # compare case-insensitively, consistent with the
                    # @timestamps handling above (was case-sensitive before,
                    # so "@classLabel True" was rejected as invalid)
                    if "true" in line.lower():
                        has_class_labels = True
                    elif "false" in line.lower():
                        has_class_labels = False
                    else:
                        raise Exception("invalid classLabel argument")
                if "@data" in line.lower():
                    data_started = True
                    continue
                # if the '@data' tag has been found, the header information has
                # been cleared and now data can be loaded
                if data_started:
                    line = line.replace("?", replace_missing_vals_with)
                    dimensions = line.split(":")
                    # on the first row, initialise storage depending on the
                    # number of dimensions present and determine whether data
                    # is stored as lists or as (index, value) tuples
                    if is_first_case:
                        num_dimensions = len(dimensions)
                        if has_class_labels:
                            num_dimensions -= 1
                        is_first_case = False
                        for dim in range(0, num_dimensions):
                            instance_list.append([])
                        if dimensions[0].startswith("("):
                            uses_tuples = True
                    this_num_dimensions = len(dimensions)
                    if has_class_labels:
                        this_num_dimensions -= 1
                    # every dimension must be present for every case, even if
                    # empty, so dimension indices stay consistent across cases
                    # (e.g. dim 2 must not silently become dim 1)
                    if this_num_dimensions != num_dimensions:
                        raise Exception("inconsistent number of dimensions")
                    # go through each dimension that is represented in the file
                    for dim in range(0, num_dimensions):
                        if uses_tuples:
                            # (index, value) pairs: strip brackets, then read
                            # alternating index/value entries
                            without_brackets = dimensions[dim].replace("(", "").replace(")", "").split(",")
                            without_brackets = [float(i) for i in without_brackets]
                            indices = []
                            data = []
                            i = 0
                            while i < len(without_brackets):
                                indices.append(int(without_brackets[i]))
                                data.append(without_brackets[i + 1])
                                i += 2
                            instance_list[dim].append(pd.Series(data, indices))
                        else:
                            # list form: read the values straight into a Series
                            data_series = dimensions[dim].split(",")
                            data_series = [float(i) for i in data_series]
                            instance_list[dim].append(pd.Series(data_series))
                    if has_class_labels:
                        class_val_list.append(dimensions[num_dimensions].strip())
    if num_dimensions is None:
        # previously this fell through to a confusing NameError below
        raise Exception("empty file or no @data section found")
    # note: creating a pandas.DataFrame here, NOT an xpandas.xdataframe
    x_data = pd.DataFrame(dtype=np.float32)
    for dim in range(0, num_dimensions):
        x_data['dim_' + str(dim)] = instance_list[dim]
    if has_class_labels:
        return x_data, np.asarray(class_val_list)
    return x_data
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 17:14:19 2020
@author: Mitchell
model_training.py
~~~~~~~~~~~~~~~~~
This file serves as a script for building and training our VAE model. To do
so we used the VAE and DataSequence classes defined in the file `VAE.py`, as
well as helper functions from the file `dataset_utils` for loading and parsing
our datasets.
The user has the ability to specify several parameters that control the
loading of our data, the structure of our model, as well as the training plan
for our model. After training is complete the script also plots metrics tracked
during training and saves the final model.
"""
# Imports
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from dataset_utils import load_training, load_validation
from VAE import VAE, DataSequence
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os, time, json

### Load Data
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Parameters for shape of dataset (note these are also used for model def. and
# training.)
measures = 8
measure_len = 96
# training
training_foldername = '../../nesmdb24_seprsco/train/'
train_save_filename = 'transformed_dataset.json'
dataset , labels2int_map , int2labels_map = \
    load_training(training_foldername, train_save_filename,
                  measures = measures, measure_len = measure_len)
# validation
validation_foldername = '../../nesmdb24_seprsco/valid/'
val_save_filename = 'transformed_val_dataset.json'
val_dataset = load_validation(validation_foldername,\
                              labels2int_map, val_save_filename,
                              measures = measures, measure_len = measure_len)

### Build Model
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### Model Parameters
latent_dim = 124
input_dims = [mapping.shape[0]-1 for mapping in int2labels_map]
dropout = .1
maxnorm = None
vae_b1 , vae_b2 = .02 , .1
# Build Model
model = VAE(latent_dim, input_dims, measures, measure_len, dropout,
            maxnorm, vae_b1 , vae_b2)
model.build([tf.TensorShape([None, measures, measure_len, input_dims[i]])
             for i in range(4)])
model.summary()

### Train Model
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Training Parameters
batch_size = 100
epochs = 10
# Cost Function
cost_function = model.vae_loss
# Learning_rate schedule
lr_0 = .001
decay_rate = .998
lr_decay = lambda t: lr_0 * decay_rate**t
lr_schedule = tf.keras.callbacks.LearningRateScheduler(lr_decay)
# Optimizer
optimizer = tf.keras.optimizers.Adam()
# Define callbacks
callbacks = [lr_schedule]
# Keras Sequences for Datasets (need to use since one-hot datasets too
# large for storing in memory)
training_seq = DataSequence(dataset, int2labels_map, batch_size)
validation_seq = DataSequence(val_dataset, int2labels_map, batch_size)
# Compile Model
model.compile(optimizer = optimizer,
              loss = cost_function)
# Train model
# NOTE: `callbacks` and `validation_seq` were previously constructed but
# never passed to fit_generator, so the learning-rate schedule silently
# never ran and validation loss was never computed. They are wired in now.
tic = time.perf_counter()
history = model.fit_generator(generator = training_seq,
                              epochs = epochs,
                              callbacks = callbacks,
                              validation_data = validation_seq)
toc = time.perf_counter()
print(f"Trained Model in {(toc - tic)/60:0.1f} minutes")

### Plot Training Metrics
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
training_loss = history.history['loss']
validation_loss = history.history.get('val_loss')
# Total Loss (training and, when available, validation)
plt.figure(1)
plt.plot(training_loss, 'b', label='Training')
if validation_loss:
    plt.plot(validation_loss, 'r', label='Validation')
plt.title('Loss vs Time')
plt.xlabel('Training Epoch')
plt.ylabel('Avg. Total Loss')
plt.legend()
plt.show()

### Save Model and History
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Save Model Weights
save_model = False
if save_model:
    checkpoint_dir = '.\\training_checkpoints'
    checkpoint_prefix = os.path.join(checkpoint_dir, "model_ckpt")
    model.save_weights(checkpoint_prefix)
    print('Model weights saved to files: '+checkpoint_prefix+'.*')
# Save Training History
save_history = False
if save_history:
    checkpoint_dir = '.\\training_checkpoints'
    history_filename = os.path.join(checkpoint_dir, "training_history.json")
    with open(history_filename, 'w') as f:
        json.dump({
            key:[float(value) for value in history.history[key]]
            for key in history.history
        }, f)
    print('Training history saved to file: '+ history_filename)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#----------------------------------END FILE------------------------------------
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | VAE/full_model/model_training.py | 4,667 | Created on Wed Apr 1 17:14:19 2020
@author: Mitchell
model_training.py
~~~~~~~~~~~~~~~~~
This file serves as a script for building and training our VAE model. To do
so we used the VAE and DataSequence classes defined in the file `VAE.py`, as
well as helper functions from the file `dataset_utils` for loading and parsing
our datasets.
The user has the the ability to specify several parameters that control the
loading of our data, the structure of our model, as well as the traininig plan
for our model. After training is complete the script also plots metrics tracked
during training and saves the final model.
-*- coding: utf-8 -*- Imports~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Load Data~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Parameters for shape of dataset (note these are also used for model def. and training.) training validation Build Model~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Model Parameters Build Model Train Model~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Training Parameters Cost Function Learning_rate schedule Optimizer Define callbacks Keras Sequences for Datasets (need to use since one-hot datasets too large for storing in memory) Compile Model Train model Plot Training Metrics~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Total Loss Save Model and History~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Save Model Weights Save Training History~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~----------------------------------END FILE------------------------------------~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 1,826 | en | 0.493169 |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_concurrency import processutils
from oslo_log import log
from nova import exception
from nova import utils
from novadocker.i18n import _
LOG = log.getLogger(__name__)
def teardown_network(container_id):
    """Delete the network namespace named after *container_id*, if present.

    No-op on Windows.  Failures to run ``ip`` are logged as warnings
    rather than raised, since teardown is best-effort.
    """
    if os.name == 'nt':
        return
    try:
        listing, err = utils.execute('ip', '-o', 'netns', 'list')
        known_namespaces = (entry.strip() for entry in listing.split('\n'))
        if container_id in known_namespaces:
            utils.execute('ip', 'netns', 'delete', container_id,
                          run_as_root=True)
    except processutils.ProcessExecutionError:
        LOG.warning(_('Cannot remove network namespace, netns id: %s'),
                    container_id)
def find_fixed_ip(instance, network_info):
    """Return the instance's first fixed IP in CIDR notation ("addr/prefix").

    Scans the subnets of *network_info* in order; within each subnet,
    addresses of type 'fixed' with a non-empty address qualify.

    :raises exception.InstanceDeployFailure: when no fixed IP exists.
    """
    for subnet in network_info['subnets']:
        prefix_len = subnet['cidr'].split('/')[1]
        fixed_addrs = [ip['address'] for ip in subnet['ips']
                       if ip['type'] == 'fixed' and ip['address']]
        if fixed_addrs:
            return "%s/%s" % (fixed_addrs[0], prefix_len)
    raise exception.InstanceDeployFailure(_('Cannot find fixed ip'),
                                          instance_id=instance['uuid'])
def find_gateway(instance, network_info):
    """Return the gateway address of the instance's first subnet.

    Only the first subnet is consulted (matching the original loop, which
    returned unconditionally on its first iteration).

    :raises exception.InstanceDeployFailure: when there are no subnets.
    """
    subnets = network_info['subnets']
    if subnets:
        return subnets[0]['gateway']['address']
    raise exception.InstanceDeployFailure(_('Cannot find gateway'),
                                          instance_id=instance['uuid'])
# NOTE(arosen) - this method should be removed after it's moved into the
# linux_net code in nova.
def get_ovs_interfaceid(vif):
    """Return the OVS interface id of *vif*, falling back to the vif id."""
    ovs_id = vif.get('ovs_interfaceid')
    return ovs_id if ovs_id else vif['id']
| novadocker/virt/docker/network.py | 2,227 | Copyright 2014 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. NOTE(arosen) - this method should be removed after it's moved into the linux_net code in nova. | 701 | en | 0.881976 |
import unittest
from tests.test_utils import get_sample_pdf_with_labels, get_sample_pdf, get_sample_sdf, get_sample_pdf_with_extra_cols, get_sample_pdf_with_no_text_col ,get_sample_spark_dataframe
from nlu import *
class TestSarcasm(unittest.TestCase):
    """Smoke tests for the pretrained 'sarcasm' NLU pipeline."""

    def test_sarcasm_model(self):
        """Predict at sentence and document level and check output types."""
        pipeline = nlu.load('sarcasm', verbose=True)
        samples = ['I love pancaces. I hate Mondays', 'I love Fridays']
        preds = pipeline.predict(samples, output_level='sentence')
        print(preds.columns)
        print(preds['sentence'], preds[['sarcasm', 'sarcasm_confidence']])
        preds = pipeline.predict(samples, output_level='document')
        self.assertIsInstance(preds.iloc[0]['sarcasm'], str)
        print(preds.columns)
        print(preds['document'], preds[['sarcasm', 'sarcasm_confidence']])
        self.assertIsInstance(preds.iloc[0]['sarcasm'], str)
#
# def test_sarcasm_model_bench(self):
# # Get dataset "
# # todo test light pipe for 50k+
# # ! wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/sarcasm/train-balanced-sarcasm.csv
# # path = '/home/loan/Documents/freelancework/jsl/nlu/nlu_git/tests/datasets/train-balanced-sarcasm.csv'
# path = '/home/loan/Documents/freelancework/jsl/nlu/4realnlugit/tests/datasets/Musical_instruments_reviews.csv'
# sarcasm_df = pd.read_csv(path)
# # sarcasm_df['text'] = sarcasm_df['comment']
# # print(len(sarcasm_df))
# # max 50k , 60K dead
# # count = int(len(sarcasm_df)/15)
# # count = 50100
# # print('using ', count,' Rows')
# print(sarcasm_df.columns)
# #setting meta to true will output scores for keywords. Lower scores are better
# # Sentiment confidence is 2 because it sums the confidences of multiple sentences
# # df = nlu.load('en.classify.sarcasm',verbose=True).predict(sarcasm_df['reviewText'].iloc[0:100])
# df = nlu.load('bert',verbose=True).predict(sarcasm_df['reviewText'].iloc[0:100])
#
# # df = nlu.load('en.classify.sarcasm',verbose=True).predict('How are you today')
#
# # df = nlu.load('en.classify.sarcasm',verbose=True).predict(sarcasm_df['text'])
#
# print(df.columns)
# print(df['bert_embeddings'])
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| tests/nlu_core_tests/component_tests/classifier_tests/sarcasm_tests.py | 2,345 | def test_sarcasm_model_bench(self): Get dataset " todo test light pipe for 50k+ ! wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/sarcasm/train-balanced-sarcasm.csv path = '/home/loan/Documents/freelancework/jsl/nlu/nlu_git/tests/datasets/train-balanced-sarcasm.csv' path = '/home/loan/Documents/freelancework/jsl/nlu/4realnlugit/tests/datasets/Musical_instruments_reviews.csv' sarcasm_df = pd.read_csv(path) sarcasm_df['text'] = sarcasm_df['comment'] print(len(sarcasm_df)) max 50k , 60K dead count = int(len(sarcasm_df)/15) count = 50100 print('using ', count,' Rows') print(sarcasm_df.columns) setting meta to true will output scores for keywords. Lower scores are better Sentiment confidence is 2 because it sums the confidences of multiple sentences df = nlu.load('en.classify.sarcasm',verbose=True).predict(sarcasm_df['reviewText'].iloc[0:100]) df = nlu.load('bert',verbose=True).predict(sarcasm_df['reviewText'].iloc[0:100]) df = nlu.load('en.classify.sarcasm',verbose=True).predict('How are you today') df = nlu.load('en.classify.sarcasm',verbose=True).predict(sarcasm_df['text']) print(df.columns) print(df['bert_embeddings']) | 1,263 | en | 0.428725 |
# Generated by Django 2.2.6 on 2019-10-22 15:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Replace ``Country.date_created`` with full audit fields.

    Auto-generated by Django (do not restyle by hand): drops the old
    ``date_created`` column and adds ``created_by``/``created_on`` and
    ``modified_by``/``modified_on``.  The user foreign keys are nullable
    and PROTECTed so deleting a user cannot cascade into Country rows.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('project_core', '0050_added_organisationuid_modl'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='country',
            name='date_created',
        ),
        migrations.AddField(
            model_name='country',
            name='created_by',
            field=models.ForeignKey(blank=True, help_text='User by which the entry was created', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='project_core_country_created_by_related', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='country',
            name='created_on',
            # preserve_default=False: the timezone.now default exists only to
            # backfill existing rows during this migration.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, help_text='Date and time at which the entry was created'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='country',
            name='modified_by',
            field=models.ForeignKey(blank=True, help_text='User by which the entry was modified', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='project_core_country_modified_by_related', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='country',
            name='modified_on',
            field=models.DateTimeField(auto_now=True, help_text='Date and time at which the entry was modified', null=True),
        ),
    ]
| ProjectApplication/project_core/migrations/0051_createmodify_country.py | 1,733 | Generated by Django 2.2.6 on 2019-10-22 15:22 | 45 | en | 0.614653 |
def combination(n, r):
    """Return the binomial coefficient C(n, r) = n! / (r! * (n - r)!).

    :param n: the count of different items
    :param r: the number of items to select
    :return: the number of ways to choose ``r`` items out of ``n``;
             0 when ``r`` is negative or greater than ``n``
    """
    # Bug fix: the original fell through with empty loop ranges for
    # out-of-range r and returned 1 instead of 0 (e.g. combination(3, 5)).
    if r < 0 or r > n:
        return 0
    # Use the smaller of r and n - r to minimise the number of iterations.
    r = min(n - r, r)
    result = 1
    for i in range(n, n - r, -1):
        result *= i
    for i in range(1, r + 1):
        # Each intermediate quotient is exact: the product of k consecutive
        # integers is always divisible by k!.
        result //= i
    return result
def comb2():
    """Placeholder for a SciPy-based combination implementation (unused)."""
    # from scipy.misc import comb
    # NOTE(review): scipy.misc.comb has been removed from modern SciPy;
    # scipy.special.comb (or math.comb on Python 3.8+) would be the
    # replacement if this stub is ever implemented.
    pass
| lib/python-lib/combination.py | 385 | :param n: the count of different items
:param r: the number of select
:return: combination
n! / (r! * (n - r)!)
from scipy.misc import comb | 141 | en | 0.543815 |
import os
import json
import time
import datetime
import manageMonitoredUsersDB
pathToJSON = os.getcwd() + '/generatedJSON'
def get_local_json_timestamp_epoch(username, filename):
    """Return the LOCAL JSON's ``user_info.json_timestamp_epoch`` as a float.

    Reads ``generatedJSON/<filename>``.  On a read or parse failure the
    incident is appended to ``logs/fail_to_get_local_epoch`` and ``None``
    is returned.  ``username`` is accepted for interface symmetry with
    get_remote_json_timestamp_epoch but is not used here.
    """
    json_path = pathToJSON + os.sep + filename
    try:
        # Context manager fixes the original's leaked file handle; narrow
        # exceptions replace the original bare `except:` (ValueError covers
        # json.JSONDecodeError, its subclass).
        with open(json_path, "r") as json_file:
            monitoredJSON = json.load(json_file)
    except (OSError, ValueError):
        with open(os.getcwd() + '/logs/fail_to_get_local_epoch', "a") as fileText:
            fileText.write("The JSON fail to read is " + json_path + " at " + str(datetime.datetime.now()) + "\n")
        return None
    user_info = monitoredJSON["user_info"]
    # Epoch of the LOCAL copy
    return float(user_info["json_timestamp_epoch"])
def get_remote_json_timestamp_epoch(username):
    """Return the epoch stored for *username* in monitoredUser.db, or None.

    Looks up the user row, parses column 2 (a Python-repr-like string) as
    JSON and extracts ``user_info.json_timestamp_epoch``.  On failure the
    incident is logged and the function implicitly returns ``None``.
    """
    user_infoRemote = None
    monitoredUserSelected = manageMonitoredUsersDB.get_monitoredUserByName(username)
    # Column 2 holds a str()-dumped dict; massage it into valid JSON.
    # NOTE(review): these replaces break if any value contains an apostrophe
    # or the words True/False — confirm against what is written to the DB.
    temp = monitoredUserSelected[2]
    temp = temp.replace("'", "\"")
    temp = temp.replace("True", "true")
    temp = temp.replace("False", "false")
    temp = json.loads(temp)
    for key in temp.keys():
        if key == "user_info":
            user_infoRemote = temp[key]
    if user_infoRemote != None:
        json_timestamp_epochRemote = user_infoRemote["json_timestamp_epoch"]
        return float(json_timestamp_epochRemote) # epoch of the REMOTE copy, as stored in monitoredUser.db
    else:
        print("\n" + "\033[91m" + "ERROR: No se ha podido obtener user_info en remoto, monitoredUser.db" + "\033[0m" + "\n")
        with open(os.getcwd() + '/logs/fail_to_get_remote_epoch', "a") as fileText:
            fileText.write("The username fail to read is " + username + " at " + str(datetime.datetime.now()) + "\n")
            # NOTE(review): close() is redundant inside the `with` block.
            fileText.close()
def checkArrivedJSON():
    """Scan generatedJSON/ and push newer local JSONs into monitoredUser.db.

    For every ``<username>.json`` file, compare the local file's epoch with
    the one stored in the database; when the local copy is strictly newer,
    overwrite the DB row with the file's contents.
    """
    for filename in sorted(os.listdir(pathToJSON)):
        if not filename.endswith(".json"):
            continue
        # Bug fix: str.strip(".json") removed *characters* from the set
        # {., j, s, o, n} and mangled names such as "jason.json" -> "a";
        # slicing off the suffix is exact.
        username = filename[:-len(".json")]
        # Epoch of the local JSON file
        json_timestamp_epoch = get_local_json_timestamp_epoch(username, filename)
        if json_timestamp_epoch is None:
            continue
        # Epoch stored remotely in monitoredUser.db
        json_timestamp_epochRemote = get_remote_json_timestamp_epoch(username)
        if json_timestamp_epochRemote is None:
            # Remote lookup failed (already logged); comparing against None
            # would raise TypeError, so skip this user.
            continue
        if json_timestamp_epoch > json_timestamp_epochRemote:
            # Context manager fixes the original's leaked file handle.
            with open(pathToJSON + os.sep + filename, "r") as json_file:
                monitoredJSON = json.load(json_file)
            manageMonitoredUsersDB.update_monitoredUserByName(username, str(monitoredJSON))
#MAIN
# Poll once per second forever; roughly every 10 iterations print a
# heartbeat so the operator can see the watcher is still alive.
veces = 0
while True:
    checkArrivedJSON()
    time.sleep(1)
    if veces >= 10:
        print("Checking new user activities...\n")
        veces = 0
    veces += 1
"""CD SEM structures."""
from functools import partial
from typing import Optional, Tuple
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.straight import straight as straight_function
from gdsfactory.components.text_rectangular import text_rectangular
from gdsfactory.cross_section import strip
from gdsfactory.grid import grid
from gdsfactory.types import ComponentFactory, CrossSectionFactory
text_rectangular_mini = partial(text_rectangular, size=1)
LINE_LENGTH = 420.0
@cell
def cdsem_straight(
    widths: Tuple[float, ...] = (0.4, 0.45, 0.5, 0.6, 0.8, 1.0),
    length: float = LINE_LENGTH,
    cross_section: CrossSectionFactory = strip,
    text: Optional[ComponentFactory] = text_rectangular_mini,
    spacing: float = 3,
) -> Component:
    """Returns straight waveguide lines width sweep.

    Args:
        widths: for the sweep
        length: for the line
        cross_section: for the lines
        text: optional text for labels; each label shows int(width * 1e3)
            (nm, assuming widths are in um — TODO confirm units)
        spacing: edge to edge spacing
    """
    lines = []
    for width in widths:
        # Bind the width into a fresh factory instead of rebinding
        # `cross_section` itself: the original accumulated nested partials
        # across iterations (harmless only because call-time kwargs win).
        xs = partial(cross_section, width=width)
        line = straight_function(length=length, cross_section=xs)
        if text:
            line = line.copy()
            t = line << text(str(int(width * 1e3)))
            t.xmin = line.xmax + 5
            t.y = 0
        lines.append(line)
    return grid(lines, spacing=(0, spacing))
if __name__ == "__main__":
    # Visual smoke test: build the sweep and open it in the layout viewer.
    c = cdsem_straight()
    c.show()
| gdsfactory/components/cdsem_straight.py | 1,525 | Returns straight waveguide lines width sweep.
Args:
widths: for the sweep
length: for the line
cross_section: for the lines
text: optional text for labels
spacing: edge to edge spacing
CD SEM structures. | 224 | en | 0.520754 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from ....unittest import TestCase
from oauthlib.oauth2.rfc6749 import BaseEndpoint, catch_errors_and_unavailability
from oauthlib.oauth2 import Server, RequestValidator, FatalClientError, OAuth2Error
class BaseEndpointTest(TestCase):
    """Tests for BaseEndpoint's catch_errors / available switches."""

    def test_default_config(self):
        endpoint = BaseEndpoint()
        self.assertFalse(endpoint.catch_errors)
        self.assertTrue(endpoint.available)
        endpoint.catch_errors = True
        self.assertTrue(endpoint.catch_errors)
        endpoint.available = False
        self.assertFalse(endpoint.available)

    def test_error_catching(self):
        server = Server(RequestValidator())
        server.catch_errors = True
        headers, body, status = server.create_authorization_response('https://example.com')
        self.assertIn("server_error", body)
        self.assertEqual(status, 500)

    def test_unavailability(self):
        server = Server(RequestValidator())
        server.available = False
        headers, body, status = server.create_authorization_response('https://example.com')
        self.assertIn("temporarily_unavailable", body)
        self.assertEqual(status, 503)

    def test_wrapper(self):
        class GuardedServer(Server):
            @catch_errors_and_unavailability
            def throw_error(self, uri):
                raise ValueError()

            @catch_errors_and_unavailability
            def throw_oauth_error(self, uri):
                raise OAuth2Error()

            @catch_errors_and_unavailability
            def throw_fatal_oauth_error(self, uri):
                raise FatalClientError()

        server = GuardedServer(RequestValidator())
        server.catch_errors = True
        headers, body, status = server.throw_error('a')
        self.assertIn("server_error", body)
        self.assertEqual(status, 500)

        server.available = False
        headers, body, status = server.throw_error('a')
        self.assertIn("temporarily_unavailable", body)
        self.assertEqual(status, 503)

        server.available = True
        self.assertRaises(OAuth2Error, server.throw_oauth_error, 'a')
        self.assertRaises(FatalClientError, server.throw_fatal_oauth_error, 'a')

        server.catch_errors = False
        self.assertRaises(OAuth2Error, server.throw_oauth_error, 'a')
        self.assertRaises(FatalClientError, server.throw_fatal_oauth_error, 'a')
| tests/oauth2/rfc6749/endpoints/test_base_endpoint.py | 2,462 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
from .protocol import *
import serial
import time
from threading import Lock
from PIL import Image, ImageDraw, ImageOps, ImageFont
class Device:
    """Serial interface to an inkkeys device (keys, LEDs, e-ink display).

    Communicates over a newline-terminated text protocol (see protocol.py
    for the command codes) at 115200 baud.  ``awaitingResponseLock``
    serialises request/response exchanges against background polling.
    """
    # NOTE(review): these are *class* attributes; the mutable ones
    # (imageBuffer, callbacks) are shared across instances — confirm only
    # one Device is ever created.
    ser = None            # open serial.Serial handle, or None when disconnected
    inbuffer = ""         # accumulates partial lines read from the device
    awaitingResponseLock = Lock()
    testmode = False      # device reports it runs the hardware-test firmware
    nLeds = 0             # reported by the device during requestInfo()
    dispW = 0
    dispH = 0
    rotFactor = 0
    rotCircleSteps = 0
    bannerHeight = 12 #Defines the height of top and bottom banner
    imageBuffer = []      # images sent since last refresh, replayed by resendImageData()
    callbacks = {} #This object stores callback functions that react directly to a keypress reported via serial
    ledState = None #Current LED status, so we can animate them over time
    ledTime = None #Last time LEDs were set
    debug = True
    def connect(self, dev):
        """Open serial port *dev* and handshake; return True on success."""
        print("Connecting to ", dev, ".")
        self.ser = serial.Serial(dev, 115200, timeout=1)
        if not self.requestInfo(3):
            self.disconnect()
            return False
        if self.testmode:
            print("Connection to ", self.ser.name, " was successfull, but the device is running the hardware test firmware, which cannot be used for anything but testing. Please flash the proper inkkeys firmware to use it.")
            return False
        print("Connected to ", self.ser.name, ".")
        return True
    def disconnect(self):
        """Close the serial port if it is open."""
        if self.ser != None:
            self.ser.close()
            self.ser = None
    def sendToDevice(self, command):
        """Send one newline-terminated text command."""
        if self.debug:
            print("Sending: " + command)
        self.ser.write((command + "\n").encode())
    def sendBinaryToDevice(self, data):
        """Send raw bytes (used for display pixel data)."""
        if self.debug:
            print("Sending " + str(len(data)) + " bytes of binary data.")
        self.ser.write(data)
    def readFromDevice(self):
        """Return the next complete line from the device, or None."""
        if self.ser.in_waiting > 0:
            self.inbuffer += self.ser.read(self.ser.in_waiting).decode().replace("\r", "")
        chunks = self.inbuffer.split("\n", 1)
        if len(chunks) > 1:
            cmd = chunks[0]
            self.inbuffer = chunks[1]
            if self.debug:
                print("Received: " + cmd)
            return cmd
        return None
    def poll(self):
        """Read one event and dispatch the matching registered callback.

        Jog events arrive as the jog key code followed by a signed decimal
        delta; all other events are matched verbatim against callbacks.
        """
        with self.awaitingResponseLock:
            input = self.readFromDevice()
            if input != None:
                if input[0] == KeyCode.JOG.value and (input[1:].isdecimal() or (input[1] == '-' and input[2:].isdecimal())):
                    if KeyCode.JOG.value in self.callbacks:
                        self.callbacks[KeyCode.JOG.value](int(input[1:]))
                elif input in self.callbacks:
                    self.callbacks[input]()
    def registerCallback(self, cb, key):
        """Register *cb* to be invoked when *key* events arrive."""
        self.callbacks[key.value] = cb
    def clearCallback(self, key):
        """Remove the callback for *key*, if any."""
        if key.value in self.callbacks:
            del self.callbacks[key.value]
    def clearCallbacks(self):
        """Remove all registered callbacks."""
        self.callbacks = {}
    def assignKey(self, key, sequence):
        """Assign an HID *sequence* (possibly empty) to *key* on the device."""
        self.sendToDevice(CommandCode.ASSIGN.value + " " + key.value + (" " + " ".join(sequence) if len(sequence) > 0 else ""))
    def sendLed(self, colors):
        """Send raw LED colors (list of 6-hex-digit strings)."""
        self.sendToDevice(CommandCode.LED.value + " " + " ".join(colors))
    def requestInfo(self, timeout):
        """Query device capabilities; return True if the handshake completes.

        Waits for the "Inkkeys" header, then parses KEY VALUE lines
        (TEST, N_LED, DISP_W, DISP_H, ROT_CIRCLE_STEPS) until "Done".
        Gives up after *timeout* seconds.
        """
        with self.awaitingResponseLock:
            print("Requesting device info...")
            start = time.time()
            self.sendToDevice(CommandCode.INFO.value)
            line = self.readFromDevice()
            while line != "Inkkeys":
                if time.time() - start > timeout:
                    return False
                if line == None:
                    time.sleep(0.1)
                    line = self.readFromDevice()
                    continue
                print("Skipping: ", line)
                line = self.readFromDevice()
            print("Header found. Waiting for infos...")
            line = self.readFromDevice()
            while line != "Done":
                if time.time() - start > timeout:
                    return False
                if line == None:
                    time.sleep(0.1)
                    line = self.readFromDevice()
                    continue
                if line.startswith("TEST "):
                    self.testmode = line[5] != "0"
                elif line.startswith("N_LED "):
                    self.nLeds = int(line[6:])
                elif line.startswith("DISP_W "):
                    self.dispW = int(line[7:])
                elif line.startswith("DISP_H "):
                    self.dispH = int(line[7:])
                elif line.startswith("ROT_CIRCLE_STEPS "):
                    self.rotCircleSteps = int(line[17:])
                else:
                    print("Skipping: ", line)
                line = self.readFromDevice()
            print("End of info received.")
            print("Testmode: ", self.testmode)
            print("Number of LEDs: ", self.nLeds)
            print("Display width: ", self.dispW)
            print("Display height: ", self.dispH)
            print("Rotation circle steps: ", self.rotCircleSteps)
            return True
    def sendImage(self, x, y, image):
        """Send *image* (PIL) to the display at (x, y); buffered for replay."""
        self.imageBuffer.append({"x": x, "y": y, "image": image.copy()})
        w, h = image.size
        # 1-bit, rotated 180°: the panel is mounted upside down relative to
        # PIL coordinates — TODO confirm against firmware.
        data = image.convert("1").rotate(180).tobytes()
        self.sendToDevice(CommandCode.DISPLAY.value + " " + str(x) + " " + str(y) + " " + str(w) + " " + str(h))
        self.sendBinaryToDevice(data)
        return True
    def resendImageData(self):
        """Replay all buffered images (used around a refresh), then clear."""
        for part in self.imageBuffer:
            image = part["image"]
            x = part["x"]
            y = part["y"]
            w, h = image.size
            data = image.convert("1").rotate(180).tobytes()
            self.sendToDevice(CommandCode.DISPLAY.value + " " + str(x) + " " + str(y) + " " + str(w) + " " + str(h))
            self.sendBinaryToDevice(data)
        self.imageBuffer = []
    def updateDisplay(self, fullRefresh=False, timeout=5):
        """Trigger a display refresh (full or partial) and resend image data.

        Waits for the device to acknowledge each refresh command with "ok";
        returns False on timeout, None otherwise.
        """
        with self.awaitingResponseLock:
            start = time.time()
            self.sendToDevice(CommandCode.REFRESH.value + " " + (RefreshTypeCode.FULL.value if fullRefresh else RefreshTypeCode.PARTIAL.value))
            line = self.readFromDevice()
            while line != "ok":
                if time.time() - start > timeout:
                    return False
                if line == None:
                    time.sleep(0.1)
                    line = self.readFromDevice()
                    continue
                line = self.readFromDevice()
            self.resendImageData()
            self.sendToDevice(CommandCode.REFRESH.value + " " + RefreshTypeCode.OFF.value)
            line = self.readFromDevice()
            while line != "ok":
                if time.time() - start > timeout:
                    return False
                if line == None:
                    time.sleep(0.1)
                    line = self.readFromDevice()
                    continue
                line = self.readFromDevice()
    def getAreaFor(self, function):
        """Return the (x, y, w, h) display area for a function slot.

        "title" is the bottom banner, 1 the top banner; slots 2-5 are the
        right column (bottom to top), 6-9 the left column.
        """
        if function == "title":
            return (0, self.dispH-self.bannerHeight, self.dispW, self.bannerHeight)
        elif function == 1:
            return (0, 0, self.dispW, self.bannerHeight)
        elif function <= 5:
            return (self.dispW//2, (5-function)*self.dispH//4+self.bannerHeight, self.dispW//2, self.dispH//4-2*self.bannerHeight)
        else:
            return (0, (9-function)*self.dispH//4+self.bannerHeight, self.dispW//2, self.dispH//4-2*self.bannerHeight)
    def sendImageFor(self, function, image):
        """Send *image* into a function slot, rescaling it if necessary."""
        x, y, w, h = self.getAreaFor(function)
        if (w, h) != image.size:
            if self.debug:
                print("Rescaling image from " + str(image.size) + " to " + str((w, h)) + ".")
            image = image.resize((w, h))
        self.sendImage(x, y, image)
    def sendTextFor(self, function, text=""):
        """Send a text label for a function slot (rendered on the device)."""
        self.sendToDevice("T "+str(function)+" "+str(text))
    def sendIconFor(self, function, icon, inverted=False, centered=True, marked=False, crossed=False):
        """Render an icon file into a function slot.

        inverted: white-on-black; centered: center horizontally (otherwise
        align to the key's side); marked: draw a chevron pointing at the
        key; crossed: draw an X over the icon (e.g. "disabled").
        """
        x, y, w, h = self.getAreaFor(function)
        img = Image.new("1", (w, h), color=(0 if inverted else 1))
        imgIcon = Image.open(icon).convert("RGB")
        if inverted:
            imgIcon = ImageOps.invert(imgIcon)
        wi, hi = imgIcon.size
        # Slots < 6 sit in the right column, so their chevron/alignment
        # mirror the left-column slots.
        if function < 6:
            pos = ((w-wi)//2 if centered else 0, (h - hi)//2)
        else:
            pos = ((w-wi)//2 if centered else (w - wi), (h - hi)//2)
        img.paste(imgIcon, pos)
        if marked:
            imgMarker = Image.open("icons/chevron-compact-right.png" if function < 6 else "icons/chevron-compact-left.png")
            wm, hm = imgMarker.size
            img.paste(imgMarker, (-16,(h - hm)//2) if function < 6 else (w-wm+16,(h - hm)//2), mask=ImageOps.invert(imgMarker.convert("RGB")).convert("1"))
        if crossed:
            d = ImageDraw.Draw(img)
            d.line([pos[0]+5, pos[1]+5, pos[0]+wi-5, pos[1]+hi-5], width=3)
            d.line([pos[0]+5, pos[1]+hi-5, pos[0]+wi-5, pos[1]+5], width=3)
        self.sendImage(x, y, img)
    def setLeds(self, leds):
        """Set all LEDs (list of 0xRRGGBB ints) and remember state for fading."""
        ledStr = ['{:06x}'.format(i) for i in leds]
        self.ledTime = time.time()
        self.ledState = leds
        self.sendLed(ledStr)
    def setKeyLedFor(self, led, color):
        """Set a single key LED to *color*."""
        self.sendToDevice(CommandCode.KEYLED.value + " " + str(led)+ " " + str(color))
    def fadeLeds(self):
        """Fade out the last setLeds() state; call periodically."""
        if self.ledState == None:
            return
        p = (3.5 - (time.time() - self.ledTime))/0.5 #Stay on for 3 seconds and then fade out over 0.5 seconds
        if p >= 1:
            return
        if p <= 0:
            self.ledState = None
            self.sendLed(["000000" for i in range(self.nLeds)])
            return
        # Scale each RGB channel by p without letting it bleed into the
        # neighbouring channel.
        dimmedLeds = [(int((i & 0xff0000) * p) & 0xff0000) | (int((i & 0xff00) * p) & 0xff00) | (int((i & 0xff) * p) & 0xff) for i in self.ledState]
        ledStr = ['{:06x}'.format(i) for i in dimmedLeds]
        self.sendLed(ledStr)
| inkkeys/device.py | 9,921 | Defines the height of top and bottom bannerThis object stores callback functions that react directly to a keypress reported via serialCurrent LED status, so we can animate them over timeLast time LEDs were setStay on for 3 seconds and then fade out over 0.5 seconds | 265 | en | 0.870624 |
# -*- coding: utf-8 -*-
# Copyright 2018 Joshua Bronson. All Rights Reserved.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Provides bidict duplication policies and the :class:`_OnDup` class."""
from collections import namedtuple
from ._marker import _Marker
_OnDup = namedtuple('_OnDup', 'key val kv')
class DuplicationPolicy(_Marker):
    """Base class for bidict's duplication policies.

    Policies are the singleton instances created below (RAISE, OVERWRITE,
    IGNORE); each describes how a bidict should react when a new item
    duplicates the key and/or value of an existing item.

    *See also* :ref:`basic-usage:Values Must Be Unique`
    """

    __slots__ = ()
#: Raise an exception when a duplication is encountered.
RAISE = DuplicationPolicy('DUP_POLICY.RAISE')
#: Overwrite an existing item when a duplication is encountered.
OVERWRITE = DuplicationPolicy('DUP_POLICY.OVERWRITE')
#: Keep the existing item and ignore the new item when a duplication is encountered.
IGNORE = DuplicationPolicy('DUP_POLICY.IGNORE')
| bidict/_dup.py | 1,001 | Base class for bidict's duplication policies.
*See also* :ref:`basic-usage:Values Must Be Unique`
Provides bidict duplication policies and the :class:`_OnDup` class.
-*- coding: utf-8 -*- Copyright 2018 Joshua Bronson. All Rights Reserved. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/.: Raise an exception when a duplication is encountered.: Overwrite an existing item when a duplication is encountered.: Keep the existing item and ignore the new item when a duplication is encountered. | 636 | en | 0.789912 |
#!/usr/bin/env python3
# Day 13: Transparent Origami
# https://adventofcode.com/2021/day/13
import sys
data = open("input.txt" if len(sys.argv) == 1 else sys.argv[1]).read().splitlines()
# Oversized n x n grid; dots are '#' on a '.' background and the folds
# below trim it down to its final size.
n = 2000
grid = [["." for _ in range(n)] for _ in range(n)]
# First input section: one "x,y" dot per line, terminated by a blank line.
for line in data:
    if not line:
        break
    x, y = map(int, line.split(","))
    grid[y][x] = "#"
# for y, row in enumerate(grid):
#     print("%3d" % y, "".join(str(x) for x in row))
part1 = False
# Second input section: fold instructions. Each fold mirrors the dots
# beyond the fold line onto the near side, then truncates at the line.
for line in data:
    if not line.startswith("fold"):
        continue
    if line.startswith("fold along x="):
        fold = int(line.split("=")[1])
        for y in range(len(grid)):
            for x in range(fold):
                if grid[y][fold + 1 + x] == "#":
                    grid[y][fold - 1 - x] = "#"
            del grid[y][fold:]
    elif line.startswith("fold along y="):
        fold = int(line.split("=")[1])
        for y in range(fold):
            for x in range(len(grid[0])):
                if grid[fold + 1 + y][x] == "#":
                    grid[fold - 1 - y][x] = "#"
        del grid[fold:]
    if not part1:
        # Part 1 answer: dot count after the very first fold only.
        print(sum(1 for row in grid for cell in row if cell == "#"))
        part1 = True
# Part 2 answer: the remaining dots render as capital letters.
print()
for row in grid:
    print("".join(str(x) for x in row))
| 2021/day13/day13.py | 1,264 | !/usr/bin/env python3 Day 13: Transparent Origami https://adventofcode.com/2021/day/13 for y, row in enumerate(grid): print("%3d" % y, "".join(str(x) for x in row)) | 168 | en | 0.353833 |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from typing import Callable, Dict, List, Sequence
from reggen.field import Field
from reggen.register import Register
from reggen.reg_block import RegBlock
from shared.otbn_reggen import load_registers
from .trace import Trace
class ExtRegChange(Trace):
    '''Record of a single write to an external register.'''
    def __init__(self, op: str, written: int, from_hw: bool, new_value: int):
        # op: operator glyph used when rendering the trace (e.g. '=')
        # written: the raw value that was written
        # from_hw: True when OTBN hardware (not the bus) performed the write
        # new_value: register value after field write semantics were applied
        self.op = op
        self.written = written
        self.from_hw = from_hw
        self.new_value = new_value
class TraceExtRegChange(Trace):
    '''Trace entry pairing a register name with an ExtRegChange.'''
    def __init__(self, name: str, erc: ExtRegChange):
        self.name = name
        self.erc = erc
    def trace(self) -> str:
        '''Human-readable trace line for this register change.'''
        # Only show the post-write value when field semantics (e.g. w1c)
        # made it differ from the value that was written.
        suff = (''
                if self.erc.new_value == self.erc.written
                else ' (now {:#010x})'.format(self.erc.new_value))
        return ("otbn.{} {} {:#010x}{}{}"
                .format(self.name,
                        self.erc.op,
                        self.erc.written,
                        ' (from SW)' if not self.erc.from_hw else '',
                        suff))
    def rtl_trace(self) -> str:
        '''Trace line in the format used for RTL comparison.'''
        return '! otbn.{}: {:#010x}'.format(self.name, self.erc.new_value)
class RGField:
    '''A wrapper around a field in a register as parsed by reggen.

    Keeps a committed value plus a staged next_value so register updates
    can be committed or aborted as a unit.
    '''
    def __init__(self,
                 name: str,
                 width: int,
                 lsb: int,
                 reset_value: int,
                 swaccess: str):
        # Only the software access modes OTBN actually uses are supported.
        assert swaccess in ('rw1c', 'rw', 'wo', 'r0w1c', 'ro')
        assert width > 0
        assert lsb >= 0
        self.name = name
        self.width = width
        self.lsb = lsb
        self.value = reset_value
        # Decode swaccess into the three behaviours that matter here.
        self.w1c = swaccess in ('rw1c', 'r0w1c')
        self.read_only = (swaccess == 'ro')
        self.read_zero = swaccess in ('wo', 'r0w1c')
        self.next_value = reset_value
    @staticmethod
    def from_field(field: Field) -> 'RGField':
        '''Build an RGField from a reggen Field object.'''
        fld_name = field.name
        fld_width = field.bits.width()
        fld_lsb = field.bits.lsb
        fld_reset = field.resval or 0
        fld_access = field.swaccess.key
        assert isinstance(fld_name, str)
        assert isinstance(fld_width, int)
        assert isinstance(fld_lsb, int)
        assert isinstance(fld_reset, int)
        assert isinstance(fld_access, str)
        return RGField(fld_name, fld_width, fld_lsb, fld_reset, fld_access)
    def _next_sw_read(self) -> int:
        # Software reads of wo/r0w1c fields always see zero.
        return 0 if self.read_zero else self.next_value
    def write(self, value: int, from_hw: bool) -> int:
        '''Stage the effects of writing a value (see RGReg.write).

        Returns the value software would read back afterwards.
        '''
        assert value >= 0
        in_range = value & ((1 << self.width) - 1)
        if from_hw:
            # Hardware writes bypass software access restrictions.
            self.next_value = in_range
        elif self.read_only:
            pass
        elif self.w1c:
            self.next_value &= ~in_range
        else:
            self.next_value = in_range
        return self._next_sw_read()
    def set_bits(self, value: int) -> int:
        '''Like write, but |=.'''
        self.next_value |= value & ((1 << self.width) - 1)
        return self._next_sw_read()
    def clear_bits(self, value: int) -> int:
        '''Like write, but &= ~.'''
        self.next_value &= ~value
        return self._next_sw_read()
    def read(self, from_hw: bool) -> int:
        '''Return the committed value as seen by hardware or software.'''
        if self.read_zero and not from_hw:
            return 0
        return self.value
    def commit(self) -> None:
        '''Make the staged value the visible one.'''
        self.value = self.next_value
    def abort(self) -> None:
        '''Drop any staged (uncommitted) write.'''
        self.next_value = self.value
class RGReg:
    '''A wrapper around a register as parsed by reggen

    Writes are staged into the fields' next_value and only become visible
    on commit().  For double_flopped registers, trace entries are also
    delayed by one commit, modelling an extra flop between the core and
    the bus-visible register.
    '''
    def __init__(self, fields: List[RGField], double_flopped: bool):
        self.fields = fields
        self.double_flopped = double_flopped
        self._trace = []  # type: List[ExtRegChange]
        self._next_trace = []  # type: List[ExtRegChange]
    @staticmethod
    def from_register(reg: Register, double_flopped: bool) -> 'RGReg':
        '''Build an RGReg from a reggen Register object.'''
        return RGReg([RGField.from_field(fd) for fd in reg.fields],
                     double_flopped)
    def _apply_fields(self,
                      func: Callable[[RGField, int], int],
                      value: int) -> int:
        # Apply func to each field's slice of value and reassemble the
        # per-field results into a full register value.
        new_val = 0
        for field in self.fields:
            field_new_val = func(field, value >> field.lsb)
            new_val |= field_new_val << field.lsb
        return new_val
    def write(self, value: int, from_hw: bool) -> None:
        '''Stage the effects of writing a value.

        If from_hw is true, this write is from OTBN hardware (rather than the
        bus).
        '''
        assert value >= 0
        now = self._apply_fields(lambda fld, fv: fld.write(fv, from_hw), value)
        trace = self._next_trace if self.double_flopped else self._trace
        trace.append(ExtRegChange('=', value, from_hw, now))
    def set_bits(self, value: int) -> None:
        # NOTE(review): the trace entry reuses op '=' and from_hw=False even
        # though this is an OR-style update — confirm this is intentional.
        assert value >= 0
        now = self._apply_fields(lambda fld, fv: fld.set_bits(fv), value)
        trace = self._next_trace if self.double_flopped else self._trace
        trace.append(ExtRegChange('=', value, False, now))
    def read(self, from_hw: bool) -> int:
        '''Return the committed register value as seen by hw or sw.'''
        value = 0
        for field in self.fields:
            value |= field.read(from_hw) << field.lsb
        return value
    def commit(self) -> None:
        '''Commit staged field values; advance the delayed trace queue.'''
        for field in self.fields:
            field.commit()
        self._trace = self._next_trace
        self._next_trace = []
    def abort(self) -> None:
        '''Drop all staged field values and pending trace entries.'''
        for field in self.fields:
            field.abort()
        self._trace = []
        self._next_trace = []
    def changes(self) -> List[ExtRegChange]:
        '''Return the trace entries that became visible this cycle.'''
        return self._trace
def make_flag_reg(name: str, double_flopped: bool) -> RGReg:
    '''Construct a one-field, 32-bit, read-only register called name.'''
    flag_field = RGField(name, 32, 0, 0, 'ro')
    return RGReg([flag_field], double_flopped)
class OTBNExtRegs:
    '''A class representing OTBN's externally visible CSRs

    This models an extra flop between the core and some of the externally
    visible registers by ensuring that a write only becomes visible after an
    intervening commit.
    '''
    double_flopped_regs = ['STATUS']

    def __init__(self) -> None:
        _, reg_block = load_registers()

        self.regs = {}  # type: Dict[str, RGReg]
        # Commit countdown. Set to 2 on a write so that changes to
        # double-flopped registers survive until their second commit.
        self._dirty = 0

        assert isinstance(reg_block, RegBlock)
        for entry in reg_block.flat_regs:
            assert isinstance(entry.name, str)

            # reggen's validation should have checked that we have no
            # duplicates.
            assert entry.name not in self.regs

            double_flopped = entry.name in self.double_flopped_regs
            self.regs[entry.name] = RGReg.from_register(entry, double_flopped)

        # Add a fake "STOP_PC" register.
        #
        # TODO: We might well add something like this to the actual design in
        # the future (see issue #4327) but, for now, it's just used in
        # simulation to help track whether RIG-generated binaries finished
        # where they expected to finish.
        self.regs['STOP_PC'] = make_flag_reg('STOP_PC', True)

        # Add a fake "RND_REQ" register to allow us to tell otbn_core_model to
        # generate an EDN request.
        self.regs['RND_REQ'] = make_flag_reg('RND_REQ', True)

        # Add a fake "WIPE_START" register. We set this for a single cycle when
        # starting secure wipe and the C++ model can use this to trigger a dump
        # of internal state before it gets zeroed out.
        self.regs['WIPE_START'] = make_flag_reg('WIPE_START', False)

    def _get_reg(self, reg_name: str) -> RGReg:
        '''Look up a register by name; raise ValueError if unknown.'''
        reg = self.regs.get(reg_name)
        if reg is None:
            raise ValueError('Unknown register name: {!r}.'.format(reg_name))
        return reg

    def write(self, reg_name: str, value: int, from_hw: bool) -> None:
        '''Stage the effects of writing a value to a register'''
        assert value >= 0
        self._get_reg(reg_name).write(value, from_hw)
        self._dirty = 2

    def set_bits(self, reg_name: str, value: int) -> None:
        '''Set some bits of a register (HW access only)'''
        assert value >= 0
        self._get_reg(reg_name).set_bits(value)
        self._dirty = 2

    def increment_insn_cnt(self) -> None:
        '''Increment the INSN_CNT register, saturating at 2**32 - 1.'''
        reg = self._get_reg('INSN_CNT')
        assert len(reg.fields) == 1
        fld = reg.fields[0]
        reg.write(min(fld.value + 1, (1 << 32) - 1), True)
        self._dirty = 2

    def read(self, reg_name: str, from_hw: bool) -> int:
        '''Read a register's current value.'''
        # Fix: use _get_reg rather than duplicating its lookup and error
        # handling inline (same behaviour, no copy to drift out of sync).
        return self._get_reg(reg_name).read(from_hw)

    def changes(self) -> Sequence[Trace]:
        '''Return trace entries for changes that just became visible.'''
        if self._dirty == 0:
            return []

        trace = []
        for name, reg in self.regs.items():
            trace += [TraceExtRegChange(name, erc) for erc in reg.changes()]
        return trace

    def commit(self) -> None:
        '''Commit pending changes and decrement the dirty countdown.'''
        # We know that we'll only have any pending changes if self._dirty is
        # positive, so needn't bother calling commit on each register if not.
        if self._dirty > 0:
            for reg in self.regs.values():
                reg.commit()
            self._dirty = max(0, self._dirty - 1)

    def abort(self) -> None:
        '''Drop all staged (uncommitted) changes.'''
        for reg in self.regs.values():
            reg.abort()
        self._dirty = 0
| hw/ip/otbn/dv/otbnsim/sim/ext_regs.py | 9,492 | A class representing OTBN's externally visible CSRs
This models an extra flop between the core and some of the externally
visible registers by ensuring that a write only becomes visible after an
intervening commit.
A wrapper around a field in a register as parsed by reggen
A wrapper around a register as parsed by reggen
Like write, but &= ~.
Increment the INSN_CNT register
Like write, but |=.
Set some bits of a register (HW access only)
Stage the effects of writing a value (see RGReg.write)
Stage the effects of writing a value.
If from_hw is true, this write is from OTBN hardware (rather than the
bus).
Stage the effects of writing a value to a register
Copyright lowRISC contributors. Licensed under the Apache License, Version 2.0, see LICENSE for details. SPDX-License-Identifier: Apache-2.0 We only support some values of swaccess (the ones we need) swaccess type: List[ExtRegChange] type: List[ExtRegChange] type: Dict[str, RGReg] reggen's validation should have checked that we have no duplicates. Add a fake "STOP_PC" register. TODO: We might well add something like this to the actual design in the future (see issue 4327) but, for now, it's just used in simulation to help track whether RIG-generated binaries finished where they expected to finish. Add a fake "RND_REQ" register to allow us to tell otbn_core_model to generate an EDN request. Add a fake "WIPE_START" register. We set this for a single cycle when starting secure wipe and the C++ model can use this to trigger a dump of internal state before it gets zeroed out. We know that we'll only have any pending changes if self._dirty is positive, so needn't bother calling commit on each register if not. | 1,683 | en | 0.878336 |
import json
import os
import sys
import numpy as np
import random
import math
import time
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
from env import R2RBatch
from utils import padding_idx, add_idx, Tokenizer
import utils
import model
import param
from param import args
from collections import defaultdict
class BaseAgent(object):
    ''' Base class for an R2R agent to generate and save trajectories. '''

    def __init__(self, env, results_path):
        self.env = env
        self.results_path = results_path
        random.seed(1)          # Fixed seed for reproducibility
        self.results = {}       # instr_id -> trajectory
        self.losses = []        # For learning agents

    def get_results(self):
        '''Return accumulated results as [{'instr_id': ..., 'trajectory': ...}].'''
        return [{'instr_id': k, 'trajectory': v} for k, v in self.results.items()]

    def write_results(self):
        '''Dump the accumulated results to self.results_path as JSON.'''
        # Fix: reuse get_results() instead of duplicating the comprehension,
        # so the two methods cannot drift apart.
        with open(self.results_path, 'w') as f:
            json.dump(self.get_results(), f)

    def rollout(self, **kwargs):
        ''' Return a list of dicts containing instr_id:'xx', path:[(viewpointId, heading_rad, elevation_rad)] '''
        # NOTE: kwarg collector renamed from **args, which shadowed the
        # module-level `args` imported from param.
        raise NotImplementedError

    @staticmethod
    def get_agent(name):
        '''Look up an agent class by short name, e.g. "Seq2Seq" -> Seq2SeqAgent.'''
        return globals()[name + "Agent"]

    def test(self, iters=None, **kwargs):
        '''Evaluate: fill self.results with one trajectory per instruction.

        If iters is given, the env batch is shuffled and exactly iters
        batches are rolled out; otherwise we loop until the env wraps around
        (detected by seeing a repeated instr_id).
        '''
        self.env.reset_epoch(shuffle=(iters is not None))   # If iters is not none, shuffle the env batch
        self.losses = []
        self.results = {}
        # We rely on env showing the entire batch before repeating anything
        looped = False
        self.loss = 0
        if iters is not None:
            # For each time, it will run the first 'iters' iterations. (It was shuffled before)
            for i in range(iters):
                for traj in self.rollout(**kwargs):
                    self.loss = 0
                    self.results[traj['instr_id']] = traj['path']
        else:   # Do a full round
            while True:
                for traj in self.rollout(**kwargs):
                    if traj['instr_id'] in self.results:
                        looped = True
                    else:
                        self.loss = 0
                        self.results[traj['instr_id']] = traj['path']
                if looped:
                    break
class Seq2SeqAgent(BaseAgent):
    ''' An agent based on an LSTM seq2seq model with attention. '''

    # For now, the agent can't pick which forward move to make - just the one in the middle
    # Low-level simulator actions as 3-tuples passed to makeAction; the last
    # two entries are heading/elevation deltas (see make_equiv_action).
    env_actions = {
      'left': (0,-1, 0), # left
      'right': (0, 1, 0), # right
      'up': (0, 0, 1), # up
      'down': (0, 0,-1), # down
      'forward': (1, 0, 0), # forward
      '<end>': (0, 0, 0), # <end>
      '<start>': (0, 0, 0), # <start>
      '<ignore>': (0, 0, 0) # <ignore>
    }
def __init__(self, env, results_path, tok, episode_len=20):
super(Seq2SeqAgent, self).__init__(env, results_path)
self.tok = tok
self.episode_len = episode_len
self.feature_size = self.env.feature_size
# Models
enc_hidden_size = args.rnn_dim//2 if args.bidir else args.rnn_dim
self.encoder = model.EncoderLSTM(tok.vocab_size(), args.wemb, enc_hidden_size, padding_idx,
args.dropout, bidirectional=args.bidir).cuda()
self.decoder = model.AttnDecoderLSTM(args.aemb, args.rnn_dim, args.dropout, feature_size=self.feature_size + args.angle_feat_size).cuda()
self.critic = model.Critic().cuda()
self.models = (self.encoder, self.decoder, self.critic)
# Optimizers
self.encoder_optimizer = args.optimizer(self.encoder.parameters(), lr=args.lr)
self.decoder_optimizer = args.optimizer(self.decoder.parameters(), lr=args.lr)
self.critic_optimizer = args.optimizer(self.critic.parameters(), lr=args.lr)
self.optimizers = (self.encoder_optimizer, self.decoder_optimizer, self.critic_optimizer)
# Evaluations
self.losses = []
self.criterion = nn.CrossEntropyLoss(ignore_index=args.ignoreid, size_average=False)
# Logs
sys.stdout.flush()
self.logs = defaultdict(list)
def _sort_batch(self, obs):
''' Extract instructions from a list of observations and sort by descending
sequence length (to enable PyTorch packing). '''
seq_tensor = np.array([ob['instr_encoding'] for ob in obs])
seq_lengths = np.argmax(seq_tensor == padding_idx, axis=1)
seq_lengths[seq_lengths == 0] = seq_tensor.shape[1] # Full length
seq_tensor = torch.from_numpy(seq_tensor)
seq_lengths = torch.from_numpy(seq_lengths)
# Sort sequences by lengths
seq_lengths, perm_idx = seq_lengths.sort(0, True) # True -> descending
sorted_tensor = seq_tensor[perm_idx]
mask = (sorted_tensor == padding_idx)[:,:seq_lengths[0]] # seq_lengths[0] is the Maximum length
return Variable(sorted_tensor, requires_grad=False).long().cuda(), \
mask.byte().cuda(), \
list(seq_lengths), list(perm_idx)
def _feature_variable(self, obs):
''' Extract precomputed features into variable. '''
features = np.empty((len(obs), args.views, self.feature_size + args.angle_feat_size), dtype=np.float32)
for i, ob in enumerate(obs):
features[i, :, :] = ob['feature'] # Image feat
return Variable(torch.from_numpy(features), requires_grad=False).cuda()
def _candidate_variable(self, obs):
candidate_leng = [len(ob['candidate']) + 1 for ob in obs] # +1 is for the end
candidate_feat = np.zeros((len(obs), max(candidate_leng), self.feature_size + args.angle_feat_size), dtype=np.float32)
# Note: The candidate_feat at len(ob['candidate']) is the feature for the END
# which is zero in my implementation
for i, ob in enumerate(obs):
for j, c in enumerate(ob['candidate']):
candidate_feat[i, j, :] = c['feature'] # Image feat
return torch.from_numpy(candidate_feat).cuda(), candidate_leng
def get_input_feat(self, obs):
input_a_t = np.zeros((len(obs), args.angle_feat_size), np.float32)
for i, ob in enumerate(obs):
input_a_t[i] = utils.angle_feature(ob['heading'], ob['elevation'])
input_a_t = torch.from_numpy(input_a_t).cuda()
f_t = self._feature_variable(obs) # Image features from obs
candidate_feat, candidate_leng = self._candidate_variable(obs)
return input_a_t, f_t, candidate_feat, candidate_leng
def _teacher_action(self, obs, ended):
"""
Extract teacher actions into variable.
:param obs: The observation.
:param ended: Whether the action seq is ended
:return:
"""
a = np.zeros(len(obs), dtype=np.int64)
for i, ob in enumerate(obs):
if ended[i]: # Just ignore this index
a[i] = args.ignoreid
else:
for k, candidate in enumerate(ob['candidate']):
if candidate['viewpointId'] == ob['teacher']: # Next view point
a[i] = k
break
else: # Stop here
assert ob['teacher'] == ob['viewpoint'] # The teacher action should be "STAY HERE"
a[i] = len(ob['candidate'])
return torch.from_numpy(a).cuda()
    def make_equiv_action(self, a_t, perm_obs, perm_idx=None, traj=None):
        """
        Interface between Panoramic view and Egocentric view
        It will convert the action panoramic view action a_t to equivalent egocentric view actions for the simulator

        :param a_t: per-batch candidate indices (-1 means <stop>)
        :param perm_obs: observations, permuted to match the model order
        :param perm_idx: mapping from model order back to simulator index
        :param traj: if given, each low-level step is appended to traj[i]['path']
        """
        def take_action(i, idx, name):
            # Apply one low-level simulator action and (optionally) record
            # the resulting location in the trajectory.
            if type(name) is int:       # Go to the next view
                self.env.env.sims[idx].makeAction(name, 0, 0)
            else:                       # Adjust
                self.env.env.sims[idx].makeAction(*self.env_actions[name])
            state = self.env.env.sims[idx].getState()
            if traj is not None:
                traj[i]['path'].append((state.location.viewpointId, state.heading, state.elevation))

        if perm_idx is None:
            perm_idx = range(len(perm_obs))
        for i, idx in enumerate(perm_idx):
            action = a_t[i]
            if action != -1:            # -1 is the <stop> action
                select_candidate = perm_obs[i]['candidate'][action]
                src_point = perm_obs[i]['viewIndex']
                trg_point = select_candidate['pointId']
                # 12 views per elevation level; level = pointId // 12
                src_level = (src_point ) // 12   # The point idx started from 0
                trg_level = (trg_point ) // 12
                while src_level < trg_level:    # Tune up
                    take_action(i, idx, 'up')
                    src_level += 1
                while src_level > trg_level:    # Tune down
                    take_action(i, idx, 'down')
                    src_level -= 1
                while self.env.env.sims[idx].getState().viewIndex != trg_point:    # Turn right until the target
                    take_action(i, idx, 'right')
                assert select_candidate['viewpointId'] == \
                       self.env.env.sims[idx].getState().navigableLocations[select_candidate['idx']].viewpointId
                take_action(i, idx, select_candidate['idx'])
    def rollout(self, train_ml=None, train_rl=True, reset=True, speaker=None):
        """Roll out one batch of episodes; accumulate IL and/or RL losses.

        :param train_ml:    The weight to train with maximum likelihood
        :param train_rl:    whether use RL in training
        :param reset:       Reset the environment
        :param speaker:     Speaker used in back translation.
                            If the speaker is not None, use back translation.
                            O.w., normal training
        :return: list of {'instr_id', 'path'} trajectory dicts
        """
        if self.feedback == 'teacher' or self.feedback == 'argmax':
            train_rl = False

        if reset:
            # Reset env
            obs = np.array(self.env.reset())
        else:
            obs = np.array(self.env._get_obs())

        batch_size = len(obs)

        if speaker is not None:         # Trigger the self_train mode!
            # Sample one feature-dropout mask and share it between speaker
            # and listener so both see the same (dropped) environment.
            noise = self.decoder.drop_env(torch.ones(self.feature_size).cuda())
            batch = self.env.batch.copy()
            speaker.env = self.env
            insts = speaker.infer_batch(featdropmask=noise)     # Use the same drop mask in speaker

            # Create fake environments with the generated instruction
            boss = np.ones((batch_size, 1), np.int64) * self.tok.word_to_index['<BOS>']  # First word is <BOS>
            insts = np.concatenate((boss, insts), 1)
            for i, (datum, inst) in enumerate(zip(batch, insts)):
                if inst[-1] != self.tok.word_to_index['<PAD>']: # The inst is not ended!
                    inst[-1] = self.tok.word_to_index['<EOS>']
                datum.pop('instructions')
                datum.pop('instr_encoding')
                datum['instructions'] = self.tok.decode_sentence(inst)
                datum['instr_encoding'] = inst
            obs = np.array(self.env.reset(batch))

        # Reorder the language input for the encoder (do not ruin the original code)
        seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
        perm_obs = obs[perm_idx]

        ctx, h_t, c_t = self.encoder(seq, seq_lengths)
        ctx_mask = seq_mask

        # Init the reward shaping
        last_dist = np.zeros(batch_size, np.float32)
        for i, ob in enumerate(perm_obs):   # The init distance from the view point to the target
            last_dist[i] = ob['distance']

        # Record starting point
        traj = [{
            'instr_id': ob['instr_id'],
            'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]
        } for ob in perm_obs]

        # For test result submission
        visited = [set() for _ in perm_obs]

        # Initialization the tracking state
        ended = np.array([False] * batch_size)  # Indices match permuation of the model, not env

        # Init the logs
        rewards = []
        hidden_states = []
        policy_log_probs = []
        masks = []
        entropys = []
        ml_loss = 0.

        h1 = h_t
        for t in range(self.episode_len):
            input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(perm_obs)
            if speaker is not None:       # Apply the env drop mask to the feat
                candidate_feat[..., :-args.angle_feat_size] *= noise
                f_t[..., :-args.angle_feat_size] *= noise

            h_t, c_t, logit, h1 = self.decoder(input_a_t, f_t, candidate_feat,
                                               h_t, h1, c_t,
                                               ctx, ctx_mask,
                                               already_dropfeat=(speaker is not None))

            hidden_states.append(h_t)

            # Mask outputs where agent can't move forward
            # Here the logit is [b, max_candidate]
            candidate_mask = utils.length2mask(candidate_leng)
            if args.submit:     # Avoding cyclic path
                for ob_id, ob in enumerate(perm_obs):
                    visited[ob_id].add(ob['viewpoint'])
                    for c_id, c in enumerate(ob['candidate']):
                        if c['viewpointId'] in visited[ob_id]:
                            candidate_mask[ob_id][c_id] = 1
            logit.masked_fill_(candidate_mask, -float('inf'))

            # Supervised training
            target = self._teacher_action(perm_obs, ended)
            ml_loss += self.criterion(logit, target)

            # Determine next model inputs
            if self.feedback == 'teacher':
                a_t = target                # teacher forcing
            elif self.feedback == 'argmax':
                _, a_t = logit.max(1)        # student forcing - argmax
                a_t = a_t.detach()
                log_probs = F.log_softmax(logit, 1)                              # Calculate the log_prob here
                policy_log_probs.append(log_probs.gather(1, a_t.unsqueeze(1)))   # Gather the log_prob for each batch
            elif self.feedback == 'sample':
                probs = F.softmax(logit, 1)    # sampling an action from model
                c = torch.distributions.Categorical(probs)
                self.logs['entropy'].append(c.entropy().sum().item())      # For log
                entropys.append(c.entropy())                               # For optimization
                a_t = c.sample().detach()
                policy_log_probs.append(c.log_prob(a_t))
            else:
                print(self.feedback)
                sys.exit('Invalid feedback option')

            # Prepare environment action
            # NOTE: Env action is in the perm_obs space
            cpu_a_t = a_t.cpu().numpy()
            for i, next_id in enumerate(cpu_a_t):
                if next_id == (candidate_leng[i]-1) or next_id == args.ignoreid or ended[i]:    # The last action is <end>
                    cpu_a_t[i] = -1             # Change the <end> and ignore action to -1

            # Make action and get the new state
            self.make_equiv_action(cpu_a_t, perm_obs, perm_idx, traj)
            obs = np.array(self.env._get_obs())
            perm_obs = obs[perm_idx]                    # Perm the obs for the resu

            # Calculate the mask and reward
            dist = np.zeros(batch_size, np.float32)
            reward = np.zeros(batch_size, np.float32)
            mask = np.ones(batch_size, np.float32)
            for i, ob in enumerate(perm_obs):
                dist[i] = ob['distance']
                if ended[i]:            # If the action is already finished BEFORE THIS ACTION.
                    reward[i] = 0.
                    mask[i] = 0.
                else:       # Calculate the reward
                    action_idx = cpu_a_t[i]
                    if action_idx == -1:        # If the action now is end
                        if dist[i] < 3:         # Correct: stopped within 3m of the goal
                            reward[i] = 2.
                        else:                   # Incorrect
                            reward[i] = -2.
                    else:                       # The action is not end
                        reward[i] = - (dist[i] - last_dist[i])      # Change of distance
                        if reward[i] > 0:                           # Quantification
                            reward[i] = 1
                        elif reward[i] < 0:
                            reward[i] = -1
                        else:
                            raise NameError("The action doesn't change the move")
            rewards.append(reward)
            masks.append(mask)
            last_dist[:] = dist

            # Update the finished actions
            # -1 means ended or ignored (already ended)
            ended[:] = np.logical_or(ended, (cpu_a_t == -1))

            # Early exit if all ended
            if ended.all():
                break

        if train_rl:
            # Last action in A2C: bootstrap the value of the final state
            input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(perm_obs)
            if speaker is not None:
                candidate_feat[..., :-args.angle_feat_size] *= noise
                f_t[..., :-args.angle_feat_size] *= noise
            last_h_, _, _, _ = self.decoder(input_a_t, f_t, candidate_feat,
                                            h_t, h1, c_t,
                                            ctx, ctx_mask,
                                            speaker is not None)
            rl_loss = 0.

            # NOW, A2C!!!
            # Calculate the final discounted reward
            last_value__ = self.critic(last_h_).detach()    # The value esti of the last state, remove the grad for safety
            discount_reward = np.zeros(batch_size, np.float32)  # The inital reward is zero
            for i in range(batch_size):
                if not ended[i]:        # If the action is not ended, use the value function as the last reward
                    discount_reward[i] = last_value__[i]

            length = len(rewards)
            total = 0
            # Walk backwards through time accumulating discounted returns
            for t in range(length-1, -1, -1):
                discount_reward = discount_reward * args.gamma + rewards[t]   # If it ended, the reward will be 0
                mask_ = Variable(torch.from_numpy(masks[t]), requires_grad=False).cuda()
                clip_reward = discount_reward.copy()
                r_ = Variable(torch.from_numpy(clip_reward), requires_grad=False).cuda()
                v_ = self.critic(hidden_states[t])
                a_ = (r_ - v_).detach()

                # r_: The higher, the better. -ln(p(action)) * (discount_reward - value)
                rl_loss += (-policy_log_probs[t] * a_ * mask_).sum()
                rl_loss += (((r_ - v_) ** 2) * mask_).sum() * 0.5     # 1/2 L2 loss
                if self.feedback == 'sample':
                    rl_loss += (- 0.01 * entropys[t] * mask_).sum()   # Entropy bonus
                self.logs['critic_loss'].append((((r_ - v_) ** 2) * mask_).sum().item())

                total = total + np.sum(masks[t])
            self.logs['total'].append(total)

            # Normalize the loss function
            if args.normalize_loss == 'total':
                rl_loss /= total
            elif args.normalize_loss == 'batch':
                rl_loss /= batch_size
            else:
                assert args.normalize_loss == 'none'

            self.loss += rl_loss

        if train_ml is not None:
            self.loss += ml_loss * train_ml / batch_size

        if type(self.loss) is int:  # For safety, it will be activated if no losses are added
            self.losses.append(0.)
        else:
            self.losses.append(self.loss.item() / self.episode_len)    # This argument is useless.

        return traj
    def _dijkstra(self):
        """
        The dijkstra algorithm.
        Was called beam search to be consistent with existing work.
        But it actually finds the Exact K paths with smallest listener log_prob.
        :return:
        [{
            "scan": XXX
            "instr_id":XXX,
            'instr_encoding": XXX
            'dijk_path': [v1, v2, ..., vn]      (The path used for find all the candidates)
            "paths": {
                    "trajectory": [viewpoint_id1, viewpoint_id2, ..., ],
                    "action": [act_1, act_2, ..., ],
                    "listener_scores": [log_prob_act1, log_prob_act2, ..., ],
                    "visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)
            }
        }]
        """
        # A search state is "viewpoint_action": the viewpoint we acted from
        # plus the candidate index taken (-1 = stop, -95 = start sentinel).
        def make_state_id(viewpoint, action):     # Make state id
            return "%s_%s" % (viewpoint, str(action))
        def decompose_state_id(state_id):     # Parse a state id back out
            viewpoint, action = state_id.split("_")
            action = int(action)
            return viewpoint, action

        # Get first obs
        obs = self.env._get_obs()

        # Prepare the state id
        batch_size = len(obs)
        results = [{"scan": ob['scan'],
                    "instr_id": ob['instr_id'],
                    "instr_encoding": ob["instr_encoding"],
                    "dijk_path": [ob['viewpoint']],
                    "paths": []} for ob in obs]

        # Encoder
        seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
        recover_idx = np.zeros_like(perm_idx)
        for i, idx in enumerate(perm_idx):
            recover_idx[idx] = i
        ctx, h_t, c_t = self.encoder(seq, seq_lengths)
        ctx, h_t, c_t, ctx_mask = ctx[recover_idx], h_t[recover_idx], c_t[recover_idx], seq_mask[recover_idx]    # Recover the original order

        # Dijk Graph States:
        id2state = [
            {make_state_id(ob['viewpoint'], -95):
                 {"next_viewpoint": ob['viewpoint'],
                  "running_state": (h_t[i], h_t[i], c_t[i]),
                  "location": (ob['viewpoint'], ob['heading'], ob['elevation']),
                  "feature": None,
                  "from_state_id": None,
                  "score": 0,
                  "scores": [],
                  "actions": [],
                  }
             }
            for i, ob in enumerate(obs)
        ]    # -95 is the start point

        visited = [set() for _ in range(batch_size)]
        finished = [set() for _ in range(batch_size)]
        graphs = [utils.FloydGraph() for _ in range(batch_size)]        # For the navigation path
        ended = np.array([False] * batch_size)

        # Dijk Algorithm (300 caps the number of expansion iterations)
        for _ in range(300):
            # Expand the best unvisited state for each batch element.
            # NOTE(review): comments elsewhere say "smallest score", but this
            # takes the MAX score, i.e. the largest (least negative) log-prob.
            # If the batch is ended, use an arbitrary item from the dict
            # (It always exists) as a placeholder.
            smallest_idXstate = [
                max(((state_id, state) for state_id, state in id2state[i].items() if state_id not in visited[i]),
                    key=lambda item: item[1]['score'])
                if not ended[i]
                else
                next(iter(id2state[i].items()))
                for i in range(batch_size)
            ]

            # Set the visited and the end seqs
            for i, (state_id, state) in enumerate(smallest_idXstate):
                assert (ended[i]) or (state_id not in visited[i])
                if not ended[i]:
                    viewpoint, action = decompose_state_id(state_id)
                    visited[i].add(state_id)
                    if action == -1:
                        finished[i].add(state_id)
                        if len(finished[i]) >= args.candidates:     # Get enough candidates
                            ended[i] = True

            # Gather the running state in the batch
            h_ts, h1s, c_ts = zip(*(idXstate[1]['running_state'] for idXstate in smallest_idXstate))
            h_t, h1, c_t = torch.stack(h_ts), torch.stack(h1s), torch.stack(c_ts)

            # Recover the env and gather the feature
            for i, (state_id, state) in enumerate(smallest_idXstate):
                next_viewpoint = state['next_viewpoint']
                scan = results[i]['scan']
                from_viewpoint, heading, elevation = state['location']
                self.env.env.sims[i].newEpisode(scan, next_viewpoint, heading, elevation) # Heading, elevation is not used in panoramic
            obs = self.env._get_obs()

            # Update the floyd graph
            # Only used to shorten the navigation length
            # Will not effect the result
            for i, ob in enumerate(obs):
                viewpoint = ob['viewpoint']
                if not graphs[i].visited(viewpoint):    # Update the Graph
                    for c in ob['candidate']:
                        next_viewpoint = c['viewpointId']
                        dis = self.env.distances[ob['scan']][viewpoint][next_viewpoint]
                        graphs[i].add_edge(viewpoint, next_viewpoint, dis)
                    graphs[i].update(viewpoint)
                results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][-1], viewpoint))

            input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(obs)

            # Run one decoding step
            h_t, c_t, alpha, logit, h1 = self.decoder(input_a_t, f_t, candidate_feat,
                                                      h_t, h1, c_t,
                                                      ctx, ctx_mask,
                                                      False)

            # Update the dijk graph's states with the newly visited viewpoint
            candidate_mask = utils.length2mask(candidate_leng)
            logit.masked_fill_(candidate_mask, -float('inf'))
            log_probs = F.log_softmax(logit, 1)                              # Calculate the log_prob here
            _, max_act = log_probs.max(1)

            for i, ob in enumerate(obs):
                current_viewpoint = ob['viewpoint']
                candidate = ob['candidate']
                current_state_id, current_state = smallest_idXstate[i]
                old_viewpoint, from_action = decompose_state_id(current_state_id)
                assert ob['viewpoint'] == current_state['next_viewpoint']
                if from_action == -1 or ended[i]:       # If the action is <end> or the batch is ended, skip it
                    continue

                for j in range(len(ob['candidate']) + 1):               # +1 to include the <end> action
                    # score + log_prob[action]
                    modified_log_prob = log_probs[i][j].detach().cpu().item()
                    new_score = current_state['score'] + modified_log_prob
                    if j < len(candidate):                        # A normal action
                        next_id = make_state_id(current_viewpoint, j)
                        next_viewpoint = candidate[j]['viewpointId']
                        trg_point = candidate[j]['pointId']
                        heading = (trg_point % 12) * math.pi / 6
                        elevation = (trg_point // 12 - 1) * math.pi / 6
                        location = (next_viewpoint, heading, elevation)
                    else:                                         # The end action
                        next_id = make_state_id(current_viewpoint, -1)    # action is -1
                        next_viewpoint = current_viewpoint                # next viewpoint is still here
                        location = (current_viewpoint, ob['heading'], ob['elevation'])

                    # Keep only the best-scoring way of reaching each state
                    if next_id not in id2state[i] or new_score > id2state[i][next_id]['score']:
                        id2state[i][next_id] = {
                            "next_viewpoint": next_viewpoint,
                            "location": location,
                            "running_state": (h_t[i], h1[i], c_t[i]),
                            "from_state_id": current_state_id,
                            "feature": (f_t[i].detach().cpu(), candidate_feat[i][j].detach().cpu()),
                            "score": new_score,
                            "scores": current_state['scores'] + [modified_log_prob],
                            "actions": current_state['actions'] + [len(candidate)+1],
                        }

            # The active state is zero after the updating, then setting the ended to True
            for i in range(batch_size):
                if len(visited[i]) == len(id2state[i]):     # It's the last active state
                    ended[i] = True

            # End?
            if ended.all():
                break

        # Move back to the start point
        for i in range(batch_size):
            results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][-1], results[i]['dijk_path'][0]))
        """
            "paths": {
                "trajectory": [viewpoint_id1, viewpoint_id2, ..., ],
                "action": [act_1, act_2, ..., ],
                "listener_scores": [log_prob_act1, log_prob_act2, ..., ],
                "visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)
            }
        """
        # Gather the Path: walk each finished state back to the -95 start
        # sentinel via from_state_id, then reverse into forward order.
        for i, result in enumerate(results):
            assert len(finished[i]) <= args.candidates
            for state_id in finished[i]:
                path_info = {
                    "trajectory": [],
                    "action": [],
                    "listener_scores": id2state[i][state_id]['scores'],
                    "listener_actions": id2state[i][state_id]['actions'],
                    "visual_feature": []
                }
                viewpoint, action = decompose_state_id(state_id)
                while action != -95:
                    state = id2state[i][state_id]
                    path_info['trajectory'].append(state['location'])
                    path_info['action'].append(action)
                    path_info['visual_feature'].append(state['feature'])
                    state_id = id2state[i][state_id]['from_state_id']
                    viewpoint, action = decompose_state_id(state_id)
                state = id2state[i][state_id]
                path_info['trajectory'].append(state['location'])
                for need_reverse_key in ["trajectory", "action", "visual_feature"]:
                    path_info[need_reverse_key] = path_info[need_reverse_key][::-1]
                result['paths'].append(path_info)

        return results
    def beam_search(self, speaker):
        """Run _dijkstra to get candidate paths, then score each path with
        the speaker model (replacing 'visual_feature' by 'speaker_scores').

        :param speaker: The speaker to be used in searching.
        :return:
        {
            "scan": XXX
            "instr_id":XXX,
            "instr_encoding": XXX
            "dijk_path": [v1, v2, ...., vn]
            "paths": [{
                "trajectory": [viewoint_id0, viewpoint_id1, viewpoint_id2, ..., ],
                "action": [act_1, act_2, ..., ],
                "listener_scores": [log_prob_act1, log_prob_act2, ..., ],
                "speaker_scores": [log_prob_word1, log_prob_word2, ..., ],
            }]
        }
        """
        self.env.reset()
        results = self._dijkstra()
        """
        return from self._dijkstra()
        [{
            "scan": XXX
            "instr_id":XXX,
            "instr_encoding": XXX
            "dijk_path": [v1, v2, ...., vn]
            "paths": [{
                    "trajectory": [viewoint_id0, viewpoint_id1, viewpoint_id2, ..., ],
                    "action": [act_1, act_2, ..., ],
                    "listener_scores": [log_prob_act1, log_prob_act2, ..., ],
                    "visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)
            }]
        }]
        """

        # Compute the speaker scores:
        for result in results:
            lengths = []
            num_paths = len(result['paths'])
            for path in result['paths']:
                assert len(path['trajectory']) == (len(path['visual_feature']) + 1)
                lengths.append(len(path['visual_feature']))
            max_len = max(lengths)
            # Zero-padded feature batches: one row per candidate path
            img_feats = torch.zeros(num_paths, max_len, 36, self.feature_size + args.angle_feat_size)
            can_feats = torch.zeros(num_paths, max_len, self.feature_size + args.angle_feat_size)
            for j, path in enumerate(result['paths']):
                for k, feat in enumerate(path['visual_feature']):
                    img_feat, can_feat = feat
                    img_feats[j][k] = img_feat
                    can_feats[j][k] = can_feat
            img_feats, can_feats = img_feats.cuda(), can_feats.cuda()
            features = ((img_feats, can_feats), lengths)
            # Same instruction replicated once per candidate path
            insts = np.array([result['instr_encoding'] for _ in range(num_paths)])
            seq_lengths = np.argmax(insts == self.tok.word_to_index['<EOS>'], axis=1)   # len(seq + 'BOS') == len(seq + 'EOS')
            insts = torch.from_numpy(insts).cuda()
            speaker_scores = speaker.teacher_forcing(train=True, features=features, insts=insts, for_listener=True)
            for j, path in enumerate(result['paths']):
                path.pop("visual_feature")
                path['speaker_scores'] = -speaker_scores[j].detach().cpu().numpy()[:seq_lengths[j]]

        return results
def beam_search_test(self, speaker):
self.encoder.eval()
self.decoder.eval()
self.critic.eval()
looped = False
self.results = {}
while True:
for traj in self.beam_search(speaker):
if traj['instr_id'] in self.results:
looped = True
else:
self.results[traj['instr_id']] = traj
if looped:
break
def test(self, use_dropout=False, feedback='argmax', allow_cheat=False, iters=None):
''' Evaluate once on each instruction in the current environment '''
self.feedback = feedback
if use_dropout:
self.encoder.train()
self.decoder.train()
self.critic.train()
else:
self.encoder.eval()
self.decoder.eval()
self.critic.eval()
super(Seq2SeqAgent, self).test(iters)
def zero_grad(self):
self.loss = 0.
self.losses = []
for model, optimizer in zip(self.models, self.optimizers):
model.train()
optimizer.zero_grad()
def accumulate_gradient(self, feedback='teacher', **kwargs):
if feedback == 'teacher':
self.feedback = 'teacher'
self.rollout(train_ml=args.teacher_weight, train_rl=False, **kwargs)
elif feedback == 'sample':
self.feedback = 'teacher'
self.rollout(train_ml=args.ml_weight, train_rl=False, **kwargs)
self.feedback = 'sample'
self.rollout(train_ml=None, train_rl=True, **kwargs)
else:
assert False
def optim_step(self):
self.loss.backward()
torch.nn.utils.clip_grad_norm(self.encoder.parameters(), 40.)
torch.nn.utils.clip_grad_norm(self.decoder.parameters(), 40.)
self.encoder_optimizer.step()
self.decoder_optimizer.step()
self.critic_optimizer.step()
def train(self, n_iters, feedback='teacher', **kwargs):
''' Train for a given number of iterations '''
self.feedback = feedback
self.encoder.train()
self.decoder.train()
self.critic.train()
self.losses = []
for iter in tqdm(range(1, n_iters + 1)):
self.encoder_optimizer.zero_grad()
self.decoder_optimizer.zero_grad()
self.critic_optimizer.zero_grad()
self.loss = 0
if feedback == 'teacher':
self.feedback = 'teacher'
self.rollout(train_ml=args.teacher_weight, train_rl=False, **kwargs)
elif feedback == 'sample':
if args.ml_weight != 0:
self.feedback = 'teacher'
self.rollout(train_ml=args.ml_weight, train_rl=False, **kwargs)
self.feedback = 'sample'
self.rollout(train_ml=None, train_rl=True, **kwargs)
else:
assert False
self.loss.backward()
torch.nn.utils.clip_grad_norm(self.encoder.parameters(), 40.)
torch.nn.utils.clip_grad_norm(self.decoder.parameters(), 40.)
self.encoder_optimizer.step()
self.decoder_optimizer.step()
self.critic_optimizer.step()
def save(self, epoch, path):
''' Snapshot models '''
the_dir, _ = os.path.split(path)
os.makedirs(the_dir, exist_ok=True)
states = {}
def create_state(name, model, optimizer):
states[name] = {
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
("decoder", self.decoder, self.decoder_optimizer),
("critic", self.critic, self.critic_optimizer)]
for param in all_tuple:
create_state(*param)
torch.save(states, path)
def load(self, path):
''' Loads parameters (but not training state) '''
states = torch.load(path)
def recover_state(name, model, optimizer):
state = model.state_dict()
model_keys = set(state.keys())
load_keys = set(states[name]['state_dict'].keys())
if model_keys != load_keys:
print("NOTICE: DIFFERENT KEYS IN THE LISTEREN")
state.update(states[name]['state_dict'])
model.load_state_dict(state)
if args.loadOptim:
optimizer.load_state_dict(states[name]['optimizer'])
all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
("decoder", self.decoder, self.decoder_optimizer),
("critic", self.critic, self.critic_optimizer)]
for param in all_tuple:
recover_state(*param)
return states['encoder']['epoch'] - 1
| r2r_src/agent.py | 38,417 | Base class for an R2R agent to generate and save trajectories.
An agent based on an LSTM seq2seq model with attention.
The Dijkstra algorithm.
It was called beam search to be consistent with existing work,
but it actually finds the exact K paths with the smallest listener log_prob.
:return:
[{
"scan": XXX
"instr_id":XXX,
'instr_encoding": XXX
'dijk_path': [v1, v2, ..., vn] (The path used for find all the candidates)
"paths": {
"trajectory": [viewpoint_id1, viewpoint_id2, ..., ],
"action": [act_1, act_2, ..., ],
"listener_scores": [log_prob_act1, log_prob_act2, ..., ],
"visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)
}
}]
Extract precomputed features into variable.
Extract instructions from a list of observations and sort by descending
sequence length (to enable PyTorch packing).
Extract teacher actions into variable.
:param obs: The observation.
:param ended: Whether the action seq is ended
:return:
:param speaker: The speaker to be used in searching.
:return:
{
"scan": XXX
"instr_id":XXX,
"instr_encoding": XXX
"dijk_path": [v1, v2, ...., vn]
"paths": [{
"trajectory": [viewoint_id0, viewpoint_id1, viewpoint_id2, ..., ],
"action": [act_1, act_2, ..., ],
"listener_scores": [log_prob_act1, log_prob_act2, ..., ],
"speaker_scores": [log_prob_word1, log_prob_word2, ..., ],
}]
}
Loads parameters (but not training state)
Interface between Panoramic view and Egocentric view
It will convert the action panoramic view action a_t to equivalent egocentric view actions for the simulator
Return a list of dicts containing instr_id:'xx', path:[(viewpointId, heading_rad, elevation_rad)]
:param train_ml: The weight to train with maximum likelihood
:param train_rl: whether use RL in training
:param reset: Reset the environment
:param speaker: Speaker used in back translation.
If the speaker is not None, use back translation.
O.w., normal training
:return:
Snapshot models
Evaluate once on each instruction in the current environment
Train for a given number of iterations
For learning agents If iters is not none, shuffle the env batch We rely on env showing the entire batch before repeating anything For each time, it will run the first 'iters' iterations. (It was shuffled before) Do a full round For now, the agent can't pick which forward move to make - just the one in the middle left right up down forward <end> <start> <ignore> Models Optimizers Evaluations Logs Full length Sort sequences by lengths True -> descending seq_lengths[0] is the Maximum length Image feat +1 is for the end Note: The candidate_feat at len(ob['candidate']) is the feature for the END which is zero in my implementation Image feat Image features from obs Just ignore this index Next view point Stop here The teacher action should be "STAY HERE" Go to the next view Adjust -1 is the <stop> action The point idx started from 0 Tune up Tune down Turn right until the target Reset env Trigger the self_train mode! Use the same drop mask in speaker Create fake environments with the generated instruction First word is <BOS> The inst is not ended! Reorder the language input for the encoder (do not ruin the original code) Init the reward shaping The init distance from the view point to the target Record starting point For test result submission Initialization the tracking state Indices match permuation of the model, not env Init the logs Apply the env drop mask to the feat Mask outputs where agent can't move forward Here the logit is [b, max_candidate] Avoding cyclic path Supervised training Determine next model inputs teacher forcing student forcing - argmax Calculate the log_prob here Gather the log_prob for each batch sampling an action from model For log For optimization Prepare environment action NOTE: Env action is in the perm_obs space The last action is <end> Change the <end> and ignore action to -1 Make action and get the new state Perm the obs for the resu Calculate the mask and reward If the action is already finished BEFORE THIS ACTION. 
Calculate the reward If the action now is end Correct Incorrect The action is not end Change of distance Quantification Update the finished actions -1 means ended or ignored (already ended) Early exit if all ended Last action in A2C NOW, A2C!!! Calculate the final discounted reward The value esti of the last state, remove the grad for safety The inital reward is zero If the action is not ended, use the value function as the last reward If it ended, the reward will be 0 r_: The higher, the better. -ln(p(action)) * (discount_reward - value) 1/2 L2 loss Normalize the loss function For safety, it will be activated if no losses are added This argument is useless. Make state id Make state id Get first obs Prepare the state id Encoder Recover the original order Dijk Graph States: -95 is the start point For the navigation path Dijk Algorithm Get the state with smallest score for each batch If the batch is not ended, find the smallest item. Else use a random item from the dict (It always exists) Set the visited and the end seqs Get enough candidates Gather the running state in the batch Recover the env and gather the feature Heading, elevation is not used in panoramic Update the floyd graph Only used to shorten the navigation length Will not effect the result Update the Graph Run one decoding step Update the dijk graph's states with the newly visited viewpoint Calculate the log_prob here If the action is <end> or the batch is ended, skip it +1 to include the <end> action score + log_prob[action] A normal action The end action action is -1 next viewpoint is still here The active state is zero after the updating, then setting the ended to True It's the last active state End? Move back to the start point Gather the Path Compute the speaker scores: len(seq + 'BOS') == len(seq + 'EOS') | 5,978 | en | 0.810303 |
import xarray as xr
import pandas as pd
import numpy as np
import xgboost as xgb
import time
import pickle
import sys
from xgboost import XGBRegressor
# load dataframe with maximal temp
def load_df_max_TREFHT(member, start_date, end_date):
    """Load one CESM-LE member CSV, stringify lat/lon, and append one-hot
    month columns derived from the time column."""
    path = "/glade/scratch/zhonghua/CESM-LE-members-csv/"
    print("***************Start loading member",member,"***************")
    tic = time.time()
    df = pd.read_csv(path + member + "_" + start_date + "_" + end_date + ".csv")
    elapsed_time = time.time() - tic
    print("It takes elapsed_time", elapsed_time, "to read csv")
    print("***************Start convert lat/lon to string***************")
    tic = time.time()
    # Round then stringify so coordinates can be used as exact-match keys.
    df[["lat", "lon"]] = df[["lat", "lon"]].round(4).astype(str)
    elapsed_time = time.time() - tic
    print("It takes elapsed_time", elapsed_time, "to convert lat/lon to string")
    print("***************Start One Hot Encoding***************")
    # https://stackoverflow.com/questions/44124436/python-datetime-to-season
    tic = time.time()
    df["time"] = pd.to_datetime(df["time"], errors="coerce")
    month_names = ["Jan","Feb", "Mar", "Apr", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"]
    month_lookup = dict(zip(range(1, 13), month_names))
    one_hot = pd.get_dummies(df["time"].dt.month.map(month_lookup).astype('category'))
    df = pd.concat([df, one_hot], axis=1)
    elapsed_time = time.time() - tic
    print("It takes elapsed_time", elapsed_time, "to finish the one hot encoding")
    return df
def XGB_test(df, year, lat, lon, member):
    """Apply the pre-trained per-grid-cell XGBoost model to one (lat, lon)
    cell of *df* and return the predictions indexed by (lat, lon, time).

    df:     one-hot encoded member dataframe (see load_df_max_TREFHT).
    year:   directory name of the saved model (callers pass start_date).
    lat/lon: grid-cell coordinates as strings (must match df's values).
    member: ensemble member id; used as the prediction column name.
    """
    t_0 = time.time()
    df_lat = df[df["lat"] == lat]
    # .copy() so the column assignment below does not mutate a view of df
    # (avoids pandas SettingWithCopyWarning / silent no-op writes).
    df_temp = df_lat[df_lat["lon"] == lon].copy()
    vari_ls = ["QBOT","UBOT","VBOT",
               "TREFHT",
               "FLNS","FSNS",
               "PRECT","PRSN",
               "Jan","Feb", "Mar",
               "Apr", "May", "June",
               "July", "Aug", "Sept",
               "Oct", "Nov", "Dec"]
    model_path = "/glade/scratch/zhonghua/ensem_model/"+year+"/"+"MX_"+lat+"_"+lon+".dat"
    # with-block closes the file handle; the original leaked it.
    with open(model_path, "rb") as f:
        XGBreg = pickle.load(f)
    df_temp[member] = XGBreg.predict(df_temp[vari_ls])
    df_return = df_temp[["lat","lon","time",member,"TREFMXAV_U"]].copy()
    df_return[["lat","lon"]] = df_return[["lat","lon"]].astype(np.float32)
    elapsed_time = time.time() - t_0
    print("It takes elapsed_time", elapsed_time, "to apply the model")
    return df_return.set_index(["lat","lon","time"])
#########################################################
# Driver: load the member data, apply the per-cell model to every
# (lat, lon) grid cell, and write one combined validation CSV.
# with-block closes the file handle; the original leaked it.
with open("/glade/scratch/zhonghua/lat_lon_dict.dat", "rb") as f:
    lat_lon_dict = pickle.load(f)
member = sys.argv[1]
start_date = sys.argv[2]
end_date = sys.argv[3]
df = load_df_max_TREFHT(member, start_date, end_date)
i = 1
df_final_ls = []
for lat in lat_lon_dict:
    print(lat)
    for lon in lat_lon_dict[lat]:
        # NOTE: start_date doubles as the model "year" directory name.
        df_final_ls.append(XGB_test(df, start_date, lat, lon, member))
        i += 1
        if (i % 10 == 0):
            # Progress heartbeat every 10 cells.
            print(i)
pd.concat(df_final_ls).to_csv("/glade/scratch/zhonghua/CESM_validation/"+start_date+"/"+member+"_ens.csv")
| 3_model_valid/pred/apply_model_members.py | 3,178 | load dataframe with maximal temp https://stackoverflow.com/questions/44124436/python-datetime-to-seasondf = df.dropna(subset=['time'])df_temp = df[(df["lat"]==lat) & (df["lon"]==lon)].reset_index()print("rmse:",np.sqrt(mean_squared_error(df_temp[member],df_temp[pred])))print("mae:",mean_absolute_error(df_temp[member],df_temp[pred])) | 334 | en | 0.379107 |
'''
This is to fetch the tip table data for a telegram_id
Error Handling
==============
- /withdrawmemo tipuser11111 0.0001 TLOS pay_bill
- /withdrawmemo tipuser11111 0.00001 EOS pay_bill
{"code": 3050003, "name": "eosio_assert_message_exception", "what": "eosio_assert_message assertion failure"
, "details": [{"message": "assertion failure with message: there is no balances available corresponding to t
he parsed quantity symbol for the given from_id.", "file": "cf_system.cpp", "line_number": 14, "method": "eo
sio_assert"}, {"message": "pending console output: ", "file": "apply_context.cpp", "line_number": 143, "meth
od": "exec_one"}]}
- /withdrawmemo tipuser11117 0.0001 EOS pay_bill
{"code": 3010001, "name": "name_type_exception", "what": "Invalid name", "details": [{"message": "Name conta
ins invalid character: (7) ", "file": "name.hpp", "line_number": 26, "method": "char_to_symbol"}, {"message"
: "", "file": "abi_serializer.cpp", "line_number": 570, "method": "_variant_to_binary"}, {"message": "", "fi
le": "abi_serializer.cpp", "line_number": 570, "method": "_variant_to_binary"}, {"message": "", "file": "abi
_serializer.cpp", "line_number": 584, "method": "_variant_to_binary"}, {"message": "\"{"from_id":410894301,"
from_username":"abhi3700","to_ac":"tipuser11117","quantity":"0.0001 EOS","memo":"pay_bill"}\" is invalid arg
s for action \"withdraw\" code \"tippertipper\". expected \"[{"name":"from_id","type":"uint64"},{"name":"fro
m_username","type":"string"},{"name":"to_ac","type":"name"},{"name":"quantity","type":"asset"},{"name":"memo
","type":"string"}]\"", "file": "chain_plugin.cpp", "line_number": 3396, "method": "abi_json_to_bin"}, {"mes
sage": "code: tippertipper, action: withdraw, args: {"from_id":410894301,"from_username":"abhi3700","to_ac":
"tipuser11117","quantity":"0.0001 EOS","memo":"pay_bill"}", "file": "chain_plugin.cpp", "line_number": 3402,
"method": "abi_json_to_bin"}]}
'''
import json
import asyncio
from aioeos import EosAccount, EosJsonRpc, EosTransaction
from aioeos import types
from aioeos.exceptions import EosAccountDoesntExistException
from aioeos.exceptions import EosAssertMessageException
from aioeos.exceptions import EosDeadlineException
from aioeos.exceptions import EosRamUsageExceededException
from aioeos.exceptions import EosTxCpuUsageExceededException
from aioeos.exceptions import EosTxNetUsageExceededException
from input import *
# def validate(j):
# try:
# return json.load(j) # put JSON-data to a variable
# except json.decoder.JSONDecodeError:
# print("Invalid JSON") # in case json is invalid
# else:
# print("Valid JSON") # in case json is valid
async def balance(
    from_id,
    # chat
):
    """Print every token balance held by *from_id* in the tip contract table.

    Prints one line per token, e.g. "2.0 EOS". Prints nothing when the id
    has no row in the table.
    """
    rpc = EosJsonRpc(url=Chain_URL)
    table_response = await rpc.get_table_rows(
        code=tip_eosio_ac,
        scope= tip_eosio_ac,
        table=tip_table,
        lower_bound= from_id,
        upper_bound= from_id
    )
    # Use the response dict directly. The original round-tripped it through
    # str() with quote/False replacement and json.loads, which breaks on any
    # value containing an apostrophe or the substring "False".
    rows = table_response['rows']
    if not rows:
        # No table entry for this from_id: nothing to print (the original
        # raised IndexError here).
        return
    for r in rows[0]["balances"]:
        # "sym" is "<precision>,<symbol>"; value is the raw integer amount.
        prec, sym_name = r["key"]["sym"].split(",")
        print(f'{r["value"]/10**int(prec)} {sym_name}')  # result e.g. 2.0 EOS
if __name__ == '__main__':
    # Fetch and print the balances for this hard-coded telegram id.
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(balance(410894301))
Error Handling
==============
- /withdrawmemo tipuser11111 0.0001 TLOS pay_bill
- /withdrawmemo tipuser11111 0.00001 EOS pay_bill
{"code": 3050003, "name": "eosio_assert_message_exception", "what": "eosio_assert_message assertion failure"
, "details": [{"message": "assertion failure with message: there is no balances available corresponding to t
he parsed quantity symbol for the given from_id.", "file": "cf_system.cpp", "line_number": 14, "method": "eo
sio_assert"}, {"message": "pending console output: ", "file": "apply_context.cpp", "line_number": 143, "meth
od": "exec_one"}]}
- /withdrawmemo tipuser11117 0.0001 EOS pay_bill
{"code": 3010001, "name": "name_type_exception", "what": "Invalid name", "details": [{"message": "Name conta
ins invalid character: (7) ", "file": "name.hpp", "line_number": 26, "method": "char_to_symbol"}, {"message"
: "", "file": "abi_serializer.cpp", "line_number": 570, "method": "_variant_to_binary"}, {"message": "", "fi
le": "abi_serializer.cpp", "line_number": 570, "method": "_variant_to_binary"}, {"message": "", "file": "abi
_serializer.cpp", "line_number": 584, "method": "_variant_to_binary"}, {"message": ""{"from_id":410894301,"
from_username":"abhi3700","to_ac":"tipuser11117","quantity":"0.0001 EOS","memo":"pay_bill"}" is invalid arg
s for action "withdraw" code "tippertipper". expected "[{"name":"from_id","type":"uint64"},{"name":"fro
m_username","type":"string"},{"name":"to_ac","type":"name"},{"name":"quantity","type":"asset"},{"name":"memo
","type":"string"}]"", "file": "chain_plugin.cpp", "line_number": 3396, "method": "abi_json_to_bin"}, {"mes
sage": "code: tippertipper, action: withdraw, args: {"from_id":410894301,"from_username":"abhi3700","to_ac":
"tipuser11117","quantity":"0.0001 EOS","memo":"pay_bill"}", "file": "chain_plugin.cpp", "line_number": 3402,
"method": "abi_json_to_bin"}]}
def validate(j): try: return json.load(j) put JSON-data to a variable except json.decoder.JSONDecodeError: print("Invalid JSON") in case json is invalid else: print("Valid JSON") in case json is valid chat As False is invalid in JSON, so replace with false print(table_response) print(f'token precision: {prec}') precision print(f'token sym_name: {sym_name}') symbol name print(f'val: {r["value"]/10**int(prec)}\n\n') exact value result e.g. 2.0 EOS | 2,440 | en | 0.439409 |
#!/usr/bin/env python
#Copyright (c) 2008 Erik Tollerud (erik.tollerud@gmail.com)
from __future__ import division,with_statement
from glob import glob
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup,find_packages
from distutils.command.build_py import build_py as du_build_py
from distutils.core import Command
from astropysics.version import version as versionstr
from astropysics.config import _recpkgs,_guipkgs
descrip = """
`astropysics` contains a variety of utilities and algorithms for reducing, analyzing, and visualizing astronomical data.
See http://packages.python.org/Astropysics/ for detailed documentation.
"""
# All astropysics packages (except the test suite) and every script under scripts/.
apyspkgs = find_packages(exclude=['tests'])
scripts = glob('scripts/*')
#recommended/gui packages are stored in config module - used in extras
recpkgs = [pkg.name for pkg in _recpkgs]
guipkgs = [pkg.name for pkg in _guipkgs]
#custom build_py overwrites version.py with a version overwriting the revno-generating version.py
class apy_build_py(du_build_py):
    """build_py subclass that freezes a static version number into the built tree.

    The built copy of astropysics/version.py is replaced with a snapshot so it
    no longer regenerates the version at import time.
    """
    def run(self):
        # Run the standard build first, then overwrite version.py in build_lib.
        from os import path
        res = du_build_py.run(self)
        versfile = path.join(self.build_lib,'astropysics','version.py')
        print 'freezing version number to',versfile
        with open(versfile,'w') as f: #this overwrites the actual version.py
            f.write(self.get_version_py())
        return res
    def get_version_py(self):
        """Return the frozen version.py source, stamped with the build time."""
        import datetime
        from astropysics.version import _frozen_version_py_template
        from astropysics.version import version,major,minor,bugfix,dev
        timestamp = str(datetime.datetime.now())
        t = (timestamp,version,major,minor,bugfix,dev)
        return _frozen_version_py_template%t
#custom sphinx builder just makes the directory to build if it hasn't already been made
try:
    from sphinx.setup_command import BuildDoc
    class apy_build_sphinx(BuildDoc):
        """BuildDoc subclass that pre-creates the output directory."""
        def finalize_options(self):
            from os.path import isfile
            from distutils.cmd import DistutilsOptionError
            if self.build_dir is not None:
                # A regular file at the target path cannot serve as a build dir.
                if isfile(self.build_dir):
                    raise DistutilsOptionError('Attempted to build_sphinx into a file '+self.build_dir)
                self.mkpath(self.build_dir)
            return BuildDoc.finalize_options(self)
except ImportError: #sphinx not present
    apy_build_sphinx = None
#command to count the number of lines of code (mostly for curiosity's sake) in the main dirs
class CountLines(Command):
    """Distutils command: print line/file counts for the astropysics,
    scripts, and tests directories."""
    # Brief (40-50 characters) description of the command
    description = "Print the number of lines in the major directories to the terminal."
    # List of option tuples: long name, short name (None if no short
    # name), and help string.
    user_options = [('includeempty', 'e',
                    "Include empty lines in the count"),
                    ]
    def initialize_options (self):
        self.includeempty = False
    def finalize_options (self):
        pass
    def visit_files(self,lists,dirname,fnames):
        """os.path.walk visitor: append *dirname*'s line count and file count
        to the two lists packed in *lists*."""
        lcountlist,fcountlist = lists
        from os import path
        #prefilter for valid extentions
        # scripts/ is counted whole; elsewhere only .py/.pyx files count.
        if dirname != 'scripts':
            fnames = [fn for fn in fnames if (fn.endswith('.py') or fn.endswith('.pyx')) ]
        cnt = 0
        for fn in fnames:
            fn = path.join(dirname,fn)
            with open(fn) as f:
                if self.includeempty:
                    for l in f:
                        cnt += 1
                else:
                    # Count only lines with non-whitespace content.
                    for l in f:
                        if l.strip()!='':
                            cnt += 1
        lcountlist.append(cnt)
        fcountlist.append(len(fnames))
    def run(self):
        """Walk the three source trees and print their line/file totals."""
        # NOTE(review): os.path.walk exists only on Python 2 (removed in 3.x).
        from os import path
        dir,name = path.split(__file__)
        apydir = path.join(dir,'astropysics')
        apyllst,apyflst = [],[]
        path.walk(apydir,self.visit_files,(apyllst,apyflst))
        self.apylinecount = sum(apyllst)
        self.apyfilecount = sum(apyflst)
        scrdir = path.join(dir,'scripts')
        scrllst,scrflst = [],[]
        path.walk(scrdir,self.visit_files,(scrllst,scrflst))
        self.scrlinecount = sum(scrllst)
        self.scrfilecount = sum(scrflst)
        tstdir = path.join(dir,'tests')
        tstllst,tstflst = [],[]
        path.walk(tstdir,self.visit_files,(tstllst,tstflst))
        self.tstlinecount = sum(tstllst)
        self.tstfilecount = sum(tstflst)
        self.linecount = self.apylinecount + self.scrlinecount + self.tstlinecount
        self.filecount = self.apyfilecount + self.scrfilecount + self.tstfilecount
        print 'Astropysics source directory has %i lines in %i files'%(self.apylinecount,self.apyfilecount)
        print 'Scripts directory has %i lines in %i files'%(self.scrlinecount,self.scrfilecount)
        print 'Tests directory has %i lines in %i files'%(self.tstlinecount,self.tstfilecount)
        print 'Total %i lines in %i files'%(self.linecount,self.filecount)
cmdclassd = {'build_py' : apy_build_py,'count_lines':CountLines}
# build_sphinx is registered only if sphinx imported successfully above.
if apy_build_sphinx is not None:
    cmdclassd['build_sphinx'] = apy_build_sphinx
setup(name='Astropysics',
      version=versionstr,
      description='Astrophysics libraries for Python',
      packages=apyspkgs,
      package_data={'astropysics':['data/*']},
      scripts=scripts,
      requires=['numpy','scipy'],
      install_requires=['numpy'],
      provides=['astropysics'],
      extras_require={'all':recpkgs+guipkgs,
                      'nogui':recpkgs},
      author='Erik Tollerud',
      author_email='erik.tolleru@gmail.com',
      license = 'Apache License 2.0',
      url='http://packages.python.org/Astropysics/',
      long_description=descrip,
      cmdclass = cmdclassd
     )
| setup.py | 5,968 | !/usr/bin/env pythonCopyright (c) 2008 Erik Tollerud (erik.tollerud@gmail.com) recommended/gui packages are stored in config module - used in extrascustom build_py overwrites version.py with a version overwriting the revno-generating version.pythis overwrites the actual version.pycustom sphinx builder just makes the directory to build if it hasn't already been madesphinx not presentcommand to count the number of lines of code (mostly for curiosity's sake) in the main dirs Brief (40-50 characters) description of the command List of option tuples: long name, short name (None if no short name), and help string.prefilter for valid extentions | 645 | en | 0.662348 |
#!/usr/bin/env python
import argparse, re, os
from StringIO import StringIO
import language
#* Build instruction
#*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*#
def roundup(x, to=8):
    """Round *x* up to the next multiple of *to* (unchanged if already one)."""
    return x + (-x % to)
def build_form(ins):
    """Map a tokenized instruction to its operand-form signature.

    ins[0] is the mnemonic (always "%i"); each following operand becomes
    "%r" (register), "%q" (q-register), "%f" (float immediate, contains a
    '.') or "%n" (integer immediate '#' / label '$').
    Unrecognized operands print an error but are skipped (no form entry).
    """
    form = ["%i"]
    for operand in ins[1:]:
        if operand.startswith("r"):
            form.append("%r")
        elif operand.startswith("q"):
            form.append("%q")
        elif operand.startswith("#") or operand.startswith("$"):
            if "." in operand:
                form.append("%f")
            else:
                form.append("%n")
        else:
            # Parenthesized single-string print is valid and identical on
            # both Python 2 and 3 (the original py2 print statement broke py3).
            print("Error: Bad operand for instruction!")
    return form
def getnum(strnum):
    """Parse a numeric literal: '0x...' hex, '0b...' binary, else decimal."""
    if strnum.startswith("0x"):
        base = 16
    elif strnum.startswith("0b"):
        base = 2
    else:
        return int(strnum)
    return int(strnum[2:], base)
def build_ins(line):
    """Assemble one source line into lists of binary-string fields.

    Returns a pair (instructions, data); each element of either list is a
    list of bit strings, one per field.  Label operands ("$name") are left
    unresolved as the raw token for the assembler's second pass.
    NOTE(review): "long" matches the data-directive branch but no sub-branch
    appends anything, so it yields an empty data entry -- confirm intended.
    """
    line = line.strip()
    # Normalize comma separators to spaces before tokenizing.
    line = re.sub(" |,", " ", line)
    ins = line.split()
    hx = []
    # print ins
    if ins[0] in ["data", "byte", "d.byte", "d.int", "long"]:
        # Data directives: encode the literal value directly.
        if ins[0] in ["data", "byte", "d.byte"]:
            hx.append( format(getnum(ins[1]), "08b") )
        elif ins[0] == "d.int":
            hx.append( format(getnum(ins[1]), "032b") )
        # print hx
        return [], [hx]
    else:
        # print ins
        # print build_form(ins)
        # print language.ins[ins[0]]
        # Pick the opcode variant matching this operand-form signature.
        form = build_form(ins)
        opcode = language.ins[ins[0]]["opcode"][language.ins[ins[0]]["form"].index(" ".join(form))]
        for f,op,i in zip(form, ins, range(len(ins))):
            if f == "%i":
                hx.append( format(opcode, "07b") )
            if f == "%r":
                hx.append( format(int(op[1:]), "07b") )
            if f == "%q":
                # q-registers are offset into the upper half of the register file.
                hx.append( format(int(op[1:])+(language.registers/2), "07b") )
            if f == "%f":
                hx.append( format( language.float_to_bits(float(op[1:])), "032b") )
            if f == "%n":
                if op[0] == "$":
                    # Label: keep the raw token; resolved in the second pass.
                    hx.append( op )
                elif i == 1:
                    # Immediate width depends on operand position (57/50/43 bits);
                    # the +/- modulo encodes negative values as two's complement.
                    hx.append( format( (getnum(op[1:]) + (1 << 57)) % (1 << 57), "057b") )
                elif i == 2:
                    hx.append( format( (getnum(op[1:]) + (1 << 50)) % (1 << 50), "050b") )
                elif i == 3:
                    hx.append( format( (getnum(op[1:]) + (1 << 43)) % (1 << 43), "043b") )
        return [hx], []
def assemble(code):
    """Two-pass assembler: translate *code* (open file or source string) into
    hex byte text, one instruction or data item per line.

    Pass 1 builds bit-field tuples and the label table; pass 2 resolves
    labels into addresses, then everything is formatted as space-separated
    hex byte pairs.
    NOTE(review): `type(code) is file` relies on the Python 2 `file` builtin;
    this branch is broken on Python 3.
    """
    # read in the file
    if type(code) is file:
        lines = [l.rstrip().lower() for l in code.readlines()]
    else:
        lines = [l.rstrip().lower() for l in code.splitlines()]
    # remove comments
    lines = [l for l in lines if not l.lstrip().startswith("#")]
    # remove blank lines
    lines = [l for l in lines if not l.strip() == ""]
    # print lines
    labels = {}
    addr = 0
    ins = []
    data = []
    hexbytes = StringIO()
    # build the bit tuple for each instruction as well as label table
    for line in lines:
        # print line
        if line.startswith((" ", "\t")):
            # Indented line: an instruction or data directive.
            i, d = build_ins(line)
            ins.extend(i)
            data.extend(d)
            # Data items occupy 4 bytes; instructions occupy 8.
            if line.strip().startswith("d."):
                addr += 4
            else:
                addr = addr + 8
        elif line.endswith(":"):
            # Label definition; "name@addr:" pins it to an explicit address.
            if "@" in line:
                key, address = line.split("@")
                labels[key] = int(address[:-1])
            else:
                labels[line[:-1]] = addr
    # print labels
    # second pass, find all labels and replace them with their program address component
    for inst in ins:
        # print inst
        for p,i in zip(inst, range(len(inst))):
            if p[0] == "$":
                # Field width depends on operand position (see build_ins).
                if i == 1:
                    inst[1] = format(labels[p[1:]], "057b")
                elif i == 2:
                    inst[2] = format(labels[p[1:]], "050b")
                elif i == 3:
                    inst[3] = format(labels[p[1:]], "043b")
    # convert the instructions to hex byte stream and write one instruction per line
    for inst in ins:
        inst = "".join(inst).ljust(64, "0")
        # print inst, len(inst)
        inst = format(int(inst, 2), "08x").rjust(16, "0")
        # print inst, len(inst)
        inst = " ".join(map(''.join, zip(*[iter(inst)]*2)))
        # print inst
        hexbytes.write(inst+"\n")
    # may need to fix this as we could have undefined behaviour if people put data before program
    # instructions!
    for d in data:
        d = "".join(d)
        d = d.rjust(roundup(len(d)), "0")
        # print d
        fstr = "0"+str(roundup(len(d)/4, 2))+"x"
        d = format(int(d, 2), fstr)
        d = " ".join(map(''.join, zip(*[iter(d)]*2)))
        hexbytes.write(d+"\n")
    return hexbytes.getvalue().strip()
#* Main
#*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*#
if __name__ == "__main__":
    # Parse the input assembly path and optional output path, assign opcodes,
    # then assemble the file into <name>.hex (or the --out target).
    ap = argparse.ArgumentParser(description="SuperScalar assembler")
    ap.add_argument("file",
                    type=str,
                    nargs=1,
                    help="Assembler file to assemble.")
    ap.add_argument("--out", "-o",
                    type=str,
                    nargs=1,
                    metavar="FILE",
                    dest="output",
                    help="Specify an output file for the machine code")
    args = ap.parse_args()
    if args.output:
        hex_path = args.output[0]
    else:
        # Default: input filename with a .hex extension.
        hex_path = os.path.splitext(args.file[0])[0]+".hex"
    if not os.path.exists(os.path.dirname(hex_path)):
        os.makedirs(os.path.dirname(hex_path))
    fp = open(args.file[0], "r")
    fpx = open(hex_path, "w")
    language.assign_opcodes()
    fpx.write(assemble(fp))
    # print args.file[0],"->",hex_path,"("+str(addr)+" bytes)"
| assembler.py | 4,882 | !/usr/bin/env python* Build instruction*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~* print ins print hx print ins print build_form(ins) print language.ins[ins[0]] read in the file remove comments remove blank lines print lines build the bit tuple for each instruction as well as label table print line print labels second pass, find all labels and replace them with their program address component print inst convert the instructions to hex byte stream and write one instruction per line print inst, len(inst) print inst, len(inst) print inst may need to fix this as we could have undefined behaviour if people put data before program instructions! print d* Main*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~* print args.file[0],"->",hex_path,"("+str(addr)+" bytes)" | 872 | en | 0.574001 |
import time
import serial
print "Iniciando Comunicao Serial com Arduino"
# Open the serial connection (Arduino on /dev/ttyACM0 at 115200 baud)
comport = serial.Serial('/dev/ttyACM0', 115200)
#comport = serial.Serial('/dev/ttyUSB0', 115200)
# Single-character commands understood by the Arduino sketch
LED_ON='l'
LED_OFF='d'
# Delay between opening the serial connection and the first write
time.sleep(1.8) # Between 1.5s and 2s
print "-*- LOOP -*-"
try:
    while True:
        print "Led ON"
        comport.write(LED_ON)
        time.sleep(1)
        print "Led OFF"
        comport.write(LED_OFF)
        time.sleep(1)
except:
    # NOTE(review): bare except deliberately swallows Ctrl+C (and every other
    # exception) so the port gets closed on exit; try/finally would be cleaner.
    # Close the serial connection
    comport.close()
    pass
| comunicacao_serial.py | 555 | Iniciando conexao serialcomport = serial.Serial('/dev/ttyUSB0', 115200) Time entre a conexao serial e o tempo para escrever (enviar algo) Entre 1.5s a 2s Fechando conexao serial | 177 | es | 0.452225 |
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
To run these tests:
$ pip install webtest nosegae
$ nosetests --with-gae --gae-lib-root ~/google_appengine/
"""
import unittest
import webtest
import cloudstorage as gcs
import main
import gcs_async
import gcs_async_test
write = gcs_async_test.write
app = webtest.TestApp(main.app)
JUNIT_SUITE = """<testsuite tests="8" failures="0" time="1000.24">
<testcase name="First" classname="Example e2e suite" time="0">
<skipped/>
</testcase>
<testcase name="Second" classname="Example e2e suite" time="36.49"/>
<testcase name="Third" classname="Example e2e suite" time="96.49">
<failure>/go/src/k8s.io/kubernetes/test.go:123
Error Goes Here</failure>
</testcase>
</testsuite>"""
def init_build(build_dir, started=True, finished=True,
               finished_has_version=False):
    """Create faked files for a build."""
    start_json = {'timestamp': 1406535800}
    finish_json = {'result': 'SUCCESS', 'timestamp': 1406536800}
    # The version stamp normally lives in started.json; some jobs record it
    # in finished.json instead.
    target = finish_json if finished_has_version else start_json
    target['version'] = 'v1+56'
    if started:
        write(build_dir + 'started.json', start_json)
    if finished:
        write(build_dir + 'finished.json', finish_json)
    write(build_dir + 'artifacts/junit_01.xml', JUNIT_SUITE)
class TestBase(unittest.TestCase):
    def init_stubs(self):
        """Initialize the App Engine service stubs this suite relies on.

        The original called init_app_identity_stub() twice; the duplicate
        call was redundant and has been removed.
        """
        self.testbed.init_memcache_stub()
        self.testbed.init_app_identity_stub()
        self.testbed.init_urlfetch_stub()
        self.testbed.init_blobstore_stub()
        self.testbed.init_datastore_v3_stub()
        # redirect GCS calls to the local proxy
        gcs_async.GCS_API_URL = gcs.common.local_api_url()
class AppTest(TestBase):
# pylint: disable=too-many-public-methods
BUILD_DIR = '/kubernetes-jenkins/logs/somejob/1234/'
def setUp(self):
self.init_stubs()
init_build(self.BUILD_DIR)
def test_index(self):
"""Test that the index works."""
response = app.get('/')
self.assertIn('kubernetes-e2e-gce', response)
def test_nodelog_missing_files(self):
"""Test that a missing all files gives a 404."""
build_dir = self.BUILD_DIR + 'nodelog?pod=abc'
response = app.get('/build' + build_dir, status=404)
self.assertIn('Unable to find', response)
def test_nodelog_kubelet(self):
"""Test for a kubelet file with junit file.
- missing the default kube-apiserver"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
response = app.get('/build' + nodelog_url)
self.assertIn("Wrap line", response)
def test_nodelog_apiserver(self):
"""Test for default apiserver file
- no kubelet file to find objrefdict
- no file with junit file"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kube-apiserver.log',
'apiserver pod abc\n')
response = app.get('/build' + nodelog_url)
self.assertIn("Wrap line", response)
    def test_nodelog_no_junit(self):
        """Test for when no junit in same folder
        - multiple folders"""
        nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
        init_build(self.BUILD_DIR)
        # junit lives at the artifacts root, not next to either node's logs.
        write(self.BUILD_DIR + 'artifacts/junit_01.xml', JUNIT_SUITE)
        write(self.BUILD_DIR + 'artifacts/tmp-node-image/kube-apiserver.log',
              'apiserver pod abc\n')
        write(self.BUILD_DIR + 'artifacts/tmp-node-2/kube-apiserver.log',
              'apiserver pod abc\n')
        write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
              'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
        response = app.get('/build' + nodelog_url)
        # Both node folders should be discovered and offered.
        self.assertIn("tmp-node-2", response)
def test_nodelog_no_junit_apiserver(self):
    """Test for when no junit in same folder.

    - multiple folders
    - no kube-apiserver.log
    """
    nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
    init_build(self.BUILD_DIR)
    write(self.BUILD_DIR + 'artifacts/junit_01.xml', JUNIT_SUITE)
    # No apiserver log anywhere; only docker + kubelet logs exist.
    write(self.BUILD_DIR + 'artifacts/tmp-node-image/docker.log',
          'Containers\n')
    write(self.BUILD_DIR + 'artifacts/tmp-node-2/kubelet.log',
          'apiserver pod abc\n')
    write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
          'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
    response = app.get('/build' + nodelog_url)
    self.assertIn("tmp-node-2", response)
def test_no_failed_pod(self):
    """Test that the filtering page still loads when no failed pod name is given."""
    # Note: the query string deliberately omits the pod= parameter.
    nodelog_url = self.BUILD_DIR + 'nodelog?junit=junit_01.xml'
    init_build(self.BUILD_DIR)
    write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
    write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
          'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"} failed)\n')
    response = app.get('/build' + nodelog_url)
    self.assertIn("Wrap line", response)
def test_parse_by_timestamp(self):
    """Test parse_by_timestamp and get_woven_logs.

    - Weave separate logs together by timestamp
    - Check that lines without timestamp are combined with the next entry
    - Test different timestamp formats
    """
    kubelet_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log'
    kubeapi_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kube-apiserver.log'
    query_string = 'nodelog?pod=abc&junit=junit_01.xml&weave=on&logfiles=%s&logfiles=%s' % (
        kubelet_filepath, kubeapi_filepath)
    nodelog_url = self.BUILD_DIR + query_string
    init_build(self.BUILD_DIR)
    write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
    # The kubelet log starts with a timestamp-less line ("abc") that must
    # be merged into the following timestamped line when woven.
    write(kubelet_filepath,
          'abc\n0101 01:01:01.001 Event(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
    write(kubeapi_filepath,
          '0101 01:01:01.000 kubeapi\n0101 01:01:01.002 pod\n01-01T01:01:01.005Z last line')
    # Expected interleaving: apiserver .000, kubelet .001 (highlighted),
    # apiserver .002, then the ISO-format .005 line.
    expected = ('0101 01:01:01.000 kubeapi\n'
                '<span class="highlight">abc0101 01:01:01.001 Event(api.ObjectReference{Name:'
                '"<span class="keyword">abc</span>", UID:"podabc"})</span>\n'
                '0101 01:01:01.002 pod\n'
                '01-01T01:01:01.005Z last line')
    response = app.get('/build' + nodelog_url)
    # Fix: removed a stray Python-2 debug statement (`print response`)
    # that spammed test output and breaks under Python 3 syntax.
    self.assertIn(expected, response)
def test_timestamp_no_apiserver(self):
    """Test parse_by_timestamp and get_woven_logs without an apiserver file.

    - Weave separate logs together by timestamp
    - Check that lines without timestamp are combined
    - Test different timestamp formats
    - no kube-apiserver.log
    """
    kubelet_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log'
    proxy_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kube-proxy.log'
    query_string = 'nodelog?pod=abc&junit=junit_01.xml&weave=on&logfiles=%s&logfiles=%s' % (
        kubelet_filepath, proxy_filepath)
    nodelog_url = self.BUILD_DIR + query_string
    init_build(self.BUILD_DIR)
    write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
    # Same weaving scenario as test_parse_by_timestamp, but the second
    # log is kube-proxy instead of the default kube-apiserver.
    write(kubelet_filepath,
          'abc\n0101 01:01:01.001 Event(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
    write(proxy_filepath,
          '0101 01:01:01.000 proxy\n0101 01:01:01.002 pod\n01-01T01:01:01.005Z last line')
    expected = ('0101 01:01:01.000 proxy\n'
                '<span class="highlight">abc0101 01:01:01.001 Event(api.ObjectReference{Name:'
                '"<span class="keyword">abc</span>", UID:"podabc"})</span>\n'
                '0101 01:01:01.002 pod\n'
                '01-01T01:01:01.005Z last line')
    response = app.get('/build' + nodelog_url)
    self.assertIn(expected, response)
| gubernator/main_test.py | 9,134 | !/usr/bin/env python Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. redirect GCS calls to the local proxy pylint: disable=too-many-public-methods | 659 | en | 0.826781 |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
# NOTE(review): SWIG emits the importlib branch behind an impossible
# version guard (9, 9, 9), so on every real interpreter the elif/else
# branches below are the ones that actually run.
if _swig_python_version_info >= (9, 9, 9):
    def swig_import_helper():
        # Resolve the compiled _gdalconst extension relative to this
        # package first; fall back to a top-level import.
        import importlib
        pkg = __name__.rpartition('.')[0]
        mname = '.'.join((pkg, '_gdalconst')).lstrip('.')
        try:
            return importlib.import_module(mname)
        except ImportError:
            return importlib.import_module('_gdalconst')
    _gdalconst = swig_import_helper()
    del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
    def swig_import_helper():
        # Python 2.6+: locate the extension module next to this file
        # using the (now deprecated) imp machinery.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_gdalconst', [dirname(__file__)])
        except ImportError:
            # Not found beside this file; let a normal import resolve it.
            import _gdalconst
            return _gdalconst
        try:
            _mod = imp.load_module('_gdalconst', fp, pathname, description)
        finally:
            # imp.find_module may hand back an open file; always close it.
            if fp is not None:
                fp.close()
        return _mod
    _gdalconst = swig_import_helper()
    del swig_import_helper
else:
    # Ancient Pythons: plain import.
    import _gdalconst
del _swig_python_version_info
try:
    # Alias used by generated proxy classes; property exists on 2.2+.
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
try:
    # Python 3 spelling of the builtins module; fall back to Python 2's.
    import builtins as __builtin__
except ImportError:
    import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    """Dynamic (non-static) attribute setter used by SWIG proxy classes."""
    return _swig_setattr_nondynamic(self, class_type, name, value, static=0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == "thisown"):
return self.this.own(value)
if hasattr(self, name) or (name == "this"):
set(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
# --- Raster band data types (GDALDataType) ---
GDT_Unknown = _gdalconst.GDT_Unknown
GDT_Byte = _gdalconst.GDT_Byte
GDT_UInt16 = _gdalconst.GDT_UInt16
GDT_Int16 = _gdalconst.GDT_Int16
GDT_UInt32 = _gdalconst.GDT_UInt32
GDT_Int32 = _gdalconst.GDT_Int32
GDT_Float32 = _gdalconst.GDT_Float32
GDT_Float64 = _gdalconst.GDT_Float64
GDT_CInt16 = _gdalconst.GDT_CInt16
GDT_CInt32 = _gdalconst.GDT_CInt32
GDT_CFloat32 = _gdalconst.GDT_CFloat32
GDT_CFloat64 = _gdalconst.GDT_CFloat64
GDT_TypeCount = _gdalconst.GDT_TypeCount
# --- Dataset access modes (GDALAccess) ---
GA_ReadOnly = _gdalconst.GA_ReadOnly
GA_Update = _gdalconst.GA_Update
# --- Raster I/O direction (GDALRWFlag) ---
GF_Read = _gdalconst.GF_Read
GF_Write = _gdalconst.GF_Write
# --- Resampling algorithms for RasterIO (GDALRIOResampleAlg) ---
GRIORA_NearestNeighbour = _gdalconst.GRIORA_NearestNeighbour
GRIORA_Bilinear = _gdalconst.GRIORA_Bilinear
GRIORA_Cubic = _gdalconst.GRIORA_Cubic
GRIORA_CubicSpline = _gdalconst.GRIORA_CubicSpline
GRIORA_Lanczos = _gdalconst.GRIORA_Lanczos
GRIORA_Average = _gdalconst.GRIORA_Average
GRIORA_Mode = _gdalconst.GRIORA_Mode
GRIORA_Gauss = _gdalconst.GRIORA_Gauss
# --- Band color interpretations (GDALColorInterp) ---
GCI_Undefined = _gdalconst.GCI_Undefined
GCI_GrayIndex = _gdalconst.GCI_GrayIndex
GCI_PaletteIndex = _gdalconst.GCI_PaletteIndex
GCI_RedBand = _gdalconst.GCI_RedBand
GCI_GreenBand = _gdalconst.GCI_GreenBand
GCI_BlueBand = _gdalconst.GCI_BlueBand
GCI_AlphaBand = _gdalconst.GCI_AlphaBand
GCI_HueBand = _gdalconst.GCI_HueBand
GCI_SaturationBand = _gdalconst.GCI_SaturationBand
GCI_LightnessBand = _gdalconst.GCI_LightnessBand
GCI_CyanBand = _gdalconst.GCI_CyanBand
GCI_MagentaBand = _gdalconst.GCI_MagentaBand
GCI_YellowBand = _gdalconst.GCI_YellowBand
GCI_BlackBand = _gdalconst.GCI_BlackBand
GCI_YCbCr_YBand = _gdalconst.GCI_YCbCr_YBand
GCI_YCbCr_CrBand = _gdalconst.GCI_YCbCr_CrBand
GCI_YCbCr_CbBand = _gdalconst.GCI_YCbCr_CbBand
# --- Warp / overview resampling algorithms (GDALResampleAlg) ---
GRA_NearestNeighbour = _gdalconst.GRA_NearestNeighbour
GRA_Bilinear = _gdalconst.GRA_Bilinear
GRA_Cubic = _gdalconst.GRA_Cubic
GRA_CubicSpline = _gdalconst.GRA_CubicSpline
GRA_Lanczos = _gdalconst.GRA_Lanczos
GRA_Average = _gdalconst.GRA_Average
GRA_Mode = _gdalconst.GRA_Mode
GRA_Max = _gdalconst.GRA_Max
GRA_Min = _gdalconst.GRA_Min
GRA_Med = _gdalconst.GRA_Med
GRA_Q1 = _gdalconst.GRA_Q1
GRA_Q3 = _gdalconst.GRA_Q3
# --- Palette interpretations (GDALPaletteInterp) ---
GPI_Gray = _gdalconst.GPI_Gray
GPI_RGB = _gdalconst.GPI_RGB
GPI_CMYK = _gdalconst.GPI_CMYK
GPI_HLS = _gdalconst.GPI_HLS
# --- CPL XML node types (CPLXMLNodeType) ---
CXT_Element = _gdalconst.CXT_Element
CXT_Text = _gdalconst.CXT_Text
CXT_Attribute = _gdalconst.CXT_Attribute
CXT_Comment = _gdalconst.CXT_Comment
CXT_Literal = _gdalconst.CXT_Literal
# --- Error severities (CPLErr) ---
CE_None = _gdalconst.CE_None
CE_Debug = _gdalconst.CE_Debug
CE_Warning = _gdalconst.CE_Warning
CE_Failure = _gdalconst.CE_Failure
CE_Fatal = _gdalconst.CE_Fatal
# --- Error numbers (CPLErrorNum) ---
CPLE_None = _gdalconst.CPLE_None
CPLE_AppDefined = _gdalconst.CPLE_AppDefined
CPLE_OutOfMemory = _gdalconst.CPLE_OutOfMemory
CPLE_FileIO = _gdalconst.CPLE_FileIO
CPLE_OpenFailed = _gdalconst.CPLE_OpenFailed
CPLE_IllegalArg = _gdalconst.CPLE_IllegalArg
CPLE_NotSupported = _gdalconst.CPLE_NotSupported
CPLE_AssertionFailed = _gdalconst.CPLE_AssertionFailed
CPLE_NoWriteAccess = _gdalconst.CPLE_NoWriteAccess
CPLE_UserInterrupt = _gdalconst.CPLE_UserInterrupt
CPLE_ObjectNull = _gdalconst.CPLE_ObjectNull
CPLE_HttpResponse = _gdalconst.CPLE_HttpResponse
CPLE_AWSBucketNotFound = _gdalconst.CPLE_AWSBucketNotFound
CPLE_AWSObjectNotFound = _gdalconst.CPLE_AWSObjectNotFound
CPLE_AWSAccessDenied = _gdalconst.CPLE_AWSAccessDenied
CPLE_AWSInvalidCredentials = _gdalconst.CPLE_AWSInvalidCredentials
CPLE_AWSSignatureDoesNotMatch = _gdalconst.CPLE_AWSSignatureDoesNotMatch
# --- Flags for gdal.OpenEx ---
OF_ALL = _gdalconst.OF_ALL
OF_RASTER = _gdalconst.OF_RASTER
OF_VECTOR = _gdalconst.OF_VECTOR
OF_GNM = _gdalconst.OF_GNM
OF_READONLY = _gdalconst.OF_READONLY
OF_UPDATE = _gdalconst.OF_UPDATE
OF_SHARED = _gdalconst.OF_SHARED
OF_VERBOSE_ERROR = _gdalconst.OF_VERBOSE_ERROR
# --- Driver metadata item names (DMD_*) ---
DMD_LONGNAME = _gdalconst.DMD_LONGNAME
DMD_HELPTOPIC = _gdalconst.DMD_HELPTOPIC
DMD_MIMETYPE = _gdalconst.DMD_MIMETYPE
DMD_EXTENSION = _gdalconst.DMD_EXTENSION
DMD_EXTENSIONS = _gdalconst.DMD_EXTENSIONS
DMD_CONNECTION_PREFIX = _gdalconst.DMD_CONNECTION_PREFIX
DMD_CREATIONOPTIONLIST = _gdalconst.DMD_CREATIONOPTIONLIST
DMD_CREATIONDATATYPES = _gdalconst.DMD_CREATIONDATATYPES
DMD_CREATIONFIELDDATATYPES = _gdalconst.DMD_CREATIONFIELDDATATYPES
DMD_SUBDATASETS = _gdalconst.DMD_SUBDATASETS
# --- Driver capability metadata names (DCAP_*) ---
DCAP_OPEN = _gdalconst.DCAP_OPEN
DCAP_CREATE = _gdalconst.DCAP_CREATE
DCAP_CREATECOPY = _gdalconst.DCAP_CREATECOPY
DCAP_VIRTUALIO = _gdalconst.DCAP_VIRTUALIO
DCAP_RASTER = _gdalconst.DCAP_RASTER
DCAP_VECTOR = _gdalconst.DCAP_VECTOR
DCAP_NOTNULL_FIELDS = _gdalconst.DCAP_NOTNULL_FIELDS
DCAP_DEFAULT_FIELDS = _gdalconst.DCAP_DEFAULT_FIELDS
DCAP_NOTNULL_GEOMFIELDS = _gdalconst.DCAP_NOTNULL_GEOMFIELDS
# --- String escaping schemes (CPLES) ---
CPLES_BackslashQuotable = _gdalconst.CPLES_BackslashQuotable
CPLES_XML = _gdalconst.CPLES_XML
CPLES_URL = _gdalconst.CPLES_URL
CPLES_SQL = _gdalconst.CPLES_SQL
CPLES_CSV = _gdalconst.CPLES_CSV
# --- Raster attribute table field types (GDALRATFieldType) ---
GFT_Integer = _gdalconst.GFT_Integer
GFT_Real = _gdalconst.GFT_Real
GFT_String = _gdalconst.GFT_String
# --- Raster attribute table field usages (GDALRATFieldUsage) ---
GFU_Generic = _gdalconst.GFU_Generic
GFU_PixelCount = _gdalconst.GFU_PixelCount
GFU_Name = _gdalconst.GFU_Name
GFU_Min = _gdalconst.GFU_Min
GFU_Max = _gdalconst.GFU_Max
GFU_MinMax = _gdalconst.GFU_MinMax
GFU_Red = _gdalconst.GFU_Red
GFU_Green = _gdalconst.GFU_Green
GFU_Blue = _gdalconst.GFU_Blue
GFU_Alpha = _gdalconst.GFU_Alpha
GFU_RedMin = _gdalconst.GFU_RedMin
GFU_GreenMin = _gdalconst.GFU_GreenMin
GFU_BlueMin = _gdalconst.GFU_BlueMin
GFU_AlphaMin = _gdalconst.GFU_AlphaMin
GFU_RedMax = _gdalconst.GFU_RedMax
GFU_GreenMax = _gdalconst.GFU_GreenMax
GFU_BlueMax = _gdalconst.GFU_BlueMax
GFU_AlphaMax = _gdalconst.GFU_AlphaMax
GFU_MaxCount = _gdalconst.GFU_MaxCount
# --- Raster attribute table types (GDALRATTableType) ---
GRTT_THEMATIC = _gdalconst.GRTT_THEMATIC
GRTT_ATHEMATIC = _gdalconst.GRTT_ATHEMATIC
# --- Mask band flags (GMF_*) ---
GMF_ALL_VALID = _gdalconst.GMF_ALL_VALID
GMF_PER_DATASET = _gdalconst.GMF_PER_DATASET
GMF_ALPHA = _gdalconst.GMF_ALPHA
GMF_NODATA = _gdalconst.GMF_NODATA
# --- GetDataCoverageStatus return flags ---
GDAL_DATA_COVERAGE_STATUS_UNIMPLEMENTED = _gdalconst.GDAL_DATA_COVERAGE_STATUS_UNIMPLEMENTED
GDAL_DATA_COVERAGE_STATUS_DATA = _gdalconst.GDAL_DATA_COVERAGE_STATUS_DATA
GDAL_DATA_COVERAGE_STATUS_EMPTY = _gdalconst.GDAL_DATA_COVERAGE_STATUS_EMPTY
# --- Async raster I/O status (GDALAsyncStatusType) ---
GARIO_PENDING = _gdalconst.GARIO_PENDING
GARIO_UPDATE = _gdalconst.GARIO_UPDATE
GARIO_ERROR = _gdalconst.GARIO_ERROR
GARIO_COMPLETE = _gdalconst.GARIO_COMPLETE
# --- Tile organization (GDALTileOrganization) ---
GTO_TIP = _gdalconst.GTO_TIP
GTO_BIT = _gdalconst.GTO_BIT
GTO_BSQ = _gdalconst.GTO_BSQ
| venv/Lib/site-packages/osgeo/gdalconst.py | 9,328 | This file was automatically generated by SWIG (http://www.swig.org). Version 3.0.12 Do not make changes to this file unless you know what you are doing--modify the SWIG interface file instead. Python < 2.2 doesn't have 'property'. | 230 | en | 0.889602 |
from enum import IntFlag, IntEnum
import numpy as np
from . import Base
from . import ByteIO
from .axis_interp_rule import AxisInterpRule
from .jiggle_bone import JiggleRule
from .quat_interp_bone import QuatInterpRule
from ....utils.math_utilities import quat_to_matrix
class BoneFlags(IntFlag):
    """Bit flags describing how a bone is animated and what model data uses it."""
    # BONE_CALCULATE_MASK = 0x1F
    PHYSICALLY_SIMULATED = 0x01  # bone is physically simulated when physics are active
    PHYSICS_PROCEDURAL = 0x02  # procedural when physics is active
    ALWAYS_PROCEDURAL = 0x04  # bone is always procedurally animated
    # bone aligns to the screen, not constrained in motion.
    SCREEN_ALIGN_SPHERE = 0x08
    # bone aligns to the screen, constrained by it's own axis.
    SCREEN_ALIGN_CYLINDER = 0x10
    # BONE_USED_MASK = 0x0007FF00
    USED_BY_ANYTHING = 0x0007FF00
    USED_BY_HITBOX = 0x00000100  # bone (or child) is used by a hit box
    # bone (or child) is used by an attachment point
    USED_BY_ATTACHMENT = 0x00000200
    USED_BY_VERTEX_MASK = 0x0003FC00
    # bone (or child) is used by the toplevel model via skinned vertex
    USED_BY_VERTEX_LOD0 = 0x00000400
    USED_BY_VERTEX_LOD1 = 0x00000800
    USED_BY_VERTEX_LOD2 = 0x00001000
    USED_BY_VERTEX_LOD3 = 0x00002000
    USED_BY_VERTEX_LOD4 = 0x00004000
    USED_BY_VERTEX_LOD5 = 0x00008000
    USED_BY_VERTEX_LOD6 = 0x00010000
    USED_BY_VERTEX_LOD7 = 0x00020000
    # bone is available for bone merge to occur against it
    USED_BY_BONE_MERGE = 0x00040000
class Contents(IntFlag):
    """Source-engine content/surface bit flags stored per bone (collision semantics)."""
    # EMPTY = 0 # No contents
    SOLID = 0x1  # an eye is never valid in a solid
    WINDOW = 0x2  # translucent, but not watery (glass)
    AUX = 0x4
    # alpha-tested "grate" textures. Bullets/sight pass through, but solids don't
    GRATE = 0x8
    SLIME = 0x10
    WATER = 0x20
    BLOCKLOS = 0x40  # block AI line of sight
    # things that cannot be seen through (may be non-solid though)
    OPAQUE = 0x80
    TESTFOGVOLUME = 0x100
    UNUSED = 0x200
    # unused
    # NOTE: If it's visible, grab from the top + update LAST_VISIBLE_CONTENTS
    # if not visible, then grab from the bottom.
    # OPAQUE + SURF_NODRAW count as OPAQUE (shadow-casting
    # toolsblocklight textures)
    BLOCKLIGHT = 0x400
    TEAM1 = 0x800  # per team contents used to differentiate collisions
    TEAM2 = 0x1000  # between players and objects on different teams
    # ignore OPAQUE on surfaces that have SURF_NODRAW
    IGNORE_NODRAW_OPAQUE = 0x2000
    # hits entities which are MOVETYPE_PUSH (doors, plats, etc.)
    MOVEABLE = 0x4000
    # remaining contents are non-visible, and don't eat brushes
    AREAPORTAL = 0x8000
    PLAYERCLIP = 0x10000
    MONSTERCLIP = 0x20000
    # currents can be added to any other contents, and may be mixed
    CURRENT_0 = 0x40000
    CURRENT_90 = 0x80000
    CURRENT_180 = 0x100000
    CURRENT_270 = 0x200000
    CURRENT_UP = 0x400000
    CURRENT_DOWN = 0x800000
    ORIGIN = 0x1000000  # removed before bsping an entity
    MONSTER = 0x2000000  # should never be on a brush, only in game
    DEBRIS = 0x4000000
    DETAIL = 0x8000000  # brushes to be added after vis leafs
    TRANSLUCENT = 0x10000000  # auto set if any surface has trans
    LADDER = 0x20000000
    HITBOX = 0x40000000  # use accurate hitboxes on trace
    # NOTE: These are stored in a short in the engine now. Don't use more
    # than 16 bits
    SURF_LIGHT = 0x0001  # value will hold the light strength
    # don't draw, indicates we should skylight + draw 2d sky but not draw the
    # 3D skybox
    SURF_SKY2D = 0x0002
    SURF_SKY = 0x0004  # don't draw, but add to skybox
    SURF_WARP = 0x0008  # turbulent water warp
    SURF_TRANS = 0x0010
    SURF_NOPORTAL = 0x0020  # the surface can not have a portal placed on it
    # FIXME: This is an xbox hack to work around elimination of trigger
    # surfaces, which breaks occluders
    SURF_TRIGGER = 0x0040
    SURF_NODRAW = 0x0080  # don't bother referencing the texture
    SURF_HINT = 0x0100  # make a primary bsp splitter
    SURF_SKIP = 0x0200  # completely ignore, allowing non-closed brushes
    SURF_NOLIGHT = 0x0400  # Don't calculate light
    SURF_BUMPLIGHT = 0x0800  # calculate three lightmaps for the surface for bumpmapping
    SURF_NOSHADOWS = 0x1000  # Don't receive shadows
    SURF_NODECALS = 0x2000  # Don't receive decals
    SURF_NOPAINT = SURF_NODECALS  # the surface can not have paint placed on it
    SURF_NOCHOP = 0x4000  # Don't subdivide patches on this surface
    SURF_HITBOX = 0x8000  # surface is part of a hitbox
class ProceduralBoneType(IntEnum):
    """Discriminator for the procedural-rule record attached to a bone."""
    AXISINTERP = 1
    QUATINTERP = 2
    AIMATBONE = 3
    AIMATATTACH = 4
    JIGGLE = 5
class BoneV36(Base):
    """A skeleton bone as laid out in version-36 MDL files.

    ``read`` mirrors the on-disk struct field by field; the exact call
    order is the binary contract and must not change.
    """

    def __init__(self, bone_id: int):
        self.bone_id = bone_id
        self.name = ""
        self.parent_bone_index = 0
        self.bone_controller_index = []
        self.scale = 0
        self.position = []
        self.quat = []
        self.anim_channels = 0
        self.rotation = []
        self.position_scale = []
        self.rotation_scale = []
        self.pose_to_bone = []
        self.q_alignment = []
        self.flags = BoneFlags(0)
        self.procedural_rule_type = 0
        self.physics_bone_index = 0
        self.contents = Contents(0)
        self.surface_prop = ''
        self.procedural_rule = None

    @property
    def children(self):
        """Bones in the owning MDL whose parent index points at this bone."""
        from ..v36.mdl_file import MdlV36
        mdl: MdlV36 = self.get_value("MDL")
        childes = []
        if mdl.bones:
            bone_index = mdl.bones.index(self)
            for bone in mdl.bones:
                # NOTE(review): self is excluded by *name*, not identity --
                # presumably bone names are unique within a model; verify.
                if bone.name == self.name:
                    continue
                if bone.parent_bone_index == bone_index:
                    childes.append(bone)
        return childes

    @property
    def matrix(self):
        """4x4 local transform assembled from the bone quaternion and position."""
        r_matrix = quat_to_matrix(self.quat)
        tmp = np.identity(4)
        tmp[0, :3] = r_matrix[0]
        tmp[1, :3] = r_matrix[1]
        tmp[2, :3] = r_matrix[2]
        t_matrix = np.array([
            [1, 0, 0, self.position[0]],
            [0, 1, 0, self.position[1]],
            [0, 0, 1, self.position[2]],
            [0, 0, 0, 1],
        ], dtype=np.float32)
        # Translation composed with rotation; the leading identity is a no-op.
        return np.identity(4) @ t_matrix @ tmp

    @property
    def parent(self):
        """Parent bone object, or None for root bones (parent index -1)."""
        from ..v36.mdl_file import MdlV36
        mdl: MdlV36 = self.get_value("MDL")
        if mdl.bones and self.parent_bone_index != -1:
            return mdl.bones[self.parent_bone_index]
        return None

    def read(self, reader: ByteIO):
        """Parse one v36 bone record starting at the reader's position."""
        entry = reader.tell()
        # Strings are stored as offsets relative to the record start.
        self.name = reader.read_source1_string(entry)
        self.parent_bone_index = reader.read_int32()
        self.bone_controller_index = reader.read_fmt('6f')
        self.position = reader.read_fmt('3f')
        self.rotation = reader.read_fmt('3f')
        self.position_scale = reader.read_fmt('3f')
        self.rotation_scale = reader.read_fmt('3f')
        # 3x4 matrix stored row-major on disk.
        self.pose_to_bone = np.array(reader.read_fmt('12f')).reshape((3, 4)).transpose()
        self.q_alignment = reader.read_fmt('4f')
        self.flags = BoneFlags(reader.read_uint32())
        self.procedural_rule_type = reader.read_uint32()
        procedural_rule_offset = reader.read_uint32()
        self.physics_bone_index = reader.read_uint32()
        self.surface_prop = reader.read_source1_string(entry)
        self.quat = reader.read_fmt('4f')
        self.contents = Contents(reader.read_uint32())
        # Unused/reserved trailing ints.
        reader.skip(3 * 4)
        if self.procedural_rule_type != 0 and procedural_rule_offset != 0:
            # Rule data lives elsewhere in the file; jump there and back.
            with reader.save_current_pos():
                reader.seek(entry + procedural_rule_offset)
                if self.procedural_rule_type == ProceduralBoneType.AXISINTERP:
                    self.procedural_rule = AxisInterpRule()
                if self.procedural_rule_type == ProceduralBoneType.QUATINTERP:
                    self.procedural_rule = QuatInterpRule()
                if self.procedural_rule_type == ProceduralBoneType.JIGGLE:
                    self.procedural_rule = JiggleRule()
                if self.procedural_rule:
                    self.procedural_rule.read(reader)
class BoneV49(BoneV36):
    """Bone parser for v44+ MDL files; differs from v36 in field order and padding."""

    def read(self, reader: ByteIO):
        """Parse one v49-layout bone record starting at the reader's position."""
        entry = reader.tell()
        self.name = reader.read_source1_string(entry)
        self.parent_bone_index = reader.read_int32()
        self.bone_controller_index = reader.read_fmt('6f')
        self.position = reader.read_fmt('3f')
        # Unlike v36, the quaternion directly follows the position here.
        self.quat = reader.read_fmt('4f')
        self.rotation = reader.read_fmt('3f')
        self.position_scale = reader.read_fmt('3f')
        self.rotation_scale = reader.read_fmt('3f')
        self.pose_to_bone = np.array(reader.read_fmt('12f')).reshape((3, 4)).transpose()
        self.q_alignment = reader.read_fmt('4f')
        self.flags = BoneFlags(reader.read_uint32())
        self.procedural_rule_type = reader.read_uint32()
        procedural_rule_offset = reader.read_uint32()
        self.physics_bone_index = reader.read_uint32()
        self.surface_prop = reader.read_source1_string(entry)
        self.contents = Contents(reader.read_uint32())
        # Version-dependent reserved/extension fields.
        if self.get_value('mdl_version') >= 44:
            _ = [reader.read_uint32() for _ in range(8)]
        if self.get_value('mdl_version') >= 53:
            reader.skip(4 * 7)
        if self.procedural_rule_type != 0 and procedural_rule_offset != 0:
            with reader.save_current_pos():
                reader.seek(entry + procedural_rule_offset)
                if self.procedural_rule_type == ProceduralBoneType.AXISINTERP:
                    self.procedural_rule = AxisInterpRule()
                if self.procedural_rule_type == ProceduralBoneType.QUATINTERP:
                    self.procedural_rule = QuatInterpRule()
                if self.procedural_rule_type == ProceduralBoneType.JIGGLE:
                    self.procedural_rule = JiggleRule()
                if self.procedural_rule:
                    self.procedural_rule.read(reader)
| library/source1/mdl/structs/bone.py | 9,974 | BONE_CALCULATE_MASK = 0x1F bone is physically simulated when physics are active procedural when physics is active bone is always procedurally animated bone aligns to the screen, not constrained in motion. bone aligns to the screen, constrained by it's own axis. BONE_USED_MASK = 0x0007FF00 bone (or child) is used by a hit box bone (or child) is used by an attachment point bone (or child) is used by the toplevel model via skinned vertex bone is available for bone merge to occur against it EMPTY = 0 No contents an eye is never valid in a solid translucent, but not watery (glass) alpha-tested "grate" textures. Bullets/sight pass through, but solids don't block AI line of sight things that cannot be seen through (may be non-solid though) unused NOTE: If it's visible, grab from the top + update LAST_VISIBLE_CONTENTS if not visible, then grab from the bottom. OPAQUE + SURF_NODRAW count as OPAQUE (shadow-casting toolsblocklight textures) per team contents used to differentiate collisions between players and objects on different teams ignore OPAQUE on surfaces that have SURF_NODRAW hits entities which are MOVETYPE_PUSH (doors, plats, etc.) remaining contents are non-visible, and don't eat brushes currents can be added to any other contents, and may be mixed removed before bsping an entity should never be on a brush, only in game brushes to be added after vis leafs auto set if any surface has trans use accurate hitboxes on trace NOTE: These are stored in a short in the engine now. 
Don't use more than 16 bits value will hold the light strength don't draw, indicates we should skylight + draw 2d sky but not draw the 3D skybox don't draw, but add to skybox turbulent water warp the surface can not have a portal placed on it FIXME: This is an xbox hack to work around elimination of trigger surfaces, which breaks occluders don't bother referencing the texture make a primary bsp splitter completely ignore, allowing non-closed brushes Don't calculate light calculate three lightmaps for the surface for bumpmapping Don't receive shadows Don't receive decals the surface can not have paint placed on it Don't subdivide patches on this surface surface is part of a hitbox | 2,192 | en | 0.932847 |
"""
Core OpenBCI object for handling connections and samples from the WiFi Shield
Note that the LIB will take care on its own to print incoming ASCII messages if any (FIXME, BTW).
EXAMPLE USE:
def handle_sample(sample):
print(sample.channels_data)
wifi = OpenBCIWifi()
wifi.start(handle_sample)
TODO: Cyton/Ganglion JSON
TODO: Ganglion Raw
TODO: Cyton Raw
"""
import asyncore
import atexit
import json
import logging
import re
import socket
import timeit
try:
import urllib2
except ImportError:
import urllib
import requests
import xmltodict
from openbci.utils import k, ParseRaw, OpenBCISample, ssdp
SAMPLE_RATE = 0 # Hz
'''
#Commands for in SDK
command_stop = "s";
command_startBinary = "b";
'''
class OpenBCIWiFi(object):
"""
Handle a connection to an OpenBCI wifi shield.
Args:
ip_address: The IP address of the WiFi Shield, "None" to attempt auto-detect.
shield_name: The unique name of the WiFi Shield, such as `OpenBCI-2AD4`, will use SSDP to get IP address still,
if `shield_name` is "None" and `ip_address` is "None", will connect to the first WiFi Shield found using SSDP
sample_rate: The sample rate to set the attached board to. If the sample rate picked is not a sample rate the attached
board can support, i.e. you send 300 to Cyton, then error will be thrown.
log:
timeout: in seconds, disconnect / reconnect after a period without new data -- should be high if impedance check
max_packets_to_skip: will try to disconnect / reconnect after too many packets are skipped
"""
def __init__(self, ip_address=None, shield_name=None, sample_rate=None, log=True, timeout=3,
             max_packets_to_skip=20, latency=10000, high_speed=True, ssdp_attempts=5,
             num_channels=8, local_ip_address=None):
    """Initialize state, open the local TCP server and locate/connect the shield.

    See the class docstring for parameter semantics. Side effects: binds
    a local TCP server socket and, unless ip_address is given, performs
    up to ssdp_attempts SSDP scans of the local network.
    """
    # these one are used
    self.daisy = False
    self.gains = None
    self.high_speed = high_speed
    self.impedance = False
    self.ip_address = ip_address
    self.latency = latency
    self.log = log  # print_incoming_text needs log
    self.max_packets_to_skip = max_packets_to_skip
    self.num_channels = num_channels
    self.sample_rate = sample_rate
    self.shield_name = shield_name
    self.ssdp_attempts = ssdp_attempts
    self.streaming = False
    self.timeout = timeout
    # might be handy to know API
    self.board_type = "none"
    # number of EEG channels
    self.eeg_channels_per_sample = 0
    self.read_state = 0
    self.log_packet_count = 0
    self.packets_dropped = 0
    self.time_last_packet = 0
    if self.log:
        print("Welcome to OpenBCI Native WiFi Shield Driver - Please contribute code!")
    self.local_ip_address = local_ip_address
    if not self.local_ip_address:
        self.local_ip_address = self._get_local_ip_address()
    # Intentionally bind to port 0 so the OS picks a free port.
    self.local_wifi_server = WiFiShieldServer(self.local_ip_address, 0)
    self.local_wifi_server_port = self.local_wifi_server.socket.getsockname()[1]
    if self.log:
        print("Opened socket on %s:%d" % (self.local_ip_address, self.local_wifi_server_port))
    if ip_address is None:
        # No IP given: scan via SSDP; on_shield_found connects on success.
        for i in range(ssdp_attempts):
            try:
                self.find_wifi_shield(wifi_shield_cb=self.on_shield_found)
                break
            except OSError:
                # Try again
                if self.log:
                    print("Did not find any WiFi Shields")
    else:
        self.on_shield_found(ip_address)
def on_shield_found(self, ip_address):
    """Callback fired once a shield's IP is known: store it and connect."""
    self.ip_address = ip_address
    self.connect()
    # Disconnects from board when terminated
    atexit.register(self.disconnect)
def loop(self):
    """Enter the asyncore event loop; blocks until all channels are closed."""
    asyncore.loop()
def _get_local_ip_address(self):
    """
    Gets the local ip address of this computer.

    "Connects" a UDP socket to a public address -- no packet is actually
    sent for UDP connect -- purely so the OS selects the outbound
    interface, whose address is then read back.

    @returns str Local IP address
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        # Fix: previously the socket leaked if connect() raised.
        s.close()
def getBoardType(self):
""" Returns the version of the board """
return self.board_type
def setImpedance(self, flag):
""" Enable/disable impedance measure """
self.impedance = bool(flag)
def connect(self):
    """ Connect to the board and configure it. Note: recreates various objects upon call.

    Sequence: GET /board to identify the attached board, derive per-board
    channel gains, then POST /tcp so the shield streams to our local server.
    Raises RuntimeError if no board is attached, RuntimeWarning if the
    shield cannot reach our server.
    """
    if self.ip_address is None:
        raise ValueError('self.ip_address cannot be None')
    if self.log:
        print("Init WiFi connection with IP: " + self.ip_address)
    """
    Docs on these HTTP requests and more are found:
    https://app.swaggerhub.com/apis/pushtheworld/openbci-wifi-server/1.3.0
    """
    res_board = requests.get("http://%s/board" % self.ip_address)
    if res_board.status_code == 200:
        board_info = res_board.json()
        if not board_info['board_connected']:
            raise RuntimeError("No board connected to WiFi Shield. To learn how to connect to a Cyton or Ganglion visit http://docs.openbci.com/Tutorials/03-Wifi_Getting_Started_Guide")
        self.board_type = board_info['board_type']
        self.eeg_channels_per_sample = board_info['num_channels']
        if self.log:
            print("Connected to %s with %s channels" % (self.board_type, self.eeg_channels_per_sample))
        # Fixed per-board ADC gains used to scale raw counts to volts.
        self.gains = None
        if self.board_type == k.BOARD_CYTON:
            self.gains = [24, 24, 24, 24, 24, 24, 24, 24]
            self.daisy = False
        elif self.board_type == k.BOARD_DAISY:
            self.gains = [24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24]
            self.daisy = True
        elif self.board_type == k.BOARD_GANGLION:
            self.gains = [51, 51, 51, 51]
            self.daisy = False
        self.local_wifi_server.set_daisy(daisy=self.daisy)
        self.local_wifi_server.set_parser(ParseRaw(gains=self.gains, board_type=self.board_type))
    if self.high_speed:
        output_style = 'raw'
    else:
        output_style = 'json'
    # Tell the shield where to stream: our local server's address/port.
    res_tcp_post = requests.post("http://%s/tcp" % self.ip_address,
                                 json={
                                     'ip': self.local_ip_address,
                                     'port': self.local_wifi_server_port,
                                     'output': output_style,
                                     'delimiter': True,
                                     'latency': self.latency
                                 })
    if res_tcp_post.status_code == 200:
        tcp_status = res_tcp_post.json()
        if tcp_status['connected']:
            if self.log:
                print("WiFi Shield to Python TCP Socket Established")
        else:
            raise RuntimeWarning("WiFi Shield is not able to connect to local server. Please open an issue.")
def init_streaming(self):
    """ Tell the board to record like crazy.

    Issues GET /stream/start on the shield; on success marks this object
    as streaming and resets the drop/timeout bookkeeping.
    """
    res_stream_start = requests.get("http://%s/stream/start" % self.ip_address)
    if res_stream_start.status_code == 200:
        self.streaming = True
        self.packets_dropped = 0
        self.time_last_packet = timeit.default_timer()
    else:
        raise EnvironmentError("Unable to start streaming. Check API for status code %d on /stream/start" % res_stream_start.status_code)
def find_wifi_shield(self, shield_name=None, wifi_shield_cb=None):
    """Search the local network for OpenBCI WiFi Shields via SSDP.

    Scans for ``self.timeout`` seconds. Every shield that answers is
    recorded; ``wifi_shield_cb`` is invoked with the IP address of each
    shield found (or only the one matching ``shield_name`` when given).

    Fixes vs. previous revision: the docstring wrongly described Ganglion
    BLE discovery; removed ``found_shield`` (a dead local inside the
    closure -- assignment there never reached the outer scope) and the
    unused ``ssdp_hits`` binding.

    :param shield_name: exact shield name to match (e.g. ``OpenBCI-2AD4``);
        ``None`` accepts the first/any shield.
    :param wifi_shield_cb: callback receiving the shield's IP address (str).
    :return: str IP address of the first shield found.
    :raises OSError: if no shield responded within the timeout.
    """
    if self.log:
        print("Try to find WiFi shields on your local wireless network")
        print("Scanning for %d seconds nearby devices..." % self.timeout)

    list_ip = []
    list_id = []

    def wifi_shield_found(response):
        # Fetch and parse the shield's UPnP device description to get
        # its advertised name and base URL (which embeds the IP).
        res = requests.get(response.location, verify=False).text
        device_description = xmltodict.parse(res)
        cur_shield_name = str(device_description['root']['device']['serialNumber'])
        cur_base_url = str(device_description['root']['URLBase'])
        cur_ip_address = re.findall(r'[0-9]+(?:\.[0-9]+){3}', cur_base_url)[0]
        list_id.append(cur_shield_name)
        list_ip.append(cur_ip_address)
        if shield_name is None:
            print("Found WiFi Shield %s with IP Address %s" % (cur_shield_name, cur_ip_address))
            if wifi_shield_cb is not None:
                wifi_shield_cb(cur_ip_address)
        elif shield_name == cur_shield_name:
            if wifi_shield_cb is not None:
                wifi_shield_cb(cur_ip_address)

    ssdp.discover("urn:schemas-upnp-org:device:Basic:1",
                  timeout=self.timeout, wifi_found_cb=wifi_shield_found)

    nb_wifi_shields = len(list_id)
    if nb_wifi_shields < 1:
        print("No WiFi Shields found ;(")
        raise OSError('Cannot find OpenBCI WiFi Shield with local name')
    if nb_wifi_shields > 1:
        print(
            "Found " + str(nb_wifi_shields) +
            ", selecting first named: " + list_id[0] +
            " with IPV4: " + list_ip[0])
    return list_ip[0]
def wifi_write(self, output):
    """
    Pass through commands from the WiFi Shield to the Carrier board.

    POSTs the command string to the shield's /command endpoint.

    :param output: command string understood by the attached board.
    :return: the shield's textual response on HTTP 200.
    :raises RuntimeError: on any non-200 response.
    """
    res_command_post = requests.post("http://%s/command" % self.ip_address,
                                     json={'command': output})
    if res_command_post.status_code == 200:
        ret_val = res_command_post.text
        if self.log:
            print(ret_val)
        return ret_val
    else:
        if self.log:
            print("Error code: %d %s" % (res_command_post.status_code, res_command_post.text))
        raise RuntimeError("Error code: %d %s" % (res_command_post.status_code, res_command_post.text))
def getSampleRate(self):
return self.sample_rate
def getNbEEGChannels(self):
"""Will not get new data on impedance check."""
return self.eeg_channels_per_sample
def start_streaming(self, callback, lapse=-1):
"""
Start handling streaming data from the board. Call a provided callback
for every single sample that is processed
Args:
callback: A callback function -- or a list of functions -- that will receive a single argument of the
OpenBCISample object captured.
"""
start_time = timeit.default_timer()
# Enclose callback function in a list if it comes alone
if not isinstance(callback, list):
self.local_wifi_server.set_callback(callback)
else:
self.local_wifi_server.set_callback(callback[0])
if not self.streaming:
self.init_streaming()
# while self.streaming:
# # should the board get disconnected and we could not wait for notification anymore, a reco should be attempted through timeout mechanism
# try:
# # at most we will get one sample per packet
# self.waitForNotifications(1. / self.getSampleRate())
# except Exception as e:
# print("Something went wrong while waiting for a new sample: " + str(e))
# # retrieve current samples on the stack
# samples = self.delegate.getSamples()
# self.packets_dropped = self.delegate.getMaxPacketsDropped()
# if samples:
# self.time_last_packet = timeit.default_timer()
# for call in callback:
# for sample in samples:
# call(sample)
#
# if (lapse > 0 and timeit.default_timer() - start_time > lapse):
# self.stop();
# if self.log:
# self.log_packet_count = self.log_packet_count + 1;
#
# # Checking connection -- timeout and packets dropped
# self.check_connection()
def test_signal(self, signal):
""" Enable / disable test signal """
if signal == 0:
self.warn("Disabling synthetic square wave")
try:
self.wifi_write(']')
except Exception as e:
print("Something went wrong while setting signal: " + str(e))
elif signal == 1:
self.warn("Enabling synthetic square wave")
try:
self.wifi_write('[')
except Exception as e:
print("Something went wrong while setting signal: " + str(e))
else:
self.warn("%s is not a known test signal. Valid signal is 0-1" % signal)
def set_channel(self, channel, toggle_position):
""" Enable / disable channels """
try:
if channel > self.num_channels:
raise ValueError('Cannot set non-existant channel')
# Commands to set toggle to on position
if toggle_position == 1:
if channel is 1:
self.wifi_write('!')
if channel is 2:
self.wifi_write('@')
if channel is 3:
self.wifi_write('#')
if channel is 4:
self.wifi_write('$')
if channel is 5:
self.wifi_write('%')
if channel is 6:
self.wifi_write('^')
if channel is 7:
self.wifi_write('&')
if channel is 8:
self.wifi_write('*')
if channel is 9:
self.wifi_write('Q')
if channel is 10:
self.wifi_write('W')
if channel is 11:
self.wifi_write('E')
if channel is 12:
self.wifi_write('R')
if channel is 13:
self.wifi_write('T')
if channel is 14:
self.wifi_write('Y')
if channel is 15:
self.wifi_write('U')
if channel is 16:
self.wifi_write('I')
# Commands to set toggle to off position
elif toggle_position == 0:
if channel is 1:
self.wifi_write('1')
if channel is 2:
self.wifi_write('2')
if channel is 3:
self.wifi_write('3')
if channel is 4:
self.wifi_write('4')
if channel is 5:
self.wifi_write('5')
if channel is 6:
self.wifi_write('6')
if channel is 7:
self.wifi_write('7')
if channel is 8:
self.wifi_write('8')
if channel is 9:
self.wifi_write('q')
if channel is 10:
self.wifi_write('w')
if channel is 11:
self.wifi_write('e')
if channel is 12:
self.wifi_write('r')
if channel is 13:
self.wifi_write('t')
if channel is 14:
self.wifi_write('y')
if channel is 15:
self.wifi_write('u')
if channel is 16:
self.wifi_write('i')
except Exception as e:
print("Something went wrong while setting channels: " + str(e))
# See Cyton SDK for options
def set_channel_settings(self, channel, enabled=True, gain=24, input_type=0, include_bias=True, use_srb2=True, use_srb1=True):
try:
if channel > self.num_channels:
raise ValueError('Cannot set non-existant channel')
if self.board_type == k.BOARD_GANGLION:
raise ValueError('Cannot use with Ganglion')
ch_array = list("12345678QWERTYUI")
#defaults
command = list("x1060110X")
# Set channel
command[1] = ch_array[channel-1]
# Set power down if needed (default channel enabled)
if not enabled:
command[2] = '1'
# Set gain (default 24)
if gain == 1:
command[3] = '0'
if gain == 2:
command[3] = '1'
if gain == 4:
command[3] = '2'
if gain == 6:
command[3] = '3'
if gain == 8:
command[3] = '4'
if gain == 12:
command[3] = '5'
#TODO: Implement input type (default normal)
# Set bias inclusion (default include)
if not include_bias:
command[5] = '0'
# Set srb2 use (default use)
if not use_srb2:
command[6] = '0'
# Set srb1 use (default don't use)
if use_srb1:
command[6] = '1'
command_send = ''.join(command)
self.wifi_write(command_send)
#Make sure to update gain in wifi
self.gains[channel-1] = gain
self.local_wifi_server.set_gains(gains=self.gains)
self.local_wifi_server.set_parser(ParseRaw(gains=self.gains, board_type=self.board_type))
except ValueError as e:
print("Something went wrong while setting channel settings: " + str(e))
def set_sample_rate(self, sample_rate):
""" Change sample rate """
try:
if self.board_type == k.BOARD_CYTON or self.board_type == k.BOARD_DAISY:
if sample_rate == 250:
self.wifi_write('~6')
elif sample_rate == 500:
self.wifi_write('~5')
elif sample_rate == 1000:
self.wifi_write('~4')
elif sample_rate == 2000:
self.wifi_write('~3')
elif sample_rate == 4000:
self.wifi_write('~2')
elif sample_rate == 8000:
self.wifi_write('~1')
elif sample_rate == 16000:
self.wifi_write('~0')
else:
print("Sample rate not supported: " + str(sample_rate))
elif self.board_type == k.BOARD_GANGLION:
if sample_rate == 200:
self.wifi_write('~7')
elif sample_rate == 400:
self.wifi_write('~6')
elif sample_rate == 800:
self.wifi_write('~5')
elif sample_rate == 1600:
self.wifi_write('~4')
elif sample_rate == 3200:
self.wifi_write('~3')
elif sample_rate == 6400:
self.wifi_write('~2')
elif sample_rate == 12800:
self.wifi_write('~1')
elif sample_rate == 25600:
self.wifi_write('~0')
else:
print("Sample rate not supported: " + str(sample_rate))
else:
print("Board type not supported for setting sample rate")
except Exception as e:
print("Something went wrong while setting sample rate: " + str(e))
def set_accelerometer(self, toggle_position):
""" Enable / disable accelerometer """
try:
if self.board_type == k.BOARD_GANGLION:
# Commands to set toggle to on position
if toggle_position == 1:
self.wifi_write('n')
# Commands to set toggle to off position
elif toggle_position == 0:
self.wifi_write('N')
else:
print("Board type not supported for setting accelerometer")
except Exception as e:
print("Something went wrong while setting accelerometer: " + str(e))
"""
Clean Up (atexit)
"""
def stop(self):
print("Stopping streaming...")
self.streaming = False
# connection might be already down here
try:
if self.impedance:
print("Stopping with impedance testing")
self.wifi_write('Z')
else:
self.wifi_write('s')
except Exception as e:
print("Something went wrong while asking the board to stop streaming: " + str(e))
if self.log:
logging.warning('sent <s>: stopped streaming')
def disconnect(self):
if self.streaming:
self.stop()
# should not try to read/write anything after that, will crash
"""
SETTINGS AND HELPERS
"""
def warn(self, text):
if self.log:
# log how many packets where sent succesfully in between warnings
if self.log_packet_count:
logging.info('Data packets received:' + str(self.log_packet_count))
self.log_packet_count = 0
logging.warning(text)
print("Warning: %s" % text)
def check_connection(self):
""" Check connection quality in term of lag and number of packets drop. Reinit connection if necessary. FIXME: parameters given to the board will be lost."""
# stop checking when we're no longer streaming
if not self.streaming:
return
# check number of dropped packets and duration without new packets, deco/reco if too large
if self.packets_dropped > self.max_packets_to_skip:
self.warn("Too many packets dropped, attempt to reconnect")
self.reconnect()
elif self.timeout > 0 and timeit.default_timer() - self.time_last_packet > self.timeout:
self.warn("Too long since got new data, attempt to reconnect")
# if error, attempt to reconect
self.reconnect()
def reconnect(self):
""" In case of poor connection, will shut down and relaunch everything. FIXME: parameters given to the board will be lost."""
self.warn('Reconnecting')
self.stop()
self.disconnect()
self.connect()
self.init_streaming()
class WiFiShieldHandler(asyncore.dispatcher_with_send):
def __init__(self, sock, callback=None, high_speed=True,
parser=None, daisy=False):
asyncore.dispatcher_with_send.__init__(self, sock)
self.callback = callback
self.daisy = daisy
self.high_speed = high_speed
self.last_odd_sample = OpenBCISample()
self.parser = parser if parser is not None else ParseRaw(gains=[24, 24, 24, 24, 24, 24, 24, 24])
def handle_read(self):
data = self.recv(3000) # 3000 is the max data the WiFi shield is allowed to send over TCP
if len(data) > 2:
if self.high_speed:
packets = int(len(data)/33)
raw_data_packets = []
for i in range(packets):
raw_data_packets.append(bytearray(data[i * k.RAW_PACKET_SIZE: i * k.RAW_PACKET_SIZE + k.RAW_PACKET_SIZE]))
samples = self.parser.transform_raw_data_packets_to_sample(raw_data_packets=raw_data_packets)
for sample in samples:
# if a daisy module is attached, wait to concatenate two samples (main board + daisy)
# before passing it to callback
if self.daisy:
# odd sample: daisy sample, save for later
if ~sample.sample_number % 2:
self.last_odd_sample = sample
# even sample: concatenate and send if last sample was the first part, otherwise drop the packet
elif sample.sample_number - 1 == self.last_odd_sample.sample_number:
# the aux data will be the average between the two samples, as the channel
# samples themselves have been averaged by the board
daisy_sample = self.parser.make_daisy_sample_object_wifi(self.last_odd_sample, sample)
if self.callback is not None:
self.callback(daisy_sample)
else:
if self.callback is not None:
self.callback(sample)
else:
try:
possible_chunks = data.split('\r\n')
if len(possible_chunks) > 1:
possible_chunks = possible_chunks[:-1]
for possible_chunk in possible_chunks:
if len(possible_chunk) > 2:
chunk_dict = json.loads(possible_chunk)
if 'chunk' in chunk_dict:
for sample in chunk_dict['chunk']:
if self.callback is not None:
self.callback(sample)
else:
print("not a sample packet")
except ValueError as e:
print("failed to parse: %s" % data)
print(e)
except BaseException as e:
print(e)
class WiFiShieldServer(asyncore.dispatcher):
def __init__(self, host, port, callback=None, gains=None, high_speed=True, daisy=False):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.daisy = daisy
self.listen(5)
self.callback = None
self.handler = None
self.parser = ParseRaw(gains=gains)
self.high_speed = high_speed
def handle_accept(self):
pair = self.accept()
if pair is not None:
sock, addr = pair
print('Incoming connection from %s' % repr(addr))
self.handler = WiFiShieldHandler(sock, self.callback, high_speed=self.high_speed,
parser=self.parser, daisy=self.daisy)
def set_callback(self, callback):
self.callback = callback
if self.handler is not None:
self.handler.callback = callback
def set_daisy(self, daisy):
self.daisy = daisy
if self.handler is not None:
self.handler.daisy = daisy
def set_gains(self, gains):
self.parser.set_ads1299_scale_factors(gains)
def set_parser(self, parser):
self.parser = parser
if self.handler is not None:
self.handler.parser = parser
| openbci/wifi.py | 27,227 | Handle a connection to an OpenBCI wifi shield.
Args:
ip_address: The IP address of the WiFi Shield, "None" to attempt auto-detect.
shield_name: The unique name of the WiFi Shield, such as `OpenBCI-2AD4`, will use SSDP to get IP address still,
if `shield_name` is "None" and `ip_address` is "None", will connect to the first WiFi Shield found using SSDP
sample_rate: The sample rate to set the attached board to. If the sample rate picked is not a sample rate the attached
board can support, i.e. you send 300 to Cyton, then error will be thrown.
log:
timeout: in seconds, disconnect / reconnect after a period without new data -- should be high if impedance check
max_packets_to_skip: will try to disconnect / reconnect after too many packets are skipped
Gets the local ip address of this computer
@returns str Local IP address
Check connection quality in term of lag and number of packets drop. Reinit connection if necessary. FIXME: parameters given to the board will be lost.
Connect to the board and configure it. Note: recreates various objects upon call.
Detects Ganglion board MAC address -- if more than 1 around, will select first. Needs root privilege.
Returns the version of the board
Will not get new data on impedance check.
Tell the board to record like crazy.
In case of poor connection, will shut down and relaunch everything. FIXME: parameters given to the board will be lost.
Enable/disable impedance measure
Enable / disable accelerometer
Enable / disable channels
Change sample rate
Start handling streaming data from the board. Call a provided callback
for every single sample that is processed
Args:
callback: A callback function -- or a list of functions -- that will receive a single argument of the
OpenBCISample object captured.
Enable / disable test signal
Pass through commands from the WiFi Shield to the Carrier board
:param output:
:return:
Core OpenBCI object for handling connections and samples from the WiFi Shield
Note that the LIB will take care on its own to print incoming ASCII messages if any (FIXME, BTW).
EXAMPLE USE:
def handle_sample(sample):
print(sample.channels_data)
wifi = OpenBCIWifi()
wifi.start(handle_sample)
TODO: Cyton/Ganglion JSON
TODO: Ganglion Raw
TODO: Cyton Raw
Hz these one are used print_incoming_text needs log might be handy to know API number of EEG channels Intentionally bind to port 0 Try again Disconnects from board when terminated Enclose callback function in a list if it comes alone while self.streaming: should the board get disconnected and we could not wait for notification anymore, a reco should be attempted through timeout mechanism try: at most we will get one sample per packet self.waitForNotifications(1. / self.getSampleRate()) except Exception as e: print("Something went wrong while waiting for a new sample: " + str(e)) retrieve current samples on the stack samples = self.delegate.getSamples() self.packets_dropped = self.delegate.getMaxPacketsDropped() if samples: self.time_last_packet = timeit.default_timer() for call in callback: for sample in samples: call(sample) if (lapse > 0 and timeit.default_timer() - start_time > lapse): self.stop(); if self.log: self.log_packet_count = self.log_packet_count + 1; Checking connection -- timeout and packets dropped self.check_connection() Commands to set toggle to on position Commands to set toggle to off position See Cyton SDK for optionsdefaults Set channel Set power down if needed (default channel enabled) Set gain (default 24)TODO: Implement input type (default normal) Set bias inclusion (default include) Set srb2 use (default use) Set srb1 use (default don't use)Make sure to update gain in wifi Commands to set toggle to on position Commands to set toggle to off position connection might be already down here should not try to read/write anything after that, will crash log how many packets where sent succesfully in between warnings stop checking when we're no longer streaming check number of dropped packets and duration without new packets, deco/reco if too large if error, attempt to reconect 3000 is the max data the WiFi shield is allowed to send over TCP if a daisy module is attached, wait to concatenate two samples (main board + daisy) before passing it to 
callback odd sample: daisy sample, save for later even sample: concatenate and send if last sample was the first part, otherwise drop the packet the aux data will be the average between the two samples, as the channel samples themselves have been averaged by the board | 4,668 | en | 0.830439 |
"""
Code for understanding type annotations.
This file contains functions that turn various representations of
Python type annotations into :class:`pyanalyze.value.Value` objects.
There are three major functions:
- :func:`type_from_runtime` takes a runtime Python object, for example
  ``type_from_runtime(int)`` -> ``TypedValue(int)``.
- :func:`type_from_value` takes an existing :class:`pyanalyze.value.Value`
object. For example, evaluating the expression ``int`` will produce
``KnownValue(int)``, and calling :func:`type_from_value` on that value
will produce ``TypedValue(int)``.
- :func:`type_from_ast` takes an AST node and evaluates it into a type.
These functions all rely on each other. For example, when a forward
reference is found in a runtime annotation, the code parses it and calls
:func:`type_from_ast` to evaluate it.
These functions all use :class:`Context` objects to resolve names and
show errors.
"""
import contextlib
from dataclasses import dataclass, InitVar, field
import typing
import typing_inspect
import qcore
import ast
import builtins
from collections.abc import Callable, Iterable, Hashable
import sys
from typing import (
Any,
Container,
NamedTuple,
cast,
TypeVar,
ContextManager,
Mapping,
NewType,
Sequence,
Optional,
Tuple,
Union,
TYPE_CHECKING,
)
from typing_extensions import ParamSpec, TypedDict
from .error_code import ErrorCode
from .extensions import (
AsynqCallable,
CustomCheck,
ExternalType,
HasAttrGuard,
NoReturnGuard,
ParameterTypeGuard,
TypeGuard,
)
from .find_unused import used
from .functions import FunctionDefNode
from .node_visitor import ErrorContext
from .signature import ELLIPSIS_PARAM, SigParameter, Signature, ParameterKind
from .safe import is_typing_name, is_instance_of_typing_name
from . import type_evaluation
from .value import (
AnnotatedValue,
AnySource,
AnyValue,
CallableValue,
CustomCheckExtension,
Extension,
HasAttrGuardExtension,
KnownValue,
MultiValuedValue,
NO_RETURN_VALUE,
NoReturnGuardExtension,
ParamSpecArgsValue,
ParamSpecKwargsValue,
ParameterTypeGuardExtension,
SelfTVV,
TypeGuardExtension,
TypedValue,
SequenceIncompleteValue,
annotate_value,
unite_values,
Value,
GenericValue,
SubclassValue,
TypedDictValue,
NewTypeValue,
TypeVarValue,
_HashableValue,
)
if TYPE_CHECKING:
from .name_check_visitor import NameCheckVisitor
# Compatibility shim: this combined import only succeeds on Python 3.9+,
# because types.GenericAlias (the PEP 585 builtin-generics alias type) was
# first exposed in 3.9. On older versions we fall back to inert stubs.
# NOTE(review): on 3.8, typing.get_origin/get_args exist but GenericAlias
# does not, so the real functions are deliberately replaced by the stubs too.
try:
    from typing import get_origin, get_args  # Python 3.9
    from types import GenericAlias
except ImportError:
    GenericAlias = None
    # Fallback: report that no object has an origin.
    def get_origin(obj: object) -> Any:
        return None
    # Fallback: report that no object has type arguments.
    def get_args(obj: object) -> Tuple[Any, ...]:
        return ()
# Runtime types recognized as (sync) context managers in annotations.
CONTEXT_MANAGER_TYPES = (typing.ContextManager, contextlib.AbstractContextManager)
# contextlib.AbstractAsyncContextManager only exists on Python 3.7+.
if sys.version_info >= (3, 7):
    ASYNC_CONTEXT_MANAGER_TYPES = (
        typing.AsyncContextManager,
        # Doesn't exist on 3.6
        # static analysis: ignore[undefined_attribute]
        contextlib.AbstractAsyncContextManager,
    )
else:
    ASYNC_CONTEXT_MANAGER_TYPES = (typing.AsyncContextManager,)
@dataclass
class Context:
    """Name-resolution and error-reporting hooks used while evaluating annotations.

    The base implementation does very little; subclass it to resolve names
    and surface errors in a real environment.
    """

    should_suppress_undefined_names: bool = field(default=False, init=False)
    """While this is True, no errors are shown for undefined names."""

    def suppress_undefined_names(self) -> ContextManager[None]:
        """Return a context manager that temporarily silences undefined-name errors."""
        return qcore.override(self, "should_suppress_undefined_names", True)

    def show_error(
        self,
        message: str,
        error_code: ErrorCode = ErrorCode.invalid_annotation,
        node: Optional[ast.AST] = None,
    ) -> None:
        """Report an error found while evaluating an annotation (no-op in the base class)."""

    def get_name(self, node: ast.Name) -> Value:
        """Return the :class:`Value <pyanalyze.value.Value>` for a name node."""
        return AnyValue(AnySource.inference)

    def handle_undefined_name(self, name: str) -> Value:
        """Handle a name that could not be resolved, reporting it unless suppressed."""
        if not self.should_suppress_undefined_names:
            self.show_error(
                f"Undefined name {name!r} used in annotation", ErrorCode.undefined_name
            )
            return AnyValue(AnySource.error)
        return AnyValue(AnySource.inference)

    def get_name_from_globals(self, name: str, globals: Mapping[str, Any]) -> Value:
        """Look *name* up in *globals*, falling back to builtins, else report it."""
        try:
            return KnownValue(globals[name])
        except KeyError:
            pass
        if hasattr(builtins, name):
            return KnownValue(getattr(builtins, name))
        return self.handle_undefined_name(name)
@dataclass
class RuntimeEvaluator(type_evaluation.Evaluator, Context):
    """Evaluator whose name lookups resolve against a captured globals mapping."""

    globals: Mapping[str, object] = field(repr=False)
    func: typing.Callable[..., Any]

    def evaluate_type(self, node: ast.AST) -> Value:
        """Interpret *node* as a type annotation, using self as the context."""
        return type_from_ast(node, ctx=self)

    def evaluate_value(self, node: ast.AST) -> Value:
        """Interpret *node* as a value expression; unrecognized syntax is tolerated."""
        return value_from_ast(node, error_on_unrecognized=False, ctx=self)

    def get_name(self, node: ast.Name) -> Value:
        """Return the :class:`Value <pyanalyze.value.Value>` for a name, via self.globals."""
        return self.get_name_from_globals(node.id, self.globals)
@dataclass
class SyntheticEvaluator(type_evaluation.Evaluator):
    """Evaluator that delegates errors and name lookup to existing contexts."""

    error_ctx: ErrorContext
    annotations_context: Context

    def show_error(
        self,
        message: str,
        error_code: ErrorCode = ErrorCode.invalid_annotation,
        node: Optional[ast.AST] = None,
    ) -> None:
        """Forward the error to error_ctx, defaulting to this evaluator's node."""
        target = node if node is not None else self.node
        self.error_ctx.show_error(target, message, error_code=error_code)

    def evaluate_type(self, node: ast.AST) -> Value:
        """Interpret *node* as a type annotation through the annotations context."""
        return type_from_ast(node, ctx=self.annotations_context)

    def evaluate_value(self, node: ast.AST) -> Value:
        """Interpret *node* as a value; unrecognized syntax is tolerated."""
        return value_from_ast(
            node, error_on_unrecognized=False, ctx=self.annotations_context
        )

    def get_name(self, node: ast.Name) -> Value:
        """Return the :class:`Value <pyanalyze.value.Value>` for a name node."""
        return self.annotations_context.get_name(node)

    @classmethod
    def from_visitor(
        cls,
        node: FunctionDefNode,
        visitor: "NameCheckVisitor",
        return_annotation: Value,
    ) -> "SyntheticEvaluator":
        """Build an evaluator for *node* using the visitor for errors and names."""
        annotations_ctx = _DefaultContext(visitor, node, use_name_node_for_error=True)
        return cls(node, return_annotation, visitor, annotations_ctx)
@used  # part of an API
def type_from_ast(
    ast_node: ast.AST,
    visitor: Optional["NameCheckVisitor"] = None,
    ctx: Optional[Context] = None,
) -> Value:
    """Evaluate an AST node representing an annotation into a
    :class:`Value <pyanalyze.value.Value>`.

    :param ast_node: AST node to evaluate.
    :param visitor: Visitor class used by the default :class:`Context`
        to resolve names and show errors. Ignored if `ctx` is given.
    :param ctx: :class:`Context` to use for evaluation.
    """
    evaluation_ctx = _DefaultContext(visitor, ast_node) if ctx is None else ctx
    return _type_from_ast(ast_node, evaluation_ctx)
def type_from_annotations(
    annotations: Mapping[str, object],
    key: str,
    *,
    globals: Optional[Mapping[str, object]] = None,
    ctx: Optional[Context] = None,
) -> Optional[Value]:
    """Evaluate the annotation stored under *key*, or None if unusable.

    Returns None when the lookup fails (``__annotations__`` objects can be
    arbitrarily malformed at runtime) or when the annotation evaluates to an
    uninformative "incomplete annotation" Any.
    """
    try:
        raw_annotation = annotations[key]
    except Exception:
        # Malformed __annotations__: treat any lookup failure as "no annotation".
        return None
    value = type_from_runtime(raw_annotation, globals=globals, ctx=ctx)
    if value == AnyValue(AnySource.incomplete_annotation):
        return None
    return value
def type_from_runtime(
    val: object,
    visitor: Optional["NameCheckVisitor"] = None,
    node: Optional[ast.AST] = None,
    globals: Optional[Mapping[str, object]] = None,
    ctx: Optional[Context] = None,
) -> Value:
    """Evaluate a runtime annotation object into a
    :class:`Value <pyanalyze.value.Value>`.

    :param val: Object to evaluate, usually taken from an ``__annotations__``
        dictionary.
    :param visitor: Visitor class used by the default :class:`Context` to
        resolve names and show errors. Ignored if `ctx` is given.
    :param node: AST node that the annotation derives from, used for showing
        errors. Ignored if `ctx` is given.
    :param globals: Dictionary of global variables used to resolve names.
        Ignored if `ctx` is given.
    :param ctx: :class:`Context` to use for evaluation.
    """
    evaluation_ctx = _DefaultContext(visitor, node, globals) if ctx is None else ctx
    return _type_from_runtime(val, evaluation_ctx)
def type_from_value(
    value: Value,
    visitor: Optional["NameCheckVisitor"] = None,
    node: Optional[ast.AST] = None,
    ctx: Optional[Context] = None,
    is_typeddict: bool = False,
) -> Value:
    """Given a :class:`Value <pyanalyze.value.Value>` representing an annotation,
    return a :class:`Value <pyanalyze.value.Value>` representing the type.

    The input value represents an expression; the output value represents a
    type. For example, the :term:`impl` of ``typing.cast(typ, val)`` calls
    this function on the value it receives for its `typ` argument and returns
    the result.

    :param value: :class:`Value <pyanalyze.value.Value>` to evaluate.
    :param visitor: Visitor class used by the default :class:`Context` to
        resolve names and show errors. Ignored if `ctx` is given.
    :param node: AST node that the annotation derives from, used for showing
        errors. Ignored if `ctx` is given.
    :param ctx: :class:`Context` to use for evaluation.
    :param is_typeddict: Whether we are at the top level of a `TypedDict`
        definition.
    """
    evaluation_ctx = _DefaultContext(visitor, node) if ctx is None else ctx
    return _type_from_value(value, evaluation_ctx, is_typeddict=is_typeddict)
def value_from_ast(
    ast_node: ast.AST, ctx: Context, *, error_on_unrecognized: bool = True
) -> Value:
    """Evaluate an annotation AST node into a Value.

    Unrecognized syntax yields an error-flavored Any; the error message is
    omitted when *error_on_unrecognized* is False.
    """
    result = _Visitor(ctx).visit(ast_node)
    if result is not None:
        return result
    if error_on_unrecognized:
        ctx.show_error("Invalid type annotation", node=ast_node)
    return AnyValue(AnySource.error)
def _type_from_ast(node: ast.AST, ctx: Context, is_typeddict: bool = False) -> Value:
    """Evaluate an AST node to an expression-level Value, then interpret it as a type."""
    return _type_from_value(value_from_ast(node, ctx), ctx, is_typeddict=is_typeddict)
def _type_from_runtime(val: Any, ctx: Context, is_typeddict: bool = False) -> Value:
    """Interpret a runtime annotation object as a :class:`Value <pyanalyze.value.Value>`.

    Core dispatch for runtime annotations. Branch order is significant:
    several typing constructs satisfy more than one ``typing_inspect``
    predicate, so the more specific checks must run first.

    :param val: runtime object to interpret (e.g. an ``__annotations__`` entry).
    :param ctx: :class:`Context` used to resolve names and show errors.
    :param is_typeddict: True when evaluating directly inside a ``TypedDict``
        definition, where ``Required``/``NotRequired`` are legal.
    """
    if isinstance(val, str):
        # A string annotation is a forward reference; parse and evaluate it.
        return _eval_forward_ref(val, ctx, is_typeddict=is_typeddict)
    elif isinstance(val, tuple):
        # This happens under some Python versions for types
        # nested in tuples, e.g. on 3.6:
        # > typing_inspect.get_args(Union[Set[int], List[str]])
        # ((typing.Set, int), (typing.List, str))
        if not val:
            # from Tuple[()]
            return KnownValue(())
        origin = val[0]
        if len(val) == 2:
            args = (val[1],)
        else:
            args = val[1:]
        return _value_of_origin_args(origin, args, val, ctx)
    elif GenericAlias is not None and isinstance(val, GenericAlias):
        # PEP 585 builtin generics, e.g. list[int] (3.9+).
        origin = get_origin(val)
        args = get_args(val)
        if origin is tuple and not args:
            return SequenceIncompleteValue(tuple, [])
        return _value_of_origin_args(origin, args, val, ctx)
    elif typing_inspect.is_literal_type(val):
        args = typing_inspect.get_args(val)
        # Bug fix: this previously checked len(args) == 0 before indexing
        # args[0], which would raise IndexError on exactly the branch it
        # guarded. Single-member Literals are the intended fast path; the
        # union branch (unite_values) handles every other arity.
        if len(args) == 1:
            return KnownValue(args[0])
        else:
            return unite_values(*[KnownValue(arg) for arg in args])
    elif typing_inspect.is_union_type(val):
        args = typing_inspect.get_args(val)
        return unite_values(*[_type_from_runtime(arg, ctx) for arg in args])
    elif typing_inspect.is_tuple_type(val):
        args = typing_inspect.get_args(val)
        if not args:
            # Bare Tuple: any tuple.
            return TypedValue(tuple)
        elif len(args) == 2 and args[1] is Ellipsis:
            # Tuple[T, ...]: homogeneous tuple of unknown length.
            return GenericValue(tuple, [_type_from_runtime(args[0], ctx)])
        elif len(args) == 1 and args[0] == ():
            return SequenceIncompleteValue(tuple, [])  # empty tuple
        else:
            # Heterogeneous fixed-length tuple.
            args_vals = [_type_from_runtime(arg, ctx) for arg in args]
            return SequenceIncompleteValue(tuple, args_vals)
    elif is_instance_of_typing_name(val, "_TypedDictMeta"):
        required_keys = getattr(val, "__required_keys__", None)
        # 3.8's typing.TypedDict doesn't have __required_keys__. With
        # inheritance, this makes it apparently impossible to figure out which
        # keys are required at runtime.
        total = getattr(val, "__total__", True)
        return TypedDictValue(
            {
                key: _get_typeddict_value(value, ctx, key, required_keys, total)
                for key, value in val.__annotations__.items()
            }
        )
    elif val is InitVar:
        # On 3.6 and 3.7, InitVar[T] just returns InitVar at runtime, so we can't
        # get the actual type out.
        return AnyValue(AnySource.inference)
    elif isinstance(val, InitVar):
        # val.type exists only on 3.8+, but on earlier versions
        # InitVar instances aren't being created
        # static analysis: ignore[undefined_attribute]
        # Consistency fix: evaluate through the current ctx (instead of a
        # fresh default context) so errors and names resolve against the
        # right location, as every other recursive branch here does.
        return _type_from_runtime(val.type, ctx)
    elif is_instance_of_typing_name(val, "AnnotatedMeta"):
        # Annotated in 3.6's typing_extensions
        origin, metadata = val.__args__
        return _make_annotated(
            _type_from_runtime(origin, ctx), [KnownValue(v) for v in metadata], ctx
        )
    elif is_instance_of_typing_name(val, "_AnnotatedAlias"):
        # Annotated in typing and newer typing_extensions
        return _make_annotated(
            _type_from_runtime(val.__origin__, ctx),
            [KnownValue(v) for v in val.__metadata__],
            ctx,
        )
    elif typing_inspect.is_generic_type(val):
        origin = typing_inspect.get_origin(val)
        args = typing_inspect.get_args(val)
        if getattr(val, "_special", False):
            args = []  # distinguish List from List[T] on 3.7 and 3.8
        return _value_of_origin_args(origin, args, val, ctx, is_typeddict=is_typeddict)
    elif typing_inspect.is_callable_type(val):
        args = typing_inspect.get_args(val)
        return _value_of_origin_args(Callable, args, val, ctx)
    elif val is AsynqCallable:
        # Bare AsynqCallable: accept any arguments.
        return CallableValue(Signature.make([ELLIPSIS_PARAM], is_asynq=True))
    elif isinstance(val, type):
        return _maybe_typed_value(val)
    elif val is None:
        # "-> None" means the None type.
        return KnownValue(None)
    elif is_typing_name(val, "NoReturn") or is_typing_name(val, "Never"):
        return NO_RETURN_VALUE
    elif is_typing_name(val, "Self"):
        return SelfTVV
    elif val is typing.Any:
        return AnyValue(AnySource.explicit)
    elif hasattr(val, "__supertype__"):
        if isinstance(val.__supertype__, type):
            # NewType
            return NewTypeValue(val)
        elif typing_inspect.is_tuple_type(val.__supertype__):
            # TODO figure out how to make NewTypes over tuples work
            return AnyValue(AnySource.inference)
        else:
            ctx.show_error(f"Invalid NewType {val}")
            return AnyValue(AnySource.error)
    elif typing_inspect.is_typevar(val):
        tv = cast(TypeVar, val)
        return make_type_var_value(tv, ctx)
    elif is_instance_of_typing_name(val, "ParamSpec"):
        return TypeVarValue(val, is_paramspec=True)
    elif is_instance_of_typing_name(val, "ParamSpecArgs"):
        return ParamSpecArgsValue(val.__origin__)
    elif is_instance_of_typing_name(val, "ParamSpecKwargs"):
        return ParamSpecKwargsValue(val.__origin__)
    elif is_typing_name(val, "Final") or is_typing_name(val, "ClassVar"):
        # Bare Final/ClassVar without an inner type.
        return AnyValue(AnySource.incomplete_annotation)
    elif typing_inspect.is_classvar(val) or typing_inspect.is_final_type(val):
        if hasattr(val, "__type__"):
            # 3.6
            typ = val.__type__
        else:
            # 3.7+
            typ = val.__args__[0]
        return _type_from_runtime(typ, ctx)
    elif is_instance_of_typing_name(val, "_ForwardRef") or is_instance_of_typing_name(
        val, "ForwardRef"
    ):
        # This has issues because the forward ref may be defined in a different file, in
        # which case we don't know which names are valid in it.
        with ctx.suppress_undefined_names():
            try:
                code = ast.parse(val.__forward_arg__)
            except SyntaxError:
                ctx.show_error(
                    f"Syntax error in forward reference: {val.__forward_arg__}"
                )
                return AnyValue(AnySource.error)
            return _type_from_ast(code.body[0], ctx, is_typeddict=is_typeddict)
    elif val is Ellipsis:
        # valid in Callable[..., ]
        return AnyValue(AnySource.explicit)
    elif is_instance_of_typing_name(val, "_TypeAlias"):
        # typing.Pattern and Match, which are not normal generic types for some reason
        return GenericValue(val.impl_type, [_type_from_runtime(val.type_var, ctx)])
    elif isinstance(val, TypeGuard):
        return AnnotatedValue(
            TypedValue(bool),
            [TypeGuardExtension(_type_from_runtime(val.guarded_type, ctx))],
        )
    elif is_instance_of_typing_name(val, "_TypeGuard"):
        # 3.6 only
        return AnnotatedValue(
            TypedValue(bool),
            [TypeGuardExtension(_type_from_runtime(val.__type__, ctx))],
        )
    elif isinstance(val, AsynqCallable):
        params = _callable_args_from_runtime(val.args, "AsynqCallable", ctx)
        sig = Signature.make(
            params, _type_from_runtime(val.return_type, ctx), is_asynq=True
        )
        return CallableValue(sig)
    elif isinstance(val, ExternalType):
        try:
            typ = qcore.helpers.object_from_string(val.type_path)
        except Exception:
            ctx.show_error(f"Cannot resolve type {val.type_path!r}")
            return AnyValue(AnySource.error)
        return _type_from_runtime(typ, ctx)
    # Python 3.6 only (on later versions Required/NotRequired match
    # is_generic_type).
    elif is_instance_of_typing_name(val, "_MaybeRequired"):
        required = is_instance_of_typing_name(val, "_Required")
        if is_typeddict:
            return Pep655Value(required, _type_from_runtime(val.__type__, ctx))
        else:
            cls = "Required" if required else "NotRequired"
            ctx.show_error(f"{cls}[] used in unsupported context")
            return AnyValue(AnySource.error)
    elif is_typing_name(val, "TypeAlias"):
        return AnyValue(AnySource.incomplete_annotation)
    elif is_typing_name(val, "TypedDict"):
        # Bare TypedDict used as an annotation.
        return KnownValue(TypedDict)
    else:
        origin = get_origin(val)
        if isinstance(origin, type):
            # Fall back to the unparameterized origin type.
            return _maybe_typed_value(origin)
        elif val is NamedTuple:
            return TypedValue(tuple)
        ctx.show_error(f"Invalid type annotation {val}")
        return AnyValue(AnySource.error)
def make_type_var_value(tv: TypeVar, ctx: Context) -> TypeVarValue:
    """Translate a runtime TypeVar into a TypeVarValue.

    The TypeVar's bound and constraints (if any) are themselves evaluated as
    runtime annotations via ``_type_from_runtime``.
    """
    bound = None if tv.__bound__ is None else _type_from_runtime(tv.__bound__, ctx)
    # tuple() over an empty sequence yields (), matching the no-constraints case.
    constraints = tuple(_type_from_runtime(c, ctx) for c in tv.__constraints__)
    return TypeVarValue(tv, bound=bound, constraints=constraints)
def _callable_args_from_runtime(
    arg_types: Any, label: str, ctx: Context
) -> Sequence[SigParameter]:
    """Turn the argument list of a runtime ``Callable[...]`` into SigParameters.

    ``arg_types`` may be ``...``, a list/tuple of annotations, a ParamSpec, or
    a ``Concatenate[...]`` form. ``label`` is used only in error messages.
    """
    if arg_types is Ellipsis or arg_types == [Ellipsis]:
        # Callable[..., X]: accepts any arguments.
        return [ELLIPSIS_PARAM]
    elif type(arg_types) in (tuple, list):
        if len(arg_types) == 1:
            (arg,) = arg_types
            if arg is Ellipsis:
                return [ELLIPSIS_PARAM]
            elif is_typing_name(getattr(arg, "__origin__", None), "Concatenate"):
                return _args_from_concatenate(arg, ctx)
            elif is_instance_of_typing_name(arg, "ParamSpec"):
                # A lone ParamSpec stands in for the entire parameter list.
                param_spec = TypeVarValue(arg, is_paramspec=True)
                param = SigParameter(
                    "__P", kind=ParameterKind.PARAM_SPEC, annotation=param_spec
                )
                return [param]
        # General case: each element is a positional-only parameter (or a
        # ParamSpec if the evaluated type says so).
        types = [_type_from_runtime(arg, ctx) for arg in arg_types]
        params = [
            SigParameter(
                f"__arg{i}",
                kind=ParameterKind.PARAM_SPEC
                if isinstance(typ, TypeVarValue) and typ.is_paramspec
                else ParameterKind.POSITIONAL_ONLY,
                annotation=typ,
            )
            for i, typ in enumerate(types)
        ]
        return params
    elif is_instance_of_typing_name(arg_types, "ParamSpec"):
        param_spec = TypeVarValue(arg_types, is_paramspec=True)
        param = SigParameter(
            "__P", kind=ParameterKind.PARAM_SPEC, annotation=param_spec
        )
        return [param]
    elif is_typing_name(getattr(arg_types, "__origin__", None), "Concatenate"):
        return _args_from_concatenate(arg_types, ctx)
    else:
        ctx.show_error(f"Invalid arguments to {label}: {arg_types!r}")
        # Degrade to "accepts anything" so analysis can continue.
        return [ELLIPSIS_PARAM]
def _args_from_concatenate(concatenate: Any, ctx: Context) -> Sequence[SigParameter]:
    """Build a parameter list from a ``Concatenate[...]`` form.

    All leading arguments become positional-only parameters; the final one
    (the ParamSpec, per PEP 612) becomes a PARAM_SPEC parameter.
    """
    annotations = [_type_from_runtime(arg, ctx) for arg in concatenate.__args__]
    last = len(annotations) - 1
    params = []
    for i, annotation in enumerate(annotations):
        kind = ParameterKind.PARAM_SPEC if i == last else ParameterKind.POSITIONAL_ONLY
        params.append(SigParameter(f"__arg{i}", kind=kind, annotation=annotation))
    return params
def _get_typeddict_value(
    value: Value,
    ctx: Context,
    key: str,
    required_keys: Optional[Container[str]],
    total: bool,
) -> Tuple[bool, Value]:
    """Evaluate one TypedDict field annotation; return (is_required, type).

    ``Required[]``/``NotRequired[]`` on the field override totality; otherwise
    the field is required if it is in ``required_keys`` (when known) or,
    failing that, if the TypedDict is total.
    """
    val = _type_from_runtime(value, ctx, is_typeddict=True)
    if isinstance(val, Pep655Value):
        return val.required, val.value
    required = total if required_keys is None else key in required_keys
    return required, val
def _eval_forward_ref(val: str, ctx: Context, is_typeddict: bool = False) -> Value:
    """Parse a string (forward-reference) annotation and evaluate it as a type."""
    try:
        tree = ast.parse(val, mode="eval")
    except SyntaxError:
        ctx.show_error(f"Syntax error in type annotation: {val}")
        return AnyValue(AnySource.error)
    # Evaluate the parsed expression as an annotation.
    return _type_from_ast(tree.body, ctx, is_typeddict=is_typeddict)
def _type_from_value(value: Value, ctx: Context, is_typeddict: bool = False) -> Value:
    """Evaluate a Value representing an annotation expression into a type.

    ``is_typeddict`` is True at the top level of a TypedDict field, where
    Required[]/NotRequired[] are permitted.
    """
    if isinstance(value, KnownValue):
        return _type_from_runtime(value.val, ctx, is_typeddict=is_typeddict)
    elif isinstance(value, TypeVarValue):
        return value
    elif isinstance(value, MultiValuedValue):
        return unite_values(*[_type_from_value(val, ctx) for val in value.vals])
    elif isinstance(value, AnnotatedValue):
        return _type_from_value(value.value, ctx)
    elif isinstance(value, _SubscriptedValue):
        return _type_from_subscripted_value(
            value.root, value.members, ctx, is_typeddict=is_typeddict
        )
    elif isinstance(value, AnyValue):
        return value
    elif isinstance(value, SubclassValue) and value.exactly:
        # An exact Type[X] used as an annotation denotes X itself.
        return value.typ
    elif isinstance(value, TypedValue) and isinstance(value.typ, str):
        # Synthetic type
        return value
    else:
        ctx.show_error(f"Unrecognized annotation {value}")
        return AnyValue(AnySource.error)
def _type_from_subscripted_value(
    root: Optional[Value],
    members: Sequence[Value],
    ctx: Context,
    is_typeddict: bool = False,
) -> Value:
    """Evaluate a subscripted annotation ``root[members]`` into a type.

    ``root`` is the (already visited) value being subscripted and ``members``
    are the subscript arguments. Handles typing special forms (Union, Literal,
    Tuple, Optional, Type, Annotated, TypeGuard, Required/NotRequired,
    Callable, AsynqCallable) before falling back to generic parameterization.
    """
    if isinstance(root, GenericValue):
        # Re-parameterizing an already-generic value: substitute the args.
        if len(root.args) == len(members):
            return GenericValue(
                root.typ, [_type_from_value(member, ctx) for member in members]
            )
    if isinstance(root, _SubscriptedValue):
        # Nested subscript (e.g. X[A][B]): resolve the inner subscript first.
        root_type = _type_from_value(root, ctx)
        return _type_from_subscripted_value(root_type, members, ctx)
    elif isinstance(root, MultiValuedValue):
        # Distribute the subscript over each member of the union.
        return unite_values(
            *[
                _type_from_subscripted_value(subval, members, ctx, is_typeddict)
                for subval in root.vals
            ]
        )
    if (
        isinstance(root, SubclassValue)
        and root.exactly
        and isinstance(root.typ, TypedValue)
    ):
        return GenericValue(
            root.typ.typ, [_type_from_value(elt, ctx) for elt in members]
        )
    if isinstance(root, TypedValue) and isinstance(root.typ, str):
        # Synthetic (string-named) type: parameterize it directly.
        return GenericValue(root.typ, [_type_from_value(elt, ctx) for elt in members])
    if not isinstance(root, KnownValue):
        # Avoid a duplicate error if resolution already failed upstream.
        if root != AnyValue(AnySource.error):
            ctx.show_error(f"Cannot resolve subscripted annotation: {root}")
        return AnyValue(AnySource.error)
    root = root.val
    if root is typing.Union:
        return unite_values(*[_type_from_value(elt, ctx) for elt in members])
    elif is_typing_name(root, "Literal"):
        # Note that in Python 3.8, the way typing's internal cache works means that
        # Literal[1] and Literal[True] are cached to the same value, so if you use
        # both, you'll get whichever one was used first in later calls. There's nothing
        # we can do about that.
        if all(isinstance(elt, KnownValue) for elt in members):
            return unite_values(*members)
        else:
            ctx.show_error(f"Arguments to Literal[] must be literals, not {members}")
            return AnyValue(AnySource.error)
    elif root is typing.Tuple or root is tuple:
        if len(members) == 2 and members[1] == KnownValue(Ellipsis):
            # Tuple[X, ...]: homogeneous tuple of unknown length.
            return GenericValue(tuple, [_type_from_value(members[0], ctx)])
        elif len(members) == 1 and members[0] == KnownValue(()):
            # Tuple[()]: the empty tuple.
            return SequenceIncompleteValue(tuple, [])
        else:
            return SequenceIncompleteValue(
                tuple, [_type_from_value(arg, ctx) for arg in members]
            )
    elif root is typing.Optional:
        if len(members) != 1:
            ctx.show_error("Optional[] takes only one argument")
            return AnyValue(AnySource.error)
        return unite_values(KnownValue(None), _type_from_value(members[0], ctx))
    elif root is typing.Type or root is type:
        if len(members) != 1:
            ctx.show_error("Type[] takes only one argument")
            return AnyValue(AnySource.error)
        argument = _type_from_value(members[0], ctx)
        return SubclassValue.make(argument)
    elif is_typing_name(root, "Annotated"):
        # First member is the underlying type; the rest is metadata.
        origin, *metadata = members
        return _make_annotated(_type_from_value(origin, ctx), metadata, ctx)
    elif is_typing_name(root, "TypeGuard"):
        if len(members) != 1:
            ctx.show_error("TypeGuard requires a single argument")
            return AnyValue(AnySource.error)
        return AnnotatedValue(
            TypedValue(bool), [TypeGuardExtension(_type_from_value(members[0], ctx))]
        )
    elif is_typing_name(root, "Required"):
        if not is_typeddict:
            ctx.show_error("Required[] used in unsupported context")
            return AnyValue(AnySource.error)
        if len(members) != 1:
            ctx.show_error("Required[] requires a single argument")
            return AnyValue(AnySource.error)
        return Pep655Value(True, _type_from_value(members[0], ctx))
    elif is_typing_name(root, "NotRequired"):
        if not is_typeddict:
            ctx.show_error("NotRequired[] used in unsupported context")
            return AnyValue(AnySource.error)
        if len(members) != 1:
            ctx.show_error("NotRequired[] requires a single argument")
            return AnyValue(AnySource.error)
        return Pep655Value(False, _type_from_value(members[0], ctx))
    elif root is Callable or root is typing.Callable:
        if len(members) == 2:
            args, return_value = members
            return _make_callable_from_value(args, return_value, ctx)
        ctx.show_error("Callable requires exactly two arguments")
        return AnyValue(AnySource.error)
    elif root is AsynqCallable:
        if len(members) == 2:
            args, return_value = members
            return _make_callable_from_value(args, return_value, ctx, is_asynq=True)
        ctx.show_error("AsynqCallable requires exactly two arguments")
        return AnyValue(AnySource.error)
    elif typing_inspect.is_generic_type(root):
        origin = typing_inspect.get_origin(root)
        if origin is None:
            # On Python 3.9 at least, get_origin() of a class that inherits
            # from Generic[T] is None.
            origin = root
        origin = _maybe_get_extra(origin)
        return GenericValue(origin, [_type_from_value(elt, ctx) for elt in members])
    elif isinstance(root, type):
        return GenericValue(root, [_type_from_value(elt, ctx) for elt in members])
    else:
        # In Python 3.9, generics are implemented differently and typing.get_origin
        # can help.
        origin = get_origin(root)
        if isinstance(origin, type):
            return GenericValue(origin, [_type_from_value(elt, ctx) for elt in members])
        ctx.show_error(f"Unrecognized subscripted annotation: {root}")
        return AnyValue(AnySource.error)
def _maybe_get_extra(origin: type) -> Union[type, str]:
    """Normalize a generic origin before parameterization.

    ContextManager is defined oddly and we lose the Protocol if we don't use
    synthetic (string-named) types for it; otherwise unwrap ``__extra__`` to
    turn typing.List into list on some Python versions
    (compare https://github.com/ilevkivskyi/typing_inspect/issues/36).
    """
    for cls in CONTEXT_MANAGER_TYPES:
        if origin is cls:
            return "typing.ContextManager"
    for cls in ASYNC_CONTEXT_MANAGER_TYPES:
        if origin is cls:
            return "typing.AsyncContextManager"
    extra = getattr(origin, "__extra__", None)
    return origin if extra is None else extra
class _DefaultContext(Context):
    """Context that resolves names and reports errors via a NameCheckVisitor.

    Falls back to an explicit ``globals`` mapping (plus builtins) when no
    visitor is available.
    """
    def __init__(
        self,
        visitor: "NameCheckVisitor",
        node: Optional[ast.AST],
        globals: Optional[Mapping[str, object]] = None,
        use_name_node_for_error: bool = False,
    ) -> None:
        super().__init__()
        self.visitor = visitor
        # AST node used for error locations when none is given explicitly.
        self.node = node
        self.globals = globals
        # If True, point errors at the offending Name node itself instead of
        # the annotation's node.
        self.use_name_node_for_error = use_name_node_for_error
    def show_error(
        self,
        message: str,
        error_code: ErrorCode = ErrorCode.invalid_annotation,
        node: Optional[ast.AST] = None,
    ) -> None:
        """Report an annotation error through the visitor, if one exists."""
        if node is None:
            node = self.node
        # Without a visitor or a node there is nowhere to attach the error.
        if self.visitor is not None and node is not None:
            self.visitor.show_error(node, message, error_code)
    def get_name(self, node: ast.Name) -> Value:
        """Resolve a Name node to a Value (visitor first, then globals/builtins)."""
        if self.visitor is not None:
            val, _ = self.visitor.resolve_name(
                node,
                error_node=node if self.use_name_node_for_error else self.node,
                suppress_errors=self.should_suppress_undefined_names,
            )
            return val
        elif self.globals is not None:
            if node.id in self.globals:
                return KnownValue(self.globals[node.id])
            elif hasattr(builtins, node.id):
                return KnownValue(getattr(builtins, node.id))
        if self.should_suppress_undefined_names:
            return AnyValue(AnySource.inference)
        self.show_error(
            f"Undefined name {node.id!r} used in annotation",
            ErrorCode.undefined_name,
            node=node,
        )
        return AnyValue(AnySource.error)
@dataclass(frozen=True)
class _SubscriptedValue(Value):
    """Intermediate representation of a subscripted annotation ``root[members]``."""
    root: Optional[Value]  # the value being subscripted; None if it could not be resolved
    members: Tuple[Value, ...]  # the subscript arguments
@dataclass
class Pep655Value(Value):
    """Wrapper for Required[]/NotRequired[] (PEP 655) on a TypedDict field."""
    required: bool  # True for Required[], False for NotRequired[]
    value: Value  # the field's underlying type
class _Visitor(ast.NodeVisitor):
    """Evaluates an annotation AST into a Value.

    Subscripts are wrapped in _SubscriptedValue so that typing special forms
    can be resolved later by _type_from_subscripted_value. Visitors return
    None when the node cannot be evaluated as an annotation.
    """
    def __init__(self, ctx: Context) -> None:
        self.ctx = ctx
    def generic_visit(self, node: ast.AST) -> None:
        # Any node kind without an explicit visitor is invalid in an annotation.
        raise NotImplementedError(f"no visitor implemented for {node!r}")
    def visit_Name(self, node: ast.Name) -> Value:
        """Resolve a bare name through the Context."""
        return self.ctx.get_name(node)
    def visit_Subscript(self, node: ast.Subscript) -> Value:
        """Defer X[...] resolution by wrapping it in _SubscriptedValue."""
        value = self.visit(node.value)
        index = self.visit(node.slice)
        if isinstance(index, SequenceIncompleteValue):
            # X[a, b] parses as a tuple subscript; unpack it.
            members = index.members
        else:
            members = (index,)
        return _SubscriptedValue(value, members)
    def visit_Attribute(self, node: ast.Attribute) -> Optional[Value]:
        """Resolve dotted access (e.g. typing.List) via getattr on a known value."""
        root_value = self.visit(node.value)
        if isinstance(root_value, KnownValue):
            try:
                return KnownValue(getattr(root_value.val, node.attr))
            except AttributeError:
                self.ctx.show_error(
                    f"{root_value.val!r} has no attribute {node.attr!r}", node=node
                )
                return AnyValue(AnySource.error)
        elif not isinstance(root_value, AnyValue):
            self.ctx.show_error(f"Cannot resolve annotation {root_value}", node=node)
        return AnyValue(AnySource.error)
    def visit_Tuple(self, node: ast.Tuple) -> Value:
        elts = [self.visit(elt) for elt in node.elts]
        return SequenceIncompleteValue(tuple, elts)
    def visit_List(self, node: ast.List) -> Value:
        elts = [self.visit(elt) for elt in node.elts]
        return SequenceIncompleteValue(list, elts)
    def visit_Index(self, node: ast.Index) -> Value:
        # class is unused in 3.9
        return self.visit(node.value)  # static analysis: ignore[undefined_attribute]
    def visit_Ellipsis(self, node: ast.Ellipsis) -> Value:
        return KnownValue(Ellipsis)
    def visit_Constant(self, node: ast.Constant) -> Value:
        return KnownValue(node.value)
    def visit_NameConstant(self, node: ast.NameConstant) -> Value:
        return KnownValue(node.value)
    def visit_Num(self, node: ast.Num) -> Value:
        return KnownValue(node.n)
    def visit_Str(self, node: ast.Str) -> Value:
        return KnownValue(node.s)
    def visit_Bytes(self, node: ast.Bytes) -> Value:
        return KnownValue(node.s)
    def visit_Expr(self, node: ast.Expr) -> Value:
        return self.visit(node.value)
    def visit_BinOp(self, node: ast.BinOp) -> Optional[Value]:
        # X | Y (PEP 604) is treated as Union[X, Y].
        if isinstance(node.op, ast.BitOr):
            return _SubscriptedValue(
                KnownValue(Union), (self.visit(node.left), self.visit(node.right))
            )
        else:
            return None
    def visit_UnaryOp(self, node: ast.UnaryOp) -> Optional[Value]:
        # Only int and float negation on literals are supported.
        if isinstance(node.op, ast.USub):
            operand = self.visit(node.operand)
            if isinstance(operand, KnownValue) and isinstance(
                operand.val, (int, float)
            ):
                return KnownValue(-operand.val)
        return None
    def visit_Call(self, node: ast.Call) -> Optional[Value]:
        """Handle calls in annotations: NewType(), TypeVar(), ParamSpec(), or
        a class constructor (treated as an instance of that class)."""
        func = self.visit(node.func)
        if not isinstance(func, KnownValue):
            return None
        if func.val == NewType:
            # Evaluate the call's arguments; bail out (return None) unless all
            # of them are statically known, then actually create the NewType.
            arg_values = [self.visit(arg) for arg in node.args]
            kwarg_values = [(kw.arg, self.visit(kw.value)) for kw in node.keywords]
            args = []
            kwargs = {}
            for arg_value in arg_values:
                if isinstance(arg_value, KnownValue):
                    args.append(arg_value.val)
                else:
                    return None
            for name, kwarg_value in kwarg_values:
                if name is None:
                    # **kwargs expansion: only usable if it is a known dict.
                    if isinstance(kwarg_value, KnownValue) and isinstance(
                        kwarg_value.val, dict
                    ):
                        kwargs.update(kwarg_value.val)
                    else:
                        return None
                else:
                    if isinstance(kwarg_value, KnownValue):
                        kwargs[name] = kwarg_value.val
                    else:
                        return None
            return KnownValue(func.val(*args, **kwargs))
        elif func.val == TypeVar:
            arg_values = [self.visit(arg) for arg in node.args]
            kwarg_values = [(kw.arg, self.visit(kw.value)) for kw in node.keywords]
            if not arg_values:
                self.ctx.show_error(
                    "TypeVar() requires at least one argument", node=node
                )
                return None
            name_val = arg_values[0]
            if not isinstance(name_val, KnownValue):
                self.ctx.show_error("TypeVar name must be a literal", node=node.args[0])
                return None
            # Positional arguments after the name are constraints.
            constraints = []
            for arg_value in arg_values[1:]:
                constraints.append(_type_from_value(arg_value, self.ctx))
            bound = None
            for name, kwarg_value in kwarg_values:
                if name in ("covariant", "contravariant"):
                    # Variance is accepted but not tracked here.
                    continue
                elif name == "bound":
                    bound = _type_from_value(kwarg_value, self.ctx)
                else:
                    self.ctx.show_error(f"Unrecognized TypeVar kwarg {name}", node=node)
                    return None
            tv = TypeVar(name_val.val)
            return TypeVarValue(tv, bound, tuple(constraints))
        elif is_typing_name(func.val, "ParamSpec"):
            arg_values = [self.visit(arg) for arg in node.args]
            kwarg_values = [(kw.arg, self.visit(kw.value)) for kw in node.keywords]
            if not arg_values:
                self.ctx.show_error(
                    "ParamSpec() requires at least one argument", node=node
                )
                return None
            name_val = arg_values[0]
            if not isinstance(name_val, KnownValue):
                self.ctx.show_error(
                    "ParamSpec name must be a literal", node=node.args[0]
                )
                return None
            # ParamSpec takes no supported keyword arguments.
            for name, _ in kwarg_values:
                self.ctx.show_error(f"Unrecognized ParamSpec kwarg {name}", node=node)
                return None
            tv = ParamSpec(name_val.val)
            return TypeVarValue(tv, is_paramspec=True)
        elif isinstance(func.val, type):
            if func.val is object:
                # object() in an annotation carries no type information.
                return AnyValue(AnySource.inference)
            return TypedValue(func.val)
        else:
            return None
def _value_of_origin_args(
    origin: object,
    args: Sequence[object],
    val: object,
    ctx: Context,
    is_typeddict: bool = False,
) -> Value:
    """Evaluate a runtime generic alias decomposed into ``origin`` and ``args``.

    ``val`` is the original annotation object (used for fallbacks);
    ``is_typeddict`` permits Required[]/NotRequired[] at the top level of a
    TypedDict field.
    """
    if origin is typing.Type or origin is type:
        if not args:
            return TypedValue(type)
        return SubclassValue.make(_type_from_runtime(args[0], ctx))
    elif origin is typing.Tuple or origin is tuple:
        if not args:
            return TypedValue(tuple)
        elif len(args) == 2 and args[1] is Ellipsis:
            # Tuple[X, ...]: homogeneous tuple of unknown length.
            return GenericValue(tuple, [_type_from_runtime(args[0], ctx)])
        elif len(args) == 1 and args[0] == ():
            # Tuple[()]: the empty tuple.
            return SequenceIncompleteValue(tuple, [])
        else:
            args_vals = [_type_from_runtime(arg, ctx) for arg in args]
            return SequenceIncompleteValue(tuple, args_vals)
    elif origin is typing.Union:
        return unite_values(*[_type_from_runtime(arg, ctx) for arg in args])
    elif origin is Callable or origin is typing.Callable:
        if len(args) == 0:
            return TypedValue(Callable)
        # The last arg is the return type; everything before is the arg list.
        *arg_types, return_type = args
        if len(arg_types) == 1 and isinstance(arg_types[0], list):
            arg_types = arg_types[0]
        params = _callable_args_from_runtime(arg_types, "Callable", ctx)
        sig = Signature.make(params, _type_from_runtime(return_type, ctx))
        return CallableValue(sig)
    elif is_typing_name(origin, "Annotated"):
        origin, metadata = args
        # This should never happen
        if not isinstance(metadata, Iterable):
            ctx.show_error("Unexpected format in Annotated")
            return AnyValue(AnySource.error)
        return _make_annotated(
            _type_from_runtime(origin, ctx),
            [KnownValue(data) for data in metadata],
            ctx,
        )
    elif isinstance(origin, type):
        origin = _maybe_get_extra(origin)
        if args:
            args_vals = [_type_from_runtime(val, ctx) for val in args]
            return GenericValue(origin, args_vals)
        else:
            return _maybe_typed_value(origin)
    elif is_typing_name(origin, "TypeGuard"):
        if len(args) != 1:
            ctx.show_error("TypeGuard requires a single argument")
            return AnyValue(AnySource.error)
        return AnnotatedValue(
            TypedValue(bool), [TypeGuardExtension(_type_from_runtime(args[0], ctx))]
        )
    elif is_typing_name(origin, "Final"):
        if len(args) != 1:
            ctx.show_error("Final requires a single argument")
            return AnyValue(AnySource.error)
        # TODO(#160): properly support Final
        return _type_from_runtime(args[0], ctx)
    elif is_typing_name(origin, "ClassVar"):
        if len(args) != 1:
            ctx.show_error("ClassVar requires a single argument")
            return AnyValue(AnySource.error)
        return _type_from_runtime(args[0], ctx)
    elif is_typing_name(origin, "Required"):
        if not is_typeddict:
            ctx.show_error("Required[] used in unsupported context")
            return AnyValue(AnySource.error)
        if len(args) != 1:
            ctx.show_error("Required[] requires a single argument")
            return AnyValue(AnySource.error)
        return Pep655Value(True, _type_from_runtime(args[0], ctx))
    elif is_typing_name(origin, "NotRequired"):
        if not is_typeddict:
            ctx.show_error("NotRequired[] used in unsupported context")
            return AnyValue(AnySource.error)
        if len(args) != 1:
            ctx.show_error("NotRequired[] requires a single argument")
            return AnyValue(AnySource.error)
        return Pep655Value(False, _type_from_runtime(args[0], ctx))
    elif origin is None and isinstance(val, type):
        # This happens for SupportsInt in 3.7.
        return _maybe_typed_value(val)
    else:
        ctx.show_error(
            f"Unrecognized annotation {origin}[{', '.join(map(repr, args))}]"
        )
        return AnyValue(AnySource.error)
def _maybe_typed_value(val: Union[type, str]) -> Value:
    """Wrap a runtime type (or synthetic type name) in the appropriate Value."""
    if val is type(None):
        # NoneType as an annotation means the literal None.
        return KnownValue(None)
    # Hashable gets special handling via _HashableValue; everything else is a
    # plain TypedValue.
    return _HashableValue(val) if val is Hashable else TypedValue(val)
def _make_callable_from_value(
    args: Value, return_value: Value, ctx: Context, is_asynq: bool = False
) -> Value:
    """Build a CallableValue from the two members of Callable[args, return].

    ``args`` may be ``...``, a list of annotations, a ParamSpec, or a
    ``Concatenate[...]`` subscript; ``is_asynq`` marks AsynqCallable.
    """
    return_annotation = _type_from_value(return_value, ctx)
    if args == KnownValue(Ellipsis):
        # Callable[..., X]: accepts any arguments.
        return CallableValue(
            Signature.make(
                [ELLIPSIS_PARAM], return_annotation=return_annotation, is_asynq=is_asynq
            )
        )
    elif isinstance(args, SequenceIncompleteValue):
        # Callable[[A, B], X]: each element is a positional-only parameter.
        params = [
            SigParameter(
                f"__arg{i}",
                kind=ParameterKind.POSITIONAL_ONLY,
                annotation=_type_from_value(arg, ctx),
            )
            for i, arg in enumerate(args.members)
        ]
        sig = Signature.make(params, return_annotation, is_asynq=is_asynq)
        return CallableValue(sig)
    elif isinstance(args, KnownValue) and is_instance_of_typing_name(
        args.val, "ParamSpec"
    ):
        annotation = TypeVarValue(args.val, is_paramspec=True)
        params = [
            SigParameter("__P", kind=ParameterKind.PARAM_SPEC, annotation=annotation)
        ]
        sig = Signature.make(params, return_annotation, is_asynq=is_asynq)
        return CallableValue(sig)
    elif isinstance(args, TypeVarValue) and args.is_paramspec:
        params = [SigParameter("__P", kind=ParameterKind.PARAM_SPEC, annotation=args)]
        sig = Signature.make(params, return_annotation, is_asynq=is_asynq)
        return CallableValue(sig)
    elif (
        isinstance(args, _SubscriptedValue)
        and isinstance(args.root, KnownValue)
        and is_typing_name(args.root.val, "Concatenate")
    ):
        # Concatenate[A, B, P]: leading args are positional-only, the last is
        # the ParamSpec (PEP 612).
        annotations = [_type_from_value(arg, ctx) for arg in args.members]
        params = [
            SigParameter(
                f"__arg{i}",
                kind=ParameterKind.PARAM_SPEC
                if i == len(annotations) - 1
                else ParameterKind.POSITIONAL_ONLY,
                annotation=annotation,
            )
            for i, annotation in enumerate(annotations)
        ]
        sig = Signature.make(params, return_annotation, is_asynq=is_asynq)
        return CallableValue(sig)
    else:
        ctx.show_error(f"Unrecognized Callable type argument {args}")
        return AnyValue(AnySource.error)
def _make_annotated(origin: Value, metadata: Sequence[Value], ctx: Context) -> Value:
    """Attach Annotated[] metadata to ``origin``, converting known guard/check
    objects into their Extension equivalents first."""
    converted = [_value_from_metadata(entry, ctx) for entry in metadata]
    return annotate_value(origin, converted)
def _value_from_metadata(entry: Value, ctx: Context) -> Union[Value, Extension]:
    """Convert one Annotated[] metadata entry.

    Known guard/check marker objects become their Extension counterparts;
    anything else is passed through unchanged.
    """
    if isinstance(entry, KnownValue):
        if isinstance(entry.val, ParameterTypeGuard):
            return ParameterTypeGuardExtension(
                entry.val.varname, _type_from_runtime(entry.val.guarded_type, ctx)
            )
        elif isinstance(entry.val, NoReturnGuard):
            return NoReturnGuardExtension(
                entry.val.varname, _type_from_runtime(entry.val.guarded_type, ctx)
            )
        elif isinstance(entry.val, HasAttrGuard):
            return HasAttrGuardExtension(
                entry.val.varname,
                _type_from_runtime(entry.val.attribute_name, ctx),
                _type_from_runtime(entry.val.attribute_type, ctx),
            )
        elif isinstance(entry.val, CustomCheck):
            return CustomCheckExtension(entry.val)
    return entry
| pyanalyze/annotations.py | 46,475 | A context for evaluating annotations.
The base implementation does very little. Subclass this to do something more useful.
Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name.
Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name.
Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name.
Show an error found while evaluating an annotation.
Temporarily suppress errors about undefined names.
Given an AST node representing an annotation, return a
:class:`Value <pyanalyze.value.Value>`.
:param ast_node: AST node to evaluate.
:param visitor: Visitor class to use. This is used in the default
:class:`Context` to resolve names and show errors.
This is ignored if `ctx` is given.
:param ctx: :class:`Context` to use for evaluation.
Given a runtime annotation object, return a
:class:`Value <pyanalyze.value.Value>`.
:param val: Object to evaluate. This will usually come from an
``__annotations__`` dictionary.
:param visitor: Visitor class to use. This is used in the default
:class:`Context` to resolve names and show errors.
This is ignored if `ctx` is given.
:param node: AST node that the annotation derives from. This is
used for showing errors. Ignored if `ctx` is given.
:param globals: Dictionary of global variables that can be used
to resolve names. Ignored if `ctx` is given.
:param ctx: :class:`Context` to use for evaluation.
Given a :class:`Value <pyanalyze.value.Value` representing an annotation,
return a :class:`Value <pyanalyze.value.Value>` representing the type.
The input value represents an expression, the output value represents
a type. For example, the :term:`impl` of ``typing.cast(typ, val)``
calls :func:`type_from_value` on the value it receives for its
`typ` argument and returns the result.
:param value: :class:`Value <pyanalyze.value.Value` to evaluate.
:param visitor: Visitor class to use. This is used in the default
:class:`Context` to resolve names and show errors.
This is ignored if `ctx` is given.
:param node: AST node that the annotation derives from. This is
used for showing errors. Ignored if `ctx` is given.
:param ctx: :class:`Context` to use for evaluation.
:param is_typeddict: Whether we are at the top level of a `TypedDict`
definition.
Code for understanding type annotations.
This file contains functions that turn various representations of
Python type annotations into :class:`pyanalyze.value.Value` objects.
There are three major functions:
- :func:`type_from_runtime` takes a runtime Python object, for example
``type_from_value(int)`` -> ``TypedValue(int)``.
- :func:`type_from_value` takes an existing :class:`pyanalyze.value.Value`
object. For example, evaluating the expression ``int`` will produce
``KnownValue(int)``, and calling :func:`type_from_value` on that value
will produce ``TypedValue(int)``.
- :func:`type_from_ast` takes an AST node and evaluates it into a type.
These functions all rely on each other. For example, when a forward
reference is found in a runtime annotation, the code parses it and calls
:func:`type_from_ast` to evaluate it.
These functions all use :class:`Context` objects to resolve names and
show errors.
Python 3.9 Doesn't exist on 3.6 static analysis: ignore[undefined_attribute] part of an API Malformed __annotations__ This happens under some Python versions for types nested in tuples, e.g. on 3.6: > typing_inspect.get_args(Union[Set[int], List[str]]) ((typing.Set, int), (typing.List, str)) from Tuple[()] empty tuple 3.8's typing.TypedDict doesn't have __required_keys__. With inheritance, this makes it apparently impossible to figure out which keys are required at runtime. On 3.6 and 3.7, InitVar[T] just returns InitVar at runtime, so we can't get the actual type out. val.type exists only on 3.8+, but on earlier versions InitVar instances aren't being created static analysis: ignore[undefined_attribute] Annotated in 3.6's typing_extensions Annotated in typing and newer typing_extensions distinguish List from List[T] on 3.7 and 3.8 NewType TODO figure out how to make NewTypes over tuples work 3.6 3.7+ This has issues because the forward ref may be defined in a different file, in which case we don't know which names are valid in it. valid in Callable[..., ] typing.Pattern and Match, which are not normal generic types for some reason 3.6 only Python 3.6 only (on later versions Required/NotRequired match is_generic_type). Synthetic type Note that in Python 3.8, the way typing's internal cache works means that Literal[1] and Literal[True] are cached to the same value, so if you use both, you'll get whichever one was used first in later calls. There's nothing we can do about that. On Python 3.9 at least, get_origin() of a class that inherits from Generic[T] is None. In Python 3.9, generics are implemented differently and typing.get_origin can help. ContextManager is defined oddly and we lose the Protocol if we don't use synthetic types. turn typing.List into list in some Python versions compare https://github.com/ilevkivskyi/typing_inspect/issues/36 class is unused in 3.9 static analysis: ignore[undefined_attribute] Only int and float negation on literals are supported. 
This should never happen TODO(160): properly support Final This happens for SupportsInt in 3.7. | 5,455 | en | 0.766294 |
"""
Unittests for gpsdio load
"""
from click.testing import CliRunner
import gpsdio
import gpsdio.cli.main
def test_load(types_json_path, types_msg_gz_path, tmpdir, compare_msg):
    """`gpsdio load` should write stdin JSON to a gzipped NewlineJSON file.

    Feeds the raw JSON fixture to the CLI on stdin, then reads the written
    file back and compares each message against the expected gzipped fixture.
    """
    pth = str(tmpdir.mkdir('test').join('test_load'))
    with open(types_json_path) as f:
        stdin_input = f.read()
    result = CliRunner().invoke(gpsdio.cli.main.main_group, [
        'load',
        '--o-drv', 'NewlineJSON',
        '--o-cmp', 'GZIP',
        pth
    ], input=stdin_input)
    # `is 0` tested object identity and only passed via CPython's small-int
    # caching (and is a SyntaxWarning on modern Python); equality is correct.
    assert result.exit_code == 0
    with gpsdio.open(types_msg_gz_path) as expected, \
            gpsdio.open(pth, driver='NewlineJSON', compression='GZIP') as actual:
        for e, a in zip(expected, actual):
            assert compare_msg(e, a)
| tests/test_cli_load.py | 738 | Unittests for gpsdio load | 25 | en | 0.325564 |
# -*- coding: utf-8 -*-
'''
Pillar data from vCenter or an ESXi host
.. versionadded:: 2017.7.0
:depends: - pyVmomi
This external pillar can pull attributes from objects in vCenter or an ESXi host and provide those attributes
as pillar data to minions. This can allow for pillar based targeting of minions on ESXi host, Datastore, VM
configuration, etc. This setup requires only the salt master have access to the vCenter server/ESXi hosts.
The pillar will return an empty dict if the 'os' or 'virtual' grain are not 'VMWare', 'ESXi', or 'VMWare ESXi'.
Defaults
========
- The external pillar will search for Virtual Machines with the VM name matching the minion id.
- Data will be returned into the 'vmware' pillar key.
- The external pillar has a default set of properties to return for both VirtualMachine and HostSystem types.
Configuring the VMWare pillar
=============================
The required minimal configuration in the salt master ext_pillar setup:
.. code-block:: yaml
ext_pillar:
- vmware:
host: <vcenter/esx host>
username: <user to connect with>
password: <password>
Optionally, the following keyword arguments can be passed to the ext_pillar for customized configuration:
pillar_key
Optionally set the pillar key to return the data into. Default is ``vmware``.
protocol
Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
using the default protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
using the default port. Default port is ``443``.
property_name
Property name to match the minion id against. Defaults to ``name``.
property_types
Optionally specify a list of pyVmomi vim types to search for the minion id in 'property_name'.
Default is ``['VirtualMachine']``.
For example, to search both vim.VirtualMachine and vim.HostSystem object types:
.. code-block:: yaml
ext_pillar:
- vmware:
host: myesx
username: root
password: complex_password
property_types:
- VirtualMachine
- HostSystem
Additionally, the list of property types can be dicts, the item of the dict being a list specifying
the attribute to return for that vim object type.
The pillar will attempt to recurse the attribute and return all child attributes.
To explicitly specify deeper attributes without attempting to recurse an attribute, convert the list
item to a dict with the item of the dict being the child attributes to return. Follow this pattern
to return attributes as deep within the object as necessary.
.. note::
Be careful when specifying custom attributes! Many attributes have objects as attributes which
have the parent object as an attribute and which will cause the pillar to fail due to the attempt
to convert all sub-objects recursively (i.e. infinite attribute loops). Specifying only the
sub-attributes you would like returned will keep the infinite recursion from occurring.
A maximum recursion exception will occur in this case and the pillar will not return as desired.
.. code-block:: yaml
ext_pillar:
- vmware:
host: myvcenter
username: my_user
password: my_pass
replace_default_attributes: True
property_types:
- VirtualMachine:
- config:
- bootOptions:
- bootDelay
- bootRetryDelay
- HostSystem:
- datastore:
- name
The above ext_pillar example would return a pillar like the following for a VirtualMachine object whose
name matched the minion id:
.. code-block:: yaml
vmware:
config:
bootOptions:
bootDelay: 1000
bootRetryDelay: 1000
If you were to retrieve these virtual machine attributes via pyVmomi directly, this would be the same as
.. code-block:: python
vmObject.config.bootOptions.bootDelay
    vmObject.config.bootOptions.bootRetryDelay
The above ext_pillar example would return a pillar like the following for a HostSystem object whose name
matched the minion id:
.. code-block:: yaml
vmware:
datastore:
- name: Datastore1
- name: Datastore2
The 'datastore' property of a HostSystem object is a list of datastores, thus a list is returned.
replace_default_attributes
If custom attributes are specified by the property_types parameter, replace_default_attributes determines
if those will be added to default attributes (False) or replace the default attributes completely (True).
The default setting is 'False'.
.. note::
vCenter "Custom Attributes" (i.e. Annotations) will always be returned if it exists on the object as
part of the pillar regardless of this setting.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
import salt.utils.dictupdate as dictupdate
import salt.utils.vmware
# Import 3rd-party libs
from salt.ext import six
try:
# pylint: disable=no-name-in-module
from pyVmomi import vim
from pyVim.connect import Disconnect
HAS_LIBS = True
# pylint: enable=no-name-in-module
except ImportError:
HAS_LIBS = False
# Name under which this external pillar is registered with Salt.
__virtualname__ = 'vmware'
# Set up logging
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load this external pillar if the pyVmomi libraries imported
    successfully (HAS_LIBS is set by the module-level import guard).
    '''
    return __virtualname__ if HAS_LIBS else False
def ext_pillar(minion_id,
               pillar,  # pylint: disable=W0613
               **kwargs):
    '''
    Check vmware/vcenter for all data.

    Looks up the vim object whose ``property_name`` matches ``minion_id``
    and returns its requested attributes under ``pillar_key``.  Returns an
    empty dict unless this minion's ``virtual``/``os`` grain identifies it
    as a VMware guest or ESXi host.
    '''
    vmware_pillar = {}
    host = None
    username = None
    password = None
    property_types = []
    property_name = 'name'
    protocol = None
    port = None
    pillar_key = 'vmware'
    replace_default_attributes = False
    # Default attributes collected per vim object type; merged with the
    # generic ``pillar_attributes`` below when a matching object is found.
    type_specific_pillar_attributes = {
        'VirtualMachine': [
            {
                'config':
                    [
                        'version',
                        'guestId',
                        'files',
                        'tools',
                        'flags',
                        'memoryHotAddEnabled',
                        'cpuHotAddEnabled',
                        'cpuHotRemoveEnabled',
                        'datastoreUrl',
                        'swapPlacement',
                        'bootOptions',
                        'scheduledHardwareUpgradeInfo',
                        'memoryAllocation',
                        'cpuAllocation',
                    ]
            },
            {
                'summary':
                    [
                        {
                            'runtime':
                                [
                                    {
                                        'host':
                                            [
                                                'name',
                                                {'parent': 'name'},
                                            ]
                                    },
                                    'bootTime',
                                ]
                        },
                        {
                            'guest':
                                [
                                    'toolsStatus',
                                    'toolsVersionStatus',
                                    'toolsVersionStatus2',
                                    'toolsRunningStatus',
                                ]
                        },
                        {
                            'config':
                                [
                                    'cpuReservation',
                                    'memoryReservation',
                                ]
                        },
                        {
                            'storage':
                                [
                                    'committed',
                                    'uncommitted',
                                    'unshared',
                                ]
                        },
                        {'dasVmProtection': ['dasProtected']},
                    ]
            },
            {
                'storage':
                    [
                        {
                            'perDatastoreUsage':
                                [
                                    {
                                        'datastore': 'name'
                                    },
                                    'committed',
                                    'uncommitted',
                                    'unshared',
                                ]
                        }
                    ]
            },
        ],
        'HostSystem': [
            {
                'datastore':
                    [
                        'name',
                        'overallStatus',
                        {
                            'summary':
                                [
                                    'url',
                                    'freeSpace',
                                    'maxFileSize',
                                    'maxVirtualDiskCapacity',
                                    'maxPhysicalRDMFileSize',
                                    'maxVirtualRDMFileSize',
                                    {
                                        'vmfs':
                                            [
                                                'capacity',
                                                'blockSizeMb',
                                                'maxBlocks',
                                                'majorVersion',
                                                'version',
                                                'uuid',
                                                {
                                                    'extent':
                                                        [
                                                            'diskName',
                                                            'partition',
                                                        ]
                                                },
                                                'vmfsUpgradeable',
                                                'ssd',
                                                'local',
                                            ],
                                    },
                                ],
                        },
                        {'vm': 'name'}
                    ]
            },
            {
                'vm':
                    [
                        'name',
                        'overallStatus',
                        {
                            'summary':
                                [
                                    {'runtime': 'powerState'},
                                ]
                        },
                    ]
            },
        ]
    }
    # Attributes requested for every object type.
    pillar_attributes = [
        {
            'summary':
                [
                    'overallStatus'
                ]
        },
        {
            'network':
                [
                    'name',
                    {'config': {'distributedVirtualSwitch': 'name'}},
                ]
        },
        {
            'datastore':
                [
                    'name',
                ]
        },
        {
            'parent':
                [
                    'name'
                ]
        },
    ]

    if 'pillar_key' in kwargs:
        pillar_key = kwargs['pillar_key']
    vmware_pillar[pillar_key] = {}

    # host/username/password are mandatory; bail out (with an empty pillar)
    # if any of them is missing from the ext_pillar configuration.
    if 'host' not in kwargs:
        log.error('VMWare external pillar configured but host is not specified in ext_pillar configuration.')
        return vmware_pillar
    else:
        host = kwargs['host']
        log.debug('vmware_pillar -- host = %s', host)
    if 'username' not in kwargs:
        log.error('VMWare external pillar requested but username is not specified in ext_pillar configuration.')
        return vmware_pillar
    else:
        username = kwargs['username']
        log.debug('vmware_pillar -- username = %s', username)
    if 'password' not in kwargs:
        log.error('VMWare external pillar requested but password is not specified in ext_pillar configuration.')
        return vmware_pillar
    else:
        password = kwargs['password']
        # SECURITY FIX: never write credentials to the log, even at debug
        # level (the original logged the plaintext password here).
        log.debug('vmware_pillar -- password = <redacted>')
    if 'replace_default_attributes' in kwargs:
        replace_default_attributes = kwargs['replace_default_attributes']
        if replace_default_attributes:
            pillar_attributes = []
            type_specific_pillar_attributes = {}
    if 'property_types' in kwargs:
        for prop_type in kwargs['property_types']:
            if isinstance(prop_type, dict):
                # BUG FIX: ``dict.keys()[0]`` only works on Python 2 (dict
                # views are not indexable on Python 3); fetch the single key
                # by iteration instead.
                prop_type_name = next(iter(prop_type))
                property_types.append(getattr(vim, prop_type_name))
                if isinstance(prop_type[prop_type_name], list):
                    pillar_attributes = pillar_attributes + prop_type[prop_type_name]
                else:
                    log.warning('A property_type dict was specified, but its value is not a list')
            else:
                property_types.append(getattr(vim, prop_type))
    else:
        property_types = [vim.VirtualMachine]
    log.debug('vmware_pillar -- property_types = %s', property_types)
    if 'property_name' in kwargs:
        property_name = kwargs['property_name']
    else:
        property_name = 'name'
    log.debug('vmware_pillar -- property_name = %s', property_name)
    if 'protocol' in kwargs:
        protocol = kwargs['protocol']
        log.debug('vmware_pillar -- protocol = %s', protocol)
    if 'port' in kwargs:
        port = kwargs['port']
        log.debug('vmware_pillar -- port = %s', port)

    # Only query vCenter for minions that look like VMware guests/hosts.
    virtualgrain = None
    osgrain = None
    if 'virtual' in __grains__:
        virtualgrain = __grains__['virtual'].lower()
    if 'os' in __grains__:
        osgrain = __grains__['os'].lower()
    if virtualgrain == 'vmware' or osgrain == 'vmware esxi' or osgrain == 'esxi':
        vmware_pillar[pillar_key] = {}
        try:
            _conn = salt.utils.vmware.get_service_instance(
                host, username, password, protocol, port,
                verify_ssl=kwargs.get("verify_ssl", True)
            )
            if _conn:
                data = None
                for prop_type in property_types:
                    data = salt.utils.vmware.get_mor_by_property(_conn,
                                                                 prop_type,
                                                                 minion_id,
                                                                 property_name=property_name)
                    if data:
                        type_name = type(data).__name__.replace('vim.', '')
                        # vCenter "Custom Attributes" (annotations) are always
                        # returned when present, regardless of configuration.
                        if hasattr(data, 'availableField'):
                            vmware_pillar[pillar_key]['annotations'] = {}
                            for availableField in data.availableField:
                                for customValue in data.customValue:
                                    if availableField.key == customValue.key:
                                        vmware_pillar[pillar_key]['annotations'][availableField.name] = customValue.value
                        type_specific_pillar_attribute = []
                        if type_name in type_specific_pillar_attributes:
                            type_specific_pillar_attribute = type_specific_pillar_attributes[type_name]
                        vmware_pillar[pillar_key] = dictupdate.update(vmware_pillar[pillar_key],
                                                                      _crawl_attribute(data,
                                                                                       pillar_attributes +
                                                                                       type_specific_pillar_attribute))
                        # First matching object wins; stop searching types.
                        break
                # explicitly disconnect from vCenter when we are done, connections linger idle otherwise
                Disconnect(_conn)
            else:
                log.error(
                    'Unable to obtain a connection with %s, please verify '
                    'your vmware ext_pillar configuration', host
                )
        except RuntimeError:
            log.error(('A runtime error occurred in the vmware_pillar, '
                       'this is likely caused by an infinite recursion in '
                       'a requested attribute. Verify your requested attributes '
                       'and reconfigure the pillar.'))
        return vmware_pillar
    else:
        return {}
def _recurse_config_to_dict(t_data):
    '''
    Helper function to recurse through a vim object and attempt to return all
    child objects as plain python structures.

    Lists and dicts are converted element by element; objects exposing
    ``__dict__`` are expanded recursively; other leaf values pass through
    :func:`_serializer`.  ``None`` falls through and is returned implicitly.
    '''
    # Idiom fix: the original used ``not isinstance(t_data, type(None))``,
    # which is an anti-pattern for a simple None check.
    if t_data is not None:
        if isinstance(t_data, list):
            return [_recurse_config_to_dict(item) for item in t_data]
        elif isinstance(t_data, dict):
            t_dict = {}
            for k, v in six.iteritems(t_data):
                t_dict[k] = _recurse_config_to_dict(v)
            return t_dict
        else:
            if hasattr(t_data, '__dict__'):
                return _recurse_config_to_dict(t_data.__dict__)
            else:
                return _serializer(t_data)
def _crawl_attribute(this_data, this_attr):
    '''
    Helper function to crawl an attribute specification against a vim object
    and collect the requested values.
    '''
    # A list of objects fans out: crawl the same spec on every element.
    if isinstance(this_data, list):
        return [_crawl_attribute(element, this_attr) for element in this_data]
    if isinstance(this_attr, dict):
        # {attribute: sub_spec} -- descend into each attribute that actually
        # exists on the object.
        return {
            attr_name: _crawl_attribute(getattr(this_data, attr_name, None), sub_spec)
            for attr_name, sub_spec in this_attr.items()
            if hasattr(this_data, attr_name)
        }
    if isinstance(this_attr, list):
        # A list of specs: crawl each one and merge the resulting dicts.
        merged = {}
        for spec in this_attr:
            merged = dictupdate.update(merged, _crawl_attribute(this_data, spec))
        return merged
    # Leaf attribute name: convert its value into plain python structures.
    return {this_attr: _recurse_config_to_dict(getattr(this_data, this_attr, None))}
def _serializer(obj):
'''
helper function to serialize some objects for prettier return
'''
import datetime
if isinstance(obj, datetime.datetime):
if obj.utcoffset() is not None:
obj = obj - obj.utcoffset()
return obj.__str__()
return obj
| salt/pillar/vmware_pillar.py | 19,506 | Only return if python-etcd is installed
helper function to crawl an attribute specified for retrieval
helper function to recurse through a vim object and attempt to return all child objects
helper function to serialize some objects for prettier return
Check vmware/vcenter for all data
Pillar data from vCenter or an ESXi host
.. versionadded:: 2017.7.0
:depends: - pyVmomi
This external pillar can pull attributes from objects in vCenter or an ESXi host and provide those attributes
as pillar data to minions. This can allow for pillar based targeting of minions on ESXi host, Datastore, VM
configuration, etc. This setup requires only the salt master have access to the vCenter server/ESXi hosts.
The pillar will return an empty dict if the 'os' or 'virtual' grain are not 'VMWare', 'ESXi', or 'VMWare ESXi'.
Defaults
========
- The external pillar will search for Virtual Machines with the VM name matching the minion id.
- Data will be returned into the 'vmware' pillar key.
- The external pillar has a default set of properties to return for both VirtualMachine and HostSystem types.
Configuring the VMWare pillar
=============================
The required minimal configuration in the salt master ext_pillar setup:
.. code-block:: yaml
ext_pillar:
- vmware:
host: <vcenter/esx host>
username: <user to connect with>
password: <password>
Optionally, the following keyword arguments can be passed to the ext_pillar for customized configuration:
pillar_key
Optionally set the pillar key to return the data into. Default is ``vmware``.
protocol
Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
using the default protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
using the default port. Default port is ``443``.
property_name
Property name to match the minion id against. Defaults to ``name``.
property_types
Optionally specify a list of pyVmomi vim types to search for the minion id in 'property_name'.
Default is ``['VirtualMachine']``.
For example, to search both vim.VirtualMachine and vim.HostSystem object types:
.. code-block:: yaml
ext_pillar:
- vmware:
host: myesx
username: root
password: complex_password
property_types:
- VirtualMachine
- HostSystem
Additionally, the list of property types can be dicts, the item of the dict being a list specifying
the attribute to return for that vim object type.
The pillar will attempt to recurse the attribute and return all child attributes.
To explicitly specify deeper attributes without attempting to recurse an attribute, convert the list
item to a dict with the item of the dict being the child attributes to return. Follow this pattern
to return attributes as deep within the object as necessary.
.. note::
Be careful when specifying custom attributes! Many attributes have objects as attributes which
have the parent object as an attribute and which will cause the pillar to fail due to the attempt
to convert all sub-objects recursively (i.e. infinite attribute loops). Specifying only the
sub-attributes you would like returned will keep the infinite recursion from occurring.
A maximum recursion exception will occur in this case and the pillar will not return as desired.
.. code-block:: yaml
ext_pillar:
- vmware:
host: myvcenter
username: my_user
password: my_pass
replace_default_attributes: True
property_types:
- VirtualMachine:
- config:
- bootOptions:
- bootDelay
- bootRetryDelay
- HostSystem:
- datastore:
- name
The above ext_pillar example would return a pillar like the following for a VirtualMachine object that's
name matched the minion id:
.. code-block:: yaml
vmware:
config:
bootOptions:
bootDelay: 1000
bootRetryDelay: 1000
If you were to retrieve these virtual machine attributes via pyVmomi directly, this would be the same as
.. code-block:: python
vmObject.config.bootOptions.bootDelay
vmObject.config.bootOptionis.bootRetryDelay
The above ext_pillar example would return a pillar like the following for a HostySystem object that's name
matched the minion id:
.. code-block:: yaml
vmware:
datastore:
- name: Datastore1
- name: Datastore2
The 'datastore' property of a HostSystem object is a list of datastores, thus a list is returned.
replace_default_attributes
If custom attributes are specified by the property_types parameter, replace_default_attributes determines
if those will be added to default attributes (False) or replace the default attributes completely (True).
The default setting is 'False'.
.. note::
vCenter "Custom Attributes" (i.e. Annotations) will always be returned if it exists on the object as
part of the pillar regardless of this setting.
-*- coding: utf-8 -*- Import python libs Import salt libs Import 3rd-party libs pylint: disable=no-name-in-module pylint: enable=no-name-in-module Set up logging pylint: disable=W0613 explicitly disconnect from vCenter when we are done, connections linger idle otherwise | 6,037 | en | 0.678128 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from pyes.tests import ESTestCase
class IndexStatsTestCase(ESTestCase):
    # Fixture indexes three parent docs of the default type plus two child
    # docs of "test-type2" (linked via _parent) and refreshes the index so
    # the stats/optimize calls below observe a stable count of 5 documents.
    def setUp(self):
        super(IndexStatsTestCase, self).setUp()
        self.conn.indices.create_index(self.index_name)
        self.conn.indices.put_mapping(self.document_type, {'properties': self.get_default_mapping()}, self.index_name)
        self.conn.indices.put_mapping("test-type2", {"_parent": {"type": self.document_type}}, self.index_name)
        self.conn.index({"name": "Joe Tester", "parsedtext": "Joe Testere nice guy", "uuid": "11111", "position": 1},
                        self.index_name, self.document_type, 1)
        self.conn.index({"name": "data1", "value": "value1"}, self.index_name, "test-type2", 1, parent=1)
        self.conn.index({"name": "Bill Baloney", "parsedtext": "Bill Testere nice guy", "uuid": "22222", "position": 2},
                        self.index_name, self.document_type, 2)
        self.conn.index({"name": "data2", "value": "value2"}, self.index_name, "test-type2", 2, parent=2)
        self.conn.index({"name": "Bill Clinton", "parsedtext": """Bill is not
                nice guy""", "uuid": "33333", "position": 3}, self.index_name, self.document_type, 3)
        self.conn.default_indices = self.index_name
        self.conn.indices.refresh()

    def test_all_indices(self):
        # Stats with no argument cover every index.
        result = self.conn.indices.stats()
        self.assertEqual(5, result._all.total.docs.count)

    def test_select_indices(self):
        # Restricting stats to the fixture index yields the same 5 docs.
        result = self.conn.indices.stats(self.index_name)
        self.assertEqual(5, result._all.total.docs.count)

    def test_optimize(self):
        # Force-merge down to a single segment; no shard may report failure.
        result = self.conn.indices.optimize(indices=self.index_name, wait_for_merge=True, max_num_segments=1)
        self.assertEqual(result._shards["failed"], 0)
if __name__ == "__main__":
    # Allow this test module to be executed directly as a script.
    unittest.main()
| tests/test_index_stats.py | 1,862 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.utils.tools import group_entries
log = logging.getLogger('best_quality')

# Maps the configurable action names to the Entry method implementing them;
# 'do_nothing' is handled separately by leaving the action callable unset.
entry_actions = {
    'accept': Entry.accept,
    'reject': Entry.reject,
}
class FilterBestQuality(object):
    """Accept or reject entries depending on whether they carry the best
    available quality for their identifier within the current task."""

    schema = {
        'type': 'object',
        'properties': {
            'identified_by': {'type': 'string', 'default': 'auto'},
            'on_best': {'type': 'string', 'enum': ['accept', 'reject', 'do_nothing'], 'default': 'do_nothing'},
            'on_lower': {'type': 'string', 'enum': ['accept', 'reject', 'do_nothing'], 'default': 'reject'},
        },
        'additionalProperties': False
    }

    def on_task_filter(self, task, config):
        if not config:
            return

        identified_by = config['identified_by']
        if identified_by == 'auto':
            identified_by = '{{ id }}'

        def resolve(action_name):
            # 'do_nothing' maps to no callable at all.
            return entry_actions[action_name] if action_name != 'do_nothing' else None

        act_on_best = resolve(config['on_best'])
        act_on_lower = resolve(config['on_lower'])

        grouped = group_entries(task.accepted + task.undecided, identified_by)
        for identifier, group in grouped.items():
            if not group:
                continue
            # Rank highest quality (and best proper count) first.
            ranked = sorted(
                group,
                key=lambda e: (e['quality'], e.get('proper_count', 0)),
                reverse=True,
            )
            best = ranked[0]
            if act_on_best:
                act_on_best(best, 'has the best quality for identifier %s' % identifier)
            if act_on_lower:
                for lower in ranked[1:]:
                    act_on_lower(lower, 'lower quality for identifier %s' % identifier)
@event('plugin.register')
def register_plugin():
    # Registers the filter under the name used in task configs; api_ver=2
    # selects the keyword-config plugin API.
    plugin.register(FilterBestQuality, 'best_quality', api_ver=2)
| flexget/plugins/filter/best_quality.py | 2,036 | Sort entities in order of quality and best proper First entry will be the best quality | 86 | en | 0.877443 |
from mpp.models import SQLTestCase
from mpp.models import SQLConcurrencyTestCase
# NOTE: the docstring tags below are tinc test metadata and are parsed by the
# framework; do not reword them.
class HcatalogPrimitiveTypes(SQLConcurrencyTestCase):
    """
    @product_version gpdb: [2.0-]
    @db_name pxfautomation
    @concurrency 1
    @gpdiff True
    """
    sql_dir = 'sql'        # input SQL files for this case
    ans_dir = 'expected'   # expected results compared by gpdiff
    out_dir = 'output'     # actual query output written here
| automation/tincrepo/main/pxf/features/hcatalog/primitive_types/runTest.py | 318 | @product_version gpdb: [2.0-]
@db_name pxfautomation
@concurrency 1
@gpdiff True | 81 | en | 0.307618 |
'''
Given one or more regular expressions on the command line, searches
the PATH for all files that match.
Copyright (C) 2002 GDS Software
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA
See http://www.gnu.org/licenses/licenses.html for more details.
'''
import sys, getopt, re, os, string
__version__ = "$Id: where.py,v 1.4 2002/08/22 02:25:57 donp Exp $"
ignore_caseG = 0  # set to 1 by the -i command line option
matches = {} # They'll get stored in here by filename so that there are
             # no duplicates.
def CheckDirectory(dir, regexps):
    '''Record, in the global ``matches`` dict, every regular file in
    directory ``dir`` whose name is matched by at least one of the compiled
    regular expressions in ``regexps``.

    dir is a directory name, regexps is a list of compiled
    regular expressions.
    '''
    global matches
    currdir = os.getcwd()
    try:
        os.chdir(dir)
        for f in os.listdir(dir):
            if not os.path.isfile(f):
                continue
            for regexp in regexps:
                if regexp.search(f) is not None:
                    matches[dir + "/" + f] = ""
    except OSError:
        # BUG FIX: the original bare ``except:`` silently swallowed every
        # exception (including KeyboardInterrupt/SystemExit); only OS-level
        # failures (missing/unreadable directory) should be warned about.
        sys.stderr.write("Warning: directory '%s' in PATH not found\n" % dir)
    finally:
        # BUG FIX: restore the working directory even if an exception
        # escapes; previously a stray exception could leave cwd changed.
        os.chdir(currdir)
# NOTE: Python 2 only -- uses ``except ..., name`` syntax and print
# statements throughout.
def main():
    '''Parse command line options, search every directory on PATH for files
    matching the given regular expressions, and print each match with
    forward slashes.'''
    global ignore_caseG
    try:
        optlist, regexps = getopt.getopt(sys.argv[1:], "i")
    except getopt.error, str:
        print str
        sys.exit(1)
    for opt in optlist:
        if opt[0] == "-i":
            ignore_caseG = 1
    if len(regexps) == 0:
        print "Usage: where [-i] regexp1 [regexp2...]"
        print " regexps are python re style"
        sys.exit(1)
    # Get a list of the directories in the path
    sep = ":"
    key = "PATH"
    if sys.platform == "win32":
        sep = ";"
    if key in os.environ.keys():
        PATH = os.environ[key]
        path = re.split(sep, os.environ[key])
    else:
        print "No PATH variable in environment"
        sys.exit(1)
    # Make a list of compiled regular expressions
    regexp_list = []
    for regex in regexps:
        if ignore_caseG:
            regexp_list.append(re.compile(regex, re.I))
        else:
            regexp_list.append(re.compile(regex))
    # Now check each command line regexp in each directory
    for dir in path:
        CheckDirectory(dir, regexp_list)
    # Matches were deduplicated via the global dict; sort for stable output.
    list = []
    for key in matches.keys():
        list.append(key)
    list.sort()
    for file in list:
        print string.replace(file, "\\", "/")
# Script entry point: runs unconditionally on import (no __main__ guard).
main()
| vyperlogix/gds/where.py | 2,980 | They'll get stored in here by filename so that there are no duplicates. Get a list of the directories in the path Make a list of compiled regular expressions Now check each command line regexp in each directory | 210 | en | 0.909919 |
import numpy as np
import pandas as pd
from shapely import prepared
from geopandas import GeoDataFrame
from geopandas import _compat as compat
from geopandas.array import _check_crs, _crs_mismatch_warn
def sjoin(
    left_df, right_df, how="inner", op="intersects", lsuffix="left", rsuffix="right"
):
    """Spatial join of two GeoDataFrames.

    Parameters
    ----------
    left_df, right_df : GeoDataFrames
    how : string, default 'inner'
        The type of join:

        * 'left': use keys from left_df; retain only left_df geometry column
        * 'right': use keys from right_df; retain only right_df geometry column
        * 'inner': use intersection of keys from both dfs; retain only
          left_df geometry column
    op : string, default 'intersects'
        Binary predicate, one of {'intersects', 'contains', 'within'}.
        See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.
    lsuffix : string, default 'left'
        Suffix to apply to overlapping column names (left GeoDataFrame).
    rsuffix : string, default 'right'
        Suffix to apply to overlapping column names (right GeoDataFrame).

    Returns
    -------
    GeoDataFrame
        Joined frame; which geometry column is retained follows the rules
        described for ``how`` above.
    """
    # --- argument validation ---------------------------------------------
    if not isinstance(left_df, GeoDataFrame):
        raise ValueError(
            "'left_df' should be GeoDataFrame, got {}".format(type(left_df))
        )

    if not isinstance(right_df, GeoDataFrame):
        raise ValueError(
            "'right_df' should be GeoDataFrame, got {}".format(type(right_df))
        )

    allowed_hows = ["left", "right", "inner"]
    if how not in allowed_hows:
        raise ValueError(
            '`how` was "%s" but is expected to be in %s' % (how, allowed_hows)
        )

    allowed_ops = ["contains", "within", "intersects"]
    if op not in allowed_ops:
        raise ValueError(
            '`op` was "%s" but is expected to be in %s' % (op, allowed_ops)
        )

    if not _check_crs(left_df, right_df):
        _crs_mismatch_warn(left_df, right_df, stacklevel=3)

    index_left = "index_%s" % lsuffix
    index_right = "index_%s" % rsuffix

    # due to GH 352
    if any(left_df.columns.isin([index_left, index_right])) or any(
        right_df.columns.isin([index_left, index_right])
    ):
        raise ValueError(
            "'{0}' and '{1}' cannot be names in the frames being"
            " joined".format(index_left, index_right)
        )

    # Attempt to re-use spatial indexes, otherwise generate the spatial index
    # for the longer dataframe. If we are joining to an empty dataframe,
    # don't bother generating the index.
    if right_df._sindex_generated or (
        not left_df._sindex_generated and right_df.shape[0] > left_df.shape[0]
    ):
        tree_idx = right_df.sindex if len(left_df) > 0 else None
        tree_idx_right = True
    else:
        tree_idx = left_df.sindex if len(right_df) > 0 else None
        tree_idx_right = False

    # the rtree spatial index only allows limited (numeric) index types, but an
    # index in geopandas may be any arbitrary dtype. so reset both indices now
    # and store references to the original indices, to be reaffixed later.
    # GH 352
    left_df = left_df.copy(deep=True)
    try:
        left_index_name = left_df.index.name
        left_df.index = left_df.index.rename(index_left)
    except TypeError:
        # MultiIndex: rename every level with a positional suffix instead.
        index_left = [
            "index_%s" % lsuffix + str(pos)
            for pos, ix in enumerate(left_df.index.names)
        ]
        left_index_name = left_df.index.names
        left_df.index = left_df.index.rename(index_left)
    left_df = left_df.reset_index()

    right_df = right_df.copy(deep=True)
    try:
        right_index_name = right_df.index.name
        right_df.index = right_df.index.rename(index_right)
    except TypeError:
        # MultiIndex: rename every level with a positional suffix instead.
        index_right = [
            "index_%s" % rsuffix + str(pos)
            for pos, ix in enumerate(right_df.index.names)
        ]
        right_index_name = right_df.index.names
        right_df.index = right_df.index.rename(index_right)
    right_df = right_df.reset_index()

    if op == "within":
        # within implemented as the inverse of contains; swap names
        left_df, right_df = right_df, left_df
        tree_idx_right = not tree_idx_right

    r_idx = np.empty((0, 0))
    l_idx = np.empty((0, 0))
    # get rtree spatial index. If tree_idx does not exist, it is due to either a
    # failure to generate the index (e.g., if the column is empty), or the
    # other dataframe is empty so it wasn't necessary to generate it.
    if tree_idx_right and tree_idx:
        # Query each left bounding box against the right-hand r-tree.
        idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(
            lambda x: list(tree_idx.intersection(x)) if not x == () else []
        )
        idxmatch = idxmatch[idxmatch.apply(len) > 0]
        # indexes of overlapping boundaries
        if idxmatch.shape[0] > 0:
            r_idx = np.concatenate(idxmatch.values)
            l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
    elif not tree_idx_right and tree_idx:
        # tree_idx_df == 'left'
        idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(
            lambda x: list(tree_idx.intersection(x)) if not x == () else []
        )
        idxmatch = idxmatch[idxmatch.apply(len) > 0]
        if idxmatch.shape[0] > 0:
            # indexes of overlapping boundaries
            l_idx = np.concatenate(idxmatch.values)
            r_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])

    if len(r_idx) > 0 and len(l_idx) > 0:
        # Candidate pairs found by bounding boxes; refine them with the exact
        # geometric predicate.
        if compat.USE_PYGEOS:
            import pygeos

            predicate_d = {
                "intersects": pygeos.intersects,
                "contains": pygeos.contains,
                "within": pygeos.contains,
            }
            check_predicates = predicate_d[op]
        else:
            # Vectorize predicate operations
            def find_intersects(a1, a2):
                return a1.intersects(a2)

            def find_contains(a1, a2):
                return a1.contains(a2)

            predicate_d = {
                "intersects": find_intersects,
                "contains": find_contains,
                "within": find_contains,
            }

            check_predicates = np.vectorize(predicate_d[op])

        if compat.USE_PYGEOS:
            res = check_predicates(
                left_df.geometry[l_idx].values.data,
                right_df[right_df.geometry.name][r_idx].values.data,
            )
        else:
            # prepared geometries speed up repeated predicate evaluation.
            res = check_predicates(
                left_df.geometry.apply(lambda x: prepared.prep(x))[l_idx],
                right_df[right_df.geometry.name][r_idx],
            )

        result = pd.DataFrame(np.column_stack([l_idx, r_idx, res]))
        result.columns = ["_key_left", "_key_right", "match_bool"]
        result = pd.DataFrame(result[result["match_bool"] == 1]).drop(
            "match_bool", axis=1
        )
    else:
        # when output from the join has no overlapping geometries
        result = pd.DataFrame(columns=["_key_left", "_key_right"], dtype=float)

    if op == "within":
        # within implemented as the inverse of contains; swap names
        left_df, right_df = right_df, left_df
        result = result.rename(
            columns={"_key_left": "_key_right", "_key_right": "_key_left"}
        )

    # --- assemble the output frame per ``how`` ----------------------------
    if how == "inner":
        result = result.set_index("_key_left")
        joined = (
            left_df.merge(result, left_index=True, right_index=True)
            .merge(
                right_df.drop(right_df.geometry.name, axis=1),
                left_on="_key_right",
                right_index=True,
                suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
            )
            .set_index(index_left)
            .drop(["_key_right"], axis=1)
        )
        if isinstance(index_left, list):
            joined.index.names = left_index_name
        else:
            joined.index.name = left_index_name

    elif how == "left":
        result = result.set_index("_key_left")
        joined = (
            left_df.merge(result, left_index=True, right_index=True, how="left")
            .merge(
                right_df.drop(right_df.geometry.name, axis=1),
                how="left",
                left_on="_key_right",
                right_index=True,
                suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
            )
            .set_index(index_left)
            .drop(["_key_right"], axis=1)
        )
        if isinstance(index_left, list):
            joined.index.names = left_index_name
        else:
            joined.index.name = left_index_name

    else:  # how == 'right':
        joined = (
            left_df.drop(left_df.geometry.name, axis=1)
            .merge(
                result.merge(
                    right_df, left_on="_key_right", right_index=True, how="right"
                ),
                left_index=True,
                right_on="_key_left",
                how="right",
            )
            .set_index(index_right)
            .drop(["_key_left", "_key_right"], axis=1)
        )
        if isinstance(index_right, list):
            joined.index.names = right_index_name
        else:
            joined.index.name = right_index_name

    return joined
| geopandas/tools/sjoin.py | 9,252 | Spatial join of two GeoDataFrames.
Parameters
----------
left_df, right_df : GeoDataFrames
how : string, default 'inner'
The type of join:
* 'left': use keys from left_df; retain only left_df geometry column
* 'right': use keys from right_df; retain only right_df geometry column
* 'inner': use intersection of keys from both dfs; retain only
left_df geometry column
op : string, default 'intersects'
Binary predicate, one of {'intersects', 'contains', 'within'}.
See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.
lsuffix : string, default 'left'
Suffix to apply to overlapping column names (left GeoDataFrame).
rsuffix : string, default 'right'
Suffix to apply to overlapping column names (right GeoDataFrame).
due to GH 352 Attempt to re-use spatial indexes, otherwise generate the spatial index for the longer dataframe. If we are joining to an empty dataframe, don't bother generating the index. the rtree spatial index only allows limited (numeric) index types, but an index in geopandas may be any arbitrary dtype. so reset both indices now and store references to the original indices, to be reaffixed later. GH 352 within implemented as the inverse of contains; swap names get rtree spatial index. If tree_idx does not exist, it is due to either a failure to generate the index (e.g., if the column is empty), or the other dataframe is empty so it wasn't necessary to generate it. indexes of overlapping boundaries tree_idx_df == 'left' indexes of overlapping boundaries Vectorize predicate operations when output from the join has no overlapping geometries within implemented as the inverse of contains; swap names how == 'right': | 1,706 | en | 0.73626 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/build.py."""
# pylint: disable=invalid-name
import StringIO
import collections
import json
import os
import random
import subprocess
import sys
import tempfile
import threading
# pylint: disable=relative-import
import build
from core.tests import test_utils
# pylint: enable=relative-import
# Sandbox output directory for build artifacts created by these tests.
TEST_DIR = os.path.join('core', 'tests', 'build', '')
# Fixture sources mirroring the real project layout (assets, extensions,
# templates and their compiled-JS counterparts).
TEST_SOURCE_DIR = os.path.join('core', 'tests', 'build_sources')

MOCK_ASSETS_DEV_DIR = os.path.join(TEST_SOURCE_DIR, 'assets', '')
MOCK_ASSETS_OUT_DIR = os.path.join(TEST_DIR, 'static', 'assets', '')
MOCK_EXTENSIONS_DEV_DIR = os.path.join(TEST_SOURCE_DIR, 'extensions', '')
MOCK_EXTENSIONS_COMPILED_JS_DIR = os.path.join(
    TEST_SOURCE_DIR, 'local_compiled_js', 'extensions', '')
MOCK_TEMPLATES_DEV_DIR = os.path.join(TEST_SOURCE_DIR, 'templates', '')
MOCK_TEMPLATES_COMPILED_JS_DIR = os.path.join(
    TEST_SOURCE_DIR, 'local_compiled_js', 'templates', '')

MOCK_COMPILED_JS_DIR = os.path.join(TEST_SOURCE_DIR, 'compiled_js_dir', '')
MOCK_TSC_OUTPUT_LOG_FILEPATH = os.path.join(
    TEST_SOURCE_DIR, 'mock_tsc_output_log.txt')

# Deliberately non-existent paths used to exercise error handling.
INVALID_INPUT_FILEPATH = os.path.join(
    TEST_DIR, 'invalid', 'path', 'to', 'input.js')
INVALID_OUTPUT_FILEPATH = os.path.join(
    TEST_DIR, 'invalid', 'path', 'to', 'output.js')

EMPTY_DIR = os.path.join(TEST_DIR, 'empty', '')
# Override Pylint's protected access rule due to multiple private functions in
# the file.
# pylint: disable=protected-access
class BuildTests(test_utils.GenericTestBase):
"""Test the build methods."""
def tearDown(self):
super(BuildTests, self).tearDown()
build.safe_delete_directory_tree(TEST_DIR)
build.safe_delete_directory_tree(EMPTY_DIR)
def test_minify(self):
"""Tests _minify with an invalid filepath."""
with self.assertRaises(subprocess.CalledProcessError) as called_process:
build._minify(INVALID_INPUT_FILEPATH, INVALID_OUTPUT_FILEPATH)
# `returncode` is the exit status of the child process.
self.assertEqual(called_process.exception.returncode, 1)
def test_minify_and_create_sourcemap(self):
"""Tests _minify_and_create_sourcemap with an invalid filepath."""
with self.assertRaises(subprocess.CalledProcessError) as called_process:
build._minify_and_create_sourcemap(
INVALID_INPUT_FILEPATH, INVALID_OUTPUT_FILEPATH)
# `returncode` is the exit status of the child process.
self.assertEqual(called_process.exception.returncode, 1)
def test_ensure_files_exist(self):
"""Test _ensure_files_exist raises exception with a non-existent
filepath.
"""
non_existent_filepaths = [INVALID_INPUT_FILEPATH]
# Exception will be raised at first file determined to be non-existent.
with self.assertRaisesRegexp(
OSError, ('File %s does not exist.') % non_existent_filepaths[0]):
build._ensure_files_exist(non_existent_filepaths)
def test_join_files(self):
"""Determine third_party.js contains the content of the first 10 JS
files in /third_party/static.
"""
# Prepare a file_stream object from StringIO.
third_party_js_stream = StringIO.StringIO()
# Get all filepaths from manifest.json.
dependency_filepaths = build.get_dependencies_filepaths()
# Join and write all JS files in /third_party/static to file_stream.
build._join_files(dependency_filepaths['js'], third_party_js_stream)
counter = 0
# Only checking first 10 files.
JS_FILE_COUNT = 10
for js_filepath in dependency_filepaths['js']:
if counter == JS_FILE_COUNT:
break
with open(js_filepath, 'r') as js_file:
# Assert that each line is copied over to file_stream object.
for line in js_file:
self.assertIn(line, third_party_js_stream.getvalue())
counter += 1
def test_generate_copy_tasks_for_fonts(self):
"""Test _generate_copy_tasks_for_fonts ensures that the number of copy
tasks matches the number of font files.
"""
copy_tasks = collections.deque()
# Get all filepaths from manifest.json.
dependency_filepaths = build.get_dependencies_filepaths()
# Setup a sandbox folder for copying fonts.
test_target = os.path.join('target', 'fonts', '')
self.assertEqual(len(copy_tasks), 0)
copy_tasks += build._generate_copy_tasks_for_fonts(
dependency_filepaths['fonts'], test_target)
# Asserting the same number of copy tasks and number of font files.
self.assertEqual(len(copy_tasks), len(dependency_filepaths['fonts']))
def test_insert_hash(self):
"""Test _insert_hash returns correct filenames with provided hashes."""
self.assertEqual(
build._insert_hash('file.js', '123456'), 'file.123456.js')
self.assertEqual(
build._insert_hash(
'path/to/file.js', '654321'), 'path/to/file.654321.js')
self.assertEqual(
build._insert_hash('file.min.js', 'abcdef'), 'file.min.abcdef.js')
self.assertEqual(
build._insert_hash(
'path/to/file.min.js', 'fedcba'), 'path/to/file.min.fedcba.js')
def test_get_file_count(self):
"""Test get_file_count returns the correct number of files, excluding
file with extensions in FILE_EXTENSIONS_TO_IGNORE and files that should
not be built.
"""
all_inclusive_file_count = 0
for _, _, files in os.walk(MOCK_EXTENSIONS_DEV_DIR):
all_inclusive_file_count += len(files)
ignored_file_count = 0
for _, _, files in os.walk(MOCK_EXTENSIONS_DEV_DIR):
for filename in files:
if not build.should_file_be_built(filename) or any(
filename.endswith(p)
for p in build.FILE_EXTENSIONS_TO_IGNORE):
ignored_file_count += 1
self.assertEqual(
all_inclusive_file_count - ignored_file_count,
build.get_file_count(MOCK_EXTENSIONS_DEV_DIR))
def test_compare_file_count(self):
"""Test _compare_file_count raises exception when there is a
mismatched file count between 2 dirs list.
"""
# Test when both lists contain single directory.
build.ensure_directory_exists(EMPTY_DIR)
source_dir_file_count = build.get_file_count(EMPTY_DIR)
assert source_dir_file_count == 0
target_dir_file_count = build.get_file_count(MOCK_ASSETS_DEV_DIR)
# Ensure that ASSETS_DEV_DIR has at least 1 file.
assert target_dir_file_count > 0
with self.assertRaisesRegexp(
ValueError, (
'%s files in first dir list != %s files in second dir list') %
(source_dir_file_count, target_dir_file_count)):
build._compare_file_count([EMPTY_DIR], [MOCK_ASSETS_DEV_DIR])
# Test when one of the lists contain multiple directories.
MOCK_EXTENSIONS_DIR_LIST = [
MOCK_EXTENSIONS_DEV_DIR, MOCK_EXTENSIONS_COMPILED_JS_DIR]
target_dir_file_count = build.get_file_count(
MOCK_EXTENSIONS_DEV_DIR) + build.get_file_count(
MOCK_EXTENSIONS_COMPILED_JS_DIR)
# Ensure that MOCK_EXTENSIONS_DIR has at least 1 file.
assert target_dir_file_count > 0
with self.assertRaisesRegexp(
ValueError, (
'%s files in first dir list != %s files in second dir list') %
(source_dir_file_count, target_dir_file_count)):
build._compare_file_count([EMPTY_DIR], MOCK_EXTENSIONS_DIR_LIST)
# Reset EMPTY_DIRECTORY to clean state.
build.safe_delete_directory_tree(EMPTY_DIR)
    def test_verify_filepath_hash(self):
        """Test _verify_filepath_hash raises exception:
            1) When there is an empty hash dict.
            2) When a filename is expected to contain hash but does not.
            3) When there is a hash in filename that cannot be found in
                hash dict.
        """
        # Final filepath example: base.240933e7564bd72a4dde42ee23260c5f.html.
        file_hashes = dict()
        base_filename = 'base.html'
        # Case 1: an empty hash dict is rejected outright.
        with self.assertRaisesRegexp(ValueError, 'Hash dict is empty'):
            build._verify_filepath_hash(base_filename, file_hashes)
        # Generate a random hash dict for base.html.
        file_hashes = {base_filename: random.getrandbits(128)}
        # Case 2: the plain filename carries no hash segment.
        with self.assertRaisesRegexp(
            ValueError, '%s is expected to contain MD5 hash' % base_filename):
            build._verify_filepath_hash(base_filename, file_hashes)
        # A filename without any '.' cannot be split into name/hash/ext.
        bad_filepath = 'README'
        with self.assertRaisesRegexp(
            ValueError, 'Filepath has less than 2 partitions after splitting'):
            build._verify_filepath_hash(bad_filepath, file_hashes)
        # Case 3: hash in the filename differs from every value in the dict.
        hashed_base_filename = build._insert_hash(
            base_filename, random.getrandbits(128))
        with self.assertRaisesRegexp(
            KeyError,
            'Hash from file named %s does not match hash dict values' %
            hashed_base_filename):
            build._verify_filepath_hash(hashed_base_filename, file_hashes)
    def test_process_html(self):
        """Test process_html removes whitespaces and adds hash to filepaths."""
        BASE_HTML_SOURCE_PATH = os.path.join(
            MOCK_TEMPLATES_DEV_DIR, 'base.html')
        BASE_JS_RELATIVE_PATH = os.path.join('pages', 'Base.js')
        BASE_JS_SOURCE_PATH = os.path.join(
            MOCK_TEMPLATES_COMPILED_JS_DIR, BASE_JS_RELATIVE_PATH)
        build._ensure_files_exist([BASE_HTML_SOURCE_PATH, BASE_JS_SOURCE_PATH])
        # Prepare a file_stream object from StringIO.
        minified_html_file_stream = StringIO.StringIO()
        # Obtain actual file hashes of /templates to add hash to all filepaths
        # within the HTML file. The end result will look like:
        # E.g <script ... App.js></script>
        # --> <script ... App.[hash].js></script>.
        # Only need to hash Base.js.
        with self.swap(build, 'FILE_EXTENSIONS_TO_IGNORE', ('.html',)):
            file_hashes = build.get_file_hashes(MOCK_TEMPLATES_DEV_DIR)
            file_hashes.update(
                build.get_file_hashes(MOCK_TEMPLATES_COMPILED_JS_DIR))
        # Assert that base.html has white spaces and has original filepaths.
        with open(BASE_HTML_SOURCE_PATH, 'r') as source_base_file:
            source_base_file_content = source_base_file.read()
            self.assertRegexpMatches(
                source_base_file_content, r'\s{2,}',
                msg='No white spaces detected in %s unexpectedly'
                % BASE_HTML_SOURCE_PATH)
            # Look for templates/pages/Base.js in source_base_file_content.
            self.assertIn(BASE_JS_RELATIVE_PATH, source_base_file_content)
        # Build base.html file.
        with open(BASE_HTML_SOURCE_PATH, 'r') as source_base_file:
            build.process_html(
                source_base_file, minified_html_file_stream, file_hashes)
        minified_html_file_content = minified_html_file_stream.getvalue()
        # Runs of 2+ whitespace characters must have been collapsed.
        self.assertNotRegexpMatches(
            minified_html_file_content, r'\s{2,}',
            msg='All white spaces must be removed from %s' %
            BASE_HTML_SOURCE_PATH)
        # Assert that hashes are inserted into filenames in base.html.
        # Final filepath in base.html example:
        # /build/templates/head/pages/Base.081ce90f17ecdf07701d83cb860985c2.js.
        final_filename = build._insert_hash(
            BASE_JS_RELATIVE_PATH, file_hashes[BASE_JS_RELATIVE_PATH])
        # Look for templates/pages/Base.081ce90f17ecdf07701d83cb860985c2.js in
        # minified_html_file_content.
        self.assertIn(final_filename, minified_html_file_content)
    def test_should_file_be_built(self):
        """Test should_file_be_built returns the correct boolean value for
        filepath that should be built.
        """
        # Representative filepaths exercising each build rule.
        service_js_filepath = os.path.join(
            'local_compiled_js', 'core', 'pages', 'AudioService.js')
        generated_parser_js_filepath = os.path.join(
            'core', 'expressions', 'ExpressionParserService.js')
        compiled_generated_parser_js_filepath = os.path.join(
            'local_compiled_js', 'core', 'expressions',
            'ExpressionParserService.js')
        service_ts_filepath = os.path.join('core', 'pages', 'AudioService.ts')
        spec_js_filepath = os.path.join('core', 'pages', 'AudioServiceSpec.js')
        protractor_filepath = os.path.join('extensions', 'protractor.js')
        python_controller_filepath = os.path.join('base.py')
        pyc_test_filepath = os.path.join(
            'core', 'controllers', 'base.pyc')
        python_test_filepath = os.path.join(
            'core', 'tests', 'base_test.py')
        # Spec files, protractor files, TS sources, python test files and
        # .pyc files are excluded; compiled JS and controllers are built.
        self.assertFalse(build.should_file_be_built(spec_js_filepath))
        self.assertFalse(build.should_file_be_built(protractor_filepath))
        self.assertTrue(build.should_file_be_built(service_js_filepath))
        self.assertFalse(build.should_file_be_built(service_ts_filepath))
        self.assertFalse(build.should_file_be_built(python_test_filepath))
        self.assertFalse(build.should_file_be_built(pyc_test_filepath))
        self.assertTrue(build.should_file_be_built(python_controller_filepath))
        # Swapping out constants to check if the reverse is true.
        # ALL JS files that ends with ...Service.js should not be built.
        with self.swap(
            build, 'JS_FILENAME_SUFFIXES_TO_IGNORE', ('Service.js',)):
            self.assertFalse(build.should_file_be_built(service_js_filepath))
            self.assertTrue(build.should_file_be_built(spec_js_filepath))
        # Exact-path exclusion only applies to the uncompiled source path.
        with self.swap(
            build, 'JS_FILEPATHS_NOT_TO_BUILD', (
                'core/expressions/ExpressionParserService.js',)):
            self.assertFalse(
                build.should_file_be_built(generated_parser_js_filepath))
            self.assertTrue(
                build.should_file_be_built(
                    compiled_generated_parser_js_filepath))
    def test_hash_should_be_inserted(self):
        """Test hash_should_be_inserted returns the correct boolean value
        for filepath that should be hashed.
        """
        # Filepaths matching a FILEPATHS_NOT_TO_RENAME pattern are exempt
        # from hashing.
        with self.swap(
            build, 'FILEPATHS_NOT_TO_RENAME', (
                '*.py', 'path/to/fonts/*', 'path/to/third_party.min.js.map',
                'path/to/third_party.min.css.map')):
            self.assertFalse(build.hash_should_be_inserted(
                'path/to/fonts/fontawesome-webfont.svg'))
            self.assertFalse(build.hash_should_be_inserted(
                'path/to/third_party.min.css.map'))
            self.assertFalse(build.hash_should_be_inserted(
                'path/to/third_party.min.js.map'))
            # Paths that match no exempt pattern do get hashed.
            self.assertTrue(build.hash_should_be_inserted(
                'path/to/wrongFonts/fonta.eot'))
            self.assertTrue(build.hash_should_be_inserted(
                'rich_text_components/Video/protractor.js'))
            self.assertFalse(build.hash_should_be_inserted(
                'main.py'))
            self.assertFalse(build.hash_should_be_inserted(
                'extensions/domain.py'))
def test_generate_copy_tasks_to_copy_from_source_to_target(self):
"""Test generate_copy_tasks_to_copy_from_source_to_target queues up
the same number of copy tasks as the number of files in the directory.
"""
assets_hashes = build.get_file_hashes(MOCK_ASSETS_DEV_DIR)
total_file_count = build.get_file_count(MOCK_ASSETS_DEV_DIR)
copy_tasks = collections.deque()
self.assertEqual(len(copy_tasks), 0)
copy_tasks += build.generate_copy_tasks_to_copy_from_source_to_target(
MOCK_ASSETS_DEV_DIR, MOCK_ASSETS_OUT_DIR, assets_hashes)
self.assertEqual(len(copy_tasks), total_file_count)
    def test_is_file_hash_provided_to_frontend(self):
        """Test is_file_hash_provided_to_frontend returns the correct boolean
        value for filepath that should be provided to frontend.
        """
        # Exact filepath entries match only themselves.
        with self.swap(
            build, 'FILEPATHS_PROVIDED_TO_FRONTEND',
            ('path/to/file.js', 'path/to/file.html', 'file.js')):
            self.assertTrue(
                build.is_file_hash_provided_to_frontend('path/to/file.js'))
            self.assertTrue(
                build.is_file_hash_provided_to_frontend('path/to/file.html'))
            self.assertTrue(build.is_file_hash_provided_to_frontend('file.js'))
        # Wildcard entries match by glob-style pattern.
        with self.swap(
            build, 'FILEPATHS_PROVIDED_TO_FRONTEND',
            ('path/to/*', '*.js', '*_end.html')):
            self.assertTrue(
                build.is_file_hash_provided_to_frontend('path/to/file.js'))
            self.assertTrue(
                build.is_file_hash_provided_to_frontend('path/to/file.html'))
            self.assertTrue(build.is_file_hash_provided_to_frontend('file.js'))
            self.assertFalse(
                build.is_file_hash_provided_to_frontend('path/file.css'))
            self.assertTrue(
                build.is_file_hash_provided_to_frontend('good_end.html'))
            self.assertFalse(
                build.is_file_hash_provided_to_frontend('bad_end.css'))
def test_get_filepaths_by_extensions(self):
"""Test get_filepaths_by_extensions only returns filepaths in
directory with given extensions.
"""
filepaths = []
build.ensure_directory_exists(MOCK_ASSETS_DEV_DIR)
extensions = ('.json', '.svg',)
self.assertEqual(len(filepaths), 0)
filepaths = build.get_filepaths_by_extensions(
MOCK_ASSETS_DEV_DIR, extensions)
for filepath in filepaths:
self.assertTrue(any(filepath.endswith(p) for p in extensions))
file_count = 0
for _, _, filenames in os.walk(MOCK_ASSETS_DEV_DIR):
for filename in filenames:
if any(filename.endswith(p) for p in extensions):
file_count += 1
self.assertEqual(len(filepaths), file_count)
filepaths = []
extensions = ('.pdf', '.viminfo', '.idea',)
self.assertEqual(len(filepaths), 0)
filepaths = build.get_filepaths_by_extensions(
MOCK_ASSETS_DEV_DIR, extensions)
self.assertEqual(len(filepaths), 0)
    def test_get_file_hashes(self):
        """Test get_file_hashes gets hashes of all files in directory,
        excluding file with extensions in FILE_EXTENSIONS_TO_IGNORE.
        """
        # Prevent getting hashes of HTML files.
        with self.swap(build, 'FILE_EXTENSIONS_TO_IGNORE', ('.html',)):
            file_hashes = dict()
            self.assertEqual(len(file_hashes), 0)
            file_hashes = build.get_file_hashes(MOCK_EXTENSIONS_DEV_DIR)
            self.assertGreater(len(file_hashes), 0)
            # Assert that each hash's filepath exists and does not include
            # files with extensions in FILE_EXTENSIONS_TO_IGNORE.
            # Keys are filepaths relative to the hashed directory.
            for filepath in file_hashes:
                abs_filepath = os.path.join(MOCK_EXTENSIONS_DEV_DIR, filepath)
                self.assertTrue(os.path.isfile(abs_filepath))
                self.assertFalse(filepath.endswith('.html'))
def test_filter_hashes(self):
"""Test filter_hashes filters the provided hash correctly."""
# Set constant to provide everything to frontend.
with self.swap(build, 'FILEPATHS_PROVIDED_TO_FRONTEND', ('*',)):
hashes = {'path/to/file.js': '123456',
'path/file.min.js': '123456'}
filtered_hashes = build.filter_hashes(hashes)
self.assertEqual(
filtered_hashes['/path/to/file.js'],
hashes['path/to/file.js'])
self.assertEqual(
filtered_hashes['/path/file.min.js'],
hashes['path/file.min.js'])
with self.swap(
build, 'FILEPATHS_PROVIDED_TO_FRONTEND',
('test_path/*', 'path/to/file.js')):
hashes = {'path/to/file.js': '123456',
'test_path/to/file.html': '123456',
'test_path/to/file.js': 'abcdef',
'path/path/file.js': 'zyx123',
'file.html': '321xyz'}
filtered_hashes = build.filter_hashes(hashes)
self.assertTrue(filtered_hashes.has_key('/path/to/file.js'))
self.assertTrue(filtered_hashes.has_key('/test_path/to/file.html'))
self.assertTrue(filtered_hashes.has_key('/test_path/to/file.js'))
self.assertFalse(filtered_hashes.has_key('/path/path/file.js'))
self.assertFalse(filtered_hashes.has_key('/file.html'))
    def test_get_hashes_json_file_contents(self):
        """Test get_hashes_json_file_contents parses provided hash dict
        correctly to JSON format.
        """
        # Set constant to provide everything to frontend.
        with self.swap(build, 'FILEPATHS_PROVIDED_TO_FRONTEND', ('*',)):
            hashes = {'path/file.js': '123456'}
            self.assertEqual(
                build.get_hashes_json_file_contents(hashes),
                'var hashes = JSON.parse(\'{"/path/file.js": "123456"}\');')
            # NOTE(review): the expected string below assumes a particular
            # key order in the emitted JSON, which depends on dict iteration
            # order -- confirm get_hashes_json_file_contents serializes keys
            # deterministically.
            hashes = {'file.js': '123456', 'file.min.js': '654321'}
            self.assertEqual(
                build.get_hashes_json_file_contents(hashes),
                ('var hashes = JSON.parse(\'{"/file.min.js": "654321", '
                 '"/file.js": "123456"}\');'))
def test_execute_tasks(self):
"""Test _execute_tasks joins all threads after executing all tasks."""
build_tasks = collections.deque()
TASK_COUNT = 2
count = TASK_COUNT
while count:
task = threading.Thread(
target=build._minify,
args=(INVALID_INPUT_FILEPATH, INVALID_OUTPUT_FILEPATH))
build_tasks.append(task)
count -= 1
self.assertEqual(threading.active_count(), 1)
build._execute_tasks(build_tasks)
with self.assertRaisesRegexp(
OSError, 'threads can only be started once'):
build._execute_tasks(build_tasks)
# Assert that all threads are joined.
self.assertEqual(threading.active_count(), 1)
def test_generate_build_tasks_to_build_all_files_in_directory(self):
"""Test generate_build_tasks_to_build_all_files_in_directory queues up
the same number of build tasks as the number of files in the source
directory.
"""
asset_hashes = build.get_file_hashes(MOCK_ASSETS_DEV_DIR)
tasks = collections.deque()
self.assertEqual(len(tasks), 0)
# Build all files.
tasks = build.generate_build_tasks_to_build_all_files_in_directory(
MOCK_ASSETS_DEV_DIR, MOCK_ASSETS_OUT_DIR, asset_hashes)
total_file_count = build.get_file_count(MOCK_ASSETS_DEV_DIR)
self.assertEqual(len(tasks), total_file_count)
    def test_generate_build_tasks_to_build_files_from_filepaths(self):
        """Test generate_build_tasks_to_build_files_from_filepaths queues up a
        corresponding number of build tasks to the number of file changes.
        """
        # One changed file should yield exactly one build task.
        new_filename = 'manifest.json'
        recently_changed_filenames = [
            os.path.join(MOCK_ASSETS_DEV_DIR, new_filename)]
        asset_hashes = build.get_file_hashes(MOCK_ASSETS_DEV_DIR)
        build_tasks = collections.deque()
        self.assertEqual(len(build_tasks), 0)
        build_tasks += build.generate_build_tasks_to_build_files_from_filepaths(
            MOCK_ASSETS_DEV_DIR, MOCK_ASSETS_OUT_DIR,
            recently_changed_filenames, asset_hashes)
        self.assertEqual(len(build_tasks), len(recently_changed_filenames))
        build_tasks.clear()
        # Repeat with a larger set of changed files (all SVGs).
        svg_filepaths = build.get_filepaths_by_extensions(
            MOCK_ASSETS_DEV_DIR, ('.svg',))
        # Make sure there is at least 1 SVG file.
        self.assertGreater(len(svg_filepaths), 0)
        self.assertEqual(len(build_tasks), 0)
        build_tasks += build.generate_build_tasks_to_build_files_from_filepaths(
            MOCK_ASSETS_DEV_DIR, MOCK_ASSETS_OUT_DIR, svg_filepaths,
            asset_hashes)
        self.assertEqual(len(build_tasks), len(svg_filepaths))
    def test_generate_build_tasks_to_build_directory(self):
        """Test generate_build_tasks_to_build_directory queues up a
        corresponding number of build tasks according to the given scenario.
        """
        EXTENSIONS_DIRNAMES_TO_DIRPATHS = {
            'dev_dir': MOCK_EXTENSIONS_DEV_DIR,
            'compiled_js_dir': MOCK_EXTENSIONS_COMPILED_JS_DIR,
            'staging_dir': os.path.join(
                TEST_DIR, 'backend_prod_files', 'extensions', ''),
            'out_dir': os.path.join(TEST_DIR, 'build', 'extensions', '')
        }
        file_hashes = build.get_file_hashes(MOCK_EXTENSIONS_DEV_DIR)
        compiled_js_file_hashes = build.get_file_hashes(
            MOCK_EXTENSIONS_COMPILED_JS_DIR)
        build_dir_tasks = collections.deque()
        # Reference count: tasks for building every file in both source dirs.
        build_all_files_tasks = (
            build.generate_build_tasks_to_build_all_files_in_directory(
                MOCK_EXTENSIONS_DEV_DIR,
                EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'],
                file_hashes))
        build_all_files_tasks += (
            build.generate_build_tasks_to_build_all_files_in_directory(
                MOCK_EXTENSIONS_COMPILED_JS_DIR,
                EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'],
                compiled_js_file_hashes))
        self.assertGreater(len(build_all_files_tasks), 0)
        # Test for building all files when staging dir does not exist.
        self.assertEqual(len(build_dir_tasks), 0)
        build_dir_tasks += build.generate_build_tasks_to_build_directory(
            EXTENSIONS_DIRNAMES_TO_DIRPATHS, file_hashes)
        self.assertEqual(len(build_dir_tasks), len(build_all_files_tasks))
        build.safe_delete_directory_tree(TEST_DIR)
        build_dir_tasks.clear()
        # Test for building only new files when staging dir exists.
        build.ensure_directory_exists(
            EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'])
        self.assertEqual(len(build_dir_tasks), 0)
        # NOTE: update() mutates file_hashes in place; source_hashes is an
        # alias of file_hashes, not a copy.
        source_hashes = file_hashes
        source_hashes.update(compiled_js_file_hashes)
        build_dir_tasks += build.generate_build_tasks_to_build_directory(
            EXTENSIONS_DIRNAMES_TO_DIRPATHS, source_hashes)
        self.assertEqual(len(build_dir_tasks), len(build_all_files_tasks))
        build.safe_delete_directory_tree(TEST_DIR)
        # Build all files and save to final directory.
        build.ensure_directory_exists(
            EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'])
        build._execute_tasks(build_dir_tasks)
        self.assertEqual(threading.active_count(), 1)
        build._execute_tasks(
            build.generate_copy_tasks_to_copy_from_source_to_target(
                EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'],
                EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'], file_hashes))
        build_dir_tasks.clear()
        # Test for only building files that need to be rebuilt.
        self.assertEqual(len(build_dir_tasks), 0)
        # NOTE(review): the second argument here is the (empty) task deque
        # rather than a hash dict as in the two calls above; this looks like
        # it should be `source_hashes` -- confirm intent.
        build_dir_tasks += build.generate_build_tasks_to_build_directory(
            EXTENSIONS_DIRNAMES_TO_DIRPATHS, build_dir_tasks)
        file_extensions_to_always_rebuild = ('.html', '.py',)
        always_rebuilt_filepaths = build.get_filepaths_by_extensions(
            MOCK_EXTENSIONS_DEV_DIR, file_extensions_to_always_rebuild)
        self.assertGreater(len(always_rebuilt_filepaths), 0)
        self.assertEqual(len(build_dir_tasks), len(always_rebuilt_filepaths))
        build.safe_delete_directory_tree(TEST_DIR)
    def test_get_recently_changed_filenames(self):
        """Test get_recently_changed_filenames detects file recently added."""
        # Create an empty folder.
        build.ensure_directory_exists(EMPTY_DIR)
        # Get hashes from ASSETS_DEV_DIR to simulate a folder with built files.
        assets_hashes = build.get_file_hashes(MOCK_ASSETS_DEV_DIR)
        recently_changed_filenames = []
        self.assertEqual(len(recently_changed_filenames), 0)
        # Comparing against an empty dir: every hashed file counts as changed.
        recently_changed_filenames = build.get_recently_changed_filenames(
            assets_hashes, EMPTY_DIR)
        # Since all HTML and Python files are already built, they are ignored.
        # (The swap affects only the get_file_count() reference value below.)
        with self.swap(build, 'FILE_EXTENSIONS_TO_IGNORE', ('.html', '.py',)):
            self.assertEqual(
                len(recently_changed_filenames), build.get_file_count(
                    MOCK_ASSETS_DEV_DIR))
        build.safe_delete_directory_tree(EMPTY_DIR)
def test_generate_delete_tasks_to_remove_deleted_files(self):
"""Test generate_delete_tasks_to_remove_deleted_files queues up the
same number of deletion task as the number of deleted files.
"""
delete_tasks = collections.deque()
# The empty dict means that all files should be removed.
file_hashes = dict()
self.assertEqual(len(delete_tasks), 0)
delete_tasks += build.generate_delete_tasks_to_remove_deleted_files(
file_hashes, MOCK_TEMPLATES_DEV_DIR)
self.assertEqual(
len(delete_tasks), build.get_file_count(MOCK_TEMPLATES_DEV_DIR))
    def test_compiled_js_dir_validation(self):
        """Test that build.COMPILED_JS_DIR is validated correctly with
        outDir in build.TSCONFIG_FILEPATH.
        """
        # The real constant must pass validation against the tsconfig outDir.
        build.require_compiled_js_dir_to_be_valid()
        out_dir = ''
        with open(build.TSCONFIG_FILEPATH) as f:
            config_data = json.load(f)
            out_dir = os.path.join(config_data['compilerOptions']['outDir'], '')
        # A mismatching mock dir must be rejected with a descriptive message.
        with self.assertRaisesRegexp(
            Exception,
            'COMPILED_JS_DIR: %s does not match the output directory '
            'in %s: %s' % (
                MOCK_COMPILED_JS_DIR, build.TSCONFIG_FILEPATH,
                out_dir)), self.swap(
                    build, 'COMPILED_JS_DIR', MOCK_COMPILED_JS_DIR):
            build.require_compiled_js_dir_to_be_valid()
    def test_compiled_js_dir_is_deleted_before_compilation(self):
        """Test that compiled_js_dir is deleted before a fresh compilation."""
        def mock_check_call(unused_cmd):
            """Skip running the real compiler command."""
            pass
        def mock_require_compiled_js_dir_to_be_valid():
            """Skip the COMPILED_JS_DIR/tsconfig consistency check."""
            pass
        with self.swap(
            build, 'COMPILED_JS_DIR', MOCK_COMPILED_JS_DIR), self.swap(
                build, 'require_compiled_js_dir_to_be_valid',
                mock_require_compiled_js_dir_to_be_valid):
            # Pre-create the parent dir so there is something to delete.
            if not os.path.exists(os.path.dirname(MOCK_COMPILED_JS_DIR)):
                os.mkdir(os.path.dirname(MOCK_COMPILED_JS_DIR))
            with self.swap(subprocess, 'check_call', mock_check_call):
                build.compile_typescript_files('.')
                # The stale compiled-JS directory must be gone afterwards.
                self.assertFalse(
                    os.path.exists(os.path.dirname(MOCK_COMPILED_JS_DIR)))
    def test_compiled_js_dir_is_deleted_before_watch_mode_compilation(self):
        """Test that compiled_js_dir is deleted before a fresh watch mode
        compilation.
        """
        # pylint: disable=unused-argument
        def mock_call(unused_cmd, shell, stdout):
            """Skip running the real shell command."""
            pass
        def mock_popen(unused_cmd, stdout):
            """Skip spawning the real watch-mode compiler process."""
            pass
        # pylint: enable=unused-argument
        def mock_require_compiled_js_dir_to_be_valid():
            """Skip the COMPILED_JS_DIR/tsconfig consistency check."""
            pass
        with self.swap(
            build, 'COMPILED_JS_DIR', MOCK_COMPILED_JS_DIR), self.swap(
                build, 'require_compiled_js_dir_to_be_valid',
                mock_require_compiled_js_dir_to_be_valid):
            # Pre-create the parent dir so there is something to delete.
            if not os.path.exists(os.path.dirname(MOCK_COMPILED_JS_DIR)):
                os.mkdir(os.path.dirname(MOCK_COMPILED_JS_DIR))
            with self.swap(subprocess, 'Popen', mock_popen), self.swap(
                subprocess, 'call', mock_call), self.swap(
                    build, 'TSC_OUTPUT_LOG_FILEPATH',
                    MOCK_TSC_OUTPUT_LOG_FILEPATH):
                build.compile_typescript_files_continuously('.')
                # The stale compiled-JS directory must be gone afterwards.
                self.assertFalse(
                    os.path.exists(os.path.dirname(MOCK_COMPILED_JS_DIR)))
    def test_generate_app_yaml(self):
        """Test generate_app_yaml regenerates app.yaml from app_dev.yaml
        with an autogeneration warning prepended.
        """
        mock_dev_yaml_filepath = 'mock_app_dev.yaml'
        mock_yaml_filepath = 'mock_app.yaml'
        app_dev_yaml_filepath_swap = self.swap(
            build, 'APP_DEV_YAML_FILEPATH', mock_dev_yaml_filepath)
        app_yaml_filepath_swap = self.swap(
            build, 'APP_YAML_FILEPATH', mock_yaml_filepath)
        # NOTE(review): reassigning `.name` does not rename the temp file on
        # disk; the mock files are actually created by the open() calls
        # below. The reassignment appears to rely on close() unlinking the
        # path stored in `.name` for cleanup -- confirm on this Python
        # version.
        app_dev_yaml_temp_file = tempfile.NamedTemporaryFile()
        app_dev_yaml_temp_file.name = mock_dev_yaml_filepath
        with open(mock_dev_yaml_filepath, 'w') as tmp:
            tmp.write('Some content in mock_app_dev.yaml')
        app_yaml_temp_file = tempfile.NamedTemporaryFile()
        app_yaml_temp_file.name = mock_yaml_filepath
        with open(mock_yaml_filepath, 'w') as tmp:
            tmp.write('Initial content in mock_app.yaml')
        with app_dev_yaml_filepath_swap, app_yaml_filepath_swap:
            build.generate_app_yaml()
        # app.yaml content must be replaced by the dev content plus header.
        with open(mock_yaml_filepath, 'r') as yaml_file:
            content = yaml_file.read()
        self.assertEqual(
            content,
            '# THIS FILE IS AUTOGENERATED, DO NOT MODIFY\n'
            'Some content in mock_app_dev.yaml')
        app_yaml_temp_file.close()
        app_dev_yaml_temp_file.close()
def test_safe_delete_file(self):
temp_file = tempfile.NamedTemporaryFile()
temp_file.name = 'some_file.txt'
with open('some_file.txt', 'w') as tmp:
tmp.write('Some content.')
self.assertTrue(os.path.isfile('some_file.txt'))
build.safe_delete_file('some_file.txt')
self.assertFalse(os.path.isfile('some_file.txt'))
    def test_minify_third_party_libs(self):
        """Test minify_third_party_libs produces minified CSS/JS bundles
        plus a JS sourcemap, with the minified bundles smaller than the
        unminified sources.
        """
        def _mock_safe_delete_file(unused_filepath):
            """Mocks build.safe_delete_file()."""
            pass
        # The minified artifacts must not exist before the run.
        self.assertFalse(os.path.isfile(
            'core/tests/data/third_party/css/third_party.min.css'))
        self.assertFalse(os.path.isfile(
            'core/tests/data/third_party/js/third_party.min.js'))
        self.assertFalse(os.path.isfile(
            'core/tests/data/third_party/js/third_party.min.js.map'))
        # Keep the unminified sources around for the size comparison below.
        with self.swap(build, 'safe_delete_file', _mock_safe_delete_file):
            build.minify_third_party_libs('core/tests/data/third_party')
        self.assertTrue(os.path.isfile(
            'core/tests/data/third_party/css/third_party.min.css'))
        self.assertTrue(os.path.isfile(
            'core/tests/data/third_party/js/third_party.min.js'))
        self.assertTrue(os.path.isfile(
            'core/tests/data/third_party/js/third_party.min.js.map'))
        # Minification must strictly shrink the CSS and JS bundles.
        self.assertLess(
            os.path.getsize(
                'core/tests/data/third_party/css/third_party.min.css'),
            os.path.getsize('core/tests/data/third_party/css/third_party.css'))
        self.assertLess(
            os.path.getsize(
                'core/tests/data/third_party/js/third_party.min.js'),
            os.path.getsize('core/tests/data/third_party/js/third_party.js'))
        # Clean up the generated artifacts.
        build.safe_delete_file(
            'core/tests/data/third_party/css/third_party.min.css')
        build.safe_delete_file(
            'core/tests/data/third_party/js/third_party.min.js')
        build.safe_delete_file(
            'core/tests/data/third_party/js/third_party.min.js.map')
    def test_build_with_prod_env(self):
        """Test build() with --prod_env compiles TS, checks required files
        exist, and invokes the webpack build.
        """
        # Flags flipped by the mocks below; compared against expectations
        # at the end.
        check_function_calls = {
            'build_using_webpack_gets_called': False,
            'ensure_files_exist_gets_called': False,
            'compile_typescript_files_gets_called': False
        }
        expected_check_function_calls = {
            'build_using_webpack_gets_called': True,
            'ensure_files_exist_gets_called': True,
            'compile_typescript_files_gets_called': True
        }
        def mock_build_using_webpack():
            check_function_calls['build_using_webpack_gets_called'] = True
        def mock_ensure_files_exist(unused_filepaths):
            check_function_calls['ensure_files_exist_gets_called'] = True
        def mock_compile_typescript_files(unused_project_dir):
            check_function_calls['compile_typescript_files_gets_called'] = True
        ensure_files_exist_swap = self.swap(
            build, '_ensure_files_exist', mock_ensure_files_exist)
        build_using_webpack_swap = self.swap(
            build, 'build_using_webpack', mock_build_using_webpack)
        compile_typescript_files_swap = self.swap(
            build, 'compile_typescript_files', mock_compile_typescript_files)
        args_swap = self.swap(sys, 'argv', ['build.py', '--prod_env'])
        with ensure_files_exist_swap, build_using_webpack_swap, (
            compile_typescript_files_swap), args_swap:
            build.build()
        self.assertEqual(check_function_calls, expected_check_function_calls)
    def test_build_with_watcher(self):
        """Test build() with --enable_watcher checks required files exist
        and starts continuous TS compilation.
        """
        # Flags flipped by the mocks below; compared against expectations
        # at the end.
        check_function_calls = {
            'ensure_files_exist_gets_called': False,
            'compile_typescript_files_continuously_gets_called': False
        }
        expected_check_function_calls = {
            'ensure_files_exist_gets_called': True,
            'compile_typescript_files_continuously_gets_called': True
        }
        def mock_ensure_files_exist(unused_filepaths):
            check_function_calls['ensure_files_exist_gets_called'] = True
        def mock_compile_typescript_files_continuously(unused_project_dir):
            check_function_calls[
                'compile_typescript_files_continuously_gets_called'] = True
        ensure_files_exist_swap = self.swap(
            build, '_ensure_files_exist', mock_ensure_files_exist)
        compile_typescript_files_continuously_swap = self.swap(
            build, 'compile_typescript_files_continuously',
            mock_compile_typescript_files_continuously)
        args_swap = self.swap(sys, 'argv', ['build.py', '--enable_watcher'])
        with ensure_files_exist_swap, (
            compile_typescript_files_continuously_swap), args_swap:
            build.build()
        self.assertEqual(check_function_calls, expected_check_function_calls)
    def test_cannot_minify_third_party_libs_in_dev_mode(self):
        """Test build() raises when --minify_third_party_libs_only is passed
        without --prod_env.
        """
        # Flags flipped by the mocks below; both steps still run before the
        # flag combination is rejected.
        check_function_calls = {
            'ensure_files_exist_gets_called': False,
            'compile_typescript_files_gets_called': False
        }
        expected_check_function_calls = {
            'ensure_files_exist_gets_called': True,
            'compile_typescript_files_gets_called': True
        }
        def mock_ensure_files_exist(unused_filepaths):
            check_function_calls['ensure_files_exist_gets_called'] = True
        def mock_compile_typescript_files(unused_project_dir):
            check_function_calls['compile_typescript_files_gets_called'] = True
        ensure_files_exist_swap = self.swap(
            build, '_ensure_files_exist', mock_ensure_files_exist)
        compile_typescript_files_swap = self.swap(
            build, 'compile_typescript_files', mock_compile_typescript_files)
        args_swap = self.swap(
            sys, 'argv', ['build.py', '--minify_third_party_libs_only'])
        assert_raises_regexp_context_manager = self.assertRaisesRegexp(
            Exception,
            'minify_third_party_libs_only should not be set in non-prod mode.')
        with ensure_files_exist_swap, compile_typescript_files_swap, (
            assert_raises_regexp_context_manager), args_swap:
            build.build()
        self.assertEqual(check_function_calls, expected_check_function_calls)
    def test_build_using_webpack_command(self):
        """Test build_using_webpack invokes webpack with the prod config."""
        def mock_check_call(cmd, **unused_kwargs):
            # The exact command string must reference the prod config file.
            self.assertEqual(
                cmd,
                '%s --config %s'
                % (build.WEBPACK_FILE, build.WEBPACK_PROD_CONFIG))
        with self.swap(subprocess, 'check_call', mock_check_call):
            build.build_using_webpack()
# pylint: enable=protected-access
| scripts/build_test.py | 41,015 | Test the build methods.
Mocks build.safe_delete_file().
Test _compare_file_count raises exception when there is a
mismatched file count between 2 dirs list.
Test that compiled_js_dir is deleted before a fresh compilation.
Test that compiled_js_dir is deleted before a fresh watch mode
compilation.
Test that build.COMPILED_JS_DIR is validated correctly with
outDir in build.TSCONFIG_FILEPATH.
Test _ensure_files_exist raises exception with a non-existent
filepath.
Test _execute_tasks joins all threads after executing all tasks.
Test filter_hashes filters the provided hash correctly.
Test generate_build_tasks_to_build_all_files_in_directory queues up
the same number of build tasks as the number of files in the source
directory.
Test generate_build_tasks_to_build_directory queues up a
corresponding number of build tasks according to the given scenario.
Test generate_build_tasks_to_build_files_from_filepaths queues up a
corresponding number of build tasks to the number of file changes.
Test _generate_copy_tasks_for_fonts ensures that the number of copy
tasks matches the number of font files.
Test generate_copy_tasks_to_copy_from_source_to_target queues up
the same number of copy tasks as the number of files in the directory.
Test generate_delete_tasks_to_remove_deleted_files queues up the
same number of deletion task as the number of deleted files.
Test get_file_count returns the correct number of files, excluding
file with extensions in FILE_EXTENSIONS_TO_IGNORE and files that should
not be built.
Test get_file_hashes gets hashes of all files in directory,
excluding file with extensions in FILE_EXTENSIONS_TO_IGNORE.
Test get_filepaths_by_extensions only returns filepaths in
directory with given extensions.
Test get_hashes_json_file_contents parses provided hash dict
correctly to JSON format.
Test get_recently_changed_filenames detects file recently added.
Test hash_should_be_inserted returns the correct boolean value
for filepath that should be hashed.
Test _insert_hash returns correct filenames with provided hashes.
Test is_file_hash_provided_to_frontend returns the correct boolean
value for filepath that should be provided to frontend.
Determine third_party.js contains the content of the first 10 JS
files in /third_party/static.
Tests _minify with an invalid filepath.
Tests _minify_and_create_sourcemap with an invalid filepath.
Test process_html removes whitespaces and adds hash to filepaths.
Test should_file_be_built returns the correct boolean value for
filepath that should be built.
Test _verify_filepath_hash raises exception:
1) When there is an empty hash dict.
2) When a filename is expected to contain hash but does not.
3) When there is a hash in filename that cannot be found in
hash dict.
Unit tests for scripts/build.py.
coding: utf-8 Copyright 2014 The Oppia Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS-IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pylint: disable=invalid-name pylint: disable=relative-import pylint: enable=relative-import Override Pylint's protected access rule due to multiple private functions in the file. pylint: disable=protected-access `returncode` is the exit status of the child process. `returncode` is the exit status of the child process. Exception will be raised at first file determined to be non-existent. Prepare a file_stream object from StringIO. Get all filepaths from manifest.json. Join and write all JS files in /third_party/static to file_stream. Only checking first 10 files. Assert that each line is copied over to file_stream object. Get all filepaths from manifest.json. Setup a sandbox folder for copying fonts. Asserting the same number of copy tasks and number of font files. Test when both lists contain single directory. Ensure that ASSETS_DEV_DIR has at least 1 file. Test when one of the lists contain multiple directories. Ensure that MOCK_EXTENSIONS_DIR has at least 1 file. Reset EMPTY_DIRECTORY to clean state. Final filepath example: base.240933e7564bd72a4dde42ee23260c5f.html. Generate a random hash dict for base.html. Prepare a file_stream object from StringIO. Obtain actual file hashes of /templates to add hash to all filepaths within the HTML file. The end result will look like: E.g <script ... App.js></script> --> <script ... App.[hash].js></script>. Only need to hash Base.js. 
Assert that base.html has white spaces and has original filepaths. Look for templates/pages/Base.js in source_base_file_content. Build base.html file. Assert that hashes are inserted into filenames in base.html. Final filepath in base.html example: /build/templates/head/pages/Base.081ce90f17ecdf07701d83cb860985c2.js. Look for templates/pages/Base.081ce90f17ecdf07701d83cb860985c2.js in minified_html_file_content. Swapping out constants to check if the reverse is true. ALL JS files that ends with ...Service.js should not be built. Prevent getting hashes of HTML files. Assert that each hash's filepath exists and does not include files with extensions in FILE_EXTENSIONS_TO_IGNORE. Set constant to provide everything to frontend. Set constant to provide everything to frontend. Assert that all threads are joined. Build all files. Make sure there is at least 1 SVG file. Test for building all files when staging dir does not exist. Test for building only new files when staging dir exists. Build all files and save to final directory. Test for only building files that need to be rebuilt. Create an empty folder. Get hashes from ASSETS_DEV_DIR to simulate a folder with built files. Since all HTML and Python files are already built, they are ignored. The empty dict means that all files should be removed. pylint: disable=unused-argument pylint: enable=unused-argument pylint: enable=protected-access | 6,173 | en | 0.814023 |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
import pytest
from assertpy import assert_that
from cfn_stacks_factory import CfnStack
from remote_command_executor import RemoteCommandExecutor
from troposphere import Template
from troposphere.route53 import HostedZone, HostedZoneVPCs
from utils import generate_stack_name
from tests.common.mpi_common import _test_mpi
from tests.common.schedulers_common import get_scheduler_commands
from tests.common.utils import fetch_instance_slots
@pytest.mark.usefixtures("os")
def test_hit_no_cluster_dns_mpi(scheduler, region, instance, pcluster_config_reader, clusters_factory, test_datadir):
logging.info("Testing HIT cluster with cluster DNS disabled.")
scaledown_idletime = 3
max_queue_size = 3
min_queue_size = 1
slots_per_instance = fetch_instance_slots(region, instance)
cluster_config = pcluster_config_reader(
scaledown_idletime=scaledown_idletime, max_queue_size=max_queue_size, min_queue_size=min_queue_size
)
cluster = clusters_factory(cluster_config)
remote_command_executor = RemoteCommandExecutor(cluster)
scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
# Assert that compute hostname cannot be pinged directly
compute_nodes = scheduler_commands.get_compute_nodes()
result = remote_command_executor.run_remote_command("ping -c 3 {}".format(compute_nodes[0]), raise_on_error=False)
assert_that(result.failed).is_true()
# Assert compute hostname is the same as nodename
_test_hostname_same_as_nodename(scheduler_commands, remote_command_executor, compute_nodes)
# This verifies that the job completes correctly
_test_mpi(
remote_command_executor,
slots_per_instance,
scheduler,
region,
cluster.cfn_name,
scaledown_idletime,
verify_scaling=False,
)
@pytest.mark.usefixtures("os", "instance")
def test_existing_hosted_zone(
hosted_zone_factory,
pcluster_config_reader,
clusters_factory,
vpc_stack,
cfn_stacks_factory,
key_name,
scheduler,
region,
instance,
):
"""Test hosted_zone_id is provided in the config file."""
num_computes = 2
hosted_zone_id, domain_name = hosted_zone_factory()
cluster_config = pcluster_config_reader(existing_hosted_zone=hosted_zone_id, queue_size=num_computes)
cluster = clusters_factory(cluster_config, upper_case_cluster_name=True)
remote_command_executor = RemoteCommandExecutor(cluster)
scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
# Test run mpi job
_test_mpi(
remote_command_executor,
slots_per_instance=fetch_instance_slots(region, instance),
scheduler=scheduler,
region=region,
stack_name=cluster.cfn_name,
scaledown_idletime=3,
verify_scaling=False,
)
# Assert compute hostname is the same as nodename
compute_nodes = scheduler_commands.get_compute_nodes()
_test_hostname_same_as_nodename(scheduler_commands, remote_command_executor, compute_nodes)
# Test domain name matches expected domain name
resolv_conf = remote_command_executor.run_remote_command("cat /etc/resolv.conf").stdout
assert_that(resolv_conf).contains(cluster.cfn_name.lower() + "." + domain_name)
@pytest.fixture(scope="class")
def hosted_zone_factory(vpc_stack, cfn_stacks_factory, request, region):
"""Create a hosted zone stack."""
hosted_zone_stack_name = generate_stack_name(
"integ-tests-hosted-zone", request.config.getoption("stackname_suffix")
)
domain_name = hosted_zone_stack_name + ".com"
def create_hosted_zone():
hosted_zone_template = Template()
hosted_zone_template.set_version("2010-09-09")
hosted_zone_template.set_description("Hosted zone stack created for testing existing DNS")
hosted_zone_template.add_resource(
HostedZone(
"HostedZoneResource",
Name=domain_name,
VPCs=[HostedZoneVPCs(VPCId=vpc_stack.cfn_outputs["VpcId"], VPCRegion=region)],
)
)
hosted_zone_stack = CfnStack(
name=hosted_zone_stack_name,
region=region,
template=hosted_zone_template.to_json(),
)
cfn_stacks_factory.create_stack(hosted_zone_stack)
return hosted_zone_stack.cfn_resources["HostedZoneResource"], domain_name
yield create_hosted_zone
if not request.config.getoption("no_delete"):
cfn_stacks_factory.delete_stack(hosted_zone_stack_name, region)
def _test_hostname_same_as_nodename(scheduler_commands, remote_command_executor, compute_nodes):
    """Run `hostname` via a scheduled job and assert it matches a known nodename."""
    submission = scheduler_commands.submit_command("hostname > /shared/compute_hostname")
    job_id = scheduler_commands.assert_job_submitted(submission.stdout)
    scheduler_commands.wait_job_completed(job_id)
    reported_hostname = remote_command_executor.run_remote_command("cat /shared/compute_hostname").stdout
    assert_that(compute_nodes).contains(reported_hostname)
| tests/integration-tests/tests/dns/test_dns.py | 5,580 | Create a hosted zone stack.
Test hosted_zone_id is provided in the config file.
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and limitations under the License. Assert that compute hostname cannot be pinged directly Assert compute hostname is the same as nodename This verifies that the job completes correctly Test run mpi job Assert compute hostname is the same as nodename Test domain name matches expected domain name | 876 | en | 0.880868 |
"""Parse a sandwich-survey CSV export and write one taste-profile CSV per respondent."""
import csv
import os
import numpy as np

dir_path = os.path.dirname(os.path.realpath(__file__))
survey_path = os.path.join(dir_path, '../data/test_two_entries.csv')

# Number of question groups per sandwich type in the survey export.
NUM_QUESTIONS = 8
# Respondents, in the order their response rows appear in the export.
RESPONSE_PERSON = ['pat', 'jeremy', 'zach']
TASTE_PROFILE_TYPES = ['deliciousness', 'heaviness', 'reliability', 'frequency', 'between']

# Read the survey: row index 1 holds the sandwich-type header, rows 2+ hold
# one response per person.
sando_type_row = None
person_responses = []
with open(survey_path) as f:
    data = csv.reader(f, delimiter=',', quotechar='|')
    for i, row in enumerate(data):
        if i == 1:
            sando_type_row = row
        elif i > 1:
            person_responses.append(row)

# The header row has extra metadata columns; the remainder is NUM_QUESTIONS
# groups of one column per sandwich type.
num_sando_types = int(
    (len(sando_type_row) - 3)
    / NUM_QUESTIONS
)
end_index = 2 + num_sando_types
sando_types = sando_type_row[2:end_index]
global_taste_profile = {}
# BUG FIX: the respondent index was never advanced in the original loop, so
# every response row was written to the first person's file. enumerate() pairs
# each response with the correct name.
for j, response in enumerate(person_responses):
    taste_profile = {}
    name = RESPONSE_PERSON[j]
    ## Loop through deliciousness, heaviness, etc.
    ## Pull out deliciousness, etc. scores and store in taste_profile[type]
    for data_type in TASTE_PROFILE_TYPES:
        start_index = 2 + (1 + TASTE_PROFILE_TYPES.index(data_type)) * num_sando_types
        end_index = start_index + num_sando_types
        raw_profile = np.array(response[start_index:end_index])
        if data_type in ['deliciousness', 'heaviness', 'reliability']:
            # np.float / np.int were removed in NumPy 1.24; builtins are the
            # documented replacements and behave identically here.
            float_profile = raw_profile.astype(float) * 0.01
            taste_profile[data_type] = float_profile
        else:
            int_profile = raw_profile.astype(int)
            taste_profile[data_type] = int_profile
    profile_csv_path = os.path.join(dir_path, '../data/users/profiles', (name + '.csv'))
    # newline='' is required by the csv module so no blank rows appear on Windows.
    with open(profile_csv_path, 'w', newline='') as f:
        profile_writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        profile_writer.writerow(['sando_type'] + TASTE_PROFILE_TYPES)
        ## Loop through sando types and dump to CSV
        for sando_index, sando in enumerate(sando_types):
            sando_row = [sando]
            for data_type in TASTE_PROFILE_TYPES:
                sando_row.append(taste_profile[data_type][sando_index])
            profile_writer.writerow(sando_row)
| sandwichbot/survey_processor.py | 2,254 | Loop through deliciousness, heaviness, etc. Pull out deliciousness, etc. scores and store in taste_profile[type] Loop through sando types and dump to CSV | 153 | en | 0.842603 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
r"""TensorFlow AmoebaNet Example.
GCP Run Example
python amoeba_net.py --data_dir=gs://cloud-tpu-datasets/imagenet-data --model_dir=gs://cloud-tpu-ckpts/models/ameoba_net_x/ \
--drop_connect_keep_prob=1.0 --cell_name=evol_net_x --num_cells=12 --reduction_size=256 --image_size=299 --num_epochs=48 \
--train_batch_size=256 --num_epochs_per_eval=4.0 --lr_decay_value=0.89 --lr_num_epochs_per_decay=1 --alsologtostderr \
--tpu=huangyp-tpu-0
"""
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import itertools
import math
import os
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import numpy as np
from PIL import Image
import tensorflow as tf
import amoeba_net_model as model_lib
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_log_pb2
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
    'tpu', default=None,
    help='The Cloud TPU to use for training. This should be either the name '
    'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
    'gcp_project', default=None,
    help='Project name for the Cloud TPU-enabled project. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
    'tpu_zone', default=None,
    help='GCE zone where the Cloud TPU is located in. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
# General Parameters
flags.DEFINE_integer(
    'num_shards', 8,
    'Number of shards (TPU cores).')
flags.DEFINE_integer(
    'distributed_group_size', 1,
    help='Size of the distributed batch norm. group.'
    'Default is normalization over local examples only.'
    'When set to a value greater than 1, it will enable'
    'a distribtued batch norm. To enable a global batch norm.'
    'set distributed_group_size to FLAGS.num_shards')
flags.DEFINE_bool(
    'use_tpu', True,
    'Use TPUs rather than CPU or GPU.')
flags.DEFINE_string(
    'data_dir', '',
    'Directory where input data is stored')
flags.DEFINE_string(
    'model_dir', None,
    'Directory where model output is stored')
flags.DEFINE_string(
    'export_dir', None,
    'The directory where the exported SavedModel will be stored.')
flags.DEFINE_bool(
    'export_to_tpu', False,
    help='Whether to export additional metagraph with "serve, tpu" tags'
    ' in addition to "serve" only metagraph.')
flags.DEFINE_integer(
    'iterations_per_loop', 500,
    'Number of iterations per TPU training loop.')
# NOTE: batch sizes are global across all shards, not per-core.
flags.DEFINE_integer(
    'train_batch_size', 256,
    'Global (not per-shard) batch size for training')
flags.DEFINE_integer(
    'eval_batch_size', 256,
    'Global (not per-shard) batch size for evaluation')
flags.DEFINE_float(
    'num_epochs', 48.,
    'Number of steps use for training.')
flags.DEFINE_float(
    'num_epochs_per_eval', 1.,
    'Number of training epochs to run between evaluations.')
flags.DEFINE_string(
    'mode', 'train_and_eval',
    'Mode to run: train, eval, train_and_eval, or predict')
flags.DEFINE_integer(
    'save_checkpoints_steps', None,
    'Interval (in steps) at which the model data '
    'should be checkpointed. Set to 0 to disable.')
flags.DEFINE_bool(
    'enable_hostcall', True,
    'Skip the host_call which is executed every training step. This is'
    ' generally used for generating training summaries (train loss,'
    ' learning rate, etc...). When --enable_hostcall=True, there could'
    ' be a performance drop if host_call function is slow and cannot'
    ' keep up with the TPU-side computation.')
# Model specific parameters
flags.DEFINE_bool('use_aux_head', True, 'Include aux head or not.')
flags.DEFINE_float(
    'aux_scaling', 0.4, 'Scaling factor of aux_head')
flags.DEFINE_float(
    'batch_norm_decay', 0.9, 'Batch norm decay.')
flags.DEFINE_float(
    'batch_norm_epsilon', 1e-5, 'Batch norm epsilon.')
flags.DEFINE_float(
    'dense_dropout_keep_prob', None, 'Dense dropout keep probability.')
flags.DEFINE_float(
    'drop_connect_keep_prob', 1.0, 'Drop connect keep probability.')
flags.DEFINE_string(
    'drop_connect_version', None, 'Drop connect version.')
flags.DEFINE_string(
    'cell_name', 'amoeba_net_d', 'Which network to run.')
flags.DEFINE_integer(
    'num_cells', 12, 'Total number of cells.')
flags.DEFINE_integer(
    'reduction_size', 256, 'Default cell reduction size.')
flags.DEFINE_integer(
    'stem_reduction_size', 32, 'Stem filter size.')
flags.DEFINE_float(
    'weight_decay', 4e-05, 'Weight decay for slim model.')
flags.DEFINE_integer(
    'num_label_classes', 1001, 'The number of classes that images fit into.')
# Training hyper-parameters
flags.DEFINE_float(
    'lr', 0.64, 'Learning rate.')
flags.DEFINE_string(
    'optimizer', 'rmsprop',
    'Optimizer (one of sgd, rmsprop, momentum)')
flags.DEFINE_float(
    'moving_average_decay', 0.9999,
    'moving average decay rate')
flags.DEFINE_float(
    'lr_decay_value', 0.9,
    'Exponential decay rate used in learning rate adjustment')
flags.DEFINE_integer(
    'lr_num_epochs_per_decay', 1,
    'Exponential decay epochs used in learning rate adjustment')
flags.DEFINE_string(
    'lr_decay_method', 'exponential',
    'Method of decay: exponential, cosine, constant, stepwise')
flags.DEFINE_float(
    'lr_warmup_epochs', 3.0,
    'Learning rate increased from zero linearly to lr for the first '
    'lr_warmup_epochs.')
flags.DEFINE_float('gradient_clipping_by_global_norm', 0,
                   'gradient_clipping_by_global_norm')
flags.DEFINE_integer(
    'image_size', 299, 'Size of image, assuming image height and width.')
flags.DEFINE_integer(
    'num_train_images', 1281167, 'The number of images in the training set.')
flags.DEFINE_integer(
    'num_eval_images', 50000, 'The number of images in the evaluation set.')
flags.DEFINE_bool(
    'use_bp16', True, 'If True, use bfloat16 for activations')
flags.DEFINE_integer(
    'eval_timeout', 60*60*24,
    'Maximum seconds between checkpoints before evaluation terminates.')
# Inference configuration.
flags.DEFINE_bool(
    'inference_with_all_cores', True, 'Whether to round-robin'
    'among all cores visible to the host for TPU inference.')
flags.DEFINE_bool(
    'add_warmup_requests', True,
    'Whether to add warmup requests into the export saved model dir,'
    'especially for TPU inference.')
flags.DEFINE_string('model_name', 'amoeba_net',
                    'Serving model name used for the model server.')
flags.DEFINE_multi_integer(
    'inference_batch_sizes', [8],
    'Known inference batch sizes used to warm up for each core.')
# Parsed flag values; populated by absl when app.run() is invoked.
FLAGS = flags.FLAGS
def build_run_config():
  """Return RunConfig for TPU estimator."""
  resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
      FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
  # In eval mode a loop is exactly one full pass over the eval set.
  if FLAGS.mode == 'eval':
    iterations_per_loop = FLAGS.num_eval_images // FLAGS.eval_batch_size
  else:
    iterations_per_loop = FLAGS.iterations_per_loop
  return tf.contrib.tpu.RunConfig(
      cluster=resolver,
      model_dir=FLAGS.model_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps or iterations_per_loop,
      keep_checkpoint_max=None,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=iterations_per_loop,
          num_shards=FLAGS.num_shards,
          per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
      ))
def build_image_serving_input_receiver_fn(shape,
                                          dtype=tf.float32):
  """Returns a input_receiver_fn for raw images during serving."""

  def _decode_one(encoded_image):
    """Decode a single encoded image string into a fixed-shape tensor."""
    decoded = tf.image.decode_image(encoded_image, channels=shape[-1])
    decoded.set_shape(shape)
    return tf.cast(decoded, dtype)

  def serving_input_receiver_fn():
    # Receiver accepts a batch of encoded image strings of unknown length.
    image_bytes_list = tf.placeholder(shape=[None], dtype=tf.string)
    images = tf.map_fn(
        _decode_one, image_bytes_list, back_prop=False, dtype=dtype)
    return tf.estimator.export.TensorServingInputReceiver(
        features=images, receiver_tensors=image_bytes_list)

  return serving_input_receiver_fn
def _encode_image(image_array, fmt='PNG'):
  """encodes an (numpy) image array to string.

  Args:
    image_array: (numpy) image array
    fmt: image format to use

  Returns:
    encoded image string
  """
  buffer = io.BytesIO()
  Image.fromarray(image_array).save(buffer, format=fmt)
  return buffer.getvalue()
def write_warmup_requests(savedmodel_dir,
                          model_name,
                          image_size,
                          batch_sizes=None,
                          num_requests=8):
  """Writes warmup requests for inference into a tfrecord file.

  Args:
    savedmodel_dir: string, the file to the exported model folder.
    model_name: string, a model name used inside the model server.
    image_size: int, size of image, assuming image height and width.
    batch_sizes: list, a list of batch sizes to create different input requests.
    num_requests: int, number of requests per batch size.

  Raises:
    ValueError: if batch_sizes is not a valid integer list.
  """
  if not isinstance(batch_sizes, list) or not batch_sizes:
    raise ValueError('batch sizes should be a valid non-empty list.')
  extra_assets_dir = os.path.join(savedmodel_dir, 'assets.extra')
  tf.gfile.MkDir(extra_assets_dir)
  record_path = os.path.join(extra_assets_dir, 'tf_serving_warmup_requests')
  with tf.python_io.TFRecordWriter(record_path) as writer:
    for batch_size in batch_sizes:
      for _ in range(num_requests):
        # A random noise image is enough to exercise the serving graph.
        image = np.uint8(np.random.rand(image_size, image_size, 3) * 255)
        request = predict_pb2.PredictRequest()
        request.inputs['input'].CopyFrom(
            tf.make_tensor_proto(
                [_encode_image(image)] * batch_size, shape=[batch_size]))
        request.model_spec.name = model_name
        request.model_spec.signature_name = 'serving_default'
        log = prediction_log_pb2.PredictionLog(
            predict_log=prediction_log_pb2.PredictLog(request=request))
        writer.write(log.SerializeToString())
# TODO(ereal): simplify this.
def override_with_flags(hparams):
  """Overrides parameters with flag values.

  Copies every known flag value onto `hparams`; flags whose value is None are
  skipped so that cell-specific hparams defaults are preserved.

  Args:
    hparams: HParams object to update in place.
  """
  override_flag_names = [
      'aux_scaling',
      'train_batch_size',
      'batch_norm_decay',
      'batch_norm_epsilon',
      'dense_dropout_keep_prob',
      'drop_connect_keep_prob',
      'drop_connect_version',
      'eval_batch_size',
      'gradient_clipping_by_global_norm',
      'lr',
      'lr_decay_method',
      'lr_decay_value',
      'lr_num_epochs_per_decay',
      'moving_average_decay',
      'image_size',
      'num_cells',
      'reduction_size',
      'stem_reduction_size',
      'num_epochs',
      'num_epochs_per_eval',
      'optimizer',
      'enable_hostcall',
      'use_aux_head',
      'use_bp16',
      'use_tpu',
      'lr_warmup_epochs',
      'weight_decay',
      'num_shards',
      'distributed_group_size',
      'num_train_images',
      'num_eval_images',
      'num_label_classes',
  ]
  # A unique sentinel object: unlike the previous magic string 'INVALID', no
  # legitimate flag value can collide with it.
  missing = object()
  for flag_name in override_flag_names:
    flag_value = getattr(FLAGS, flag_name, missing)
    if flag_value is missing:
      # Programming error: a name in the list above has no matching flag.
      # `elif` below also prevents the sentinel from being written into
      # hparams if logging does not abort the process.
      tf.logging.fatal('Unknown flag %s.' % str(flag_name))
    elif flag_value is not None:
      _set_or_add_hparam(hparams, flag_name, flag_value)
def build_hparams():
  """Build tf.Hparams for training Amoeba Net."""
  # Start from the cell's defaults, then let command-line flags win.
  base_hparams = model_lib.build_hparams(FLAGS.cell_name)
  override_with_flags(base_hparams)
  return base_hparams
def _terminate_eval():
  """Log the checkpoint-wait timeout and signal the iterator to stop."""
  tf.logging.info(
      'Timeout passed with no new checkpoints ... terminating eval')
  return True
def _get_next_checkpoint():
  """Yield successive checkpoint paths, giving up after FLAGS.eval_timeout."""
  return tf.contrib.training.checkpoints_iterator(
      FLAGS.model_dir, timeout=FLAGS.eval_timeout, timeout_fn=_terminate_eval)
def _set_or_add_hparam(hparams, name, value):
if getattr(hparams, name, None) is None:
hparams.add_hparam(name, value)
else:
hparams.set_hparam(name, value)
def _load_global_step_from_checkpoint_dir(checkpoint_dir):
  """Return the global step saved in the latest checkpoint, or 0 if none.

  Args:
    checkpoint_dir: directory that may contain TensorFlow checkpoints.

  Returns:
    The saved global step as an integer, or 0 when no readable checkpoint
    exists in `checkpoint_dir` (treated as a fresh run).
  """
  try:
    checkpoint_reader = tf.train.NewCheckpointReader(
        tf.train.latest_checkpoint(checkpoint_dir))
    return checkpoint_reader.get_tensor(tf.GraphKeys.GLOBAL_STEP)
  except Exception:  # pylint: disable=broad-except
    # Missing or corrupt checkpoint: fall back to step 0. The original bare
    # `except:` would also swallow KeyboardInterrupt/SystemExit.
    return 0
def main(_):
  """Train, evaluate, or predict with AmoebaNet per FLAGS.mode, then export.

  Builds a (TPU)Estimator from the flag-derived hparams, dispatches on
  FLAGS.mode ('train', 'eval', 'train_and_eval', 'predict'), and finally
  exports a SavedModel when FLAGS.export_dir is set.
  """
  mode = FLAGS.mode
  data_dir = FLAGS.data_dir
  model_dir = FLAGS.model_dir
  hparams = build_hparams()
  estimator_params = {}
  train_steps_per_epoch = int(
      math.ceil(hparams.num_train_images / float(hparams.train_batch_size)))
  eval_steps = hparams.num_eval_images // hparams.eval_batch_size
  # Pure training never evaluates, so no eval batch size is needed.
  eval_batch_size = (None if mode == 'train' else
                     hparams.eval_batch_size)
  model = model_lib.AmoebaNetEstimatorModel(hparams, model_dir)
  if hparams.use_tpu:
    run_config = build_run_config()
    image_classifier = tf.contrib.tpu.TPUEstimator(
        model_fn=model.model_fn,
        use_tpu=True,
        config=run_config,
        params=estimator_params,
        predict_batch_size=eval_batch_size,
        train_batch_size=hparams.train_batch_size,
        eval_batch_size=eval_batch_size,
        export_to_tpu=FLAGS.export_to_tpu,
        experimental_exported_model_uses_all_cores=FLAGS
        .inference_with_all_cores)
  else:
    save_checkpoints_steps = (FLAGS.save_checkpoints_steps or
                              FLAGS.iterations_per_loop)
    run_config = tf.estimator.RunConfig(
        model_dir=FLAGS.model_dir,
        save_checkpoints_steps=save_checkpoints_steps)
    image_classifier = tf.estimator.Estimator(
        model_fn=model.model_fn,
        config=run_config,
        params=estimator_params)
  # Input pipelines are slightly different (with regards to shuffling and
  # preprocessing) between training and evaluation.
  imagenet_train = model_lib.InputPipeline(
      is_training=True, data_dir=data_dir, hparams=hparams)
  imagenet_eval = model_lib.InputPipeline(
      is_training=False, data_dir=data_dir, hparams=hparams)
  # When EMA is enabled, evaluation must load the shadow (averaged) weights.
  if hparams.moving_average_decay < 1:
    eval_hooks = [model_lib.LoadEMAHook(model_dir,
                                        hparams.moving_average_decay)]
  else:
    eval_hooks = []
  if mode == 'eval':
    # Evaluate every new checkpoint until the checkpoint iterator times out.
    for checkpoint in _get_next_checkpoint():
      tf.logging.info('Starting to evaluate.')
      try:
        eval_results = image_classifier.evaluate(
            input_fn=imagenet_eval.input_fn,
            steps=eval_steps,
            hooks=eval_hooks,
            checkpoint_path=checkpoint)
        tf.logging.info('Evaluation results: %s' % eval_results)
      except tf.errors.NotFoundError:
        # Skip checkpoint if it gets deleted prior to evaluation.
        # BUG FIX: the checkpoint path was never passed for the %s
        # placeholder, so the literal '%s' was logged.
        tf.logging.info('Checkpoint %s no longer exists ... skipping',
                        checkpoint)
  elif mode == 'train_and_eval':
    current_step = _load_global_step_from_checkpoint_dir(model_dir)
    tf.logging.info('Starting training at step=%d.' % current_step)
    train_steps_per_eval = int(
        hparams.num_epochs_per_eval * train_steps_per_epoch)
    # Final Evaluation if training is finished.
    if current_step >= hparams.num_epochs * train_steps_per_epoch:
      eval_results = image_classifier.evaluate(
          input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
      tf.logging.info('Evaluation results: %s' % eval_results)
    # Alternate train/eval phases until the epoch budget is exhausted.
    while current_step < hparams.num_epochs * train_steps_per_epoch:
      image_classifier.train(
          input_fn=imagenet_train.input_fn, steps=train_steps_per_eval)
      current_step += train_steps_per_eval
      tf.logging.info('Starting evaluation at step=%d.' % current_step)
      eval_results = image_classifier.evaluate(
          input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
      tf.logging.info('Evaluation results: %s' % eval_results)
  elif mode == 'predict':
    for checkpoint in _get_next_checkpoint():
      tf.logging.info('Starting prediction ...')
      time_hook = model_lib.SessionTimingHook()
      eval_hooks.append(time_hook)
      result_iter = image_classifier.predict(
          input_fn=imagenet_eval.input_fn,
          hooks=eval_hooks,
          checkpoint_path=checkpoint,
          yield_single_examples=False)
      results = list(itertools.islice(result_iter, eval_steps))
      tf.logging.info('Inference speed = {} images per second.'.format(
          time_hook.compute_speed(len(results) * eval_batch_size)))
  elif mode == 'train':
    current_step = _load_global_step_from_checkpoint_dir(model_dir)
    total_step = int(hparams.num_epochs * train_steps_per_epoch)
    if current_step < total_step:
      tf.logging.info('Starting training ...')
      image_classifier.train(
          input_fn=imagenet_train.input_fn,
          steps=total_step-current_step)
  else:
    tf.logging.info('Mode not found.')
  if FLAGS.export_dir is not None:
    tf.logging.info('Starting exporting saved model ...')
    serving_shape = [hparams.image_size, hparams.image_size, 3]
    export_path = image_classifier.export_saved_model(
        export_dir_base=FLAGS.export_dir,
        serving_input_receiver_fn=build_image_serving_input_receiver_fn(
            serving_shape),
        as_text=True)
    if FLAGS.add_warmup_requests:
      write_warmup_requests(
          export_path,
          FLAGS.model_name,
          hparams.image_size,
          batch_sizes=FLAGS.inference_batch_sizes)
if __name__ == '__main__':
  # Show INFO-level logs (training progress, eval results) when run as a script.
  tf.logging.set_verbosity(tf.logging.INFO)
  app.run(main)
| models/official/amoeba_net/amoeba_net.py | 18,537 | encodes an (numpy) image array to string.
Args:
image_array: (numpy) image array
fmt: image format to use
Returns:
encoded image string
Preprocess a single raw image.
Build tf.Hparams for training Amoeba Net.
Returns a input_receiver_fn for raw images during serving.
Return RunConfig for TPU estimator.
Overrides parameters with flag values.
Writes warmup requests for inference into a tfrecord file.
Args:
savedmodel_dir: string, the file to the exported model folder.
model_name: string, a model name used inside the model server.
image_size: int, size of image, assuming image height and width.
batch_sizes: list, a list of batch sizes to create different input requests.
num_requests: int, number of requests per batch size.
Raises:
ValueError: if batch_sizes is not a valid integer list.
TensorFlow AmoebaNet Example.
GCP Run Example
python amoeba_net.py --data_dir=gs://cloud-tpu-datasets/imagenet-data --model_dir=gs://cloud-tpu-ckpts/models/ameoba_net_x/ \
--drop_connect_keep_prob=1.0 --cell_name=evol_net_x --num_cells=12 --reduction_size=256 --image_size=299 --num_epochs=48 \
--train_batch_size=256 --num_epochs_per_eval=4.0 --lr_decay_value=0.89 --lr_num_epochs_per_decay=1 --alsologtostderr \
--tpu=huangyp-tpu-0
Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== pylint: disable=line-too-long pylint: enable=line-too-long pylint: disable=unused-import Cloud TPU Cluster Resolvers General Parameters Model specific parameters Training hyper-parameters Inference configuration. TODO(ereal): simplify this. pylint: disable=bare-except Input pipelines are slightly different (with regards to shuffling and preprocessing) between training and evaluation. skip checkpoint if it gets deleted prior to evaluation Final Evaluation if training is finished. | 2,397 | en | 0.679271 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.