hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8a50b328decdc9f345c171a8999d1aa6757ea4e7 | 381 | py | Python | Shop_platform/shops/migrations/0005_auto_20200512_2301.py | Tozman99/Shop_platform | c49a7724882b2f5468877806494319dcd8bd0314 | [
"MIT"
] | 1 | 2021-11-07T08:52:16.000Z | 2021-11-07T08:52:16.000Z | Shop_platform/shops/migrations/0005_auto_20200512_2301.py | Tozman99/shop_platform | c49a7724882b2f5468877806494319dcd8bd0314 | [
"MIT"
] | 18 | 2021-03-19T08:51:38.000Z | 2022-03-12T00:38:18.000Z | Shop_platform/shops/migrations/0005_auto_20200512_2301.py | Tozman99/Shop_platform | c49a7724882b2f5468877806494319dcd8bd0314 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.3 on 2020-05-12 23:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shops', '0004_product_quantity'),
]
operations = [
migrations.AlterField(
model_name='product',
name='image',
field=models.BinaryField(blank=True),
),
]
| 20.052632 | 49 | 0.593176 |
f015f5678ec8bd16528c817082c06579eb23c4a8 | 754 | py | Python | emgapi/apps.py | EBI-Metagenomics/ebi-metagenomics-api | 1f028902fe493583c5c8191dd5dae92cca9e15a9 | [
"Apache-2.0"
] | 6 | 2018-11-20T10:38:56.000Z | 2022-03-08T19:39:11.000Z | emgapi/apps.py | EBI-Metagenomics/ebi-metagenomics-api | 1f028902fe493583c5c8191dd5dae92cca9e15a9 | [
"Apache-2.0"
] | 33 | 2017-10-19T14:07:31.000Z | 2022-03-28T16:02:50.000Z | emgapi/apps.py | EBI-Metagenomics/ebi-metagenomics-api | 1f028902fe493583c5c8191dd5dae92cca9e15a9 | [
"Apache-2.0"
] | 2 | 2017-10-18T19:34:46.000Z | 2019-06-14T22:08:51.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.apps import AppConfig
class EmgApiConfig(AppConfig):
name = 'emgapi'
# label = 'api'
| 31.416667 | 74 | 0.745358 |
a6c40763cfc79d7b103721162a9ff0e4cde89275 | 18,468 | py | Python | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/ocelot/profiles/Profile_Mbus.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 69 | 2021-12-16T01:34:09.000Z | 2022-03-31T08:27:39.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/ocelot/profiles/Profile_Mbus.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 6 | 2022-01-12T18:22:08.000Z | 2022-03-25T10:19:27.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/ocelot/profiles/Profile_Mbus.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 21 | 2021-12-20T09:05:45.000Z | 2022-03-28T02:52:28.000Z | from pyradioconfig.parts.common.profiles.ocelot_regs import *
from pyradioconfig.parts.common.profiles.profile_common import *
from pyradioconfig.calculator_model_framework.interfaces.iprofile import IProfile
from pyradioconfig.parts.common.utils.units_multiplier import UnitsMultiplier
from pyradioconfig.parts.ocelot.profiles.sw_profile_outputs_common import sw_profile_outputs_common_ocelot
from pyradioconfig.parts.ocelot.profiles.profile_mbus_modes import *
class Profile_Mbus_Ocelot(IProfile):
"""
Init internal variables
"""
def __init__(self):
self._family = "ocelot"
self._profileName = "Mbus"
self._readable_name = "Mbus Profile"
self._category = ""
self._description = "Profile used for Mbus phys"
self._default = False
self._activation_logic = ""
self._sw_profile_outputs_common = sw_profile_outputs_common_ocelot()
"""
Builds inputs, forced, outputs into modem model
"""
def buildProfileModel(self, model):
family = self._family
# Build profile
profile = self._makeProfile(model)
### Required inputs ###
IProfile.make_required_input(profile, model.vars.preamble_length, "general", readable_name="Preamble Length Total", value_limit_min=16, value_limit_max=2097151)
IProfile.make_required_input(profile, model.vars.mbus_mode, category="general", readable_name="Mbus Mode")
IProfile.make_required_input(profile, model.vars.mbus_frame_format, category="general",
readable_name="Mbus Frame Format")
IProfile.make_required_input(profile, model.vars.mbus_symbol_encoding, category="general",
readable_name="Symbol Encoding")
IProfile.make_required_input(profile, model.vars.syncword_dualsync, category="general",
readable_name="Enable Dual Syncword Detection")
IProfile.make_required_input(profile, model.vars.base_frequency_hz, category="operational_frequency",
readable_name="Base Channel Frequency", value_limit_min=long(100000000),
value_limit_max=long(2480000000), units_multiplier=UnitsMultiplier.MEGA)
IProfile.make_required_input(profile, model.vars.channel_spacing_hz, category="operational_frequency",
readable_name="Channel Spacing", value_limit_min=0,
value_limit_max=10000000, units_multiplier=UnitsMultiplier.KILO)
IProfile.make_required_input(profile, model.vars.xtal_frequency_hz, "crystal",
readable_name="Crystal Frequency", value_limit_min=38000000,
value_limit_max=40000000, units_multiplier=UnitsMultiplier.MEGA)
### Optional inputs ###
IProfile.make_optional_input(profile, model.vars.test_ber, category="testing",
readable_name="Reconfigure for BER testing", default=False)
IProfile.make_optional_input(profile, model.vars.deviation_tol_ppm, 'modem', default=0,
readable_name="Maximum deviation offset expected in ppm", value_limit_min=0,
value_limit_max=500000)
IProfile.make_optional_input(profile, model.vars.mbus_postamble_length, category="general", default=0,
readable_name="Mbus postamble legnth", value_limit_min=0,
value_limit_max=4)
### Hidden test inputs ###
# Hidden inputs to allow for fixed frame length testing
IProfile.make_hidden_input(profile, model.vars.frame_length_type, 'frame_general',
readable_name="Frame Length Algorithm")
IProfile.make_hidden_input(profile, model.vars.fixed_length_size, category='frame_fixed_length',
readable_name="Fixed Payload Size", value_limit_min=0, value_limit_max=0x7fffffff)
IProfile.make_hidden_input(profile, model.vars.payload_crc_en, category='frame_payload',
readable_name="Insert/Check CRC after payload")
IProfile.make_hidden_input(profile, model.vars.crc_poly, category='crc', readable_name="CRC Polynomial")
IProfile.make_hidden_input(profile, model.vars.crc_byte_endian, category='crc',
readable_name="CRC Byte Endian")
IProfile.make_hidden_input(profile, model.vars.crc_bit_endian, category='crc',
readable_name="CRC Output Bit Endian")
IProfile.make_hidden_input(profile, model.vars.crc_pad_input, category='crc', readable_name="CRC Input Padding")
IProfile.make_hidden_input(profile, model.vars.crc_input_order, category='crc',
readable_name="CRC Input Bit Endian")
IProfile.make_hidden_input(profile, model.vars.crc_invert, category='crc', readable_name="CRC Invert")
IProfile.make_hidden_input(profile, model.vars.target_osr, category="general",
readable_name="Set desired OSR", value_limit_min=3, value_limit_max=9)
IProfile.make_hidden_input(profile, model.vars.bitrate, category="modem",
readable_name="Bitrate", value_limit_min=100, value_limit_max=2000000,
units_multiplier=UnitsMultiplier.KILO)
IProfile.make_hidden_input(profile, model.vars.demod_select, 'Advanced', readable_name="Demod Selection")
IProfile.make_hidden_input(profile, model.vars.frame_bitendian, category='frame_general',
readable_name="Frame Bit Endian")
IProfile.make_hidden_input(profile, model.vars.synth_settling_mode, 'modem',
readable_name="Synth Settling Mode")
# Hidden inputs to allow for keeping absolute tolerance the same when testing at 915M
IProfile.make_hidden_input(profile, model.vars.rx_xtal_error_ppm, category="general",
readable_name="Set desired xtal tolerance on RX side", value_limit_min=0,
value_limit_max=100)
IProfile.make_hidden_input(profile, model.vars.tx_xtal_error_ppm, category="general",
readable_name="Set desired xtal tolerance on TX side", value_limit_min=0,
value_limit_max=100)
IProfile.make_hidden_input(profile, model.vars.freq_offset_hz, 'Advanced',
readable_name="Frequency Offset Compensation (AFC) Limit", value_limit_min=0,
value_limit_max=500000, units_multiplier=UnitsMultiplier.KILO)
#Deprecated inputs
# These inputs were exposed on or after Ocelot Alpha 1 release, so they may be present in radioconf XML
self.make_deprecated_input(profile, model.vars.max_tx_power_dbm)
# Informational output
self._sw_profile_outputs_common.build_info_outputs(model, profile)
# RAIL Outputs
self._sw_profile_outputs_common.build_rail_outputs(model, profile)
# IRCal outputs
self._sw_profile_outputs_common.build_ircal_outputs(model, profile)
# Output fields
buildFrameOutputs(model, profile, family=family)
buildCrcOutputs(model, profile, family)
buildWhiteOutputs(model, profile)
buildFecOutputs(model, profile)
self._add_reg_profile_outputs(model, profile)
return profile
def _add_reg_profile_outputs(self, model, profile):
build_modem_regs_ocelot(model, profile, family=self._family)
def mbus_profile_frame_format_common(self, model):
# Whitening
model.vars.header_white_en.value_forced = False
model.vars.payload_white_en.value_forced = False
model.vars.white_poly.value_forced = model.vars.white_poly.var_enum.NONE
model.vars.white_seed.value_forced = 0
model.vars.white_output_bit.value_forced = 0
# General frame format
model.vars.frame_bitendian.value_forced = model.vars.frame_bitendian.var_enum.MSB_FIRST
# -- Payload --
model.vars.payload_addtrailtxdata_en.value_forced = False
model.vars.payload_excludesubframewcnt_en.value_forced = False
# -- Header --
model.vars.header_addtrailtxdata_en.value_forced = False
model.vars.header_excludesubframewcnt_en.value_forced = False
# Frame type length variables
model.vars.frame_type_loc.value_forced = 0
model.vars.frame_type_bits.value_forced = 3
model.vars.frame_type_lsbit.value_forced = 0
model.vars.frame_type_0_length.value_forced = 0
model.vars.frame_type_1_length.value_forced = 0
model.vars.frame_type_2_length.value_forced = 0
model.vars.frame_type_3_length.value_forced = 0
model.vars.frame_type_4_length.value_forced = 0
model.vars.frame_type_5_length.value_forced = 0
model.vars.frame_type_6_length.value_forced = 0
model.vars.frame_type_7_length.value_forced = 0
model.vars.frame_type_0_valid.value_forced = False
model.vars.frame_type_1_valid.value_forced = False
model.vars.frame_type_2_valid.value_forced = False
model.vars.frame_type_3_valid.value_forced = False
model.vars.frame_type_4_valid.value_forced = False
model.vars.frame_type_5_valid.value_forced = False
model.vars.frame_type_6_valid.value_forced = False
model.vars.frame_type_7_valid.value_forced = False
# FEC
model.vars.fec_en.value_forced = model.vars.fec_en.var_enum.NONE
# CRC
model.vars.crc_poly.value_forced = model.vars.crc_poly.var_enum.DNP_16
        model.vars.crc_seed.value_forced = long(0)
model.vars.crc_byte_endian.value_forced = model.vars.crc_byte_endian.var_enum.MSB_FIRST
model.vars.crc_bit_endian.value_forced = model.vars.crc_bit_endian.var_enum.MSB_FIRST
model.vars.crc_pad_input.value_forced = False
model.vars.crc_input_order.value_forced = model.vars.crc_input_order.var_enum.MSB_FIRST
model.vars.crc_invert.value_forced = True
def mbus_profile_frame_format_calc(self, model):
if model.profile.inputs.mbus_frame_format.var_value == model.vars.mbus_frame_format.var_enum.NoFormat:
model.vars.header_en.value_forced = False
model.vars.header_size.value_forced = 0
model.vars.frame_length_type.value_forced = model.vars.frame_length_type.var_enum.FIXED_LENGTH
model.vars.header_calc_crc.value_forced = False
model.vars.fixed_length_size.value_forced = 18 # This is the value Andras was using in the one phy that used this option
model.vars.var_length_numbits.value_forced = 0
model.vars.var_length_bitendian.value_forced = model.vars.var_length_bitendian.var_enum.LSB_FIRST
model.vars.var_length_byteendian.value_forced = model.vars.var_length_byteendian.var_enum.LSB_FIRST
model.vars.var_length_shift.value_forced = 0
model.vars.var_length_minlength.value_forced = 0
model.vars.var_length_maxlength.value_forced = 0
model.vars.var_length_includecrc.value_forced = False
model.vars.var_length_adjust.value_forced = 0
model.vars.payload_crc_en.value_forced = False
elif model.profile.inputs.mbus_frame_format.var_value == model.vars.mbus_frame_format.var_enum.FrameA:
# -- Header --
#Block 1 for frameA
model.vars.header_en.value_forced = True
model.vars.header_size.value_forced = 1 #This controls DFL location AND header size. We set it up for DFL loc
model.vars.FRC_FCD0_WORDS.value_forced = 9 #and override the size to be 10B for TX
model.vars.FRC_FCD2_WORDS.value_forced = 9 #and for RX
model.vars.header_calc_crc.value_forced = True
model.vars.header_include_crc.value_forced = True
            #all subsequent blocks are handled as repeating 16B subframes
model.vars.FRC_FCD1_WORDS.value_forced = 15
model.vars.FRC_FCD3_WORDS.value_forced = 15
#FCDMODE2 is the default, which is good for us
# -- Variable Length --
model.vars.frame_length_type.value_forced = model.vars.frame_length_type.var_enum.VARIABLE_LENGTH
model.vars.var_length_numbits.value_forced = 8
model.vars.var_length_bitendian.value_forced = model.vars.var_length_bitendian.var_enum.MSB_FIRST
model.vars.var_length_byteendian.value_forced = model.vars.var_length_byteendian.var_enum.MSB_FIRST
model.vars.var_length_shift.value_forced = 0
model.vars.var_length_minlength.value_forced = 10
model.vars.var_length_maxlength.value_forced = 255
model.vars.var_length_includecrc.value_forced = False
model.vars.var_length_adjust.value_forced = 0
model.vars.payload_crc_en.value_forced = True
elif model.profile.inputs.mbus_frame_format.var_value == model.vars.mbus_frame_format.var_enum.FrameB:
# -- Header --
#Block 1 and 2 for frameB
model.vars.header_en.value_forced = True
model.vars.header_size.value_forced = 1 #This controls DFL location AND header size. We set it up for DFL loc
model.vars.FRC_FCD0_WORDS.value_forced = 125 #and override the size to be 125B for TX
model.vars.FRC_FCD2_WORDS.value_forced = 125 #and for RX
model.vars.header_calc_crc.value_forced = True
model.vars.header_include_crc.value_forced = True
#Block 3 is the remaining data, which is the payload from the configurator's perspective
# -- Variable Length --
model.vars.frame_length_type.value_forced = model.vars.frame_length_type.var_enum.VARIABLE_LENGTH
model.vars.var_length_numbits.value_forced = 8
model.vars.var_length_bitendian.value_forced = model.vars.var_length_bitendian.var_enum.MSB_FIRST
model.vars.var_length_byteendian.value_forced = model.vars.var_length_byteendian.var_enum.MSB_FIRST
model.vars.var_length_shift.value_forced = 0
model.vars.var_length_minlength.value_forced = 12
model.vars.var_length_maxlength.value_forced = 255
            model.vars.var_length_includecrc.value_forced = True #the big difference: frameB's length includes the CRC fields.
model.vars.var_length_adjust.value_forced = 0
model.vars.payload_crc_en.value_forced = True
else:
raise Exception("Unexpected value found for mbus_frame_format")
def mbus_profile_radio_common(self, model):
# Set some variables common to all modes
model.vars.fsk_symbol_map.value_forced = model.vars.fsk_symbol_map.var_enum.MAP0
model.vars.dsss_chipping_code.value_forced = long(0)
model.vars.dsss_len.value_forced = 0
model.vars.dsss_spreading_factor.value_forced = 0
model.vars.diff_encoding_mode.value_forced = model.vars.diff_encoding_mode.var_enum.DISABLED
model.vars.preamble_pattern.value_forced = 1
model.vars.preamble_pattern_len.value_forced = 2
model.vars.asynchronous_rx_enable.value_forced = False
model.vars.syncword_tx_skip.value_forced = False
def mbus_profile_mode_calc(self, model):
mode = model.profile.inputs.mbus_mode.var_value
if mode == model.vars.mbus_mode.var_enum.ModeC_M2O_100k:
profile_MBus_modes.profile_wMbus_ModeC_M2O_100k(model, self._family)
elif mode == model.vars.mbus_mode.var_enum.ModeC_O2M_50k:
profile_MBus_modes.profile_wMbus_ModeC_O2M_50k(model, self._family)
elif mode == model.vars.mbus_mode.var_enum.ModeF_2p4k:
profile_MBus_modes.profile_wMbus_ModeF_2p4k(model, self._family)
elif mode == model.vars.mbus_mode.var_enum.ModeNg:
profile_MBus_modes.profile_wMbus_ModeN2g_19p2k(model, self._family)
elif mode == model.vars.mbus_mode.var_enum.ModeN1a_4p8K:
profile_MBus_modes.profile_wMbus_ModeN1a_4p8K(model, self._family)
elif mode == model.vars.mbus_mode.var_enum.ModeN1c_2p4K:
profile_MBus_modes.profile_wMbus_ModeN1c_2p4K(model, self._family)
elif mode == model.vars.mbus_mode.var_enum.ModeR_4p8k:
profile_MBus_modes.profile_wMbus_ModeR_4p8k(model, self._family)
elif mode == model.vars.mbus_mode.var_enum.ModeT_M2O_100k:
profile_MBus_modes.profile_wMbus_ModeT_M2O_100k(model, self._family)
elif mode == model.vars.mbus_mode.var_enum.ModeT_O2M_32p768k:
profile_MBus_modes.profile_wMbus_ModeT_O2M_32p768k(model, self._family)
elif mode == model.vars.mbus_mode.var_enum.ModeS_32p768k:
profile_MBus_modes.profile_wMbus_ModeS_32p768k(model, self._family)
elif mode == model.vars.mbus_mode.var_enum.ModeN_6p4k:
profile_MBus_modes.profile_wMbus_ModeN_6p4k(model, self._family)
def mbus_profile_symbol_encoding_calc(self, model):
mbus_symbol_encoding = model.profile.inputs.mbus_symbol_encoding.var_value
if mbus_symbol_encoding == model.vars.mbus_symbol_encoding.var_enum.NRZ:
model.vars.symbol_encoding.value_forced = model.vars.symbol_encoding.var_enum.NRZ
elif mbus_symbol_encoding == model.vars.mbus_symbol_encoding.var_enum.Manchester:
model.vars.symbol_encoding.value_forced = model.vars.symbol_encoding.var_enum.Inv_Manchester #Always inverted
elif mbus_symbol_encoding == model.vars.mbus_symbol_encoding.var_enum.MBUS_3OF6:
model.vars.symbol_encoding.value_forced = model.vars.symbol_encoding.var_enum.MBUS_3OF6
else:
raise Exception("Unexpected value found for mbus_symbol_encoding")
def profile_calculate(self, model):
self.mbus_profile_frame_format_common(model)
self.mbus_profile_frame_format_calc(model)
self.mbus_profile_radio_common(model)
self.mbus_profile_mode_calc(model)
self.mbus_profile_symbol_encoding_calc(model)
| 58.075472 | 168 | 0.695311 |
5068d6f08f67d2d232cce457c09cb4c70b2e426a | 2,785 | py | Python | mozillians/phonebook/validators.py | justinpotts/mozillians | efa5cbdfe4992d2ba1c1d85bfbb5b09b2215cc44 | [
"BSD-3-Clause"
] | null | null | null | mozillians/phonebook/validators.py | justinpotts/mozillians | efa5cbdfe4992d2ba1c1d85bfbb5b09b2215cc44 | [
"BSD-3-Clause"
] | null | null | null | mozillians/phonebook/validators.py | justinpotts/mozillians | efa5cbdfe4992d2ba1c1d85bfbb5b09b2215cc44 | [
"BSD-3-Clause"
] | null | null | null | import re
from django.core.validators import EmailValidator, URLValidator
from django.db.models.loading import get_model
from django.forms import ValidationError
from tower import ugettext as _
def validate_twitter(username):
"""Return a twitter username given '@' or http(s) strings."""
if username:
username = re.sub('https?://(www\.)?twitter\.com/|@', '', username)
# Twitter accounts must be alphanumeric ASCII including underscore, and <= 15 chars.
# https://support.twitter.com/articles/101299-why-can-t-i-register-certain-usernames
if len(username) > 15:
raise ValidationError(_('Twitter usernames cannot be longer than 15 characters.'))
if not re.match('^\w+$', username):
raise ValidationError(_('Twitter usernames must contain only alphanumeric'
' characters and the underscore.'))
return username
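# Illustrative examples (not part of the original module): both a profile URL and
# an @-handle are reduced to the bare username, e.g.
#   validate_twitter('https://twitter.com/example_user')  ->  'example_user'
#   validate_twitter('@example_user')                      ->  'example_user'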
def validate_username(username):
"""Validate username.
Import modules here to prevent dependency breaking.
"""
username = username.lower()
UsernameBlacklist = get_model('users', 'UsernameBlacklist')
if (UsernameBlacklist.
objects.filter(value=username, is_regex=False).exists()):
return False
for regex_value in UsernameBlacklist.objects.filter(is_regex=True):
if re.match(regex_value.value, username):
return False
return True
def validate_website(url):
"""Validate and return a properly formatted website url."""
validate_url = URLValidator()
if url and '://' not in url:
url = u'http://%s' % url
try:
validate_url(url)
except ValidationError:
raise ValidationError(_('Enter a valid URL.'))
return url
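# Illustrative example (not part of the original module): a bare domain is given
# an http scheme, e.g. validate_website('example.com') -> 'http://example.com'.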
def validate_username_not_url(username):
"""Validate that a username is not a URL."""
if username.startswith('http://') or username.startswith('https://'):
raise ValidationError(_('This field requires an identifier, not a URL.'))
return username
def validate_email(value):
"""Validate that a username is email like."""
_validate_email = EmailValidator()
try:
_validate_email(value)
except ValidationError:
raise ValidationError(_('Enter a valid email address.'))
return value
def validate_phone_number(value):
"""Validate that a phone number is in international format. (5-15 characters)."""
value = value.replace(' ', '')
value = re.sub(r'^00', '+', value)
# Ensure that there are 5 to 15 digits
pattern = re.compile(r'^\+\d{5,15}$')
if not pattern.match(value):
raise ValidationError(_('Please enter a valid phone number in international format '
'(e.g. +1 555 555 5555)'))
return value
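# Illustrative example (not part of the original module): a leading "00" prefix is
# rewritten to "+", e.g. validate_phone_number('0049 171 5555555') -> '+491715555555'.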
| 29.010417 | 94 | 0.656732 |
13b16a860666812b40ab1982f95d6b09c011d6ac | 4,451 | py | Python | main.py | floydhub/regression | aecc20cdae4edfbd6235bac67c1f309942fd5170 | [
"BSD-3-Clause"
] | 9 | 2017-10-23T13:39:20.000Z | 2022-02-18T21:08:41.000Z | main.py | ReDeiPirati/regression | aecc20cdae4edfbd6235bac67c1f309942fd5170 | [
"BSD-3-Clause"
] | null | null | null | main.py | ReDeiPirati/regression | aecc20cdae4edfbd6235bac67c1f309942fd5170 | [
"BSD-3-Clause"
] | 4 | 2018-01-04T06:35:10.000Z | 2021-11-12T06:47:23.000Z | #!/usr/bin/env python
from __future__ import print_function
from itertools import count
import os
import numpy as np
import argparse
import torch
import torch.autograd
import torch.nn.functional as F
from torch.autograd import Variable
parser = argparse.ArgumentParser(description='Pytorch Linear Regression')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--outf', default='/output',
help='folder to output images and model checkpoints')
parser.add_argument('--ckpf', default='',
help="path to model checkpoint file (to continue training)")
parser.add_argument('--degree', type=int, default=4, metavar='P',
help='polynomial degree to learn(default: 4)')
parser.add_argument('--batch-size', type=int, default=32, metavar='N',
help='input batch size for training (default: 32)')
parser.add_argument('--train', action='store_true',
help='training a fully connected layer')
parser.add_argument('--evaluate', action='store_true',
help='Evaluate a [pre]trained model from a random tensor.')
args = parser.parse_args()
# Is there the outf?
try:
os.makedirs(args.outf)
except OSError:
pass
# Is CUDA available?
cuda = torch.cuda.is_available()
# Seed for replicability
torch.manual_seed(args.seed)
if cuda:
torch.cuda.manual_seed(args.seed)
POLY_DEGREE = args.degree
W_target = torch.randn(POLY_DEGREE, 1) * 5
b_target = torch.randn(1) * 5
def make_features(x):
"""Builds features i.e. a matrix with columns [x, x^2, x^3, x^4]."""
x = x.unsqueeze(1)
return torch.cat([x ** i for i in range(1, POLY_DEGREE+1)], 1)
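# Illustrative example (not part of the original script): with the default
# POLY_DEGREE=4, make_features(torch.tensor([2.])) returns a single row
# [2., 4., 8., 16.], i.e. [x, x^2, x^3, x^4].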
def f(x):
"""Approximated function."""
return x.mm(W_target) + b_target[0]
def poly_desc(W, b):
"""Creates a string description of a polynomial."""
result = 'y = '
for i, w in enumerate(W):
result += '{:+.2f} x^{} '.format(w, len(W) - i)
result += '{:+.2f}'.format(b[0])
return result
def get_batch(batch_size=32):
"""Builds a batch i.e. (x, f(x)) pair."""
# Build samples from a normal distribution with zero mean
# and variance of one.
random = torch.randn(batch_size)
x = make_features(random)
y = f(x)
return Variable(x), Variable(y)
# Define model
fc = torch.nn.Linear(W_target.size(0), 1)
if cuda:
fc.cuda()
# Load checkpoint
if args.ckpf != '':
if cuda:
fc.load_state_dict(torch.load(args.ckpf))
else:
# Load GPU model on CPU
fc.load_state_dict(torch.load(args.ckpf, map_location=lambda storage, loc: storage))
fc.cpu()
# Check if model use cuda
#print (next(fc.parameters()).is_cuda)
# Train?
if args.train:
# Iterate until the loss is under 1e-3 threshold
for batch_idx in count(1):
fc.train()
# Get data
batch_x, batch_y = get_batch(args.batch_size)
if cuda:
batch_x, batch_y = batch_x.cuda(), batch_y.cuda()
# Reset gradients
fc.zero_grad()
# Forward pass
output = F.smooth_l1_loss(fc(batch_x), batch_y)
loss = output.data[0]
# Backward pass
output.backward()
# Apply gradients (SGD with learning_rate=0.1 and batch_size=32)
for param in fc.parameters():
param.data.add_(-0.1 * param.grad.data)
# Stop criterion
if loss < 1e-3:
break
print('Loss: {:.6f} after {} batches'.format(loss, batch_idx))
print('==> Learned function:\t' + poly_desc(fc.weight.data.view(-1), fc.bias.data))
print('==> Actual function:\t' + poly_desc(W_target.view(-1), b_target))
# Do checkpointing - Is saved in outf
torch.save(fc.state_dict(), '%s/regression_%d_degree_polynomial.pth' % (args.outf, args.degree))
# Evaluate?
if args.evaluate:
fc.eval()
# Custom Tensor
# t_test = torch.Tensor([[... ** i for i in range(1, POLY_DEGREE+1)]])
# v_test = Variable(t_test)
# print (v_test.size())
# print('==> Actual function result:\t' + str(f(t_test.cpu())))
x_test, y_test = get_batch(batch_size=1)
if cuda:
x_test = x_test.cuda()
out = np.asscalar(fc(x_test).data.cpu().numpy())
y_test = np.asscalar(y_test.data.cpu().numpy())
# Comparison
print ('==> Learned function result:\t' + str(out))
print('==> Actual function result:\t' + str(y_test))
| 30.278912 | 100 | 0.631543 |
d3e533b1fb7c73987e65d43d4fac589d3dfcd9e1 | 3,390 | py | Python | rc/smallplayer.py | tarasfrompir/MajordomoHome | 673474450720eb96d523bc9ea71dcf8cad86d7b1 | [
"MIT"
] | null | null | null | rc/smallplayer.py | tarasfrompir/MajordomoHome | 673474450720eb96d523bc9ea71dcf8cad86d7b1 | [
"MIT"
] | 4 | 2017-08-11T19:27:42.000Z | 2017-12-07T08:45:11.000Z | rc/smallplayer.py | tarasfrompir/MajordomoHome | 673474450720eb96d523bc9ea71dcf8cad86d7b1 | [
"MIT"
] | 1 | 2021-02-24T21:55:28.000Z | 2021-02-24T21:55:28.000Z | # -*- coding: utf-8 -*-
#from __future__ import print_function, unicode_literals
#import time
#https://github.com/jonisb/AudioEndpointControl
import AudioEndpointControl
from AudioEndpointControl import Render, Capture, All
from AudioEndpointControl import Console, Multimedia, Communications
from AudioEndpointControl import (
DEVICE_STATE_ACTIVE,
DEVICE_STATE_DISABLED,
DEVICE_STATE_NOTPRESENT,
DEVICE_STATE_UNPLUGGED,
DEVICE_STATEMASK_ALL
)
from AudioEndpointControl import (
Device_FriendlyName,
Device_DeviceDesc,
DeviceInterface_FriendlyName)
from comtypes import GUID
AppID = GUID('{00000000-0000-0000-0000-000000000001}')
import pyaudio
import wave
import sys
#import subprocess
p = pyaudio.PyAudio()
# argument -devicelist - list of the output devices
# argument -play nameoffile devicenumber - plays the file (name) on device number (1)
# argument -setvolume volume devicename - sets the volume level on the sound card devicename
# argument -getvolume devicename - gets the volume level from devicename
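# Example invocations (illustrative; the file name and device values below are assumptions):
#   python smallplayer.py -devicelist
#   python smallplayer.py -play alarm.wav 1
#   python smallplayer.py -setvolume 0.5 "Speakers"
#   python smallplayer.py -getvolume "Speakers"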
if (sys.argv[1] == '-devicelist'):
# get device list
AudioDevices = AudioEndpointControl.AudioEndpoints(DEVICE_STATE=DEVICE_STATE_ACTIVE, PKEY_Device=Device_FriendlyName, EventContext=AppID)
out = ''
info = p.get_host_api_info_by_index(0)
numdevices = info.get('deviceCount')
for i in range(0, numdevices):
if (p.get_device_info_by_host_api_device_index(0, i).get('maxOutputChannels')) > 0 :
temp = str(p.get_device_info_by_host_api_device_index(0, i).get('name')).encode('latin1').decode('cp1251')
for device in AudioDevices:
if (str(device).find(temp) != -1):
out = out + str(i) + "^" + str(device) + ','
print (str(out))
elif (sys.argv[1] == '-play' and sys.argv[2] != "" and sys.argv[3] != ""):
CHUNK = 1024
wf = wave.open(sys.argv[2], 'rb')
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True,
output_device_index=int(sys.argv[3]))
data = wf.readframes(CHUNK)
while data :
stream.write(data)
data = wf.readframes(CHUNK)
stream.stop_stream()
stream.close()
elif (sys.argv[1] == '-setvolume' and sys.argv[2] != "" and sys.argv[3] != ""):
device = sys.argv[3]
AudioDevices = AudioEndpointControl.AudioEndpoints(DEVICE_STATE=DEVICE_STATE_ACTIVE, PKEY_Device=Device_FriendlyName, EventContext=AppID)
volume = sys.argv[2]
for endpoint in AudioDevices:
if (str(endpoint).find(device) != -1):
endpoint.volume.Set(float(volume))
elif (sys.argv[1] == '-getvolume' and sys.argv[2] != ""):
device = sys.argv[2]
AudioDevices = AudioEndpointControl.AudioEndpoints(DEVICE_STATE=DEVICE_STATE_ACTIVE, PKEY_Device=Device_FriendlyName, EventContext=AppID)
for endpoint in AudioDevices:
if (str(endpoint).find(device) != -1):
VolSave = endpoint.volume.Get()
print (VolSave)
else :
print ("неправильный аргумент")
print ("# аргумент -devicelist - список выходных устройств")
print ("# аргумент -play nameoffile devicenumber проиграет файл(имя) на устройстве номер(1)")
| 35.684211 | 142 | 0.673156 |
2b43b940dc920b7d94d32861686fa0d8767a8221 | 31,624 | py | Python | pyqtgraph/imageview/ImageView.py | pbmanis/pyqtgraph | 3558216be2b50d6b0069c82e51e5a048dad34c73 | [
"MIT"
] | 150 | 2018-03-27T16:45:37.000Z | 2022-03-30T03:47:56.000Z | pyqtgraph/imageview/ImageView.py | Jhongesell/pyqtgraph | 229f650adfd04053213fe6567d6308a4751a349b | [
"MIT"
] | 34 | 2018-09-28T00:01:59.000Z | 2022-03-21T15:40:02.000Z | pyqtgraph/imageview/ImageView.py | Jhongesell/pyqtgraph | 229f650adfd04053213fe6567d6308a4751a349b | [
"MIT"
] | 40 | 2018-04-06T19:42:21.000Z | 2022-01-11T00:34:17.000Z | # -*- coding: utf-8 -*-
"""
ImageView.py - Widget for basic image display and analysis
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more infomation.
Widget used for displaying 2D or 3D data. Features:
- float or int (including 16-bit int) image display via ImageItem
- zoom/pan via GraphicsView
- black/white level controls
- time slider for 3D data sets
- ROI plotting
- Image normalization through a variety of methods
"""
import os, sys
import numpy as np
from ..Qt import QtCore, QtGui, QT_LIB
if QT_LIB == 'PySide':
from .ImageViewTemplate_pyside import *
elif QT_LIB == 'PySide2':
from .ImageViewTemplate_pyside2 import *
elif QT_LIB == 'PyQt5':
from .ImageViewTemplate_pyqt5 import *
else:
from .ImageViewTemplate_pyqt import *
from ..graphicsItems.ImageItem import *
from ..graphicsItems.ROI import *
from ..graphicsItems.LinearRegionItem import *
from ..graphicsItems.InfiniteLine import *
from ..graphicsItems.ViewBox import *
from ..graphicsItems.VTickGroup import VTickGroup
from ..graphicsItems.GradientEditorItem import addGradientListToDocstring
from .. import ptime as ptime
from .. import debug as debug
from ..SignalProxy import SignalProxy
from .. import getConfigOption
try:
from bottleneck import nanmin, nanmax
except ImportError:
from numpy import nanmin, nanmax
class PlotROI(ROI):
def __init__(self, size):
ROI.__init__(self, pos=[0,0], size=size) #, scaleSnap=True, translateSnap=True)
self.addScaleHandle([1, 1], [0, 0])
self.addRotateHandle([0, 0], [0.5, 0.5])
class ImageView(QtGui.QWidget):
"""
Widget used for display and analysis of image data.
Implements many features:
* Displays 2D and 3D image data. For 3D data, a z-axis
slider is displayed allowing the user to select which frame is displayed.
* Displays histogram of image data with movable region defining the dark/light levels
* Editable gradient provides a color lookup table
* Frame slider may also be moved using left/right arrow keys as well as pgup, pgdn, home, and end.
* Basic analysis features including:
* ROI and embedded plot for measuring image values across frames
* Image normalization / background subtraction
Basic Usage::
imv = pg.ImageView()
imv.show()
imv.setImage(data)
**Keyboard interaction**
* left/right arrows step forward/backward 1 frame when pressed,
seek at 20fps when held.
* up/down arrows seek at 100fps
* pgup/pgdn seek at 1000fps
* home/end seek immediately to the first/last frame
* space begins playing frames. If time values (in seconds) are given
for each frame, then playback is in realtime.
"""
sigTimeChanged = QtCore.Signal(object, object)
sigProcessingChanged = QtCore.Signal(object)
def __init__(self, parent=None, name="ImageView", view=None, imageItem=None,
levelMode='mono', *args):
"""
By default, this class creates an :class:`ImageItem <pyqtgraph.ImageItem>` to display image data
and a :class:`ViewBox <pyqtgraph.ViewBox>` to contain the ImageItem.
============= =========================================================
**Arguments**
parent (QWidget) Specifies the parent widget to which
this ImageView will belong. If None, then the ImageView
is created with no parent.
name (str) The name used to register both the internal ViewBox
and the PlotItem used to display ROI data. See the *name*
argument to :func:`ViewBox.__init__()
<pyqtgraph.ViewBox.__init__>`.
view (ViewBox or PlotItem) If specified, this will be used
as the display area that contains the displayed image.
Any :class:`ViewBox <pyqtgraph.ViewBox>`,
:class:`PlotItem <pyqtgraph.PlotItem>`, or other
compatible object is acceptable.
imageItem (ImageItem) If specified, this object will be used to
display the image. Must be an instance of ImageItem
or other compatible object.
levelMode See the *levelMode* argument to
:func:`HistogramLUTItem.__init__()
<pyqtgraph.HistogramLUTItem.__init__>`
============= =========================================================
Note: to display axis ticks inside the ImageView, instantiate it
with a PlotItem instance as its view::
pg.ImageView(view=pg.PlotItem())
"""
QtGui.QWidget.__init__(self, parent, *args)
self._imageLevels = None # [(min, max), ...] per channel image metrics
self.levelMin = None # min / max levels across all channels
self.levelMax = None
self.name = name
self.image = None
self.axes = {}
self.imageDisp = None
self.ui = Ui_Form()
self.ui.setupUi(self)
self.scene = self.ui.graphicsView.scene()
self.ui.histogram.setLevelMode(levelMode)
self.ignoreTimeLine = False
if view is None:
self.view = ViewBox()
else:
self.view = view
self.ui.graphicsView.setCentralItem(self.view)
self.view.setAspectLocked(True)
self.view.invertY()
if imageItem is None:
self.imageItem = ImageItem()
else:
self.imageItem = imageItem
self.view.addItem(self.imageItem)
self.currentIndex = 0
self.ui.histogram.setImageItem(self.imageItem)
self.menu = None
self.ui.normGroup.hide()
self.roi = PlotROI(10)
self.roi.setZValue(20)
self.view.addItem(self.roi)
self.roi.hide()
self.normRoi = PlotROI(10)
self.normRoi.setPen('y')
self.normRoi.setZValue(20)
self.view.addItem(self.normRoi)
self.normRoi.hide()
self.roiCurves = []
self.timeLine = InfiniteLine(0, movable=True, markers=[('^', 0), ('v', 1)])
self.timeLine.setPen((255, 255, 0, 200))
self.timeLine.setZValue(1)
self.ui.roiPlot.addItem(self.timeLine)
self.ui.splitter.setSizes([self.height()-35, 35])
self.ui.roiPlot.hideAxis('left')
self.frameTicks = VTickGroup(yrange=[0.8, 1], pen=0.4)
self.ui.roiPlot.addItem(self.frameTicks, ignoreBounds=True)
self.keysPressed = {}
self.playTimer = QtCore.QTimer()
self.playRate = 0
self.lastPlayTime = 0
self.normRgn = LinearRegionItem()
self.normRgn.setZValue(0)
self.ui.roiPlot.addItem(self.normRgn)
self.normRgn.hide()
## wrap functions from view box
for fn in ['addItem', 'removeItem']:
setattr(self, fn, getattr(self.view, fn))
## wrap functions from histogram
for fn in ['setHistogramRange', 'autoHistogramRange', 'getLookupTable', 'getLevels']:
setattr(self, fn, getattr(self.ui.histogram, fn))
self.timeLine.sigPositionChanged.connect(self.timeLineChanged)
self.ui.roiBtn.clicked.connect(self.roiClicked)
self.roi.sigRegionChanged.connect(self.roiChanged)
#self.ui.normBtn.toggled.connect(self.normToggled)
self.ui.menuBtn.clicked.connect(self.menuClicked)
self.ui.normDivideRadio.clicked.connect(self.normRadioChanged)
self.ui.normSubtractRadio.clicked.connect(self.normRadioChanged)
self.ui.normOffRadio.clicked.connect(self.normRadioChanged)
self.ui.normROICheck.clicked.connect(self.updateNorm)
self.ui.normFrameCheck.clicked.connect(self.updateNorm)
self.ui.normTimeRangeCheck.clicked.connect(self.updateNorm)
self.playTimer.timeout.connect(self.timeout)
self.normProxy = SignalProxy(self.normRgn.sigRegionChanged, slot=self.updateNorm)
self.normRoi.sigRegionChangeFinished.connect(self.updateNorm)
self.ui.roiPlot.registerPlot(self.name + '_ROI')
self.view.register(self.name)
self.noRepeatKeys = [QtCore.Qt.Key_Right, QtCore.Qt.Key_Left, QtCore.Qt.Key_Up, QtCore.Qt.Key_Down, QtCore.Qt.Key_PageUp, QtCore.Qt.Key_PageDown]
self.roiClicked() ## initialize roi plot to correct shape / visibility
def setImage(self, img, autoRange=True, autoLevels=True, levels=None, axes=None, xvals=None, pos=None, scale=None, transform=None, autoHistogramRange=True, levelMode=None):
"""
Set the image to be displayed in the widget.
================== ===========================================================================
**Arguments:**
img (numpy array) the image to be displayed. See :func:`ImageItem.setImage` and
*notes* below.
xvals (numpy array) 1D array of z-axis values corresponding to the first axis
in a 3D image. For video, this array should contain the time of each
frame.
autoRange (bool) whether to scale/pan the view to fit the image.
autoLevels (bool) whether to update the white/black levels to fit the image.
levels (min, max); the white and black level values to use.
axes Dictionary indicating the interpretation for each axis.
This is only needed to override the default guess. Format is::
{'t':0, 'x':1, 'y':2, 'c':3};
pos Change the position of the displayed image
scale Change the scale of the displayed image
transform Set the transform of the displayed image. This option overrides *pos*
and *scale*.
autoHistogramRange If True, the histogram y-range is automatically scaled to fit the
image data.
levelMode If specified, this sets the user interaction mode for setting image
levels. Options are 'mono', which provides a single level control for
all image channels, and 'rgb' or 'rgba', which provide individual
controls for each channel.
================== ===========================================================================
**Notes:**
For backward compatibility, image data is assumed to be in column-major order (column, row).
However, most image data is stored in row-major order (row, column) and will need to be
transposed before calling setImage()::
imageview.setImage(imagedata.T)
This requirement can be changed by the ``imageAxisOrder``
:ref:`global configuration option <apiref_config>`.
"""
profiler = debug.Profiler()
if hasattr(img, 'implements') and img.implements('MetaArray'):
img = img.asarray()
if not isinstance(img, np.ndarray):
required = ['dtype', 'max', 'min', 'ndim', 'shape', 'size']
if not all([hasattr(img, attr) for attr in required]):
raise TypeError("Image must be NumPy array or any object "
"that provides compatible attributes/methods:\n"
" %s" % str(required))
self.image = img
self.imageDisp = None
if levelMode is not None:
self.ui.histogram.setLevelMode(levelMode)
profiler()
if axes is None:
x,y = (0, 1) if self.imageItem.axisOrder == 'col-major' else (1, 0)
if img.ndim == 2:
self.axes = {'t': None, 'x': x, 'y': y, 'c': None}
elif img.ndim == 3:
# Ambiguous case; make a guess
if img.shape[2] <= 4:
self.axes = {'t': None, 'x': x, 'y': y, 'c': 2}
else:
self.axes = {'t': 0, 'x': x+1, 'y': y+1, 'c': None}
elif img.ndim == 4:
# Even more ambiguous; just assume the default
self.axes = {'t': 0, 'x': x+1, 'y': y+1, 'c': 3}
else:
raise Exception("Can not interpret image with dimensions %s" % (str(img.shape)))
elif isinstance(axes, dict):
self.axes = axes.copy()
elif isinstance(axes, list) or isinstance(axes, tuple):
self.axes = {}
for i in range(len(axes)):
self.axes[axes[i]] = i
else:
raise Exception("Can not interpret axis specification %s. Must be like {'t': 2, 'x': 0, 'y': 1} or ('t', 'x', 'y', 'c')" % (str(axes)))
for x in ['t', 'x', 'y', 'c']:
self.axes[x] = self.axes.get(x, None)
axes = self.axes
if xvals is not None:
self.tVals = xvals
elif axes['t'] is not None:
if hasattr(img, 'xvals'):
try:
self.tVals = img.xvals(axes['t'])
except:
self.tVals = np.arange(img.shape[axes['t']])
else:
self.tVals = np.arange(img.shape[axes['t']])
profiler()
self.currentIndex = 0
self.updateImage(autoHistogramRange=autoHistogramRange)
if levels is None and autoLevels:
self.autoLevels()
if levels is not None: ## this does nothing since getProcessedImage sets these values again.
self.setLevels(*levels)
if self.ui.roiBtn.isChecked():
self.roiChanged()
profiler()
if self.axes['t'] is not None:
self.ui.roiPlot.setXRange(self.tVals.min(), self.tVals.max())
self.frameTicks.setXVals(self.tVals)
self.timeLine.setValue(0)
if len(self.tVals) > 1:
start = self.tVals.min()
stop = self.tVals.max() + abs(self.tVals[-1] - self.tVals[0]) * 0.02
elif len(self.tVals) == 1:
start = self.tVals[0] - 0.5
stop = self.tVals[0] + 0.5
else:
start = 0
stop = 1
for s in [self.timeLine, self.normRgn]:
s.setBounds([start, stop])
profiler()
self.imageItem.resetTransform()
if scale is not None:
self.imageItem.scale(*scale)
if pos is not None:
self.imageItem.setPos(*pos)
if transform is not None:
self.imageItem.setTransform(transform)
profiler()
if autoRange:
self.autoRange()
self.roiClicked()
profiler()
def clear(self):
self.image = None
self.imageItem.clear()
def play(self, rate):
"""Begin automatically stepping frames forward at the given rate (in fps).
This can also be accessed by pressing the spacebar."""
#print "play:", rate
self.playRate = rate
if rate == 0:
self.playTimer.stop()
return
self.lastPlayTime = ptime.time()
if not self.playTimer.isActive():
self.playTimer.start(16)
def autoLevels(self):
"""Set the min/max intensity levels automatically to match the image data."""
self.setLevels(rgba=self._imageLevels)
def setLevels(self, *args, **kwds):
"""Set the min/max (bright and dark) levels.
See :func:`HistogramLUTItem.setLevels <pyqtgraph.HistogramLUTItem.setLevels>`.
"""
self.ui.histogram.setLevels(*args, **kwds)
def autoRange(self):
"""Auto scale and pan the view around the image such that the image fills the view."""
image = self.getProcessedImage()
self.view.autoRange()
def getProcessedImage(self):
"""Returns the image data after it has been processed by any normalization options in use.
"""
if self.imageDisp is None:
image = self.normalize(self.image)
self.imageDisp = image
self._imageLevels = self.quickMinMax(self.imageDisp)
self.levelMin = min([level[0] for level in self._imageLevels])
self.levelMax = max([level[1] for level in self._imageLevels])
return self.imageDisp
def close(self):
"""Closes the widget nicely, making sure to clear the graphics scene and release memory."""
self.ui.roiPlot.close()
self.ui.graphicsView.close()
self.scene.clear()
del self.image
del self.imageDisp
super(ImageView, self).close()
self.setParent(None)
def keyPressEvent(self, ev):
#print ev.key()
if ev.key() == QtCore.Qt.Key_Space:
if self.playRate == 0:
fps = (self.getProcessedImage().shape[0]-1) / (self.tVals[-1] - self.tVals[0])
self.play(fps)
#print fps
else:
self.play(0)
ev.accept()
elif ev.key() == QtCore.Qt.Key_Home:
self.setCurrentIndex(0)
self.play(0)
ev.accept()
elif ev.key() == QtCore.Qt.Key_End:
self.setCurrentIndex(self.getProcessedImage().shape[0]-1)
self.play(0)
ev.accept()
elif ev.key() in self.noRepeatKeys:
ev.accept()
if ev.isAutoRepeat():
return
self.keysPressed[ev.key()] = 1
self.evalKeyState()
else:
QtGui.QWidget.keyPressEvent(self, ev)
def keyReleaseEvent(self, ev):
if ev.key() in [QtCore.Qt.Key_Space, QtCore.Qt.Key_Home, QtCore.Qt.Key_End]:
ev.accept()
elif ev.key() in self.noRepeatKeys:
ev.accept()
if ev.isAutoRepeat():
return
try:
del self.keysPressed[ev.key()]
except:
self.keysPressed = {}
self.evalKeyState()
else:
QtGui.QWidget.keyReleaseEvent(self, ev)
def evalKeyState(self):
if len(self.keysPressed) == 1:
key = list(self.keysPressed.keys())[0]
if key == QtCore.Qt.Key_Right:
self.play(20)
self.jumpFrames(1)
                self.lastPlayTime = ptime.time() + 0.2 ## 200 ms wait before start
                ## This happens *after* jumpFrames, since it might take longer than 200 ms
elif key == QtCore.Qt.Key_Left:
self.play(-20)
self.jumpFrames(-1)
self.lastPlayTime = ptime.time() + 0.2
elif key == QtCore.Qt.Key_Up:
self.play(-100)
elif key == QtCore.Qt.Key_Down:
self.play(100)
elif key == QtCore.Qt.Key_PageUp:
self.play(-1000)
elif key == QtCore.Qt.Key_PageDown:
self.play(1000)
else:
self.play(0)
def timeout(self):
now = ptime.time()
dt = now - self.lastPlayTime
if dt < 0:
return
n = int(self.playRate * dt)
if n != 0:
self.lastPlayTime += (float(n)/self.playRate)
if self.currentIndex+n > self.image.shape[self.axes['t']]:
self.play(0)
self.jumpFrames(n)
def setCurrentIndex(self, ind):
"""Set the currently displayed frame index."""
self.currentIndex = np.clip(ind, 0, self.getProcessedImage().shape[self.axes['t']]-1)
self.updateImage()
self.ignoreTimeLine = True
self.timeLine.setValue(self.tVals[self.currentIndex])
self.ignoreTimeLine = False
def jumpFrames(self, n):
"""Move video frame ahead n frames (may be negative)"""
if self.axes['t'] is not None:
self.setCurrentIndex(self.currentIndex + n)
def normRadioChanged(self):
self.imageDisp = None
self.updateImage()
self.autoLevels()
self.roiChanged()
self.sigProcessingChanged.emit(self)
def updateNorm(self):
if self.ui.normTimeRangeCheck.isChecked():
self.normRgn.show()
else:
self.normRgn.hide()
if self.ui.normROICheck.isChecked():
self.normRoi.show()
else:
self.normRoi.hide()
if not self.ui.normOffRadio.isChecked():
self.imageDisp = None
self.updateImage()
self.autoLevels()
self.roiChanged()
self.sigProcessingChanged.emit(self)
def normToggled(self, b):
self.ui.normGroup.setVisible(b)
self.normRoi.setVisible(b and self.ui.normROICheck.isChecked())
self.normRgn.setVisible(b and self.ui.normTimeRangeCheck.isChecked())
def hasTimeAxis(self):
return 't' in self.axes and self.axes['t'] is not None
def roiClicked(self):
showRoiPlot = False
if self.ui.roiBtn.isChecked():
showRoiPlot = True
self.roi.show()
#self.ui.roiPlot.show()
self.ui.roiPlot.setMouseEnabled(True, True)
self.ui.splitter.setSizes([self.height()*0.6, self.height()*0.4])
for c in self.roiCurves:
c.show()
self.roiChanged()
self.ui.roiPlot.showAxis('left')
else:
self.roi.hide()
self.ui.roiPlot.setMouseEnabled(False, False)
for c in self.roiCurves:
c.hide()
self.ui.roiPlot.hideAxis('left')
if self.hasTimeAxis():
showRoiPlot = True
mn = self.tVals.min()
mx = self.tVals.max()
self.ui.roiPlot.setXRange(mn, mx, padding=0.01)
self.timeLine.show()
self.timeLine.setBounds([mn, mx])
self.ui.roiPlot.show()
if not self.ui.roiBtn.isChecked():
self.ui.splitter.setSizes([self.height()-35, 35])
else:
self.timeLine.hide()
#self.ui.roiPlot.hide()
self.ui.roiPlot.setVisible(showRoiPlot)
def roiChanged(self):
if self.image is None:
return
image = self.getProcessedImage()
# Extract image data from ROI
axes = (self.axes['x'], self.axes['y'])
data, coords = self.roi.getArrayRegion(image.view(np.ndarray), self.imageItem, axes, returnMappedCoords=True)
if data is None:
return
# Convert extracted data into 1D plot data
if self.axes['t'] is None:
# Average across y-axis of ROI
data = data.mean(axis=axes[1])
coords = coords[:,:,0] - coords[:,0:1,0]
xvals = (coords**2).sum(axis=0) ** 0.5
else:
# Average data within entire ROI for each frame
data = data.mean(axis=max(axes)).mean(axis=min(axes))
xvals = self.tVals
# Handle multi-channel data
if data.ndim == 1:
plots = [(xvals, data, 'w')]
if data.ndim == 2:
if data.shape[1] == 1:
colors = 'w'
else:
colors = 'rgbw'
plots = []
for i in range(data.shape[1]):
d = data[:,i]
plots.append((xvals, d, colors[i]))
# Update plot line(s)
while len(plots) < len(self.roiCurves):
c = self.roiCurves.pop()
c.scene().removeItem(c)
while len(plots) > len(self.roiCurves):
self.roiCurves.append(self.ui.roiPlot.plot())
for i in range(len(plots)):
x, y, p = plots[i]
self.roiCurves[i].setData(x, y, pen=p)
def quickMinMax(self, data):
"""
Estimate the min/max values of *data* by subsampling.
Returns [(min, max), ...] with one item per channel
"""
while data.size > 1e6:
ax = np.argmax(data.shape)
sl = [slice(None)] * data.ndim
sl[ax] = slice(None, None, 2)
data = data[sl]
cax = self.axes['c']
if cax is None:
if data.size == 0:
return [(0, 0)]
return [(float(nanmin(data)), float(nanmax(data)))]
else:
if data.size == 0:
return [(0, 0)] * data.shape[-1]
return [(float(nanmin(data.take(i, axis=cax))),
float(nanmax(data.take(i, axis=cax)))) for i in range(data.shape[-1])]
def normalize(self, image):
"""
Process *image* using the normalization options configured in the
control panel.
This can be repurposed to process any data through the same filter.
"""
if self.ui.normOffRadio.isChecked():
return image
div = self.ui.normDivideRadio.isChecked()
norm = image.view(np.ndarray).copy()
#if div:
#norm = ones(image.shape)
#else:
#norm = zeros(image.shape)
if div:
norm = norm.astype(np.float32)
if self.ui.normTimeRangeCheck.isChecked() and image.ndim == 3:
(sind, start) = self.timeIndex(self.normRgn.lines[0])
(eind, end) = self.timeIndex(self.normRgn.lines[1])
#print start, end, sind, eind
n = image[sind:eind+1].mean(axis=0)
n.shape = (1,) + n.shape
if div:
norm /= n
else:
norm -= n
if self.ui.normFrameCheck.isChecked() and image.ndim == 3:
n = image.mean(axis=1).mean(axis=1)
n.shape = n.shape + (1, 1)
if div:
norm /= n
else:
norm -= n
if self.ui.normROICheck.isChecked() and image.ndim == 3:
n = self.normRoi.getArrayRegion(norm, self.imageItem, (1, 2)).mean(axis=1).mean(axis=1)
n = n[:,np.newaxis,np.newaxis]
#print start, end, sind, eind
if div:
norm /= n
else:
norm -= n
return norm
def timeLineChanged(self):
#(ind, time) = self.timeIndex(self.ui.timeSlider)
if self.ignoreTimeLine:
return
self.play(0)
(ind, time) = self.timeIndex(self.timeLine)
if ind != self.currentIndex:
self.currentIndex = ind
self.updateImage()
#self.timeLine.setPos(time)
#self.emit(QtCore.SIGNAL('timeChanged'), ind, time)
self.sigTimeChanged.emit(ind, time)
def updateImage(self, autoHistogramRange=True):
## Redraw image on screen
if self.image is None:
return
image = self.getProcessedImage()
if autoHistogramRange:
self.ui.histogram.setHistogramRange(self.levelMin, self.levelMax)
# Transpose image into order expected by ImageItem
if self.imageItem.axisOrder == 'col-major':
axorder = ['t', 'x', 'y', 'c']
else:
axorder = ['t', 'y', 'x', 'c']
axorder = [self.axes[ax] for ax in axorder if self.axes[ax] is not None]
image = image.transpose(axorder)
# Select time index
if self.axes['t'] is not None:
self.ui.roiPlot.show()
image = image[self.currentIndex]
self.imageItem.updateImage(image)
def timeIndex(self, slider):
## Return the time and frame index indicated by a slider
if self.image is None:
return (0,0)
t = slider.value()
xv = self.tVals
if xv is None:
ind = int(t)
else:
if len(xv) < 2:
return (0,0)
totTime = xv[-1] + (xv[-1]-xv[-2])
inds = np.argwhere(xv < t)
if len(inds) < 1:
return (0,t)
ind = inds[-1,0]
return ind, t
def getView(self):
"""Return the ViewBox (or other compatible object) which displays the ImageItem"""
return self.view
def getImageItem(self):
"""Return the ImageItem for this ImageView."""
return self.imageItem
def getRoiPlot(self):
"""Return the ROI PlotWidget for this ImageView"""
return self.ui.roiPlot
def getHistogramWidget(self):
"""Return the HistogramLUTWidget for this ImageView"""
return self.ui.histogram
def export(self, fileName):
"""
Export data from the ImageView to a file, or to a stack of files if
the data is 3D. Saving an image stack will result in index numbers
being added to the file name. Images are saved as they would appear
onscreen, with levels and lookup table applied.
"""
img = self.getProcessedImage()
if self.hasTimeAxis():
base, ext = os.path.splitext(fileName)
fmt = "%%s%%0%dd%%s" % int(np.log10(img.shape[0])+1)
for i in range(img.shape[0]):
self.imageItem.setImage(img[i], autoLevels=False)
self.imageItem.save(fmt % (base, i, ext))
self.updateImage()
else:
self.imageItem.save(fileName)
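    # Illustrative note (not part of the original source): for a 3D image stack,
    # export('stack.png') writes one numbered file per frame, e.g. stack000.png,
    # stack001.png, ..., with zero padding derived from the frame count.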
def exportClicked(self):
fileName = QtGui.QFileDialog.getSaveFileName()
if isinstance(fileName, tuple):
fileName = fileName[0] # Qt4/5 API difference
if fileName == '':
return
self.export(str(fileName))
def buildMenu(self):
self.menu = QtGui.QMenu()
self.normAction = QtGui.QAction("Normalization", self.menu)
self.normAction.setCheckable(True)
self.normAction.toggled.connect(self.normToggled)
self.menu.addAction(self.normAction)
self.exportAction = QtGui.QAction("Export", self.menu)
self.exportAction.triggered.connect(self.exportClicked)
self.menu.addAction(self.exportAction)
def menuClicked(self):
if self.menu is None:
self.buildMenu()
self.menu.popup(QtGui.QCursor.pos())
def setColorMap(self, colormap):
"""Set the color map.
============= =========================================================
**Arguments**
colormap (A ColorMap() instance) The ColorMap to use for coloring
images.
============= =========================================================
"""
self.ui.histogram.gradient.setColorMap(colormap)
@addGradientListToDocstring()
def setPredefinedGradient(self, name):
"""Set one of the gradients defined in :class:`GradientEditorItem <pyqtgraph.graphicsItems.GradientEditorItem>`.
Currently available gradients are:
"""
self.ui.histogram.gradient.loadPreset(name)
| 38.055355 | 176 | 0.54759 |
eb76e284e27d1024b609e1c25cbb88d515e6b2e0 | 31,816 | py | Python | lumen/sources/base.py | holoviz/monitor | db04d037c17101b9e126973a21e77f940f6cf83c | [
"BSD-3-Clause"
] | 1 | 2020-09-25T20:21:59.000Z | 2020-09-25T20:21:59.000Z | lumen/sources/base.py | holoviz/monitor | db04d037c17101b9e126973a21e77f940f6cf83c | [
"BSD-3-Clause"
] | 3 | 2020-09-24T16:59:03.000Z | 2020-10-01T12:32:49.000Z | lumen/sources/base.py | holoviz/monitor | db04d037c17101b9e126973a21e77f940f6cf83c | [
"BSD-3-Clause"
] | null | null | null | import hashlib
import json
import os
import pathlib
import re
import shutil
import sys
from concurrent import futures
from functools import wraps
from itertools import product
from pathlib import Path
from urllib.parse import quote
import numpy as np
import pandas as pd
import panel as pn
import param
import requests
from ..base import Component
from ..filters import Filter
from ..state import state
from ..transforms import Filter as FilterTransform, Transform
from ..util import get_dataframe_schema, is_ref, merge_schemas
def cached(with_query=True):
"""
Adds caching to a Source.get query.
Arguments
---------
with_query: boolean
Whether the Source.get query uses the query parameters.
Sources that have no ability to pre-filter the data can
use this option to cache the full query and the decorator
will apply the filtering after the fact.
Returns
-------
Returns method wrapped in caching functionality.
"""
def _inner_cached(method):
@wraps(method)
def wrapped(self, table, **query):
cache_query = query if with_query else {}
df, no_query = self._get_cache(table, **cache_query)
if df is None:
if not with_query and (hasattr(self, 'dask') or hasattr(self, 'use_dask')):
cache_query['__dask'] = True
df = method(self, table, **cache_query)
self._set_cache(df, table, **cache_query)
filtered = df
if (not with_query or no_query) and query:
filtered = FilterTransform.apply_to(
df, conditions=list(query.items())
)
if getattr(self, 'dask', False) or not hasattr(filtered, 'compute'):
return filtered
return filtered.compute()
return wrapped
return _inner_cached
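# Hedged usage sketch (not part of the original module; the class name, table
# name and data below are illustrative assumptions): a Source subclass whose
# backend cannot pre-filter, so it caches the full table and lets the
# decorator apply the query conditions afterwards.
#
#     class InMemorySource(Source):
#
#         _data = {'points': pd.DataFrame({'a': [1, 2, 3]})}
#
#         def get_tables(self):
#             return list(self._data)
#
#         @cached(with_query=False)
#         def get(self, table, **query):
#             return self._data[table]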
def cached_schema(method):
@wraps(method)
def wrapped(self, table=None):
schema = self._get_schema_cache()
if schema is None or (table is not None and table not in schema):
schema = schema or {}
if table is None:
missing_tables = [
table for table in self.get_tables()
if table not in schema
]
else:
missing_tables = [table]
for missing_table in missing_tables:
schema[missing_table] = method(self, missing_table)
self._set_schema_cache(schema)
if table is None:
return schema
return schema[table]
return wrapped
class Source(Component):
"""
A Source provides a set of tables which declare their available
fields. The Source must also be able to return a schema describing
the types of the variables and indexes in each table and allow
querying the data.
"""
cache_dir = param.String(default=None, doc="""
Whether to enable local cache and write file to disk.""")
shared = param.Boolean(default=False, doc="""
Whether the Source can be shared across all instances of the
dashboard. If set to `True` the Source will be loaded on
initial server load.""")
source_type = None
# Declare whether source supports SQL transforms
_supports_sql = False
__abstract = True
def _update_ref(self, pname, event):
self.clear_cache()
super()._update_ref(pname, event)
@classmethod
def _recursive_resolve(cls, spec, source_type):
resolved_spec, refs = {}, {}
if 'sources' in source_type.param and 'sources' in spec:
resolved_spec['sources'] = {
source: cls.from_spec(source)
for source in spec.pop('sources')
}
if 'source' in source_type.param and 'source' in spec:
resolved_spec['source'] = cls.from_spec(spec.pop('source'))
for k, v in spec.items():
if is_ref(v):
refs[k] = v
v = state.resolve_reference(v)
elif isinstance(v, dict):
v, subrefs = cls._recursive_resolve(v, source_type)
if subrefs:
cls.param.warning(
"Resolving nested variable references currently not supported."
)
if k == 'filters' and 'source' in resolved_spec:
source_schema = resolved_spec['source'].get_schema()
v = [Filter.from_spec(fspec, source_schema) for fspec in v]
if k == 'transforms':
v = [Transform.from_spec(tspec) for tspec in v]
resolved_spec[k] = v
return resolved_spec, refs
@classmethod
def from_spec(cls, spec):
"""
Creates a Source object from a specification. If a Source
specification references other sources these may be supplied
in the sources dictionary and be referenced by name.
Parameters
----------
spec : dict or str
Specification declared as a dictionary of parameter values
or a string referencing a source in the sources dictionary.
Returns
-------
Resolved and instantiated Source object
"""
if spec is None:
raise ValueError('Source specification empty.')
elif isinstance(spec, str):
if spec in state.sources:
source = state.sources[spec]
elif spec in state.spec.get('sources', {}):
source = state.load_source(spec, state.spec['sources'][spec])
else:
raise ValueError(f"Source with name '{spec}' was not found.")
return source
spec = dict(spec)
source_type = Source._get_type(spec.pop('type'))
resolved_spec, refs = cls._recursive_resolve(spec, source_type)
return source_type(refs=refs, **resolved_spec)
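    # Illustrative example (an assumption, not part of the original file): a
    # minimal specification that from_spec could resolve into a FileSource
    # instance; the file path and table name are hypothetical.
    #
    #     spec = {
    #         'type': 'file',
    #         'tables': {'penguins': 'data/penguins.csv'},
    #         'cache_dir': 'cache',
    #     }
    #     source = Source.from_spec(spec)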
def __init__(self, **params):
from ..config import config
self.root = params.pop('root', config.root)
super().__init__(**params)
self._cache = {}
self._schema_cache = {}
def _get_key(self, table, **query):
key = (table,)
for k, v in sorted(query.items()):
if isinstance(v, list):
v = tuple(v)
key += (k, v)
return key
def _get_schema_cache(self):
schema = self._schema_cache if self._schema_cache else None
if self.cache_dir:
path = os.path.join(self.root, self.cache_dir, f'{self.name}.json')
if not os.path.isfile(path):
return schema
with open(path) as f:
json_schema = json.load(f)
if schema is None:
schema = {}
for table, tschema in json_schema.items():
if table in schema:
continue
for col, cschema in tschema.items():
if cschema.get('type') == 'string' and cschema.get('format') == 'datetime':
cschema['inclusiveMinimum'] = pd.to_datetime(
cschema['inclusiveMinimum']
)
cschema['inclusiveMaximum'] = pd.to_datetime(
cschema['inclusiveMaximum']
)
schema[table] = tschema
return schema
def _set_schema_cache(self, schema):
self._schema_cache = schema
if self.cache_dir:
path = Path(os.path.join(self.root, self.cache_dir))
path.mkdir(parents=True, exist_ok=True)
try:
with open(path / f'{self.name}.json', 'w') as f:
json.dump(schema, f, default=str)
except Exception as e:
self.param.warning(
f"Could not cache schema to disk. Error while "
f"serializing schema to disk: {e}"
)
def _get_cache(self, table, **query):
query.pop('__dask', None)
key = self._get_key(table, **query)
if key in self._cache:
return self._cache[key], not bool(query)
elif self.cache_dir:
if query:
sha = hashlib.sha256(str(key).encode('utf-8')).hexdigest()
filename = f'{sha}_{table}.parq'
else:
filename = f'{table}.parq'
path = os.path.join(self.root, self.cache_dir, filename)
if os.path.isfile(path) or os.path.isdir(path):
if 'dask.dataframe' in sys.modules or os.path.isdir(path):
import dask.dataframe as dd
return dd.read_parquet(path), not bool(query)
return pd.read_parquet(path), not bool(query)
return None, not bool(query)
def _set_cache(self, data, table, write_to_file=True, **query):
query.pop('__dask', None)
key = self._get_key(table, **query)
self._cache[key] = data
if self.cache_dir and write_to_file:
path = os.path.join(self.root, self.cache_dir)
Path(path).mkdir(parents=True, exist_ok=True)
if query:
sha = hashlib.sha256(str(key).encode('utf-8')).hexdigest()
filename = f'{sha}_{table}.parq'
else:
filename = f'{table}.parq'
filepath = os.path.join(path, filename)
try:
data.to_parquet(filepath)
except Exception as e:
path = pathlib.Path(filepath)
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(path)
self.param.warning(
f"Could not cache '{table}' to parquet file. "
f"Error during saving process: {e}"
)
def clear_cache(self):
"""
Clears any cached data.
"""
self._cache = {}
self._schema_cache = {}
if self.cache_dir:
path = os.path.join(self.root, self.cache_dir)
if os.path.isdir(path):
shutil.rmtree(path)
@property
def panel(self):
"""
A Source can return a Panel object which displays information
about the Source or controls how the Source queries data.
"""
return None
def get_tables(self):
"""
Returns the list of tables available on this source.
Returns
-------
list
The list of available tables on this source.
"""
@cached_schema
def get_schema(self, table=None):
"""
Returns JSON schema describing the tables returned by the
Source.
Parameters
----------
table : str or None
The name of the table to return the schema for. If None
returns schema for all available tables.
Returns
-------
dict
JSON schema(s) for one or all the tables.
"""
schemas = {}
for name in self.get_tables():
if table is not None and name != table:
continue
df = self.get(name, __dask=True)
schemas[name] = get_dataframe_schema(df)['items']['properties']
return schemas if table is None else schemas[table]
def get(self, table, **query):
"""
Return a table; optionally filtered by the given query.
Parameters
----------
table : str
The name of the table to query
query : dict
A dictionary containing all the query parameters
Returns
-------
DataFrame
A DataFrame containing the queried table.
"""
class RESTSource(Source):
"""
Queries a REST API which is expected to conform to the monitoring
REST API specification.
"""
url = param.String(doc="URL of the REST endpoint to monitor.")
source_type = 'rest'
@cached_schema
def get_schema(self, table=None):
query = {} if table is None else {'table': table}
response = requests.get(self.url+'/schema', params=query)
return {table: schema['items']['properties'] for table, schema in
response.json().items()}
@cached()
def get(self, table, **query):
query = dict(table=table, **query)
r = requests.get(self.url+'/data', params=query)
df = pd.DataFrame(r.json())
return df
class FileSource(Source):
"""
Loads CSV, Excel, JSON and Parquet files using pandas.read_* or
dask.read_* functions.
"""
dask = param.Boolean(default=False, doc="""
Whether to return a Dask dataframe.""")
kwargs = param.Dict(doc="""
Keyword arguments to the pandas/dask loading function.""")
tables = param.ClassSelector(class_=(list, dict), doc="""
List or dictionary of tables to load. If a list is supplied the
names are computed from the filenames, otherwise the keys are
        the names. The values must be filepaths or URLs to the data:
{
'local' : '/home/user/local_file.csv',
'remote': 'https://test.com/test.csv'
}
if the filepath does not have a declared extension an extension
may be provided in a list or tuple, e.g.:
{'table': ['http://test.com/api', 'json']}
""")
use_dask = param.Boolean(default=True, doc="""
Whether to use dask to load files.""")
_pd_load_fns = {
'csv': pd.read_csv,
'xlsx': pd.read_excel,
'xls': pd.read_excel,
'parq': pd.read_parquet,
'parquet': pd.read_parquet,
'json': pd.read_json
}
_load_kwargs = {
'csv': {'parse_dates': True}
}
source_type = 'file'
def __init__(self, **params):
if 'files' in params:
params['tables'] = params.pop('files')
super().__init__(**params)
self._template_re = re.compile(r'(@\{.*\})')
def _load_fn(self, ext, dask=True):
kwargs = dict(self._load_kwargs.get(ext, {}))
if self.kwargs:
kwargs.update(self.kwargs)
if self.use_dask and dask:
try:
import dask.dataframe as dd
except Exception:
return self._load_fn(ext, dask=False)
if ext == 'csv':
return dd.read_csv, kwargs
elif ext in ('parq', 'parquet'):
return dd.read_parquet, kwargs
elif ext == 'json':
if 'orient' not in kwargs:
kwargs['orient'] = None
return dd.read_json, kwargs
if ext not in self._pd_load_fns:
raise ValueError("File type '{ext}' not recognized and cannot be loaded.")
return self._pd_load_fns[ext], kwargs
def _set_cache(self, data, table, **query):
_, ext = self._named_files[table]
if ext in ('parq', 'parquet'):
query['write_to_file'] = False
super()._set_cache(data, table, **query)
@property
def _named_files(self):
if isinstance(self.tables, list):
tables = {}
for f in self.tables:
if f.startswith('http'):
name = f
else:
name = '.'.join(os.path.basename(f).split('.')[:-1])
tables[name] = f
else:
tables = self.tables
files = {}
for name, table in tables.items():
ext = None
if isinstance(table, (list, tuple)):
table, ext = table
else:
basename = os.path.basename(table)
if '.' in basename:
ext = basename.split('.')[-1]
files[name] = (table, ext)
return files
def _resolve_template_vars(self, table):
for m in self._template_re.findall(table):
values = state.resolve_reference(f'${m[2:-1]}')
values = ','.join([v for v in values])
table = table.replace(m, quote(values))
return [table]
def get_tables(self):
return list(self._named_files)
def _load_table(self, table, dask=True):
df = None
for name, filepath in self._named_files.items():
filepath, ext = filepath
if '://' not in filepath:
filepath = os.path.join(self.root, filepath)
if name != table:
continue
load_fn, kwargs = self._load_fn(ext, dask=dask)
paths = self._resolve_template_vars(filepath)
if self.use_dask and ext in ('csv', 'json', 'parquet', 'parq') and dask:
try:
df = load_fn(paths, **kwargs)
except Exception as e:
if dask:
return self._load_table(table, dask=False)
raise e
else:
try:
dfs = [load_fn(path, **kwargs) for path in paths]
except Exception as e:
if dask:
return self._load_table(table, dask=False)
raise e
if len(dfs) <= 1:
df = dfs[0] if dfs else None
elif self.use_dask and hasattr(dfs[0], 'compute'):
import dask.dataframe as dd
df = dd.concat(dfs)
else:
df = pd.concat(dfs)
if hasattr(df, 'persist'):
df = df.persist()
if df is None:
tables = list(self._named_files)
raise ValueError(f"Table '{table}' not found. Available tables include: {tables}.")
return df
@cached()
def get(self, table, **query):
dask = query.pop('__dask', self.dask)
df = self._load_table(table)
df = FilterTransform.apply_to(df, conditions=list(query.items()))
return df if dask or not hasattr(df, 'compute') else df.compute()
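# Hedged usage sketch (the paths and table names reuse the illustrative
# values from the `tables` docstring above and are assumptions, not real
# files): loading a local CSV and a remote CSV through FileSource.
#
#     files = FileSource(tables={
#         'local': '/home/user/local_file.csv',
#         'remote': 'https://test.com/test.csv',
#     })
#     df = files.get('local')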
class JSONSource(FileSource):
chunk_size = param.Integer(default=0, doc="""
Number of items to load per chunk if a template variable
is provided.""")
tables = param.ClassSelector(class_=(list, dict), doc="""
List or dictionary of tables to load. If a list is supplied the
names are computed from the filenames, otherwise the keys are
        the names. The values must be filepaths or URLs to the data:
{
'local' : '/home/user/local_file.csv',
'remote': 'https://test.com/test.csv'
}
""")
source_type = 'json'
def _resolve_template_vars(self, template):
template_vars = self._template_re.findall(template)
template_values = []
for m in template_vars:
values = state.resolve_reference(f'${m[2:-1]}')
template_values.append(values)
tables = []
cross_product = list(product(*template_values))
if self.chunk_size and len(cross_product) > self.chunk_size:
for i in range(len(cross_product)//self.chunk_size):
start = i*self.chunk_size
chunk = cross_product[start: start+self.chunk_size]
tvalues = zip(*chunk)
table = template
for m, tvals in zip(template_vars, tvalues):
tvals = ','.join([v for v in set(tvals)])
table = table.replace(m, quote(tvals))
tables.append(table)
else:
tvalues = list(zip(*cross_product))
table = template
for m, tvals in zip(template_vars, tvalues):
values = ','.join([v for v in set(tvals)])
table = table.replace(m, quote(values))
tables.append(table)
return tables
def _load_fn(self, ext, dask=True):
return super()._load_fn('json', dask=dask)
@cached(with_query=False)
def get(self, table, **query):
return super().get(table, **query)
class WebsiteSource(Source):
"""
    Queries whether a website is live (responds with a 200 status code).
"""
urls = param.List(doc="URLs of the websites to monitor.")
source_type = 'live'
@cached_schema
def get_schema(self, table=None):
schema = {
"status": {
"url": {"type": "string", 'enum': self.urls},
"live": {"type": "boolean"}
}
}
return schema if table is None else schema[table]
def get_tables(self):
return ['status']
@cached(with_query=False)
def get(self, table, **query):
data = []
for url in self.urls:
try:
r = requests.get(url)
live = r.status_code == 200
except Exception:
live = False
data.append({"live": live, "url": url})
df = pd.DataFrame(data)
return df
class PanelSessionSource(Source):
endpoint = param.String(default="rest/session_info")
urls = param.List(doc="URL of the websites to monitor.")
timeout = param.Parameter(default=5)
source_type = 'session_info'
@cached_schema
def get_schema(self, table=None):
schema = {
"summary": {
"url": {"type": "string", "enum": self.urls},
"total": {"type": "int"},
"live": {"type": "int"},
"render_duration": {"type": "float"},
"session_duration": {"type": "float"}
},
"sessions": {
"url": {"type": "string", "enum": self.urls},
"id": {"type": "string"},
"started": {"type": "float"},
"ended": {"type": "float"},
"rendered": {"type": "float"},
"render_duration": {"type": "float"},
"session_duration": {"type": "float"},
"user_agent": {"type": "string"}
}
}
return schema if table is None else schema[table]
def get_tables(self):
return ['summary', 'sessions']
def _get_session_info(self, table, url):
r = requests.get(
url + self.endpoint, verify=False, timeout=self.timeout
)
data = []
if r.status_code != 200:
return data
r = r.json()
session_info = r['session_info']
sessions = session_info['sessions']
if table == "summary":
rendered = [s for s in sessions.values()
if s['rendered'] is not None]
ended = [s for s in sessions.values()
if s['ended'] is not None]
row = {
'url': url,
'total': session_info['total'],
'live': session_info['live'],
'render_duration': np.mean([s['rendered']-s['started']
for s in rendered]),
'session_duration': np.mean([s['ended']-s['started']
for s in ended])
}
data.append(row)
elif table == "sessions":
for sid, session in sessions.items():
row = dict(url=url, id=sid, **session)
if session["rendered"]:
row["render_duration"] = session["rendered"]-session["started"]
else:
row["render_duration"] = float('NaN')
if session["ended"]:
row["session_duration"] = session["ended"]-session["started"]
else:
row["session_duration"] = float('NaN')
                data.append(row)
return data
@cached(with_query=False)
def get(self, table, **query):
data = []
with futures.ThreadPoolExecutor(len(self.urls)) as executor:
tasks = {executor.submit(self._get_session_info, table, url): url
for url in self.urls}
for future in futures.as_completed(tasks):
url = tasks[future] + self.endpoint
try:
data.extend(future.result())
except Exception as e:
exception = f"{type(e).__name__}({e})"
self.param.warning("Failed to fetch session_info from "
f"{url}, errored with {exception}.")
return pd.DataFrame(data, columns=list(self.get_schema(table)))
class JoinedSource(Source):
"""
A JoinedSource applies a join on two or more sources returning
new table(s) with data from all sources. It iterates over the
`tables` specification and merges the specified tables from the
declared sources on the supplied index.
In this way multiple tables from multiple sources can be merged.
Individual tables from sources that should not be joined may also
be surfaced by declaring a single source and table in the
specification.
As a simple example we may have sources A and B, which contain
tables 'foo' and 'bar' respectively. We now want to merge these
tables on column 'a' in Table A with column 'b' in Table B:
{'new_table': [
{'source': 'A', 'table': 'foo', 'index': 'a'},
{'source': 'B', 'table': 'bar', 'index': 'b'}
]}
The joined source will now publish the "new_table" with all
columns from tables "foo" and "bar" except for the index column
from table "bar", which was merged with the index column "a" from
table "foo".
"""
sources = param.Dict(default={}, doc="""
A dictionary of sources indexed by their assigned name.""")
tables = param.Dict(default={}, doc="""
A dictionary with the names of the joined sources as keys
and a specification of the source, table and index to merge
on.
{"new_table": [
{'source': <source_name>,
'table': <table_name>,
'index': <index_name>
},
{'source': <source_name>,
'table': <table_name>,
'index': <index_name>
},
...
]}""")
source_type = 'join'
def get_tables(self):
return list(self.tables)
@cached_schema
def get_schema(self, table=None):
schemas = {}
for name, specs in self.tables.items():
if table is not None and name != table:
continue
schemas[name] = schema = {}
for spec in specs:
source, subtable = spec['source'], spec['table']
table_schema = self.sources[source].get_schema(subtable)
if not schema:
schema.update(table_schema)
else:
for column, col_schema in table_schema.items():
schema[column] = merge_schemas(col_schema, schema.get(column))
return schemas if table is None else schemas[table]
@cached()
def get(self, table, **query):
df, left_key = None, None
for spec in self.tables[table]:
source, subtable = spec['source'], spec['table']
source_query = dict(query)
right_key = spec.get('index')
if df is not None and left_key and right_key not in query:
source_query[right_key] = list(df[left_key].unique())
df_merge = self.sources[source].get(subtable, **source_query)
if df is None:
df = df_merge
left_key = spec.get('index')
else:
df = pd.merge(df, df_merge, left_on=left_key,
right_on=right_key, how='outer')
return df
@property
def panel(self):
column = pn.Column(sizing_mode='stretch_width')
for name, source in self.sources.items():
panel = source.panel
if not panel:
continue
header = pn.pane.Markdown(f'#### {name.title()}', margin=(0, 5))
column.extend([header, *source.panel])
return column
def clear_cache(self):
super().clear_cache()
for source in self.sources.values():
source.clear_cache()
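# Hedged construction sketch (illustrative only; source_a and source_b are
# assumed Source instances, while the table and index names come from the
# class docstring example above): joining one table from each of two sources
# on a shared key.
#
#     joined = JoinedSource(
#         sources={'A': source_a, 'B': source_b},
#         tables={'new_table': [
#             {'source': 'A', 'table': 'foo', 'index': 'a'},
#             {'source': 'B', 'table': 'bar', 'index': 'b'},
#         ]},
#     )
#     df = joined.get('new_table')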
class DerivedSource(Source):
"""
A DerivedSource references tables on other sources and optionally
allows applying filters and transforms to the returned data which
is then made available as a new (derived) table.
The DerivedSource has two modes:
1) When an explicit `tables` specification is provided full
control over the exact tables to filter and transform is
available. This is referred to as the 'table' mode.
2) When a `source` is declared all tables on that Source are
         mirrored and filtered and transformed according to the
supplied `filters` and `transforms`. This is referred to as
'mirror' mode.
1. Table Mode
~~~~~~~~~~~~~
In 'table' mode the tables can reference any table on any source
using the reference syntax and declare filters and transforms to
apply to that specific table, e.g. a table specification might
look like this:
{
'derived_table':
{
'source': 'original_source',
'table': 'original_table'
'filters': [
...
],
'transforms': [
...
]
}
}
2. Mirror
~~~~~~~~~
In mirror mode the DerivedSource may reference an existing source
directly, e.g.:
{
'type': 'derived',
'source': 'original_source',
'filters': [...],
'transforms': [...],
}
"""
filters = param.List(doc="""
A list of filters to apply to all tables of this source.""")
source = param.ClassSelector(class_=Source, doc="""
A source to mirror the tables on.""")
tables = param.Dict(default={}, doc="""
The dictionary of tables and associated filters and transforms.""")
transforms = param.List(doc="""
A list of transforms to apply to all tables of this source.""")
source_type = 'derived'
def _get_source_table(self, table):
if self.tables:
spec = self.tables.get(table)
if spec is None:
raise ValueError(f"Table '{table}' was not declared on the"
"DerivedSource. Available tables include "
f"{list(self.tables)}")
source, table = spec['source'], spec['table']
filters = spec.get('filters', []) + self.filters
else:
source = self.source
filters = self.filters
query = dict({filt.field: filt.value for filt in filters})
return source.get(table, **query)
@cached(with_query=False)
def get(self, table, **query):
df = self._get_source_table(table)
if self.tables:
transforms = self.tables[table].get('transforms', []) + self.transforms
        else:
            # Copy so the per-query FilterTransform appended below does not
            # accumulate on the transforms declared for this source.
            transforms = list(self.transforms)
        transforms.append(FilterTransform(conditions=list(query.items())))
for transform in transforms:
df = transform.apply(df)
return df
get.__doc__ = Source.get.__doc__
def get_tables(self):
return list(self.tables) if self.tables else self.source.get_tables()
def clear_cache(self):
super().clear_cache()
if self.tables:
for spec in self.tables.values():
spec['source'].clear_cache()
else:
self.source.clear_cache()
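# Hedged usage sketch (illustrative; original_source and some_transform are
# assumed to be an existing Source and Transform instance): mirroring a
# source and applying a transform to every table, i.e. the 'mirror' mode
# described in the class docstring.
#
#     derived = DerivedSource(
#         source=original_source,
#         transforms=[some_transform],
#     )
#     df = derived.get(derived.get_tables()[0])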
| 34.4329 | 95 | 0.544349 |
0fcd3205ba1b957d72152bffe86ea29ae65d990d | 6,576 | bzl | Python | go/private/sdk.bzl | dims/rules_go | 076ffb3636a94bc136b94b9215d7eefe1ed0399b | [
"Apache-2.0"
] | null | null | null | go/private/sdk.bzl | dims/rules_go | 076ffb3636a94bc136b94b9215d7eefe1ed0399b | [
"Apache-2.0"
] | null | null | null | go/private/sdk.bzl | dims/rules_go | 076ffb3636a94bc136b94b9215d7eefe1ed0399b | [
"Apache-2.0"
] | null | null | null |
# Copyright 2014 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"@io_bazel_rules_go//go/private:common.bzl",
"env_execute",
"executable_path",
)
load(
"@io_bazel_rules_go//go/private:go_toolchain.bzl",
"generate_toolchain_names",
"generate_toolchains",
)
def _go_host_sdk_impl(ctx):
goroot = _detect_host_sdk(ctx)
platform = _detect_sdk_platform(ctx, goroot)
_sdk_build_file(ctx, platform)
_local_sdk(ctx, goroot)
_go_host_sdk = repository_rule(
_go_host_sdk_impl,
environ = ["GOROOT"],
)
def go_host_sdk(name, **kwargs):
_go_host_sdk(name = name, **kwargs)
_register_toolchains(name)
def _go_download_sdk_impl(ctx):
sdks = ctx.attr.sdks
if not ctx.attr.goos and not ctx.attr.goarch:
platform = _detect_host_platform(ctx)
else:
platform = ctx.attr.goos + "_" + ctx.attr.goarch
if platform not in sdks:
fail("Unsupported platform {}".format(platform))
filename, sha256 = ctx.attr.sdks[platform]
_sdk_build_file(ctx, platform)
_remote_sdk(ctx, [url.format(filename) for url in ctx.attr.urls], ctx.attr.strip_prefix, sha256)
_go_download_sdk = repository_rule(
_go_download_sdk_impl,
attrs = {
"goos": attr.string(),
"goarch": attr.string(),
"sdks": attr.string_list_dict(),
"urls": attr.string_list(default = ["https://dl.google.com/go/{}"]),
"strip_prefix": attr.string(default = "go"),
},
)
def go_download_sdk(name, **kwargs):
_go_download_sdk(name = name, **kwargs)
_register_toolchains(name)
def _go_local_sdk_impl(ctx):
goroot = ctx.attr.path
platform = _detect_sdk_platform(ctx, goroot)
_sdk_build_file(ctx, platform)
_local_sdk(ctx, goroot)
_go_local_sdk = repository_rule(
_go_local_sdk_impl,
attrs = {
"path": attr.string(),
},
)
def go_local_sdk(name, **kwargs):
_go_local_sdk(name = name, **kwargs)
_register_toolchains(name)
def _go_wrap_sdk_impl(ctx):
goroot = str(ctx.path(ctx.attr.root_file).dirname)
platform = _detect_sdk_platform(ctx, goroot)
_sdk_build_file(ctx, platform)
_local_sdk(ctx, goroot)
_go_wrap_sdk = repository_rule(
_go_wrap_sdk_impl,
attrs = {
"root_file": attr.label(
mandatory = True,
doc = "A file in the SDK root direcotry. Used to determine GOROOT.",
),
},
)
def go_wrap_sdk(name, **kwargs):
_go_wrap_sdk(name = name, **kwargs)
_register_toolchains(name)
def _register_toolchains(repo):
labels = [
"@{}//:{}".format(repo, name)
for name in generate_toolchain_names()
]
native.register_toolchains(*labels)
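# Hedged WORKSPACE sketch (the SDK filename and checksum are placeholders,
# not real values): declaring a downloaded SDK with the macro above, which
# also registers the generated toolchains.
#
#     go_download_sdk(
#         name = "go_sdk",
#         sdks = {
#             "linux_amd64": ["go<version>.linux-amd64.tar.gz", "<sha256>"],
#         },
#     )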
def _remote_sdk(ctx, urls, strip_prefix, sha256):
# TODO(bazelbuild/bazel#7055): download_and_extract fails to extract
# archives containing files with non-ASCII names. Go 1.12b1 has a test
# file like this. Remove this workaround when the bug is fixed.
if len(urls) == 0:
fail("no urls specified")
if urls[0].endswith(".tar.gz"):
if strip_prefix != "go":
fail("strip_prefix not supported")
ctx.download(
url = urls,
sha256 = sha256,
output = "go_sdk.tar.gz",
)
res = ctx.execute(["tar", "-xf", "go_sdk.tar.gz", "--strip-components=1"])
if res.return_code:
fail("error extracting Go SDK:\n" + res.stdout + res.stderr)
ctx.execute(["rm", "go_sdk.tar.gz"])
else:
ctx.download_and_extract(
url = urls,
stripPrefix = strip_prefix,
sha256 = sha256,
)
def _local_sdk(ctx, path):
for entry in ["src", "pkg", "bin"]:
ctx.symlink(path + "/" + entry, entry)
def _sdk_build_file(ctx, platform):
ctx.file("ROOT")
goos, _, goarch = platform.partition("_")
ctx.template(
"BUILD.bazel",
Label("@io_bazel_rules_go//go/private:BUILD.sdk.bazel"),
executable = False,
substitutions = {
"{goos}": goos,
"{goarch}": goarch,
"{exe}": ".exe" if goos == "windows" else "",
},
)
def _detect_host_platform(ctx):
if ctx.os.name == "linux":
host = "linux_amd64"
res = ctx.execute(["uname", "-p"])
if res.return_code == 0:
uname = res.stdout.strip()
if uname == "s390x":
host = "linux_s390x"
elif uname == "i686":
host = "linux_386"
# uname -p is not working on Aarch64 boards
# or for ppc64le on some distros
res = ctx.execute(["uname", "-m"])
if res.return_code == 0:
uname = res.stdout.strip()
if uname == "aarch64":
host = "linux_arm64"
elif uname == "armv6l":
host = "linux_arm"
elif uname == "armv7l":
host = "linux_arm"
elif uname == "ppc64le":
host = "linux_ppc64le"
# Default to amd64 when uname doesn't return a known value.
elif ctx.os.name == "mac os x":
host = "darwin_amd64"
elif ctx.os.name.startswith("windows"):
host = "windows_amd64"
elif ctx.os.name == "freebsd":
host = "freebsd_amd64"
else:
fail("Unsupported operating system: " + ctx.os.name)
return host
def _detect_host_sdk(ctx):
root = "@invalid@"
if "GOROOT" in ctx.os.environ:
return ctx.os.environ["GOROOT"]
res = ctx.execute([executable_path(ctx, "go"), "env", "GOROOT"])
if res.return_code:
fail("Could not detect host go version")
root = res.stdout.strip()
if not root:
fail("host go version failed to report it's GOROOT")
return root
def _detect_sdk_platform(ctx, goroot):
res = ctx.execute(["ls", goroot + "/pkg/tool"])
if res.return_code != 0:
fail("Could not detect SDK platform")
for f in res.stdout.strip().split("\n"):
if f.find("_") >= 0:
return f
fail("Could not detect SDK platform")
| 30.873239 | 100 | 0.614811 |
cef6dd68987fc6587c0950c45cde440329e0086e | 38,023 | py | Python | instances/passenger_demand/pas-20210421-2109-int18e/86.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | instances/passenger_demand/pas-20210421-2109-int18e/86.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | instances/passenger_demand/pas-20210421-2109-int18e/86.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null |
"""
PASSENGERS
"""
numPassengers = 4177
passenger_arriving = (
(5, 5, 6, 5, 2, 0, 10, 15, 7, 3, 5, 0), # 0
(7, 9, 6, 8, 2, 0, 9, 14, 7, 4, 4, 0), # 1
(5, 8, 8, 5, 0, 0, 16, 9, 11, 8, 0, 0), # 2
(4, 12, 13, 4, 3, 0, 8, 13, 2, 4, 0, 0), # 3
(2, 13, 12, 6, 2, 0, 9, 2, 6, 2, 1, 0), # 4
(6, 9, 15, 6, 2, 0, 10, 11, 14, 6, 5, 0), # 5
(3, 8, 9, 1, 5, 0, 6, 7, 4, 5, 4, 0), # 6
(6, 15, 11, 2, 4, 0, 6, 13, 11, 7, 1, 0), # 7
(6, 14, 6, 0, 5, 0, 9, 4, 5, 5, 1, 0), # 8
(3, 15, 14, 5, 1, 0, 6, 12, 4, 9, 2, 0), # 9
(7, 11, 11, 2, 2, 0, 10, 10, 5, 12, 2, 0), # 10
(4, 15, 14, 7, 2, 0, 9, 9, 5, 10, 4, 0), # 11
(6, 8, 3, 6, 2, 0, 13, 13, 12, 3, 2, 0), # 12
(6, 17, 7, 4, 2, 0, 7, 14, 14, 12, 2, 0), # 13
(6, 12, 9, 6, 4, 0, 9, 8, 3, 4, 3, 0), # 14
(5, 10, 8, 3, 4, 0, 12, 9, 12, 10, 7, 0), # 15
(6, 7, 10, 1, 2, 0, 11, 9, 9, 8, 4, 0), # 16
(7, 8, 7, 6, 1, 0, 4, 14, 7, 2, 5, 0), # 17
(7, 9, 8, 2, 6, 0, 9, 13, 8, 3, 2, 0), # 18
(8, 4, 14, 5, 1, 0, 8, 13, 14, 5, 2, 0), # 19
(5, 15, 18, 5, 3, 0, 6, 10, 8, 11, 3, 0), # 20
(8, 16, 14, 1, 4, 0, 14, 13, 5, 8, 1, 0), # 21
(5, 13, 8, 3, 4, 0, 6, 13, 7, 6, 4, 0), # 22
(8, 13, 7, 1, 2, 0, 9, 15, 3, 5, 2, 0), # 23
(5, 13, 13, 7, 2, 0, 7, 12, 12, 3, 5, 0), # 24
(7, 20, 12, 7, 4, 0, 4, 16, 6, 7, 3, 0), # 25
(5, 18, 5, 4, 3, 0, 7, 13, 8, 5, 6, 0), # 26
(9, 11, 8, 7, 0, 0, 7, 14, 7, 7, 2, 0), # 27
(3, 6, 13, 5, 4, 0, 13, 10, 11, 6, 4, 0), # 28
(8, 17, 11, 1, 0, 0, 10, 17, 12, 6, 3, 0), # 29
(5, 15, 9, 7, 5, 0, 8, 14, 4, 8, 2, 0), # 30
(9, 9, 11, 6, 0, 0, 7, 11, 9, 6, 3, 0), # 31
(7, 9, 9, 3, 5, 0, 8, 12, 9, 4, 3, 0), # 32
(8, 6, 11, 1, 3, 0, 9, 15, 7, 5, 3, 0), # 33
(9, 13, 8, 3, 4, 0, 5, 13, 5, 8, 3, 0), # 34
(1, 7, 12, 8, 4, 0, 12, 2, 5, 8, 1, 0), # 35
(7, 12, 13, 7, 3, 0, 9, 7, 9, 9, 3, 0), # 36
(4, 14, 3, 9, 3, 0, 11, 8, 8, 7, 2, 0), # 37
(4, 11, 9, 4, 4, 0, 9, 15, 10, 2, 3, 0), # 38
(3, 13, 10, 2, 1, 0, 6, 6, 7, 11, 4, 0), # 39
(12, 14, 10, 4, 1, 0, 13, 8, 5, 9, 6, 0), # 40
(6, 9, 13, 5, 3, 0, 9, 12, 7, 7, 4, 0), # 41
(8, 10, 9, 5, 3, 0, 8, 10, 3, 6, 3, 0), # 42
(8, 14, 12, 4, 0, 0, 9, 13, 4, 3, 2, 0), # 43
(3, 12, 2, 4, 2, 0, 5, 14, 11, 4, 2, 0), # 44
(4, 13, 12, 5, 1, 0, 4, 11, 7, 4, 3, 0), # 45
(4, 17, 8, 5, 3, 0, 9, 19, 8, 7, 1, 0), # 46
(4, 14, 6, 7, 5, 0, 6, 25, 7, 7, 6, 0), # 47
(7, 16, 7, 4, 7, 0, 5, 10, 8, 8, 2, 0), # 48
(6, 12, 9, 6, 1, 0, 9, 9, 6, 7, 6, 0), # 49
(4, 12, 6, 4, 4, 0, 7, 10, 10, 7, 2, 0), # 50
(5, 10, 6, 4, 0, 0, 8, 11, 17, 9, 5, 0), # 51
(1, 9, 9, 2, 2, 0, 6, 12, 9, 12, 1, 0), # 52
(7, 16, 10, 3, 4, 0, 5, 14, 9, 3, 0, 0), # 53
(6, 8, 10, 2, 1, 0, 8, 15, 4, 3, 2, 0), # 54
(3, 16, 4, 7, 2, 0, 11, 10, 4, 8, 2, 0), # 55
(8, 14, 11, 6, 2, 0, 4, 15, 3, 4, 2, 0), # 56
(4, 13, 8, 2, 1, 0, 7, 9, 14, 1, 3, 0), # 57
(5, 11, 9, 6, 5, 0, 8, 9, 8, 8, 2, 0), # 58
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 59
)
station_arriving_intensity = (
(4.769372805092186, 12.233629261363635, 14.389624839331619, 11.405298913043477, 12.857451923076923, 8.562228260869567), # 0
(4.81413961808604, 12.369674877683082, 14.46734796754499, 11.46881589673913, 12.953819711538461, 8.559309850543478), # 1
(4.8583952589991215, 12.503702525252525, 14.54322622107969, 11.530934782608696, 13.048153846153847, 8.556302173913043), # 2
(4.902102161984196, 12.635567578125, 14.617204169344474, 11.591602581521737, 13.14036778846154, 8.553205638586958), # 3
(4.94522276119403, 12.765125410353535, 14.689226381748071, 11.650766304347826, 13.230375, 8.550020652173911), # 4
(4.987719490781387, 12.892231395991162, 14.759237427699228, 11.708372961956522, 13.318088942307691, 8.546747622282608), # 5
(5.029554784899035, 13.01674090909091, 14.827181876606687, 11.764369565217393, 13.403423076923078, 8.54338695652174), # 6
(5.0706910776997365, 13.138509323705808, 14.893004297879177, 11.818703125, 13.486290865384618, 8.5399390625), # 7
(5.1110908033362605, 13.257392013888888, 14.956649260925452, 11.871320652173912, 13.56660576923077, 8.536404347826087), # 8
(5.1507163959613695, 13.373244353693181, 15.018061335154243, 11.922169157608696, 13.644281249999999, 8.532783220108696), # 9
(5.1895302897278315, 13.485921717171717, 15.077185089974291, 11.971195652173915, 13.719230769230771, 8.529076086956522), # 10
(5.227494918788412, 13.595279478377526, 15.133965094794343, 12.018347146739131, 13.791367788461539, 8.525283355978262), # 11
(5.2645727172958745, 13.701173011363636, 15.188345919023137, 12.063570652173912, 13.860605769230768, 8.521405434782608), # 12
(5.3007261194029835, 13.803457690183082, 15.240272132069407, 12.106813179347826, 13.926858173076925, 8.51744273097826), # 13
(5.335917559262511, 13.90198888888889, 15.289688303341899, 12.148021739130433, 13.99003846153846, 8.513395652173912), # 14
(5.370109471027217, 13.996621981534089, 15.336539002249355, 12.187143342391304, 14.050060096153846, 8.509264605978261), # 15
(5.403264288849868, 14.087212342171718, 15.380768798200515, 12.224124999999999, 14.10683653846154, 8.50505), # 16
(5.4353444468832315, 14.173615344854797, 15.422322260604112, 12.258913722826087, 14.16028125, 8.500752241847827), # 17
(5.46631237928007, 14.255686363636363, 15.461143958868895, 12.291456521739132, 14.210307692307696, 8.496371739130435), # 18
(5.496130520193152, 14.333280772569443, 15.4971784624036, 12.321700407608695, 14.256829326923079, 8.491908899456522), # 19
(5.524761303775241, 14.40625394570707, 15.530370340616965, 12.349592391304348, 14.299759615384616, 8.487364130434782), # 20
(5.552167164179106, 14.47446125710227, 15.56066416291774, 12.375079483695652, 14.339012019230768, 8.482737839673913), # 21
(5.578310535557506, 14.537758080808082, 15.588004498714653, 12.398108695652175, 14.374499999999998, 8.47803043478261), # 22
(5.603153852063214, 14.595999790877526, 15.612335917416454, 12.418627038043478, 14.40613701923077, 8.473242323369567), # 23
(5.62665954784899, 14.649041761363636, 15.633602988431875, 12.43658152173913, 14.433836538461538, 8.468373913043479), # 24
(5.648790057067603, 14.696739366319445, 15.651750281169667, 12.451919157608696, 14.457512019230768, 8.463425611413044), # 25
(5.669507813871817, 14.738947979797977, 15.66672236503856, 12.464586956521739, 14.477076923076922, 8.458397826086957), # 26
(5.688775252414398, 14.77552297585227, 15.6784638094473, 12.474531929347828, 14.492444711538463, 8.453290964673915), # 27
(5.7065548068481124, 14.806319728535353, 15.68691918380463, 12.481701086956523, 14.503528846153845, 8.448105434782608), # 28
(5.722808911325724, 14.831193611900254, 15.69203305751928, 12.486041440217392, 14.510242788461538, 8.44284164402174), # 29
(5.7375, 14.85, 15.69375, 12.4875, 14.512500000000001, 8.4375), # 30
(5.751246651214834, 14.865621839488634, 15.692462907608693, 12.487236580882353, 14.511678590425532, 8.430077267616193), # 31
(5.7646965153452685, 14.881037215909092, 15.68863804347826, 12.486451470588234, 14.509231914893617, 8.418644565217393), # 32
(5.777855634590792, 14.896244211647728, 15.682330027173915, 12.485152389705883, 14.50518630319149, 8.403313830584706), # 33
(5.790730051150895, 14.91124090909091, 15.67359347826087, 12.483347058823531, 14.499568085106382, 8.38419700149925), # 34
(5.803325807225064, 14.926025390624996, 15.662483016304348, 12.481043198529411, 14.492403590425532, 8.361406015742128), # 35
(5.815648945012788, 14.940595738636366, 15.649053260869564, 12.478248529411767, 14.48371914893617, 8.335052811094453), # 36
(5.8277055067135555, 14.954950035511365, 15.63335883152174, 12.474970772058823, 14.47354109042553, 8.305249325337332), # 37
(5.839501534526853, 14.969086363636364, 15.615454347826088, 12.471217647058824, 14.461895744680852, 8.272107496251873), # 38
(5.851043070652174, 14.983002805397728, 15.595394429347825, 12.466996875000001, 14.44880944148936, 8.23573926161919), # 39
(5.862336157289003, 14.99669744318182, 15.573233695652176, 12.462316176470589, 14.434308510638296, 8.196256559220389), # 40
(5.873386836636828, 15.010168359374997, 15.549026766304348, 12.457183272058824, 14.418419281914893, 8.153771326836583), # 41
(5.88420115089514, 15.023413636363639, 15.522828260869566, 12.451605882352942, 14.401168085106384, 8.108395502248875), # 42
(5.894785142263428, 15.03643135653409, 15.494692798913043, 12.445591727941178, 14.38258125, 8.060241023238381), # 43
(5.905144852941176, 15.049219602272727, 15.464675, 12.439148529411764, 14.36268510638298, 8.009419827586207), # 44
(5.915286325127877, 15.061776455965909, 15.432829483695656, 12.43228400735294, 14.341505984042554, 7.956043853073464), # 45
(5.925215601023019, 15.074100000000003, 15.39921086956522, 12.425005882352941, 14.319070212765958, 7.90022503748126), # 46
(5.934938722826087, 15.086188316761364, 15.363873777173913, 12.417321874999999, 14.295404122340427, 7.842075318590705), # 47
(5.944461732736574, 15.098039488636365, 15.326872826086957, 12.409239705882353, 14.27053404255319, 7.7817066341829095), # 48
(5.953790672953963, 15.10965159801136, 15.288262635869566, 12.400767095588236, 14.24448630319149, 7.71923092203898), # 49
(5.96293158567775, 15.121022727272724, 15.248097826086958, 12.391911764705883, 14.217287234042553, 7.65476011994003), # 50
(5.971890513107417, 15.132150958806818, 15.206433016304347, 12.38268143382353, 14.188963164893616, 7.588406165667167), # 51
(5.980673497442456, 15.143034375, 15.163322826086954, 12.373083823529411, 14.159540425531915, 7.5202809970015), # 52
(5.989286580882353, 15.153671058238638, 15.118821875, 12.363126654411765, 14.129045345744682, 7.450496551724138), # 53
(5.9977358056266, 15.164059090909088, 15.072984782608694, 12.352817647058824, 14.09750425531915, 7.379164767616192), # 54
(6.00602721387468, 15.174196555397728, 15.02586616847826, 12.342164522058825, 14.064943484042553, 7.306397582458771), # 55
(6.014166847826087, 15.184081534090907, 14.977520652173913, 12.331175, 14.031389361702129, 7.232306934032984), # 56
(6.022160749680308, 15.193712109375003, 14.92800285326087, 12.319856801470587, 13.996868218085105, 7.15700476011994), # 57
(6.030014961636829, 15.203086363636363, 14.877367391304347, 12.308217647058825, 13.961406382978723, 7.0806029985007495), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_arriving_acc = (
(5, 5, 6, 5, 2, 0, 10, 15, 7, 3, 5, 0), # 0
(12, 14, 12, 13, 4, 0, 19, 29, 14, 7, 9, 0), # 1
(17, 22, 20, 18, 4, 0, 35, 38, 25, 15, 9, 0), # 2
(21, 34, 33, 22, 7, 0, 43, 51, 27, 19, 9, 0), # 3
(23, 47, 45, 28, 9, 0, 52, 53, 33, 21, 10, 0), # 4
(29, 56, 60, 34, 11, 0, 62, 64, 47, 27, 15, 0), # 5
(32, 64, 69, 35, 16, 0, 68, 71, 51, 32, 19, 0), # 6
(38, 79, 80, 37, 20, 0, 74, 84, 62, 39, 20, 0), # 7
(44, 93, 86, 37, 25, 0, 83, 88, 67, 44, 21, 0), # 8
(47, 108, 100, 42, 26, 0, 89, 100, 71, 53, 23, 0), # 9
(54, 119, 111, 44, 28, 0, 99, 110, 76, 65, 25, 0), # 10
(58, 134, 125, 51, 30, 0, 108, 119, 81, 75, 29, 0), # 11
(64, 142, 128, 57, 32, 0, 121, 132, 93, 78, 31, 0), # 12
(70, 159, 135, 61, 34, 0, 128, 146, 107, 90, 33, 0), # 13
(76, 171, 144, 67, 38, 0, 137, 154, 110, 94, 36, 0), # 14
(81, 181, 152, 70, 42, 0, 149, 163, 122, 104, 43, 0), # 15
(87, 188, 162, 71, 44, 0, 160, 172, 131, 112, 47, 0), # 16
(94, 196, 169, 77, 45, 0, 164, 186, 138, 114, 52, 0), # 17
(101, 205, 177, 79, 51, 0, 173, 199, 146, 117, 54, 0), # 18
(109, 209, 191, 84, 52, 0, 181, 212, 160, 122, 56, 0), # 19
(114, 224, 209, 89, 55, 0, 187, 222, 168, 133, 59, 0), # 20
(122, 240, 223, 90, 59, 0, 201, 235, 173, 141, 60, 0), # 21
(127, 253, 231, 93, 63, 0, 207, 248, 180, 147, 64, 0), # 22
(135, 266, 238, 94, 65, 0, 216, 263, 183, 152, 66, 0), # 23
(140, 279, 251, 101, 67, 0, 223, 275, 195, 155, 71, 0), # 24
(147, 299, 263, 108, 71, 0, 227, 291, 201, 162, 74, 0), # 25
(152, 317, 268, 112, 74, 0, 234, 304, 209, 167, 80, 0), # 26
(161, 328, 276, 119, 74, 0, 241, 318, 216, 174, 82, 0), # 27
(164, 334, 289, 124, 78, 0, 254, 328, 227, 180, 86, 0), # 28
(172, 351, 300, 125, 78, 0, 264, 345, 239, 186, 89, 0), # 29
(177, 366, 309, 132, 83, 0, 272, 359, 243, 194, 91, 0), # 30
(186, 375, 320, 138, 83, 0, 279, 370, 252, 200, 94, 0), # 31
(193, 384, 329, 141, 88, 0, 287, 382, 261, 204, 97, 0), # 32
(201, 390, 340, 142, 91, 0, 296, 397, 268, 209, 100, 0), # 33
(210, 403, 348, 145, 95, 0, 301, 410, 273, 217, 103, 0), # 34
(211, 410, 360, 153, 99, 0, 313, 412, 278, 225, 104, 0), # 35
(218, 422, 373, 160, 102, 0, 322, 419, 287, 234, 107, 0), # 36
(222, 436, 376, 169, 105, 0, 333, 427, 295, 241, 109, 0), # 37
(226, 447, 385, 173, 109, 0, 342, 442, 305, 243, 112, 0), # 38
(229, 460, 395, 175, 110, 0, 348, 448, 312, 254, 116, 0), # 39
(241, 474, 405, 179, 111, 0, 361, 456, 317, 263, 122, 0), # 40
(247, 483, 418, 184, 114, 0, 370, 468, 324, 270, 126, 0), # 41
(255, 493, 427, 189, 117, 0, 378, 478, 327, 276, 129, 0), # 42
(263, 507, 439, 193, 117, 0, 387, 491, 331, 279, 131, 0), # 43
(266, 519, 441, 197, 119, 0, 392, 505, 342, 283, 133, 0), # 44
(270, 532, 453, 202, 120, 0, 396, 516, 349, 287, 136, 0), # 45
(274, 549, 461, 207, 123, 0, 405, 535, 357, 294, 137, 0), # 46
(278, 563, 467, 214, 128, 0, 411, 560, 364, 301, 143, 0), # 47
(285, 579, 474, 218, 135, 0, 416, 570, 372, 309, 145, 0), # 48
(291, 591, 483, 224, 136, 0, 425, 579, 378, 316, 151, 0), # 49
(295, 603, 489, 228, 140, 0, 432, 589, 388, 323, 153, 0), # 50
(300, 613, 495, 232, 140, 0, 440, 600, 405, 332, 158, 0), # 51
(301, 622, 504, 234, 142, 0, 446, 612, 414, 344, 159, 0), # 52
(308, 638, 514, 237, 146, 0, 451, 626, 423, 347, 159, 0), # 53
(314, 646, 524, 239, 147, 0, 459, 641, 427, 350, 161, 0), # 54
(317, 662, 528, 246, 149, 0, 470, 651, 431, 358, 163, 0), # 55
(325, 676, 539, 252, 151, 0, 474, 666, 434, 362, 165, 0), # 56
(329, 689, 547, 254, 152, 0, 481, 675, 448, 363, 168, 0), # 57
(334, 700, 556, 260, 157, 0, 489, 684, 456, 371, 170, 0), # 58
(334, 700, 556, 260, 157, 0, 489, 684, 456, 371, 170, 0), # 59
)
passenger_arriving_rate = (
(4.769372805092186, 9.786903409090908, 8.63377490359897, 4.56211956521739, 2.5714903846153843, 0.0, 8.562228260869567, 10.285961538461537, 6.843179347826086, 5.755849935732647, 2.446725852272727, 0.0), # 0
(4.81413961808604, 9.895739902146465, 8.680408780526994, 4.587526358695651, 2.5907639423076922, 0.0, 8.559309850543478, 10.363055769230769, 6.881289538043478, 5.786939187017995, 2.4739349755366162, 0.0), # 1
(4.8583952589991215, 10.00296202020202, 8.725935732647814, 4.612373913043478, 2.609630769230769, 0.0, 8.556302173913043, 10.438523076923076, 6.918560869565217, 5.817290488431875, 2.500740505050505, 0.0), # 2
(4.902102161984196, 10.1084540625, 8.770322501606683, 4.636641032608694, 2.628073557692308, 0.0, 8.553205638586958, 10.512294230769232, 6.954961548913042, 5.846881667737789, 2.527113515625, 0.0), # 3
(4.94522276119403, 10.212100328282828, 8.813535829048842, 4.66030652173913, 2.6460749999999997, 0.0, 8.550020652173911, 10.584299999999999, 6.990459782608696, 5.875690552699228, 2.553025082070707, 0.0), # 4
(4.987719490781387, 10.313785116792928, 8.855542456619537, 4.6833491847826085, 2.663617788461538, 0.0, 8.546747622282608, 10.654471153846153, 7.025023777173913, 5.90369497107969, 2.578446279198232, 0.0), # 5
(5.029554784899035, 10.413392727272727, 8.896309125964011, 4.705747826086957, 2.680684615384615, 0.0, 8.54338695652174, 10.72273846153846, 7.058621739130436, 5.930872750642674, 2.603348181818182, 0.0), # 6
(5.0706910776997365, 10.510807458964646, 8.935802578727506, 4.72748125, 2.697258173076923, 0.0, 8.5399390625, 10.789032692307693, 7.0912218750000005, 5.95720171915167, 2.6277018647411614, 0.0), # 7
(5.1110908033362605, 10.60591361111111, 8.97398955655527, 4.7485282608695645, 2.7133211538461537, 0.0, 8.536404347826087, 10.853284615384615, 7.122792391304347, 5.982659704370181, 2.6514784027777774, 0.0), # 8
(5.1507163959613695, 10.698595482954543, 9.010836801092546, 4.768867663043478, 2.7288562499999993, 0.0, 8.532783220108696, 10.915424999999997, 7.153301494565217, 6.007224534061697, 2.6746488707386358, 0.0), # 9
(5.1895302897278315, 10.788737373737373, 9.046311053984574, 4.7884782608695655, 2.743846153846154, 0.0, 8.529076086956522, 10.975384615384616, 7.182717391304348, 6.030874035989716, 2.697184343434343, 0.0), # 10
(5.227494918788412, 10.87622358270202, 9.080379056876605, 4.807338858695652, 2.7582735576923074, 0.0, 8.525283355978262, 11.03309423076923, 7.2110082880434785, 6.053586037917737, 2.719055895675505, 0.0), # 11
(5.2645727172958745, 10.960938409090907, 9.113007551413881, 4.825428260869565, 2.7721211538461534, 0.0, 8.521405434782608, 11.088484615384614, 7.238142391304347, 6.0753383676092545, 2.740234602272727, 0.0), # 12
(5.3007261194029835, 11.042766152146465, 9.144163279241644, 4.8427252717391305, 2.7853716346153847, 0.0, 8.51744273097826, 11.141486538461539, 7.264087907608696, 6.096108852827762, 2.760691538036616, 0.0), # 13
(5.335917559262511, 11.121591111111112, 9.173812982005138, 4.859208695652173, 2.7980076923076918, 0.0, 8.513395652173912, 11.192030769230767, 7.288813043478259, 6.115875321336759, 2.780397777777778, 0.0), # 14
(5.370109471027217, 11.19729758522727, 9.201923401349612, 4.874857336956521, 2.810012019230769, 0.0, 8.509264605978261, 11.240048076923076, 7.312286005434782, 6.134615600899742, 2.7993243963068175, 0.0), # 15
(5.403264288849868, 11.269769873737372, 9.228461278920308, 4.88965, 2.8213673076923076, 0.0, 8.50505, 11.28546923076923, 7.334474999999999, 6.152307519280206, 2.817442468434343, 0.0), # 16
(5.4353444468832315, 11.338892275883836, 9.253393356362468, 4.903565489130434, 2.83205625, 0.0, 8.500752241847827, 11.328225, 7.3553482336956515, 6.168928904241644, 2.834723068970959, 0.0), # 17
(5.46631237928007, 11.40454909090909, 9.276686375321336, 4.916582608695652, 2.842061538461539, 0.0, 8.496371739130435, 11.368246153846156, 7.374873913043479, 6.184457583547558, 2.8511372727272724, 0.0), # 18
(5.496130520193152, 11.466624618055553, 9.298307077442159, 4.928680163043477, 2.8513658653846155, 0.0, 8.491908899456522, 11.405463461538462, 7.393020244565217, 6.198871384961439, 2.866656154513888, 0.0), # 19
(5.524761303775241, 11.525003156565655, 9.318222204370178, 4.939836956521739, 2.859951923076923, 0.0, 8.487364130434782, 11.439807692307692, 7.409755434782609, 6.212148136246785, 2.8812507891414136, 0.0), # 20
(5.552167164179106, 11.579569005681815, 9.336398497750643, 4.95003179347826, 2.8678024038461536, 0.0, 8.482737839673913, 11.471209615384614, 7.425047690217391, 6.224265665167096, 2.894892251420454, 0.0), # 21
(5.578310535557506, 11.630206464646465, 9.352802699228791, 4.95924347826087, 2.8748999999999993, 0.0, 8.47803043478261, 11.499599999999997, 7.438865217391305, 6.235201799485861, 2.907551616161616, 0.0), # 22
(5.603153852063214, 11.67679983270202, 9.367401550449872, 4.967450815217391, 2.8812274038461534, 0.0, 8.473242323369567, 11.524909615384614, 7.451176222826087, 6.244934366966581, 2.919199958175505, 0.0), # 23
(5.62665954784899, 11.719233409090908, 9.380161793059125, 4.974632608695652, 2.8867673076923075, 0.0, 8.468373913043479, 11.54706923076923, 7.461948913043478, 6.25344119537275, 2.929808352272727, 0.0), # 24
(5.648790057067603, 11.757391493055556, 9.391050168701799, 4.980767663043478, 2.8915024038461534, 0.0, 8.463425611413044, 11.566009615384614, 7.471151494565217, 6.260700112467866, 2.939347873263889, 0.0), # 25
(5.669507813871817, 11.79115838383838, 9.400033419023135, 4.985834782608695, 2.8954153846153843, 0.0, 8.458397826086957, 11.581661538461537, 7.478752173913043, 6.266688946015424, 2.947789595959595, 0.0), # 26
(5.688775252414398, 11.820418380681815, 9.40707828566838, 4.989812771739131, 2.8984889423076923, 0.0, 8.453290964673915, 11.593955769230769, 7.484719157608696, 6.271385523778919, 2.9551045951704538, 0.0), # 27
(5.7065548068481124, 11.84505578282828, 9.412151510282778, 4.992680434782609, 2.9007057692307687, 0.0, 8.448105434782608, 11.602823076923075, 7.489020652173913, 6.274767673521851, 2.96126394570707, 0.0), # 28
(5.722808911325724, 11.864954889520202, 9.415219834511568, 4.994416576086956, 2.902048557692307, 0.0, 8.44284164402174, 11.608194230769229, 7.491624864130435, 6.276813223007712, 2.9662387223800506, 0.0), # 29
(5.7375, 11.879999999999999, 9.41625, 4.995, 2.9025, 0.0, 8.4375, 11.61, 7.4925, 6.277499999999999, 2.9699999999999998, 0.0), # 30
(5.751246651214834, 11.892497471590906, 9.415477744565216, 4.994894632352941, 2.9023357180851064, 0.0, 8.430077267616193, 11.609342872340426, 7.492341948529411, 6.276985163043476, 2.9731243678977264, 0.0), # 31
(5.7646965153452685, 11.904829772727274, 9.413182826086956, 4.994580588235293, 2.901846382978723, 0.0, 8.418644565217393, 11.607385531914892, 7.49187088235294, 6.275455217391303, 2.9762074431818184, 0.0), # 32
(5.777855634590792, 11.916995369318181, 9.40939801630435, 4.994060955882353, 2.9010372606382977, 0.0, 8.403313830584706, 11.60414904255319, 7.491091433823529, 6.272932010869566, 2.9792488423295453, 0.0), # 33
(5.790730051150895, 11.928992727272727, 9.40415608695652, 4.993338823529412, 2.899913617021276, 0.0, 8.38419700149925, 11.599654468085104, 7.490008235294118, 6.269437391304347, 2.9822481818181816, 0.0), # 34
(5.803325807225064, 11.940820312499996, 9.39748980978261, 4.9924172794117645, 2.898480718085106, 0.0, 8.361406015742128, 11.593922872340425, 7.488625919117647, 6.264993206521739, 2.985205078124999, 0.0), # 35
(5.815648945012788, 11.952476590909091, 9.389431956521738, 4.9912994117647065, 2.896743829787234, 0.0, 8.335052811094453, 11.586975319148936, 7.486949117647059, 6.259621304347825, 2.988119147727273, 0.0), # 36
(5.8277055067135555, 11.96396002840909, 9.380015298913044, 4.989988308823529, 2.8947082180851056, 0.0, 8.305249325337332, 11.578832872340422, 7.484982463235293, 6.253343532608695, 2.9909900071022726, 0.0), # 37
(5.839501534526853, 11.97526909090909, 9.369272608695653, 4.988487058823529, 2.89237914893617, 0.0, 8.272107496251873, 11.56951659574468, 7.4827305882352935, 6.246181739130434, 2.9938172727272727, 0.0), # 38
(5.851043070652174, 11.986402244318182, 9.357236657608695, 4.98679875, 2.8897618882978717, 0.0, 8.23573926161919, 11.559047553191487, 7.480198125, 6.23815777173913, 2.9966005610795454, 0.0), # 39
(5.862336157289003, 11.997357954545455, 9.343940217391305, 4.984926470588235, 2.886861702127659, 0.0, 8.196256559220389, 11.547446808510635, 7.477389705882353, 6.22929347826087, 2.999339488636364, 0.0), # 40
(5.873386836636828, 12.008134687499997, 9.329416059782607, 4.982873308823529, 2.8836838563829783, 0.0, 8.153771326836583, 11.534735425531913, 7.474309963235294, 6.219610706521738, 3.002033671874999, 0.0), # 41
(5.88420115089514, 12.01873090909091, 9.31369695652174, 4.980642352941176, 2.880233617021277, 0.0, 8.108395502248875, 11.520934468085107, 7.4709635294117644, 6.209131304347826, 3.0046827272727277, 0.0), # 42
(5.894785142263428, 12.02914508522727, 9.296815679347825, 4.978236691176471, 2.8765162499999994, 0.0, 8.060241023238381, 11.506064999999998, 7.467355036764706, 6.1978771195652165, 3.0072862713068176, 0.0), # 43
(5.905144852941176, 12.03937568181818, 9.278805, 4.975659411764705, 2.8725370212765955, 0.0, 8.009419827586207, 11.490148085106382, 7.4634891176470575, 6.1858699999999995, 3.009843920454545, 0.0), # 44
(5.915286325127877, 12.049421164772726, 9.259697690217394, 4.972913602941176, 2.8683011968085106, 0.0, 7.956043853073464, 11.473204787234042, 7.459370404411764, 6.1731317934782615, 3.0123552911931815, 0.0), # 45
(5.925215601023019, 12.059280000000001, 9.239526521739132, 4.970002352941176, 2.8638140425531913, 0.0, 7.90022503748126, 11.455256170212765, 7.455003529411765, 6.159684347826087, 3.0148200000000003, 0.0), # 46
(5.934938722826087, 12.06895065340909, 9.218324266304347, 4.966928749999999, 2.859080824468085, 0.0, 7.842075318590705, 11.43632329787234, 7.450393124999999, 6.145549510869564, 3.0172376633522724, 0.0), # 47
(5.944461732736574, 12.07843159090909, 9.196123695652174, 4.9636958823529405, 2.854106808510638, 0.0, 7.7817066341829095, 11.416427234042551, 7.445543823529412, 6.130749130434782, 3.0196078977272727, 0.0), # 48
(5.953790672953963, 12.087721278409088, 9.17295758152174, 4.960306838235294, 2.8488972606382976, 0.0, 7.71923092203898, 11.39558904255319, 7.4404602573529415, 6.115305054347826, 3.021930319602272, 0.0), # 49
(5.96293158567775, 12.096818181818177, 9.148858695652175, 4.956764705882353, 2.8434574468085105, 0.0, 7.65476011994003, 11.373829787234042, 7.43514705882353, 6.099239130434783, 3.0242045454545443, 0.0), # 50
(5.971890513107417, 12.105720767045453, 9.123859809782608, 4.953072573529411, 2.837792632978723, 0.0, 7.588406165667167, 11.351170531914892, 7.429608860294118, 6.082573206521738, 3.026430191761363, 0.0), # 51
(5.980673497442456, 12.114427499999998, 9.097993695652173, 4.949233529411764, 2.8319080851063827, 0.0, 7.5202809970015, 11.32763234042553, 7.4238502941176465, 6.065329130434781, 3.0286068749999995, 0.0), # 52
(5.989286580882353, 12.122936846590909, 9.071293125, 4.945250661764706, 2.8258090691489364, 0.0, 7.450496551724138, 11.303236276595745, 7.417875992647058, 6.04752875, 3.030734211647727, 0.0), # 53
(5.9977358056266, 12.13124727272727, 9.043790869565216, 4.941127058823529, 2.8195008510638297, 0.0, 7.379164767616192, 11.278003404255319, 7.411690588235294, 6.0291939130434775, 3.0328118181818176, 0.0), # 54
(6.00602721387468, 12.139357244318182, 9.015519701086955, 4.93686580882353, 2.8129886968085103, 0.0, 7.306397582458771, 11.251954787234041, 7.405298713235295, 6.010346467391304, 3.0348393110795455, 0.0), # 55
(6.014166847826087, 12.147265227272724, 8.986512391304348, 4.9324699999999995, 2.8062778723404254, 0.0, 7.232306934032984, 11.225111489361701, 7.398705, 5.991008260869565, 3.036816306818181, 0.0), # 56
(6.022160749680308, 12.154969687500001, 8.95680171195652, 4.927942720588234, 2.7993736436170207, 0.0, 7.15700476011994, 11.197494574468083, 7.391914080882352, 5.9712011413043475, 3.0387424218750003, 0.0), # 57
(6.030014961636829, 12.16246909090909, 8.926420434782608, 4.923287058823529, 2.792281276595744, 0.0, 7.0806029985007495, 11.169125106382976, 7.384930588235295, 5.950946956521738, 3.0406172727272724, 0.0), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_allighting_rate = (
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 0
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 1
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 2
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 3
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 4
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 5
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 6
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 7
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 8
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 9
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 10
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 11
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 12
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 13
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 14
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 15
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 16
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 17
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 18
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 19
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 20
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 21
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 22
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 23
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 24
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 25
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 26
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 27
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 28
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 29
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 30
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 31
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 32
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 33
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 34
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 35
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 36
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 37
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 38
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 39
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 40
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 41
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 42
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 43
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 44
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 45
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 46
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 47
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 48
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 49
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 50
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 51
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 52
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 53
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 54
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 55
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 56
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 57
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 58
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 59
)
"""
parameters for reproducibility. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
#initial entropy
entropy = 258194110137029475889902652135037600173
#index for seed sequence child
child_seed_index = (
1, # 0
85, # 1
)
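# Illustrative sketch (defined but not executed): one way these constants can
# drive numpy's SeedSequence API from the link above. It assumes each entry of
# child_seed_index selects one spawned child stream; the surrounding project
# may wire this up differently.
def example_rng_for_stream(stream):
    from numpy.random import SeedSequence, default_rng
    root = SeedSequence(entropy)
    children = root.spawn(max(child_seed_index) + 1)
    return default_rng(children[child_seed_index[stream]])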
| 113.501493 | 213 | 0.730058 |
b2234199fd5d082c018c29d0a4fa4d12770535d2 | 21,551 | py | Python | msticpy/datamodel/entities/entity.py | lucky-luk3/msticpy | 623f726f15fa35dafae7e1e65df91b7601456002 | [
"MIT"
] | 820 | 2019-05-16T07:24:34.000Z | 2022-03-31T09:18:10.000Z | msticpy/datamodel/entities/entity.py | lucky-luk3/msticpy | 623f726f15fa35dafae7e1e65df91b7601456002 | [
"MIT"
] | 205 | 2019-06-24T19:24:19.000Z | 2022-03-30T23:13:46.000Z | msticpy/datamodel/entities/entity.py | lucky-luk3/msticpy | 623f726f15fa35dafae7e1e65df91b7601456002 | [
"MIT"
] | 171 | 2019-06-23T13:53:12.000Z | 2022-03-29T18:22:46.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Entity Entity class."""
import json
import pprint
import typing
from abc import ABC
from copy import deepcopy
from typing import Any, Dict, List, Mapping, Optional, Type, Union
import networkx as nx
from ..._version import VERSION
from ...common.utility import export, valid_pyname
from .entity_enums import ENTITY_ENUMS
from .entity_graph import Node
__version__ = VERSION
__author__ = "Ian Hellen"
# pylint: disable=invalid-name, too-few-public-methods
@export
class ContextObject:
"""Information object attached to entity but is not an Entity."""
# pylint: enable=too-few-public-methods
class _EntityJSONEncoder(json.JSONEncoder):
"""Encode entity to JSON."""
def default(self, o):
if isinstance(o, Entity):
return {
name: value
for name, value in o.properties.items()
if value and name != "edges"
}
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, o)
# Future: replace setting entity properties in __dict__ with
# setattr (to support attributes implemented as properties)
@export
class Entity(ABC, Node):
"""
Entity abstract base class.
Implements common methods for Entity classes
"""
ENTITY_NAME_MAP: Dict[str, type] = {}
_entity_schema: Dict[str, Any] = {}
ID_PROPERTIES: List[str] = []
JSONEncoder = _EntityJSONEncoder
def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
"""
Create a new instance of an entity.
Parameters
----------
src_entity : Mapping[str, Any], optional
If src_entity is supplied it attempts to extract common
properties from the source entity and assign them to
the new instance. (the default is None)
Other Parameters
----------------
kwargs : Dict[str, Any]
Supply the entity properties as a set of
kw arguments.
"""
super().__init__()
self.TimeGenerated = None
self.Type = self._get_entity_type_name(type(self))
        # If we have an unknown entity, see if we have a type passed in
if self.Type == "unknownentity" and "Type" in kwargs:
self.Type = kwargs["Type"]
# Make sure Type is in the class schema dictionary
self._entity_schema["Type"] = None
# if we didn't populate AdditionalData, add an empty dict in case it's
# needed
if "AdditionalData" not in self:
self.AdditionalData = {}
if src_entity is not None:
self._extract_src_entity(src_entity)
# add AdditionalData dictionary if it's populated
if "AdditionalData" in src_entity:
self.AdditionalData = src_entity["AdditionalData"]
if "TimeGenerated" in src_entity:
self.TimeGenerated = src_entity["TimeGenerated"]
if kwargs:
self.__dict__.update(kwargs)
@classmethod
def create(cls, src_entity: Mapping[str, Any] = None, **kwargs) -> "Entity":
"""
Create an entity from a mapping type (e.g. pd.Series) or dict or kwargs.
Returns
-------
Entity
Instantiated entity
Notes
-----
The entity type should be specified as "Type", in either a key of `src_entity`
or as a keyword argument.
"""
ent_type = (
src_entity.get("Type") or src_entity.get("type")
if src_entity
else kwargs.get("Type") or kwargs.get("type")
)
if not ent_type:
ent_type = "unknown"
ent_cls = cls.ENTITY_NAME_MAP.get(ent_type)
if not ent_cls:
ent_cls = cls.ENTITY_NAME_MAP["unknown"]
return ent_cls(src_entity, **kwargs)
def _extract_src_entity(self, src_entity: Mapping[str, Any]):
"""
Extract source entity properties.
Parameters
----------
src_entity : Mapping[str, Any]
The source mappable object from which to
extract entity properties.
"""
schema_dict = self._entity_schema.copy()
schema_dict["Type"] = None
for attr, val in schema_dict.items():
if attr not in src_entity:
continue
self[attr] = src_entity[attr]
if val is None:
continue
try:
# If the property is an enum
if val in ENTITY_ENUMS.values():
self[attr] = val[src_entity[attr]]
elif val in ENTITY_ENUMS:
self[attr] = ENTITY_ENUMS[val][src_entity[attr]]
continue
except KeyError:
# Catch key errors from invalid enum values
self[attr] = None
if isinstance(val, tuple):
self._instantiate_from_value(attr, val, src_entity)
else:
self._instantiate_from_entity(attr, val, src_entity)
def _instantiate_from_value(self, attr, val, src_entity):
# if the property is a collection
entity_type = None
if isinstance(val[1], (type)) and issubclass(val[1], Entity):
entity_type = val[1]
entity_list = [
Entity.instantiate_entity(col_entity, entity_type=entity_type)
for col_entity in src_entity[attr]
]
self[attr] = entity_list
for child_entity in entity_list:
if isinstance(child_entity, Entity):
self.add_edge(child_entity, edge_attrs={"name": attr})
def _instantiate_from_entity(self, attr, val, src_entity):
# else try to instantiate an entity
entity_type = None
if isinstance(val, type) and issubclass(val, Entity):
entity_type = val
self[attr] = Entity.instantiate_entity(
src_entity[attr], entity_type=entity_type
)
if isinstance(self[attr], Entity):
self.add_edge(self[attr], edge_attrs={"name": attr})
def __getitem__(self, key: str):
"""Allow property get using dictionary key syntax."""
if key in self.__dict__:
return self.__dict__[key]
if key in self._entity_schema:
return None
raise KeyError
def __setitem__(self, key: str, value: Any):
"""Allow property set using dictionary key syntax."""
self.__dict__[key] = value
def __contains__(self, key: str):
"""Allow property in test."""
# In operator overload
return key in self.__dict__
def __getattr__(self, name: str):
"""Return the value of the named property 'name'."""
props = ["name_str", "description_str"]
if name in self._entity_schema or name in props:
return None
raise AttributeError(f"{name} is not a valid attribute.")
def __iter__(self):
"""Iterate over entity_properties."""
return iter(self.properties)
def __len__(self) -> int:
"""Return length/number of entity_properties."""
return len(self.properties)
def __str__(self) -> str:
"""Return string representation of entity."""
return pprint.pformat(self._to_dict(), indent=2, width=100)
def __repr__(self) -> str:
"""Return repr of entity."""
params = ", ".join(
f"{name}={val}" for name, val in self.properties.items() if val
)
if self.edges:
params = f"{params}, edges={'. '.join(str(edge) for edge in self.edges)}"
if len(params) > 80:
params = params[:80] + "..."
return f"{self.__class__.__name__}({params})"
def _to_dict(self) -> dict:
"""Return as simple nested dictionary."""
# pylint: disable=protected-access
return {
prop: val._to_dict() if isinstance(val, Entity) else val
for prop, val in self.properties.items()
if val is not None
}
# pylint: enable=protected-access
def _repr_html_(self) -> str:
"""
Display entity in IPython/Notebook.
Returns
-------
HTML
IPython HTML object
"""
return self.to_html()
def to_html(self) -> str:
"""
Return HTML representation of entity.
Returns
-------
str
HTML representation of entity
"""
e_text = str(self)
e_type = self.Type
        e_text = e_text.replace("\n", "<br>").replace(" ", "&nbsp;")
return f"<h3>{e_type}</h3>{e_text}"
def to_json(self): # noqa: N802
"""Return object as a JSON string."""
return json.dumps(self, cls=self.JSONEncoder)
def __eq__(self, other: Any) -> bool:
"""
Return True if the entities have the same properties/values.
Parameters
----------
other : Any
The entity (object) to compare
Returns
-------
bool
True if the two objects have the same property values
"""
if self.__class__ != other.__class__ or not isinstance(other, Entity):
return False
return self.properties == other.properties
def __hash__(self) -> int:
"""Return the hash of the entity based on non-empty property values."""
return hash(
" ".join(
f"{prop}:{val}" for prop, val in self.properties.items() if str(val)
)
)
def is_equivalent(self, other: Any) -> bool:
"""
Return True if the entities are equivalent.
Parameters
----------
other : Any
The entity to check
Returns
-------
bool
True if equivalent.
Notes
-----
This method checks that the compared entities do not have
any property values with conflicting values. E.g.
self.A == other.A
self.B == "xyz" and other.B == None
self.C == [] and other.C == [1, 2, 3]
"""
if self == other:
return True
if not isinstance(other, Entity):
return False
return not any(
self.properties[prop] != other.properties[prop]
and self.properties[prop]
and other.properties[prop]
for prop in self.properties
)
def merge(self, other: Any) -> "Entity":
"""
Merge with other entity to create new entity.
Returns
-------
Entity
Merged entity.
Raises
------
AttributeError
If the entities cannot be merged.
"""
if self == other:
return self
if not self.can_merge(other):
raise AttributeError("Entities cannot be merged.")
merged = deepcopy(self)
for prop, value in other.properties.items():
if not value:
continue
if not self.properties[prop]:
setattr(merged, prop, value)
# Future (ianhelle) - cannot merge ID field
if other.edges:
self.edges.update(other.edges)
return merged
def can_merge(self, other: Any) -> bool:
"""
Return True if the entities can be merged.
Parameters
----------
other : Any
The other entity (object) to check
Returns
-------
bool
True if other has no conflicting properties.
"""
if self.__class__ != other.__class__ or not isinstance(other, Entity):
return False
other_id_props = {
prop: getattr(other, prop, None)
for prop in other.ID_PROPERTIES
if getattr(other, prop, None) is not None
}
self_id_props = {
prop: getattr(self, prop, None)
for prop in self.ID_PROPERTIES
if getattr(self, prop, None) is not None
}
# Return True if there is no overlap
overlap = self_id_props.keys() | other_id_props.keys()
if not overlap:
return True
return all(getattr(self, prop) == getattr(other, prop) for prop in overlap)
@property
def properties(self) -> dict:
"""
Return dictionary properties of entity.
Returns
-------
dict
Entity properties.
"""
return {
name: value
for name, value in self.__dict__.items()
if not name.startswith("_") and name != "edges"
}
@property
def description_str(self) -> str:
"""
Return Entity Description.
Returns
-------
str
Entity description (optional). If not overridden
by the Entity instance type, it will return the
Type string.
"""
return self.Type
@property
def name_str(self) -> str:
"""
Return Name Description.
Returns
-------
str
Entity Name (optional). If not overridden
by the Entity instance type, it will return the
class name string.
"""
return self.__class__.__name__
@classmethod
def instantiate_entity(
cls, raw_entity: Mapping[str, Any], entity_type: Optional[Type] = None
) -> Union["Entity", Mapping[str, Any]]:
"""
Class factory to return entity from raw dictionary representation.
Parameters
----------
raw_entity : Mapping[str, Any]
A mapping object (e.g. dictionary or pandas Series)
that contains the properties of the entity.
entity_type : Optional[Type]
The entity type to create, by default None.
Returns
-------
Entity
The instantiated entity
"""
if "Type" not in raw_entity and entity_type is None:
return raw_entity
entity_type_name = raw_entity.get("Type")
if not entity_type_name and entity_type:
entity_type_name = cls._get_entity_type_name(entity_type)
if entity_type:
return entity_type(raw_entity)
if entity_type_name and entity_type_name.lower() in cls.ENTITY_NAME_MAP:
return cls.ENTITY_NAME_MAP[entity_type_name.lower()](raw_entity)
raise TypeError(f"Could not find a suitable type for {entity_type}")
@classmethod
def _get_entity_type_name(cls, entity_type: Type) -> str:
"""
Get V3 entity name for an entity.
Parameters
----------
entity_type : Type
The Entity class
Returns
-------
str
The V3 serialized name.
"""
name = next(
iter(
(key for key, val in cls.ENTITY_NAME_MAP.items() if val == entity_type)
)
)
return name or "unknown"
@property
def node_properties(self) -> Dict[str, Any]:
"""
Return all public properties that are not entities.
Returns
-------
Dict[str, Any]
Dictionary of name, value properties.
"""
props = {
name: str(value)
for name, value in self.properties.items()
if not isinstance(value, (Entity, list)) and name != "edges"
}
props["Description"] = self.description_str
props["Name"] = self.name_str
return props
def to_networkx(self, graph: nx.Graph = None) -> nx.Graph:
"""
Return networkx graph of entities.
Parameters
----------
graph : nx.Graph, optional
Graph to add entities to. If not supplied the function
creates and returns a new graph.
By default None
Returns
-------
nx.Graph
Graph with entity and any connected entities.
"""
graph = graph or nx.Graph()
if not graph.has_node(self):
graph.add_node(self.name_str, **self.node_properties)
for edge in self.edges:
if not isinstance(edge.source, Entity) or not isinstance(
edge.target, Entity
):
continue
if graph.has_edge(edge.source.name_str, edge.target.name_str):
continue
graph.add_edge(edge.source.name_str, edge.target.name_str, **edge.attrs)
for node in (edge.source, edge.target):
# If this node has edges that are not in our graph
# call to_networkx recursively on that node.
if any(
edge
for edge in node.edges
if isinstance(edge.source, Entity)
and isinstance(edge.target, Entity)
and not graph.has_edge(edge.source.name_str, edge.target.name_str)
):
ent_node = typing.cast(Entity, node)
ent_node.to_networkx(graph)
return graph
@classmethod
def get_pivot_list(cls) -> List[str]:
"""
Return list of current pivot functions.
Returns
-------
List[str]
List of pivot functions assigned to entity.
"""
pivots = []
for prop in dir(cls):
attr = getattr(cls, prop)
if hasattr(attr, "pivot_properties"):
pivots.append(prop)
continue
if attr.__class__.__name__ != "QueryContainer":
continue
for name, qt_attr in attr:
if hasattr(qt_attr, "pivot_properties"):
pivots.append(f"{prop}.{name}")
return sorted(pivots)
# alias for get_pivot_list
pivots = get_pivot_list
def list_pivot_funcs(self):
"""Print list of pivot functions assigned to entity."""
print("\n".join(self.get_pivot_list()))
@classmethod
def make_pivot_shortcut(cls, func_name: str, target: str, overwrite: bool = False):
"""
Add a shortcut to a pivot function to the class.
Parameters
----------
func_name : str
The name of source pivot function.
target : str
The shortcut name (this will be a member function of the class)
overwrite : bool, optional
Force overwrite an existing pivot function, by default False
Raises
------
AttributeError
The source function does not exist
TypeError
The source function is not a pivot function.
TypeError
The target attribute exists and is not a pivot function
AttributeError
The target function exists and 'overwrite=True' was not specified.
"""
func_path = func_name.split(".") if "." in func_name else [func_name]
curr_attr = cls
for path in func_path:
curr_attr = getattr(curr_attr, path, None)
if not curr_attr:
raise AttributeError(f"No function found for {func_name}")
if not hasattr(curr_attr, "pivot_properties"):
raise TypeError(f"Function {func_name} is not a Pivot function")
tgt_name = valid_pyname(target)
if tgt_name != target:
            print(f"{target} renamed to valid Python identifier {tgt_name}")
existing_attr = getattr(cls, tgt_name, None)
if existing_attr:
if not hasattr(existing_attr, "pivot_properties"):
                raise TypeError(
                    f"Cannot overwrite an existing attribute {tgt_name}.",
"This is not a pivot function.",
)
if not overwrite:
raise AttributeError(
f"{cls.__name__} already has an attribute {tgt_name}",
"Use 'overwrite' parameter to force.",
)
setattr(cls, tgt_name, curr_attr)
@classmethod
def del_pivot_shortcut(cls, func_name: str):
"""
Remove a pivot shortcut.
Parameters
----------
func_name : str
The name of the shortcut function.
Raises
------
AttributeError
The class does not have an attribute `func_name`
TypeError
The attribute to delete is not a pivot shortcut.
"""
existing_attr = getattr(cls, func_name, None)
if not existing_attr:
raise AttributeError(
f"{cls.__name__} has no attribute {func_name}",
)
if not hasattr(existing_attr, "pivot_properties"):
raise TypeError(
f"Cannot delete an attribute {func_name} that isn't a pivot function.",
"This is not a pivot function.",
)
delattr(cls, func_name)
def camelcase_property_names(input_ent: Dict[str, Any]) -> Dict[str, Any]:
"""Change initial letter Azure Sentinel API entity properties to upper case."""
return {key[0].upper() + key[1:]: input_ent[key] for key in input_ent}
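# Illustrative sketch of the API above. It assumes the concrete entity classes
# (e.g. Host) have been registered in Entity.ENTITY_NAME_MAP by the package
# __init__; they are not defined in this module, so this function is not run here.
def _example_entity_usage():
    host_a = Entity.create({"Type": "host", "HostName": "web01"})
    host_b = Entity.create({"Type": "host", "HostName": "web01", "DnsDomain": "contoso.com"})
    if host_a.can_merge(host_b):
        host_a = host_a.merge(host_b)   # union of the non-conflicting properties
    graph = host_a.to_networkx()        # the entity plus any linked entities
    return host_a, graph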
| 30.831187 | 87 | 0.556494 |
dd4228481d32444fb9aeebd9817f081feb53fbe5 | 4,680 | py | Python | secret/config/defaults.py | LunarShen/SECRET | 0f652e63ce760ece8690cbad013f0d9bdb341e84 | [
"MIT"
] | null | null | null | secret/config/defaults.py | LunarShen/SECRET | 0f652e63ce760ece8690cbad013f0d9bdb341e84 | [
"MIT"
] | null | null | null | secret/config/defaults.py | LunarShen/SECRET | 0f652e63ce760ece8690cbad013f0d9bdb341e84 | [
"MIT"
] | null | null | null | from .config import CfgNode as CN
# -----------------------------------------------------------------------------
# Convention about Training / Test specific parameters
# -----------------------------------------------------------------------------
# Whenever an argument can be either used for training or for testing, the
# corresponding name will be post-fixed by a _TRAIN for a training parameter,
# or _TEST for a test-specific parameter.
# For example, the number of images during training will be
# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
# IMAGES_PER_BATCH_TEST
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
# -----------------------------------------------------------------------------
# MODEL
# -----------------------------------------------------------------------------
_C.MODEL = CN()
_C.MODEL.DEVICE = "cuda"
_C.MODEL.ARCH = 'resnet50'
# ---------------------------------------------------------------------------- #
# Backbone options
# ---------------------------------------------------------------------------- #
_C.MODEL.BACKBONE = CN()
# If use ImageNet pretrain model
_C.MODEL.BACKBONE.PRETRAIN = True
_C.MODEL.PART_DETACH = False
# ---------------------------------------------------------------------------- #
# REID LOSSES options
# ---------------------------------------------------------------------------- #
_C.MODEL.LOSSES = CN()
# Cross Entropy Loss options
_C.MODEL.LOSSES.CE = CN()
_C.MODEL.LOSSES.CE.EPSILON = 0.1
# Triplet Loss options
_C.MODEL.LOSSES.TRI = CN()
_C.MEAN_TEACH = CN()
_C.MEAN_TEACH.CE_SOFT_WRIGHT = 0.5
_C.MEAN_TEACH.TRI_SOFT_WRIGHT = 0.8
_C.MEAN_TEACH.ALPHA = 0.999
_C.CLUSTER = CN()
_C.CLUSTER.K1 = 30
_C.CLUSTER.K2 = 6
_C.CLUSTER.EPS = 0.600
_C.CLUSTER.REFINE_K = 0.4
# -----------------------------------------------------------------------------
# INPUT
# -----------------------------------------------------------------------------
_C.INPUT = CN()
# Size of the image during training
_C.INPUT.SIZE_TRAIN = [256, 128]
# Size of the image during test
_C.INPUT.SIZE_TEST = [256, 128]
# Values to be used for image normalization
_C.INPUT.PIXEL_MEAN = [0.485, 0.456, 0.406]
# Values to be used for image normalization
_C.INPUT.PIXEL_STD = [0.229, 0.224, 0.225]
# Random probability for image horizontal flip
_C.INPUT.DO_FLIP = True
_C.INPUT.FLIP_PROB = 0.5
# Value of padding size
_C.INPUT.DO_PAD = True
_C.INPUT.PADDING = 10
# Random Erasing
_C.INPUT.REA = CN()
_C.INPUT.REA.ENABLED = False
_C.INPUT.REA.PROB = 0.5
_C.INPUT.REA.MEAN = [0.485, 0.456, 0.406]
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASETS = CN()
_C.DATASETS.SOURCE = "dukemtmc"
_C.DATASETS.TARGET = "market1501"
_C.DATASETS.DIR = "Data"
# -----------------------------------------------------------------------------
# DataLoader
# -----------------------------------------------------------------------------
_C.DATALOADER = CN()
# Number of instance for each person
_C.DATALOADER.NUM_INSTANCES = 4
_C.DATALOADER.NUM_WORKERS = 4
_C.DATALOADER.BATCH_SIZE = 64
_C.DATALOADER.ITER_MODE = True
_C.DATALOADER.ITERS = 100
# ---------------------------------------------------------------------------- #
# OPTIM
# ---------------------------------------------------------------------------- #
_C.OPTIM = CN()
_C.OPTIM.OPT = 'adam'
_C.OPTIM.LR = 0.00035
_C.OPTIM.WEIGHT_DECAY = 5e-04
_C.OPTIM.MOMENTUM = 0.9
_C.OPTIM.SGD_DAMPENING = 0
_C.OPTIM.SGD_NESTEROV = False
_C.OPTIM.RMSPROP_ALPHA = 0.99
_C.OPTIM.ADAM_BETA1 = 0.9
_C.OPTIM.ADAM_BETA2 = 0.999
# Multi-step learning rate options
_C.OPTIM.SCHED = "warmupmultisteplr"
_C.OPTIM.GAMMA = 0.1
_C.OPTIM.STEPS = [40, 70]
_C.OPTIM.WARMUP_ITERS = 10
_C.OPTIM.WARMUP_FACTOR = 0.01
_C.OPTIM.WARMUP_METHOD = "linear"
_C.OPTIM.EPOCHS = 80
_C.TEST = CN()
_C.TEST.PRINT_PERIOD = 200
# Re-rank
_C.TEST.RERANK = CN()
_C.TEST.RERANK.ENABLED = False
_C.TEST.RERANK.K1 = 20
_C.TEST.RERANK.K2 = 6
_C.TEST.RERANK.LAMBDA = 0.3
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
_C.MODE = "USL"
_C.OUTPUT_DIR = "log/test"
_C.RESUME = ""
_C.PRINT_PERIOD = 100
_C.SEED = 1
_C.GPU_Device = [0,1,2,3]
_C.CHECKPOING = CN()
_C.CHECKPOING.REMAIN_CLASSIFIER = True
_C.CHECKPOING.SAVE_STEP = [10]
_C.CHECKPOING.PRETRAIN_PATH = ''
_C.CHECKPOING.EVAL = ''
_C.CUDNN_BENCHMARK = True
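# Illustrative sketch: typical yacs-style consumption of these defaults. It
# assumes the local CfgNode mirrors yacs' clone()/merge_from_file()/
# merge_from_list()/freeze(); the YAML path below is hypothetical.
def example_get_cfg(config_file=None, opts=None):
    cfg = _C.clone()                      # never mutate the module-level defaults
    if config_file:
        cfg.merge_from_file(config_file)  # e.g. "configs/duke2market.yml" (hypothetical)
    if opts:
        cfg.merge_from_list(opts)         # e.g. ["OPTIM.LR", 3.5e-4]
    cfg.freeze()
    return cfg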
| 29.068323 | 80 | 0.47906 |
b5c9f395228b388766bda6dde97586c44b7fd251 | 346 | py | Python | languages/__init__.py | bastion-gaming/GG-Server | 6b0e6054581e5e50223f45977d1ee52dcb90899b | [
"MIT"
] | 2 | 2020-02-06T10:49:28.000Z | 2020-04-24T09:31:23.000Z | languages/__init__.py | bastion-gaming/GG-Server | 6b0e6054581e5e50223f45977d1ee52dcb90899b | [
"MIT"
] | null | null | null | languages/__init__.py | bastion-gaming/GG-Server | 6b0e6054581e5e50223f45977d1ee52dcb90899b | [
"MIT"
] | null | null | null | from os import listdir
import json
general_dict_lang = dict()
for file in listdir("languages"):
if ".json" in file:
langname = file.replace(".json", "")
path = "languages/"+file
with open(path, encoding='utf-8') as json_file:
data = json.load(json_file)
general_dict_lang[langname] = data
| 28.833333 | 55 | 0.612717 |
a423f98d91e5bd52fc27c6d6d8bb6c5735def8a4 | 774 | py | Python | src/products/admin.py | jayesh96/ecommerce2 | f33e4eac54d2710f48a94f7042565d36e4924c08 | [
"MIT"
] | null | null | null | src/products/admin.py | jayesh96/ecommerce2 | f33e4eac54d2710f48a94f7042565d36e4924c08 | [
"MIT"
] | null | null | null | src/products/admin.py | jayesh96/ecommerce2 | f33e4eac54d2710f48a94f7042565d36e4924c08 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from .models import Product,Variation,ProductImage,Category
class ProductImageInline(admin.TabularInline):
model = ProductImage
extra = 0
class VariationInline(admin.TabularInline):
model = Variation
extra = 0
class VariationAdmin(admin.ModelAdmin):
    list_display = ["id","title","price","product","sale_price","active","inventory"]
class Meta:
model = Variation
class ProductAdmin(admin.ModelAdmin):
list_display = ["title","price","active","default"]
inlines = [
VariationInline,ProductImageInline
]
class Meta:
model = Product
admin.site.register(Product,ProductAdmin)
admin.site.register(Variation,VariationAdmin)
admin.site.register(ProductImage)
admin.site.register(Category)
| 23.454545 | 90 | 0.775194 |
93232f1bfbbdb615733340151eb8a30888ef4fea | 13,012 | py | Python | src/_pytest/compat.py | ctb/pytest | 49827adcb9256c9c9c06a25729421dcc3c385edc | [
"MIT"
] | null | null | null | src/_pytest/compat.py | ctb/pytest | 49827adcb9256c9c9c06a25729421dcc3c385edc | [
"MIT"
] | null | null | null | src/_pytest/compat.py | ctb/pytest | 49827adcb9256c9c9c06a25729421dcc3c385edc | [
"MIT"
] | null | null | null | """Python version compatibility code."""
import enum
import functools
import inspect
import os
import re
import sys
from contextlib import contextmanager
from inspect import Parameter
from inspect import signature
from typing import Any
from typing import Callable
from typing import Generic
from typing import Optional
from typing import overload as overload
from typing import Tuple
from typing import TypeVar
from typing import Union
import attr
import py
from _pytest._io.saferepr import saferepr
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
if sys.version_info < (3, 5, 2):
TYPE_CHECKING = False # type: bool
else:
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import NoReturn
from typing import Type
from typing_extensions import Final
_T = TypeVar("_T")
_S = TypeVar("_S")
# fmt: off
# Singleton type for NOTSET, as described in:
# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions
class NotSetType(enum.Enum):
token = 0
NOTSET = NotSetType.token # type: Final # noqa: E305
# fmt: on
MODULE_NOT_FOUND_ERROR = (
"ModuleNotFoundError" if sys.version_info[:2] >= (3, 6) else "ImportError"
)
if sys.version_info >= (3, 8):
from importlib import metadata as importlib_metadata
else:
import importlib_metadata # noqa: F401
def _format_args(func: Callable[..., Any]) -> str:
return str(signature(func))
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(""))
if sys.version_info < (3, 6):
def fspath(p):
"""os.fspath replacement, useful to point out when we should replace it by the
real function once we drop py35."""
return str(p)
else:
fspath = os.fspath
def is_generator(func: object) -> bool:
genfunc = inspect.isgeneratorfunction(func)
return genfunc and not iscoroutinefunction(func)
def iscoroutinefunction(func: object) -> bool:
"""Return True if func is a coroutine function (a function defined with async
def syntax, and doesn't contain yield), or a function decorated with
@asyncio.coroutine.
    Note: copied and modified from Python 3.5's builtin coroutines.py to avoid
importing asyncio directly, which in turns also initializes the "logging"
module as a side-effect (see issue #8).
"""
return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False)
def is_async_function(func: object) -> bool:
"""Return True if the given function seems to be an async function or
an async generator."""
return iscoroutinefunction(func) or (
sys.version_info >= (3, 6) and inspect.isasyncgenfunction(func)
)
def getlocation(function, curdir=None) -> str:
function = get_real_func(function)
fn = py.path.local(inspect.getfile(function))
lineno = function.__code__.co_firstlineno
if curdir is not None:
relfn = fn.relto(curdir)
if relfn:
return "%s:%d" % (relfn, lineno + 1)
return "%s:%d" % (fn, lineno + 1)
def num_mock_patch_args(function) -> int:
"""Return number of arguments used up by mock arguments (if any)."""
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object())
ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object())
return len(
[
p
for p in patchings
if not p.attribute_name
and (p.new is mock_sentinel or p.new is ut_mock_sentinel)
]
)
def getfuncargnames(
function: Callable[..., Any],
*,
name: str = "",
is_method: bool = False,
cls: Optional[type] = None
) -> Tuple[str, ...]:
"""Return the names of a function's mandatory arguments.
Should return the names of all function arguments that:
* Aren't bound to an instance or type as in instance or class methods.
* Don't have default values.
* Aren't bound with functools.partial.
* Aren't replaced with mocks.
The is_method and cls arguments indicate that the function should
be treated as a bound method even though it's not unless, only in
the case of cls, the function is a static method.
The name parameter should be the original name in which the function was collected.
"""
# TODO(RonnyPfannschmidt): This function should be refactored when we
# revisit fixtures. The fixture mechanism should ask the node for
# the fixture names, and not try to obtain directly from the
# function object well after collection has occurred.
# The parameters attribute of a Signature object contains an
# ordered mapping of parameter names to Parameter instances. This
# creates a tuple of the names of the parameters that don't have
# defaults.
try:
parameters = signature(function).parameters
except (ValueError, TypeError) as e:
fail(
"Could not determine arguments of {!r}: {}".format(function, e),
pytrace=False,
)
arg_names = tuple(
p.name
for p in parameters.values()
if (
p.kind is Parameter.POSITIONAL_OR_KEYWORD
or p.kind is Parameter.KEYWORD_ONLY
)
and p.default is Parameter.empty
)
if not name:
name = function.__name__
# If this function should be treated as a bound method even though
# it's passed as an unbound method or function, remove the first
# parameter name.
if is_method or (
cls and not isinstance(cls.__dict__.get(name, None), staticmethod)
):
arg_names = arg_names[1:]
# Remove any names that will be replaced with mocks.
if hasattr(function, "__wrapped__"):
arg_names = arg_names[num_mock_patch_args(function) :]
return arg_names
if sys.version_info < (3, 7):
@contextmanager
def nullcontext():
yield
else:
from contextlib import nullcontext as nullcontext # noqa: F401
def get_default_arg_names(function: Callable[..., Any]) -> Tuple[str, ...]:
# Note: this code intentionally mirrors the code at the beginning of
# getfuncargnames, to get the arguments which were excluded from its result
# because they had default values.
return tuple(
p.name
for p in signature(function).parameters.values()
if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
and p.default is not Parameter.empty
)
_non_printable_ascii_translate_table = {
i: "\\x{:02x}".format(i) for i in range(128) if i not in range(32, 127)
}
_non_printable_ascii_translate_table.update(
{ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"}
)
def _translate_non_printable(s: str) -> str:
return s.translate(_non_printable_ascii_translate_table)
STRING_TYPES = bytes, str
def _bytes_to_ascii(val: bytes) -> str:
return val.decode("ascii", "backslashreplace")
def ascii_escaped(val: Union[bytes, str]) -> str:
r"""If val is pure ASCII, return it as an str, otherwise, escape
bytes objects into a sequence of escaped bytes:
b'\xc3\xb4\xc5\xd6' -> r'\xc3\xb4\xc5\xd6'
and escapes unicode objects into a sequence of escaped unicode
ids, e.g.:
r'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944'
Note:
The obvious "v.decode('unicode-escape')" will return
valid UTF-8 unicode if it finds them in bytes, but we
want to return escaped bytes for any byte, even if they match
a UTF-8 string.
"""
if isinstance(val, bytes):
ret = _bytes_to_ascii(val)
else:
ret = val.encode("unicode_escape").decode("ascii")
return _translate_non_printable(ret)
@attr.s
class _PytestWrapper:
"""Dummy wrapper around a function object for internal use only.
Used to correctly unwrap the underlying function object when we are
creating fixtures, because we wrap the function object ourselves with a
decorator to issue warnings when the fixture function is called directly.
"""
obj = attr.ib()
def get_real_func(obj):
"""Get the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial."""
start_obj = obj
for i in range(100):
# __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function
# to trigger a warning if it gets called directly instead of by pytest: we don't
# want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774)
new_obj = getattr(obj, "__pytest_wrapped__", None)
if isinstance(new_obj, _PytestWrapper):
obj = new_obj.obj
break
new_obj = getattr(obj, "__wrapped__", None)
if new_obj is None:
break
obj = new_obj
else:
raise ValueError(
("could not find real function of {start}\nstopped at {current}").format(
start=saferepr(start_obj), current=saferepr(obj)
)
)
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def get_real_method(obj, holder):
"""Attempt to obtain the real function object that might be wrapping
``obj``, while at the same time returning a bound method to ``holder`` if
the original object was a bound method."""
try:
is_method = hasattr(obj, "__func__")
obj = get_real_func(obj)
except Exception: # pragma: no cover
return obj
if is_method and hasattr(obj, "__get__") and callable(obj.__get__):
obj = obj.__get__(holder)
return obj
def getimfunc(func):
try:
return func.__func__
except AttributeError:
return func
def safe_getattr(object: Any, name: str, default: Any) -> Any:
"""Like getattr but return default upon any Exception or any OutcomeException.
Attribute access can potentially fail for 'evil' Python objects.
See issue #214.
It catches OutcomeException because of #2490 (issue #580), new outcomes
are derived from BaseException instead of Exception (for more details
check #2707).
"""
try:
return getattr(object, name, default)
except TEST_OUTCOME:
return default
def safe_isclass(obj: object) -> bool:
"""Ignore any exception via isinstance on Python 3."""
try:
return inspect.isclass(obj)
except Exception:
return False
if sys.version_info < (3, 5, 2):
def overload(f): # noqa: F811
return f
if getattr(attr, "__version_info__", ()) >= (19, 2):
ATTRS_EQ_FIELD = "eq"
else:
ATTRS_EQ_FIELD = "cmp"
if sys.version_info >= (3, 8):
from functools import cached_property as cached_property
else:
class cached_property(Generic[_S, _T]):
__slots__ = ("func", "__doc__")
def __init__(self, func: Callable[[_S], _T]) -> None:
self.func = func
self.__doc__ = func.__doc__
@overload
def __get__(
self, instance: None, owner: Optional["Type[_S]"] = ...
) -> "cached_property[_S, _T]":
raise NotImplementedError()
@overload # noqa: F811
def __get__( # noqa: F811
self, instance: _S, owner: Optional["Type[_S]"] = ...
) -> _T:
raise NotImplementedError()
def __get__(self, instance, owner=None): # noqa: F811
if instance is None:
return self
value = instance.__dict__[self.func.__name__] = self.func(instance)
return value
# Sometimes an algorithm needs a dict which yields items in the order in which
# they were inserted when iterated. Since Python 3.7, `dict` preserves
# insertion order. Since `dict` is faster and uses less memory than
# `OrderedDict`, prefer to use it if possible.
if sys.version_info >= (3, 7):
order_preserving_dict = dict
else:
from collections import OrderedDict
order_preserving_dict = OrderedDict
# Perform exhaustiveness checking.
#
# Consider this example:
#
# MyUnion = Union[int, str]
#
# def handle(x: MyUnion) -> int {
# if isinstance(x, int):
# return 1
# elif isinstance(x, str):
# return 2
# else:
# raise Exception('unreachable')
#
# Now suppose we add a new variant:
#
# MyUnion = Union[int, str, bytes]
#
# After doing this, we must remember ourselves to go and update the handle
# function to handle the new variant.
#
# With `assert_never` we can do better:
#
# // raise Exception('unreachable')
# return assert_never(x)
#
# Now, if we forget to handle the new variant, the type-checker will emit a
# compile-time error, instead of the runtime error we would have gotten
# previously.
#
# This also work for Enums (if you use `is` to compare) and Literals.
def assert_never(value: "NoReturn") -> "NoReturn":
assert False, "Unhandled value: {} ({})".format(value, type(value).__name__)
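# Illustrative sketch of the two argument-introspection helpers above, using a
# hypothetical fixture-like sample function (not used anywhere in pytest itself):
def _example_introspection():
    def fixture_like(request, tmp_path, scale=2):
        return scale
    # only mandatory, non-default parameters are reported
    assert getfuncargnames(fixture_like) == ("request", "tmp_path")
    # parameters with defaults are reported separately
    assert get_default_arg_names(fixture_like) == ("scale",)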
| 29.844037 | 102 | 0.666769 |
6127262dfb6a5d9b0739ea822cf658adda3c57b6 | 617 | py | Python | conv_layer.py | motokimura/CapsNet-Pytorch | 76befdaeb166b381870fa92a44800e16c4bf215f | [
"MIT"
] | 16 | 2018-09-05T16:38:21.000Z | 2021-07-20T08:17:01.000Z | conv_layer.py | motokimura/CapsNet-Pytorch | 76befdaeb166b381870fa92a44800e16c4bf215f | [
"MIT"
] | 1 | 2018-06-08T15:49:27.000Z | 2018-06-08T15:49:27.000Z | conv_layer.py | motokimura/CapsNet-Pytorch | 76befdaeb166b381870fa92a44800e16c4bf215f | [
"MIT"
] | 8 | 2019-05-21T08:43:50.000Z | 2021-02-25T05:12:16.000Z | #
# Dynamic Routing Between Capsules
# https://arxiv.org/pdf/1710.09829.pdf
#
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
import torch.nn.functional as F
class Conv1(nn.Module):
def __init__(self):
super(Conv1, self).__init__()
self.conv = nn.Conv2d(
in_channels=1,
out_channels=256,
kernel_size=9,
stride=1,
bias=True
)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
# x: [batch_size, 1, 28, 28]
h = self.relu(self.conv(x))
# h: [batch_size, 256, 20, 20]
return h
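# Illustrative shape check for the comments above (hypothetical smoke test,
# not part of the original training pipeline):
def _example_shapes():
    x = torch.randn(2, 1, 28, 28)        # an MNIST-sized batch of two images
    h = Conv1()(x)
    assert h.shape == (2, 256, 20, 20)   # 28 - 9 + 1 = 20 with stride 1 and no padding
    return h.shape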
| 17.628571 | 44 | 0.693679 |
f9ea7a50a56903c30f126ba4245f8a2f83f82e41 | 3,150 | py | Python | multilanetrafficsim.py | benjithedalilama/traffic-simulation | 2c1e524207ea7531cee0af7ceca03ce7489899d6 | [
"MIT"
] | null | null | null | multilanetrafficsim.py | benjithedalilama/traffic-simulation | 2c1e524207ea7531cee0af7ceca03ce7489899d6 | [
"MIT"
] | null | null | null | multilanetrafficsim.py | benjithedalilama/traffic-simulation | 2c1e524207ea7531cee0af7ceca03ce7489899d6 | [
"MIT"
] | null | null | null | from singlelanetrafficsim import SingleLaneTrafficSimulation
from trafficsim import TrafficSimulation
from copy import deepcopy
import random
class MultiLaneTrafficSimulation(TrafficSimulation):
def __init__(self, road_length, traffic_density, v_max, p_slow, l, l_o, l_o_back, p_change=0.5, left_lane=[], right_lane=[], verbose = True, strategy = 'regular'):
super().__init__(road_length, traffic_density, v_max, p_slow, verbose = verbose, strategy = strategy)
self.l = l
self.l_o = l_o
self.l_o_back = l_o_back
self.p_change = p_change
self.p_slow = p_slow
self.verbose = verbose
self.left_lane = left_lane if left_lane else SingleLaneTrafficSimulation(road_length, traffic_density, v_max, p_slow, verbose = verbose, strategy = strategy)
self.right_lane = right_lane if right_lane else SingleLaneTrafficSimulation(road_length, traffic_density, v_max, p_slow, verbose = verbose, strategy = strategy)
def step(self):
self.change_lanes()
self.left_lane.step()
self.right_lane.step()
def change_lanes(self):
l = self.l
l_o = self.l_o
l_o_back = self.l_o_back
left = self.left_lane
right = self.right_lane
temp_left_state = deepcopy(left.state)
temp_right_state = deepcopy(right.state)
left_prev_j = 0
right_prev_j = 0
changed = 0
for i, speed_tuple in enumerate(zip(left.state, right.state)):
# No cars at index
if speed_tuple[0] < 0 and speed_tuple[1] < 0:
continue
# Two cars side by side
if speed_tuple[0] >= 0 and speed_tuple[1] >= 0:
continue
left_not_empty = len(set(left.state)) != 1
right_not_empty = len(set(right.state)) != 1
# count spaces in front
left_j = 1
while left.state[(i + left_j)%left.road_length] < 0 and left_not_empty:
left_j += 1
right_j = 1
while right.state[(i + right_j)%right.road_length] < 0 and right_not_empty:
right_j += 1
# if the car is in the left lane, else the right lane
if speed_tuple[1] == -1:
gap, gap_o, gap_o_back = left_j, right_j, right_prev_j
else:
gap, gap_o, gap_o_back = right_j, left_j, left_prev_j
# change lanes
if gap < l and gap_o > l_o and gap_o_back > l_o_back and random.random() < self.p_change:
temp_left_state[i], temp_right_state[i] = right.state[i], left.state[i]
left_prev_j = left_j
right_prev_j = right_j
right.state = temp_right_state
left.state = temp_left_state
def get_flow(self):
left_flow = self.left_lane.get_flow()
right_flow = self.right_lane.get_flow()
flow = (left_flow + right_flow)/2
return flow
def display(self):
print(''.join('.' if x == -1 else str(x) for x in self.left_lane.state))
print(''.join('.' if x == -1 else str(x) for x in self.right_lane.state))
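# Illustrative driver (the parameter values below are made up, not taken from
# any experiment in this project):
if __name__ == "__main__":
    sim = MultiLaneTrafficSimulation(
        road_length=100, traffic_density=0.2, v_max=5, p_slow=0.3,
        l=3, l_o=5, l_o_back=3, p_change=0.5, verbose=False)
    for _ in range(100):
        sim.step()
    sim.display()
    print("average flow:", sim.get_flow())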
| 37.951807 | 168 | 0.614286 |
682aa1357b1be972637bd26203f90b626405d53f | 2,199 | py | Python | src/toil/utils/toilSshCluster.py | thiagogenez/toil | b25e7d0616fef3aa9085a7d7d7ae6bdc257f2d92 | [
"Apache-2.0"
] | null | null | null | src/toil/utils/toilSshCluster.py | thiagogenez/toil | b25e7d0616fef3aa9085a7d7d7ae6bdc257f2d92 | [
"Apache-2.0"
] | null | null | null | src/toil/utils/toilSshCluster.py | thiagogenez/toil | b25e7d0616fef3aa9085a7d7d7ae6bdc257f2d92 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SSH into the toil appliance container running on the leader of the cluster."""
import argparse
import logging
import sys
from toil.common import parser_with_common_options
from toil.provisioners import cluster_factory
from toil.statsAndLogging import set_logging_from_options
logger = logging.getLogger(__name__)
def main():
parser = parser_with_common_options(provisioner_options=True, jobstore_option=False)
parser.add_argument("--insecure", action='store_true',
help="Temporarily disable strict host key checking.")
parser.add_argument("--sshOption", dest='sshOptions', default=[], action='append',
help="Pass an additional option to the SSH command.")
parser.add_argument('args', nargs=argparse.REMAINDER)
options = parser.parse_args()
set_logging_from_options(options)
# Since we collect all the remaining arguments at the end for a command to
# run, it's easy to lose options.
if len(options.args) > 0 and options.args[0].startswith('-'):
logger.warning('Argument \'%s\' interpreted as a command to run '
'despite looking like an option.', options.args[0])
cluster = cluster_factory(provisioner=options.provisioner,
clusterName=options.clusterName,
zone=options.zone)
command = options.args if options.args else ['bash']
cluster.getLeader().sshAppliance(*command, strict=not options.insecure, tty=sys.stdin.isatty(),
sshOptions=options.sshOptions)
| 45.8125 | 99 | 0.704411 |
a5c792c0e1e8b34fdb10aef92cfa9358afb9de0f | 1,522 | py | Python | python/fate_client/flow_client/flow_cli/utils/detect_utils.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 715 | 2019-01-24T10:52:03.000Z | 2019-10-31T12:19:22.000Z | python/fate_client/flow_client/flow_cli/utils/detect_utils.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 270 | 2019-02-11T02:57:36.000Z | 2019-08-29T11:22:33.000Z | python/fate_client/flow_client/flow_cli/utils/detect_utils.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 200 | 2019-01-26T14:21:35.000Z | 2019-11-01T01:14:36.000Z | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
def check_config(config: typing.Dict, required_arguments: typing.List):
no_arguments = []
error_arguments = []
for require_argument in required_arguments:
if isinstance(require_argument, tuple):
config_value = config.get(require_argument[0], None)
if isinstance(require_argument[1], (tuple, list)):
if config_value not in require_argument[1]:
error_arguments.append(require_argument)
elif config_value != require_argument[1]:
error_arguments.append(require_argument)
elif require_argument not in config:
no_arguments.append(require_argument)
if no_arguments or error_arguments:
raise Exception('the following arguments are required: {} {}'.format(
','.join(no_arguments), ','.join(['{}={}'.format(a[0], a[1]) for a in error_arguments])))
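# Illustrative sketch: plain names only need to be present, while
# (name, allowed_value_or_values) pairs must also match. The argument names
# below are hypothetical.
def _example_check_config():
    conf = {"job_id": "20220101", "role": "guest"}
    # passes: 'job_id' exists and 'role' is one of the allowed values
    check_config(conf, required_arguments=["job_id", ("role", ("guest", "host"))])
    # would raise: 'party_id' is missing and 'role' is not 'arbiter'
    # check_config(conf, required_arguments=["party_id", ("role", ("arbiter",))])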
| 43.485714 | 101 | 0.691196 |
be8f503597ecf35a1344d8975dc797ef29b27f73 | 1,564 | py | Python | virtman/drivers/iscsi.py | vmthunder/virtman | af81ee1eeab96094b716253ad93bda8630bc219d | [
"Apache-2.0"
] | 6 | 2015-02-09T23:39:58.000Z | 2021-05-17T22:18:57.000Z | virtman/drivers/iscsi.py | vmthunder/virtman | af81ee1eeab96094b716253ad93bda8630bc219d | [
"Apache-2.0"
] | 1 | 2015-05-04T03:41:49.000Z | 2015-05-04T03:41:49.000Z | virtman/drivers/iscsi.py | vmthunder/virtman | af81ee1eeab96094b716253ad93bda8630bc219d | [
"Apache-2.0"
] | 1 | 2016-08-12T07:00:01.000Z | 2016-08-12T07:00:01.000Z |
from brick.iscsi.iscsi import TgtAdm
from virtman.utils import rootwrap
from virtman.utils import singleton
from virtman.openstack.common import processutils as putils
class TgtExecutor(TgtAdm):
def __init__(self, root_helper='', volumes_dir='/etc/tgt/stack.d'):
TgtAdm.__init__(self, root_helper, volumes_dir)
tgt = TgtExecutor(rootwrap.root_helper(), '/etc/tgt/stack.d')
def create_iscsi_target(iqn, path):
# params included (iqn tid lun path) but (tid lun) not need for
# tgt to create target
return tgt.create_iscsi_target(iqn, '', '', path)
def remove_iscsi_target(vol_id, vol_name):
return tgt.remove_iscsi_target('', '', vol_id, vol_name)
def exists(iqn):
return tgt.exist(iqn)
def is_connected(target_id):
    """Check whether the target is still connected to (in use by) other VMs."""
# TODO: try to call brick.iscsi
# cmd = "tgtadm --lld iscsi --mode conn --op show --tid " + str(target_id)
(output, error) = putils.execute("tgtadm",
'--lld',
'iscsi',
'--mode',
'conn',
'--op',
'show',
'--tid',
str(target_id),
run_as_root=True,
root_helper=rootwrap.root_helper())
if len(output) == 0:
return False
return True
| 31.28 | 78 | 0.526854 |
680a5358804bc8ee91a4a1b65cb5c8bc9fcb491d | 387 | py | Python | pybaiduphoto/contribution.py | HengyueLi/baiduphoto | a65dcb5a92931298ed561e6cea48fb1d816a06f3 | [
"MIT"
] | 1 | 2022-03-28T08:56:24.000Z | 2022-03-28T08:56:24.000Z | pybaiduphoto/contribution.py | HengyueLi/baiduphoto | a65dcb5a92931298ed561e6cea48fb1d816a06f3 | [
"MIT"
] | null | null | null | pybaiduphoto/contribution.py | HengyueLi/baiduphoto | a65dcb5a92931298ed561e6cea48fb1d816a06f3 | [
"MIT"
] | null | null | null | class downLoader:
def __init__(self, req):
self.req = req
def getDownloadZip(self, url, dirPath, fileName):
        # Download the file <fileName> from the given url and store it under the path <dirPath>.
        # Ideally this should support resuming interrupted downloads (not yet sure whether it does).
        # Ideally this should show a progress bar (rich is recommended, https://pypi.org/project/rich/).
        # Take the network parameters from self.req; it is best to use self.req directly. See the notes in Requests.py.
pass
print(url, dirPath, fileName)
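# Illustrative sketch of what the comments above ask for, using the requests
# and rich libraries directly. The layout of self.req is not shown in this
# module, so this standalone helper deliberately takes no session object.
def example_resumable_download(url, dirPath, fileName, chunk_size=1 << 16):
    import os
    import requests
    from rich.progress import Progress

    target = os.path.join(dirPath, fileName)
    pos = os.path.getsize(target) if os.path.exists(target) else 0
    headers = {"Range": "bytes=%d-" % pos} if pos else {}
    with requests.get(url, headers=headers, stream=True, timeout=30) as resp:
        resp.raise_for_status()
        total = pos + int(resp.headers.get("content-length", 0))
        mode = "ab" if resp.status_code == 206 else "wb"  # 206: server honoured the Range header
        with open(target, mode) as fh, Progress() as progress:
            task = progress.add_task(fileName, total=total or None)
            if mode == "ab":
                progress.update(task, completed=pos)
            for chunk in resp.iter_content(chunk_size=chunk_size):
                fh.write(chunk)
                progress.update(task, advance=len(chunk))
    return target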
| 32.25 | 63 | 0.653747 |
df584775d2b9bd07acb8473f6aabe6ed85b5e540 | 1,597 | py | Python | old_and_simple_and_deprecated/check_for_files_from_list.py | mcjczapiewski/work | 2540afa6b18bf6ff92a7c07b16695035785c0dd8 | [
"MIT"
] | null | null | null | old_and_simple_and_deprecated/check_for_files_from_list.py | mcjczapiewski/work | 2540afa6b18bf6ff92a7c07b16695035785c0dd8 | [
"MIT"
] | null | null | null | old_and_simple_and_deprecated/check_for_files_from_list.py | mcjczapiewski/work | 2540afa6b18bf6ff92a7c07b16695035785c0dd8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import datetime
# import ctypes
from pathlib import Path
# current date and time
czasstart = datetime.datetime.now()
print("~~~~~~START~~~~~~\t" + str(czasstart).split(".")[0])
print(
"\nPodaj ścieżkę, w której znajduje się \
plik sprawdzaj.txt (format ANSI) :"
)
plik_lista = input()
textfile = plik_lista + "\\sprawdzaj.txt"
print("\nPodaj lokalizację dla pliku z błędami:")
sciezka = input()
wynikowy = os.path.basename(os.path.normpath(sciezka))
bledny = (
sciezka
+ "\\"
+ wynikowy
+ "_NIEISTNIEJACE_"
+ czasstart.strftime("%Y-%m-%d")
+ ".txt"
)
# print('\nPodaj nazwę okna skryptu:')
# nazwaokna = input()
# ctypes.windll.kernel32.SetConsoleTitleW(nazwaokna)
alllines = 0
with open(textfile, "r") as otxtl:
for line in otxtl:
alllines += 1
input(
"\nPlików do sprawdzenia: "
+ str(alllines)
+ "\n\nWciśnij ENTER aby kontynuować..."
)
with open(textfile, "r") as otxt:
for line in otxt:
print(str(alllines))
alllines -= 1
sprawdz = line.rstrip("\n")
if not Path(sprawdz).exists():
with open(bledny, "a") as bl:
bl.write(sprawdz + "\n")
# duration of the whole script
czaskoniec = datetime.datetime.now()
roznicaczas = czaskoniec - czasstart
czastrwania = roznicaczas.total_seconds() / 60
print("\nCałość zajęła (minuty):")
print("%.2f" % czastrwania)
print("\n~~~~~~KONIEC~~~~~~\t" + str(czaskoniec).split(".")[0])
if Path(bledny).exists():
print("\n!PRZEANALIZUJ PLIK Z BŁĘDAMI!")
input("Wciśnij ENTER aby wyjść...")
| 23.835821 | 63 | 0.63181 |
3ab9d5d349cd7b8eb24a2d2dd77a1f98070183f0 | 4,477 | py | Python | braid/postgres.py | twisted-infra/braid | deffb8ea7eaeac31fcbfe1135a326ad0e7712d61 | [
"MIT"
] | 8 | 2015-10-18T11:02:54.000Z | 2019-03-29T18:33:18.000Z | braid/postgres.py | twisted-infra/braid | deffb8ea7eaeac31fcbfe1135a326ad0e7712d61 | [
"MIT"
] | 214 | 2015-01-19T06:58:36.000Z | 2022-02-10T10:22:30.000Z | braid/postgres.py | twisted-infra/braid | deffb8ea7eaeac31fcbfe1135a326ad0e7712d61 | [
"MIT"
] | 12 | 2015-02-08T17:32:13.000Z | 2020-10-25T22:22:59.000Z | from fabric.api import sudo, hide, task, env, run, settings
from braid import package, utils
from pipes import quote
def install():
package.install(['postgresql-12'])
def _runQuery(query, database=None):
with hide('running', 'output'):
database = '--dbname={}'.format(database) if database else ''
return sudo('/usr/bin/psql --no-align --no-readline --no-password --quiet '
'--tuples-only {} -c {}'.format(database, quote(query)),
user='postgres', pty=False, combine_stderr=False)
def _dbExists(name):
res = _runQuery("select count(*) from pg_database "
"where datname = '{}';".format(name))
return res == '1'
def _userExists(name):
res = _runQuery("select count(*) from pg_user "
"where usename = '{}';".format(name))
return res == '1'
def tableExists(database, table):
"""
Check that a table exists in a given database.
"""
res = _runQuery("select table_name from information_schema.tables "
"where table_name = '{}';".format(table),
database=database)
return res == table
def createUser(name):
if not _userExists(name):
sudo('/usr/bin/createuser -D -R -S {}'.format(name), user='postgres',
pty=False)
def createDb(name, owner):
if not _dbExists(name):
sudo('/usr/bin/createdb -E utf8 -O {} {}'.format(owner, name), user='postgres',
pty=False)
def _grantSchemaAccess(database, user):
# Double quotes tell postgres to consider names as case sensitive. If we
# omit them and have uppercase chars in the names, the relevant object
# (user or database) will not be found.
_runQuery('grant connect on database "{}" to "{}";'.format(database, user))
_runQuery('grant usage on schema public to "{}";'.format(user), database)
def grantRead(database, user):
"""
Grant read permissions to C{user} to all tables in C{database}.
"""
_grantSchemaAccess(database, user)
# This only affects existing tables. It is possible to set the default
# privileges on the schema for new objects but this rapidly complicates
# everything. The simple solution is to just re-run this command
_runQuery('grant select on all tables in schema public to "{}";'
.format(user), database)
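# --- Illustrative sketch (hypothetical, not part of the original module) ---
# The comment in grantRead notes that default privileges could also cover tables
# created later. If that route were taken it would look roughly like the helper
# below; the name is made up, and the exact policy (which role's future objects
# it applies to) would need more thought, since _runQuery runs as postgres.
def _grantDefaultRead(database, user):
    _runQuery('alter default privileges in schema public '
              'grant select on tables to "{}";'.format(user), database)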
def grantReadWrite(database, user):
"""
Grant read and write permissions to C{user} to all tables in C{database}.
"""
_grantSchemaAccess(database, user)
_runQuery('grant select, insert, update, delete on all tables in '
'schema public to "{}";'.format(user), database)
_runQuery('grant all privileges on all sequences in schema public to "{}";'
.format(user), database)
def dropDb(name):
return _runQuery('drop database if exists {};'.format(quote(name)))
@task
def dump(database, dumpPath, user=None):
"""
Download a dump of the specified database to C{dumpPath}. This has to be
executed as a user with enough privileges on the selected database.
Alternatively a user can be manually provided.
"""
if user is None:
user = env.user
with settings(user=user):
with utils.tempfile(saveTo=dumpPath) as temp:
dumpToPath(database, temp)
def dumpToPath(database, dumpPath):
cmd = [
'/usr/bin/pg_dump',
'--blobs',
'--no-owner',
'--format', 'custom',
'--create',
'--file', dumpPath,
'--compress', '9',
database,
]
run(' '.join(cmd))
@task
def restore(database, dumpPath, user=None, clean=False):
"""
Upload a local dump and restore it to the named database.
If no user is specified, set the owner to the current active SSH user.
This function only works for postgres users which have a corresponding
system user.
If clean is specified, the database will be dropped and recreated. The
    database will always be created if it does not exist.
"""
if user is None:
user = env.user
if clean:
dropDb(database)
createDb(database, user)
with settings(user=user):
with utils.tempfile(uploadFrom=dumpPath) as temp:
restoreFromPath(database, temp)
def restoreFromPath(database, dumpPath):
cmd = [
'/usr/bin/pg_restore',
'--dbname', database,
'--schema', 'public',
dumpPath,
]
run(' '.join(cmd))
| 30.04698 | 87 | 0.625419 |
404f72285d3345346628df9a4c8ae28746dd2bc3 | 1,596 | py | Python | env_user.py | gve-sw/MerakiCaptivePortal | e88b5716ade2c5b6faaee1ab0842577e49e510a1 | [
"BSD-Source-Code"
] | null | null | null | env_user.py | gve-sw/MerakiCaptivePortal | e88b5716ade2c5b6faaee1ab0842577e49e510a1 | [
"BSD-Source-Code"
] | null | null | null | env_user.py | gve-sw/MerakiCaptivePortal | e88b5716ade2c5b6faaee1ab0842577e49e510a1 | [
"BSD-Source-Code"
] | null | null | null | """Set your Environment Information once, not many times.
The provided sample code in this repository will reference this file to get the
needed information about you and your context to complete the labs. You
provide this info here once and the scripts in this repository will access it
as needed by the lab.
TODO: To setup your `env_user.py` copy this file then edit and save your info
$ cp env_user.template env_user.py
Copyright (c) 2018 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# User Input
WT_ACCESS_TOKEN = ""
WT_ROOM_ID = ""
MERAKI_API_KEY = ""
# End User Input
| 38 | 79 | 0.7901 |
03858f284e93476342209078550ced917fd6286e | 8,296 | py | Python | setup.py | Smasher-z/UniverseNet | 794e7d3d2c7a8f2f6601a60b67366d93a3f30a1f | [
"Apache-2.0"
] | 2 | 2022-02-21T06:56:20.000Z | 2022-02-21T06:57:02.000Z | setup.py | Smasher-z/UniverseNet | 794e7d3d2c7a8f2f6601a60b67366d93a3f30a1f | [
"Apache-2.0"
] | null | null | null | setup.py | Smasher-z/UniverseNet | 794e7d3d2c7a8f2f6601a60b67366d93a3f30a1f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import platform
import shutil
import sys
import warnings
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
version_file = 'mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
def make_cuda_ext(name, module, sources, sources_cuda=[]):
define_macros = []
extra_compile_args = {'cxx': []}
if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
define_macros += [('WITH_CUDA', None)]
extension = CUDAExtension
extra_compile_args['nvcc'] = [
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
]
sources += sources_cuda
else:
print(f'Compiling {name} without CUDA')
extension = CppExtension
return extension(
name=f'{module}.{name}',
sources=[os.path.join(*module.split('.'), p) for p in sources],
define_macros=define_macros,
extra_compile_args=extra_compile_args)
def parse_requirements(fname='requirements.txt', with_version=True):
"""Parse the package dependencies listed in a requirements file but strips
specific versioning information.
Args:
fname (str): path to requirements file
with_version (bool, default=False): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import sys
from os.path import exists
import re
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
elif '@git+' in line:
info['package'] = line
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently package_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages
def add_mim_extension():
"""Add extra files that are required to support MIM into the package.
These files will be added by creating a symlink to the originals if the
package is installed in `editable` mode (e.g. pip install -e .), or by
copying from the originals otherwise.
"""
# parse installment mode
if 'develop' in sys.argv:
# installed by `pip install -e .`
if platform.system() == 'Windows':
# set `copy` mode here since symlink fails on Windows.
mode = 'copy'
else:
mode = 'symlink'
elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
# installed by `pip install .`
# or create source distribution by `python setup.py sdist`
mode = 'copy'
else:
return
filenames = ['tools', 'configs', 'demo', 'model-index.yml']
repo_path = osp.dirname(__file__)
mim_path = osp.join(repo_path, 'mmdet', '.mim')
os.makedirs(mim_path, exist_ok=True)
for filename in filenames:
if osp.exists(filename):
src_path = osp.join(repo_path, filename)
tar_path = osp.join(mim_path, filename)
if osp.isfile(tar_path) or osp.islink(tar_path):
os.remove(tar_path)
elif osp.isdir(tar_path):
shutil.rmtree(tar_path)
if mode == 'symlink':
src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
os.symlink(src_relpath, tar_path)
elif mode == 'copy':
if osp.isfile(src_path):
shutil.copyfile(src_path, tar_path)
elif osp.isdir(src_path):
shutil.copytree(src_path, tar_path)
else:
warnings.warn(f'Cannot copy file {src_path}.')
else:
raise ValueError(f'Invalid mode {mode}')
if __name__ == '__main__':
add_mim_extension()
setup(
name='mmdet',
version=get_version(),
description='OpenMMLab Detection Toolbox and Benchmark',
long_description=readme(),
long_description_content_type='text/markdown',
author='MMDetection Contributors',
author_email='openmmlab@gmail.com',
keywords='computer vision, object detection',
url='https://github.com/open-mmlab/mmdetection',
packages=find_packages(exclude=('configs', 'tools', 'demo')),
# package_data={'mmdet.ops': ['*/*.so']},
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
license='Apache License 2.0',
install_requires=parse_requirements('requirements/runtime.txt'),
extras_require={
'all': parse_requirements('requirements.txt'),
'tests': parse_requirements('requirements/tests.txt'),
'build': parse_requirements('requirements/build.txt'),
'optional': parse_requirements('requirements/optional.txt'),
},
ext_modules=[],
# ext_modules=[
# make_cuda_ext(
# name='deform_conv_ext',
# module='mmdet.ops.dcn',
# sources=['src/deform_conv_ext.cpp'],
# sources_cuda=[
# 'src/cuda/deform_conv_cuda.cpp',
# 'src/cuda/deform_conv_cuda_kernel.cu'
# ]),
# ],
cmdclass={'build_ext': BuildExtension},
zip_safe=False)
| 35.758621 | 125 | 0.559185 |
8c35536a8144a4f38a337263211a855bf0fcf497 | 5,844 | py | Python | machine-learning/logisticRegression.py | SupratimH/learning-data-science | 005f2f87b01b36b53179d1c1641b8a6c876358e1 | [
"MIT"
] | 3 | 2019-11-03T05:18:28.000Z | 2021-06-09T03:03:34.000Z | machine-learning/logisticRegression.py | SupratimH/learning-data-science | 005f2f87b01b36b53179d1c1641b8a6c876358e1 | [
"MIT"
] | null | null | null | machine-learning/logisticRegression.py | SupratimH/learning-data-science | 005f2f87b01b36b53179d1c1641b8a6c876358e1 | [
"MIT"
] | 1 | 2020-04-02T21:14:57.000Z | 2020-04-02T21:14:57.000Z | """
Created on Tue Sep 14 2018
@author: Supratim Haldar
@Description: My implementation of logistic regression (classifier) algorithm
"""
import numpy as np
from scipy import optimize
# =============================================================================
# Function to calculate value Sigmoid Function of any variable z.
# z can be a matrix, vector or scalar
# sigmoid g(z) = 1/(1 + e^-z)
# =============================================================================
def sigmoid(z):
sig = 1.0/(1.0 + np.exp(-z))
# Due to floating point presision related issues, e^-z might return very
# small or very large values, resulting in sigmoid = 1 or 0. Since we will
# compute log of these values later in cost function, we want to avoid
# sig = 1 or 0, and hardcode to following values instead.
sig[sig == 1.0] = 0.9999
sig[sig == 0.0] = 0.0001
return sig
# =============================================================================
# Compute cost of Logistic Regression with multiple features
# Vectorized implementation
# Input: data_X = mxn matrix, data_y = m-dim vector, theta = n-dim vector
# Output: cost = 1-dim vector
# =============================================================================
def computeCost(theta, data_X, data_y, lambda_reg = 0):
m = len(data_X) # No of rows
n = len(data_X[0]) # No of features
theta = theta.reshape(n,1)
# h(x) = g(z) = g(theta0 + theta1*X1 + theta2*X2 + .. + thetan*Xn)
# h(x) = g(X * theta) = Sigmoid(X * theta) = m-dim vector
hx = sigmoid(np.dot(data_X, theta))
cost = - np.dot(data_y.T, np.log(hx)) - np.dot((1 - data_y).T, np.log(1 - hx))
# This is unregularized cost
J = cost/m
# Adding regularization. Setting theta0 to 0, because theta0 will not be
# regularized
J_reg = (lambda_reg/(2*m)) * np.dot(theta[1:,:].T, theta[1:,:])
J = J + J_reg
return J
# =============================================================================
# Compute gradient or derivative of cost function over parameter, i.e.
# d J(Theta)/d Theta
# =============================================================================
def computeGradient(theta, data_X, data_y, lambda_reg = 0):
m = len(data_X) # No of rows
n = len(data_X[0]) # No of features
theta = theta.reshape(n,1)
theta_gradient = np.zeros(theta.shape)
cost = 0
#print("==== Inside computeGradient() ====", data_X.shape, data_y.shape)
cost = computeCost(theta, data_X, data_y, lambda_reg)
hx = sigmoid(np.dot(data_X, theta))
error = hx - data_y
theta_gradient = (1/m) * (np.dot(data_X.T, error))
# Apply regularization
theta_reg = (lambda_reg/m) * theta[1:,:]
theta_gradient[1:,:] = theta_gradient[1:,:] + theta_reg
#print("==== Inside computeGradient() ====", cost)
return cost.flatten(), theta_gradient.flatten()
# =============================================================================
# Gradient Descent of Linear Regression with multiple features
# Vectorized implementation
# Input: data_X = mxn matrix, data_y = m-dim vector, theta = n-dim vector
# alpha = learning rate, num_iters = no of iterations/steps for GD
# Output: theta = n-dim vector,
# J_history = cost at each iteration, a num_iters-dim vector
# =============================================================================
def gradientDescent(theta, data_X, data_y, alpha, num_iters, lambda_reg = 0):
m = len(data_X) # No of rows
J_history = np.zeros([num_iters, 1])
for i in range(num_iters):
hx = np.zeros(data_y.shape)
error = np.zeros(data_y.shape)
theta_change = np.zeros(theta.shape)
hx = sigmoid(np.dot(data_X, theta))
error = hx - data_y
theta_change = (alpha/m) * (np.dot(data_X.T, error))
# Apply regularization
temp = theta[0,0]
theta[0,0] = 0
theta_reg = (lambda_reg/m) * theta
theta[0,0] = temp;
theta_change = theta_change + theta_reg
theta = theta - theta_change
J_history[i] = computeCost(theta, data_X, data_y, lambda_reg)
return theta, theta_change, J_history
# =============================================================================
# Predict results based on test input feature and parameter values
# Compare with output results, if already available
# =============================================================================
def predict(theta, data_X, data_y):
prob = sigmoid(np.dot(data_X, theta))
pred = prob >= 0.5
accuracy = np.mean((pred == data_y)) * 100
print("Predict: Prediction Accuracy % =", accuracy)
return pred, accuracy
# =============================================================================
# One vs All method of logistic regression
# Used for data with multiple classification outputs
# =============================================================================
def oneVsAll(data_X, data_y, num_labels, lambda_reg):
n = data_X.shape[1] # No of features
all_theta = np.zeros([num_labels, n])
initial_theta = np.zeros([n, 1])
print("OneVsAll: Shape of X and y: ", data_X.shape, data_y.shape)
for label in range(num_labels):
        # Calling an advanced optimization algorithm to converge the gradient
theta_optimized = optimize.minimize( \
computeGradient, \
initial_theta, \
args=(data_X, data_y == label, lambda_reg), \
method = "CG",
jac=True, options={'disp': True, 'maxiter': 100} \
)
print("OneVsAll: Optimization Result =", theta_optimized.message, theta_optimized.success)
theta = theta_optimized.x.reshape(n, 1)
all_theta[label,:] = theta.T
return all_theta
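# --- Illustrative usage sketch (not part of the original module) ---
# A tiny synthetic binary-classification run showing how the functions above fit
# together; the data, parameter values and thresholds below are made up purely
# for demonstration.
if __name__ == '__main__':
    np.random.seed(0)
    m, n = 100, 3
    # Design matrix: intercept column of ones plus two random features
    data_X = np.hstack([np.ones((m, 1)), np.random.randn(m, n - 1)])
    true_theta = np.array([[0.5], [2.0], [-1.0]])
    data_y = (sigmoid(np.dot(data_X, true_theta)) >= 0.5).astype(float)
    print("Cost at zero theta:", computeCost(np.zeros((n, 1)), data_X, data_y))
    print("Cost at true theta:", computeCost(true_theta, data_X, data_y))
    # Labels were generated from true_theta, so accuracy here should be 100%
    predict(true_theta, data_X, data_y)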
| 38.701987 | 98 | 0.53371 |
de4877a45488929ba0afebaf1b7c163d4730b6f8 | 1,615 | py | Python | website/addons/dataverse/views/widget.py | lbanner/osf.io | 1898ef0ff8bd91713e94c60e7463b5f81ac62caa | [
"Apache-2.0"
] | null | null | null | website/addons/dataverse/views/widget.py | lbanner/osf.io | 1898ef0ff8bd91713e94c60e7463b5f81ac62caa | [
"Apache-2.0"
] | null | null | null | website/addons/dataverse/views/widget.py | lbanner/osf.io | 1898ef0ff8bd91713e94c60e7463b5f81ac62caa | [
"Apache-2.0"
] | null | null | null | import httplib as http
from website.addons.dataverse.client import connect_from_settings_or_403, \
get_dataverse, get_study
from website.addons.dataverse.settings import HOST
from website.project.decorators import must_be_contributor_or_public, \
must_have_addon
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_widget(node_addon, **kwargs):
node = node_addon.owner
widget_url = node.api_url_for('dataverse_get_widget_contents')
ret = {
'complete': node_addon.is_fully_configured,
'widget_url': widget_url,
}
ret.update(node_addon.config.to_json())
return ret, http.OK
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_get_widget_contents(node_addon, **kwargs):
data = {
'connected': False,
}
if not node_addon.is_fully_configured:
return {'data': data}, http.OK
doi = node_addon.study_hdl
alias = node_addon.dataverse_alias
connection = connect_from_settings_or_403(node_addon.user_settings)
dataverse = get_dataverse(connection, alias)
study = get_study(dataverse, doi)
if study is None:
return {'data': data}, http.BAD_REQUEST
dataverse_url = 'http://{0}/dvn/dv/'.format(HOST) + alias
study_url = 'http://dx.doi.org/' + doi
data.update({
'connected': True,
'dataverse': node_addon.dataverse,
'dataverseUrl': dataverse_url,
'study': node_addon.study,
'doi': doi,
'studyUrl': study_url,
'citation': study.citation,
})
return {'data': data}, http.OK | 27.372881 | 75 | 0.69226 |
e0823675445faff7cb450db658d6a60ff86260b0 | 267 | py | Python | client.py | Alwaysproblem/Socket-receive-timeout | d5c3ea25a2b5f4d88870204c1c47bac950c0c887 | [
"Apache-2.0"
] | 1 | 2019-03-06T03:47:00.000Z | 2019-03-06T03:47:00.000Z | client.py | Alwaysproblem/Socket-receive-timeout | d5c3ea25a2b5f4d88870204c1c47bac950c0c887 | [
"Apache-2.0"
] | null | null | null | client.py | Alwaysproblem/Socket-receive-timeout | d5c3ea25a2b5f4d88870204c1c47bac950c0c887 | [
"Apache-2.0"
] | null | null | null | import socket
address = ('127.0.0.1', 31500)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
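# Minimal UDP client: the loop below keeps prompting for a message and sends it
# to the server at 127.0.0.1:31500; entering an empty message ends the loop.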
while True:
msg = input("please input message:")
if not msg:
break
s.sendto(msg.encode('utf-8'), address)
s.close() | 22.25 | 55 | 0.573034 |
aeb44a19027a9efc9eeaa2359b72f4b382c58092 | 7,709 | py | Python | src/spring-cloud/azext_spring_cloud/commands.py | bgrainger/azure-cli-extensions | 86680f7a592c659456feb605104809efa5fae353 | [
"MIT"
] | 1 | 2021-12-17T01:27:06.000Z | 2021-12-17T01:27:06.000Z | src/spring-cloud/azext_spring_cloud/commands.py | bgrainger/azure-cli-extensions | 86680f7a592c659456feb605104809efa5fae353 | [
"MIT"
] | null | null | null | src/spring-cloud/azext_spring_cloud/commands.py | bgrainger/azure-cli-extensions | 86680f7a592c659456feb605104809efa5fae353 | [
"MIT"
] | 1 | 2021-12-19T09:17:31.000Z | 2021-12-19T09:17:31.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azext_spring_cloud._utils import handle_asc_exception
from ._client_factory import (cf_app_services,
cf_spring_cloud,
cf_spring_cloud_20201101preview,
cf_spring_cloud_20210601preview,
cf_config_servers)
from ._transformers import (transform_spring_cloud_table_output,
transform_app_table_output,
transform_spring_cloud_deployment_output,
transform_spring_cloud_certificate_output,
transform_spring_cloud_custom_domain_output)
# pylint: disable=too-many-statements
def load_command_table(self, _):
with self.command_group('spring-cloud', client_factory=cf_app_services,
exception_handler=handle_asc_exception) as g:
g.custom_command('create', 'spring_cloud_create', supports_no_wait=True, client_factory=cf_spring_cloud)
g.custom_command('update', 'spring_cloud_update', supports_no_wait=True, client_factory=cf_spring_cloud)
g.custom_command('delete', 'spring_cloud_delete', supports_no_wait=True)
g.custom_command('list', 'spring_cloud_list', table_transformer=transform_spring_cloud_table_output)
g.custom_show_command('show', 'spring_cloud_get', table_transformer=transform_spring_cloud_table_output)
with self.command_group('spring-cloud test-endpoint', client_factory=cf_spring_cloud,
exception_handler=handle_asc_exception) as g:
g.custom_command('enable ', 'enable_test_endpoint')
g.custom_show_command('disable ', 'disable_test_endpoint')
g.custom_command('renew-key', 'regenerate_keys')
g.custom_command('list', 'list_keys')
with self.command_group('spring-cloud config-server', client_factory=cf_config_servers,
exception_handler=handle_asc_exception) as g:
g.custom_command('set', 'config_set', supports_no_wait=True)
g.custom_command('clear', 'config_delete')
g.custom_show_command('show', 'config_get')
with self.command_group('spring-cloud config-server git', client_factory=cf_config_servers,
supports_local_cache=True, exception_handler=handle_asc_exception) as g:
g.custom_command('set', 'config_git_set')
g.custom_command('repo add', 'config_repo_add')
g.custom_command('repo remove', 'config_repo_delete')
g.custom_command('repo update', 'config_repo_update')
g.custom_command('repo list', 'config_repo_list')
with self.command_group('spring-cloud app', client_factory=cf_spring_cloud_20210601preview,
exception_handler=handle_asc_exception) as g:
g.custom_command('create', 'app_create')
g.custom_command('update', 'app_update')
g.custom_command('deploy', 'app_deploy', supports_no_wait=True)
g.custom_command('scale', 'app_scale', supports_no_wait=True)
g.custom_command('show-deploy-log', 'app_get_build_log')
g.custom_command('set-deployment', 'app_set_deployment',
supports_no_wait=True)
g.custom_command('unset-deployment', 'app_unset_deployment',
supports_no_wait=True)
g.custom_command('delete', 'app_delete')
g.custom_command('list', 'app_list',
table_transformer=transform_app_table_output)
g.custom_show_command(
'show', 'app_get', table_transformer=transform_app_table_output)
g.custom_command('start', 'app_start', supports_no_wait=True)
g.custom_command('stop', 'app_stop', supports_no_wait=True)
g.custom_command('restart', 'app_restart', supports_no_wait=True)
g.custom_command('logs', 'app_tail_log')
with self.command_group('spring-cloud app identity', client_factory=cf_spring_cloud,
exception_handler=handle_asc_exception) as g:
g.custom_command('assign', 'app_identity_assign')
g.custom_command('remove', 'app_identity_remove')
g.custom_show_command('show', 'app_identity_show')
with self.command_group('spring-cloud app log', client_factory=cf_spring_cloud,
deprecate_info=g.deprecate(redirect='az spring-cloud app logs', hide=True),
exception_handler=handle_asc_exception) as g:
g.custom_command('tail', 'app_tail_log')
with self.command_group('spring-cloud app deployment', client_factory=cf_spring_cloud_20210601preview,
exception_handler=handle_asc_exception) as g:
g.custom_command('create', 'deployment_create', supports_no_wait=True)
g.custom_command('list', 'deployment_list',
table_transformer=transform_spring_cloud_deployment_output)
g.custom_show_command(
'show', 'deployment_get', table_transformer=transform_spring_cloud_deployment_output)
g.custom_command('delete', 'deployment_delete', supports_no_wait=True)
with self.command_group('spring-cloud app binding', client_factory=cf_spring_cloud,
exception_handler=handle_asc_exception) as g:
g.custom_command('list', 'binding_list')
g.custom_show_command('show', 'binding_get')
g.custom_command('cosmos add', 'binding_cosmos_add')
g.custom_command('cosmos update', 'binding_cosmos_update')
g.custom_command('mysql add', 'binding_mysql_add')
g.custom_command('mysql update', 'binding_mysql_update')
g.custom_command('redis add', 'binding_redis_add')
g.custom_command('redis update', 'binding_redis_update')
g.custom_show_command('remove', 'binding_remove')
with self.command_group('spring-cloud certificate', client_factory=cf_spring_cloud,
exception_handler=handle_asc_exception) as g:
g.custom_command('add', 'certificate_add')
g.custom_show_command('show', 'certificate_show', table_transformer=transform_spring_cloud_certificate_output)
g.custom_command('list', 'certificate_list', table_transformer=transform_spring_cloud_certificate_output)
g.custom_command('remove', 'certificate_remove')
with self.command_group('spring-cloud app custom-domain', client_factory=cf_spring_cloud,
exception_handler=handle_asc_exception) as g:
g.custom_command('bind', 'domain_bind')
g.custom_show_command('show', 'domain_show', table_transformer=transform_spring_cloud_custom_domain_output)
g.custom_command('list', 'domain_list', table_transformer=transform_spring_cloud_custom_domain_output)
g.custom_command('update', 'domain_update')
g.custom_command('unbind', 'domain_unbind')
with self.command_group('spring-cloud app-insights',
client_factory=cf_spring_cloud_20201101preview,
exception_handler=handle_asc_exception) as g:
g.custom_command('update', 'app_insights_update', supports_no_wait=True)
g.custom_show_command('show', 'app_insights_show')
with self.command_group('spring-cloud', exception_handler=handle_asc_exception):
pass
| 60.226563 | 118 | 0.67298 |
af16627923ca06288d140694e66b5719e623b37b | 5,329 | py | Python | onlinecourse/models.py | Igorsvr/final-cloud-app-with-database | 5bde2da641f7704c4a78b9d59737e639dff876c8 | [
"Apache-2.0"
] | null | null | null | onlinecourse/models.py | Igorsvr/final-cloud-app-with-database | 5bde2da641f7704c4a78b9d59737e639dff876c8 | [
"Apache-2.0"
] | null | null | null | onlinecourse/models.py | Igorsvr/final-cloud-app-with-database | 5bde2da641f7704c4a78b9d59737e639dff876c8 | [
"Apache-2.0"
] | null | null | null | import sys
from django.utils.timezone import now
try:
from django.db import models
except Exception:
print("There was an error loading django modules. Do you have django installed?")
sys.exit()
from django.conf import settings
import uuid
# Instructor model
class Instructor(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
full_time = models.BooleanField(default=True)
total_learners = models.IntegerField()
def __str__(self):
return self.user.username
# Learner model
class Learner(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
STUDENT = 'student'
DEVELOPER = 'developer'
DATA_SCIENTIST = 'data_scientist'
DATABASE_ADMIN = 'dba'
OCCUPATION_CHOICES = [
(STUDENT, 'Student'),
(DEVELOPER, 'Developer'),
(DATA_SCIENTIST, 'Data Scientist'),
(DATABASE_ADMIN, 'Database Admin')
]
occupation = models.CharField(
null=False,
max_length=20,
choices=OCCUPATION_CHOICES,
default=STUDENT
)
social_link = models.URLField(max_length=200)
def __str__(self):
return self.user.username + "," + \
self.occupation
# Course model
class Course(models.Model):
name = models.CharField(null=False, max_length=30, default='online course')
image = models.ImageField(upload_to='course_images/')
description = models.CharField(max_length=1000)
pub_date = models.DateField(null=True)
instructors = models.ManyToManyField(Instructor)
users = models.ManyToManyField(settings.AUTH_USER_MODEL, through='Enrollment')
total_enrollment = models.IntegerField(default=0)
is_enrolled = False
def __str__(self):
return "Name: " + self.name + "," + \
"Description: " + self.description
# Lesson model
class Lesson(models.Model):
title = models.CharField(max_length=200, default="title")
order = models.IntegerField(default=0)
course = models.ForeignKey(Course, on_delete=models.CASCADE)
content = models.TextField()
# Enrollment model
# <HINT> Once a user enrolled a class, an enrollment entry should be created between the user and course
# And we could use the enrollment to track information such as exam submissions
class Enrollment(models.Model):
AUDIT = 'audit'
HONOR = 'honor'
BETA = 'BETA'
COURSE_MODES = [
(AUDIT, 'Audit'),
(HONOR, 'Honor'),
(BETA, 'BETA')
]
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
course = models.ForeignKey(Course, on_delete=models.CASCADE)
date_enrolled = models.DateField(default=now)
mode = models.CharField(max_length=10, choices=COURSE_MODES, default=AUDIT)
rating = models.FloatField(default=5.0)
# <HINT> Create a Question Model with:
# Used to persist question content for a course
# Has a One-To-Many (or Many-To-Many if you want to reuse questions) relationship with course
# Has a grade point for each question
# Has question content
# Other fields and methods you would like to design
class Question(models.Model):
# Foreign key to lesson
course = models.ForeignKey(Course, on_delete=models.CASCADE)
# question text
question_text = models.CharField(max_length=100, default=0)
# question grade/mark
def is_get_score(self, selected_ids):
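        # Full credit only when every correct choice of this question appears in
        # selected_ids (extra wrong selections are not penalised by this check).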
all_answers = self.choice_set.filter(is_correct=True).count()
selected_correct = self.choice_set.filter(is_correct=True, id__in=selected_ids).count()
if all_answers == selected_correct:
return True
else:
return False
#class Question(models.Model):
# Foreign key to lesson
# question text
# question grade/mark
# <HINT> A sample model method to calculate if learner get the score of the question
#def is_get_score(self, selected_ids):
# all_answers = self.choice_set.filter(is_correct=True).count()
# selected_correct = self.choice_set.filter(is_correct=True, id__in=selected_ids).count()
# if all_answers == selected_correct:
# return True
# else:
# return False
# <HINT> Create a Choice Model with:
# Used to persist choice content for a question
# One-To-Many (or Many-To-Many if you want to reuse choices) relationship with Question
# Choice content
# Indicate if this choice of the question is a correct one or not
# Other fields and methods you would like to design
# class Choice(models.Model):
class Choice(models.Model):
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    is_correct = models.BooleanField(default=False)
    choice_text = models.CharField(max_length=100)
# <HINT> The submission model
# One enrollment could have multiple submission
# One submission could have multiple choices
# One choice could belong to multiple submissions
#class Submission(models.Model):
# enrollment = models.ForeignKey(Enrollment, on_delete=models.CASCADE)
# choices = models.ManyToManyField(Choice)
# Other fields and methods you would like to design
class Submission(models.Model):
enrollment = models.ForeignKey(Enrollment, on_delete=models.CASCADE)
choices = models.ManyToManyField(Choice)
| 33.942675 | 104 | 0.703509 |
5fb5177e66abc544ab1a03cd9f8f86b0c063a238 | 1,371 | py | Python | main.py | nfernandezsanz/BSCScan-DiscordBOT | b05faa7518291e918cee853e8941975c34a3e20e | [
"MIT"
] | 2 | 2021-12-09T18:48:20.000Z | 2021-12-31T06:05:23.000Z | main.py | nfernandezsanz/BSCScan-DiscordBOT | b05faa7518291e918cee853e8941975c34a3e20e | [
"MIT"
] | null | null | null | main.py | nfernandezsanz/BSCScan-DiscordBOT | b05faa7518291e918cee853e8941975c34a3e20e | [
"MIT"
] | 1 | 2021-11-17T04:03:54.000Z | 2021-11-17T04:03:54.000Z | import asyncio
import discord
from discord.ext import commands
from bscscan import *
from config import *
async def update_task(bot, contract):
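    # Background task: refresh the bot's Discord presence with the latest holder
    # count (as returned by token_info) roughly every 500 seconds.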
counter = 0
while(True):
data = token_info(contract)
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name= str(data) + " holders 🚀"))
await asyncio.sleep(500)
class Price_Tracker(commands.Bot):
def __init__(self, command_prefix, self_bot, contract):
commands.Bot.__init__(self, command_prefix=command_prefix, self_bot=self_bot, contract=contract)
self.message1 = "[INFO]: Bot now online"
self.coin = str(contract)
async def on_ready(self):
print(f'Logged in as {self.user} (ID: {self.user.id})')
print('------')
self.loop.create_task(update_task(self, self.coin))
bot_info_list = read_bots()
loop = asyncio.get_event_loop()
task = list()
bot_list = list()
i = 0
for bot in bot_info_list:
bot_name = list(bot.keys())[0]
bot_token = bot[bot_name][0]
print("Initializing: " + bot_name + " ...")
#Create Bot
bot_list.append(Price_Tracker(command_prefix="~", self_bot=False, contract=bot[bot_name][1]))
task.append(loop.create_task(bot_list[i].start(bot_token)))
i+=1
try:
loop.run_forever()
finally:
loop.stop()
| 27.979592 | 127 | 0.655726 |
64f44c17d473f6a671b7ecd97016cdec4fdcd1b6 | 2,188 | py | Python | hanged.py | JasanHdz/basic_exercises | e64b331d630161d317b7eaa34d04bad4b7a66aa2 | [
"MIT"
] | 1 | 2020-01-26T03:01:03.000Z | 2020-01-26T03:01:03.000Z | hanged.py | JasanHdz/basic_exercises | e64b331d630161d317b7eaa34d04bad4b7a66aa2 | [
"MIT"
] | null | null | null | hanged.py | JasanHdz/basic_exercises | e64b331d630161d317b7eaa34d04bad4b7a66aa2 | [
"MIT"
] | null | null | null | # -*- conding: utf-8 -*-
import random
IMAGES = ['''
+---+
| |
|
|
|
|
=========''','''
+---+
| |
0 |
|
|
|
=========''','''
+---+
| |
0 |
| |
|
|
=========''','''
+---+
| |
0 |
/| |
|
|
=========''','''
+---+
| |
0 |
/|\ |
|
|
=========''','''
+---+
| |
0 |
/|\ |
| |
|
=========''','''
+---+
| |
0 |
/|\ |
| |
/ |
=========''','''
+---+
| |
0 |
/|\ |
| |
/ \ |
=========''','''
''']
WORDS = [
'couch',
'dryer',
'government',
'deputy',
'democracy',
'computer',
'keyboard',
'washing machine',
]
def random_word():
idx = random.randint(0, len(WORDS) - 1)
return WORDS[idx]
def display_board(hidden_word, tries):
print(IMAGES[tries])
print('')
print(hidden_word)
print('--- * --- * --- * --- * --- * ---')
def run():
word = random_word()
hidden_word = ['-'] * len(word)
tries = 0
while True:
display_board(hidden_word, tries)
        current_letter = str(input('Choose a letter: '))
letter_indexes = []
for idx in range(len(word)):
if word[idx] == current_letter:
letter_indexes.append(idx)
if len(letter_indexes) == 0:
tries += 1
if tries == 7:
display_board(hidden_word, tries)
print('')
print('The correct word was: {}'.format(word))
break
else:
for idx in letter_indexes:
hidden_word[idx] = current_letter
letter_indexes = []
try:
hidden_word.index('-')
except ValueError:
print('')
            print('Congratulations! You won. The word is {}'.format(word))
break
if __name__ == '__main__':
print('W E L C O M E T O H A N G E D')
run() | 18.082645 | 71 | 0.334552 |
f3f8c171466b7d420a086e8cf4441abbd54afddd | 203 | py | Python | app/gws/ext/layer/postgres/__init__.py | ewie/gbd-websuite | 6f2814c7bb64d11cb5a0deec712df751718fb3e1 | [
"Apache-2.0"
] | null | null | null | app/gws/ext/layer/postgres/__init__.py | ewie/gbd-websuite | 6f2814c7bb64d11cb5a0deec712df751718fb3e1 | [
"Apache-2.0"
] | null | null | null | app/gws/ext/layer/postgres/__init__.py | ewie/gbd-websuite | 6f2814c7bb64d11cb5a0deec712df751718fb3e1 | [
"Apache-2.0"
] | null | null | null | import gws.ext.db.provider.postgres.layer
class Config(gws.ext.db.provider.postgres.layer.Config):
"""Postgres layer"""
pass
class Object(gws.ext.db.provider.postgres.layer.Object):
pass
| 18.454545 | 56 | 0.729064 |
632d7556ff6d7cb7f4cbc4c26182c239743d4796 | 6,141 | py | Python | rfcn/core/metric.py | CharlesTousignant/Deep-Feature-Flow | b89678d5957d5d1ecafd4ca0d540d001b3b4cbbf | [
"MIT"
] | 1,378 | 2017-05-11T15:23:44.000Z | 2022-03-30T01:50:34.000Z | rfcn/core/metric.py | CharlesTousignant/Deep-Feature-Flow | b89678d5957d5d1ecafd4ca0d540d001b3b4cbbf | [
"MIT"
] | 91 | 2017-05-12T11:44:43.000Z | 2022-02-01T07:33:52.000Z | rfcn/core/metric.py | CharlesTousignant/Deep-Feature-Flow | b89678d5957d5d1ecafd4ca0d540d001b3b4cbbf | [
"MIT"
] | 348 | 2017-05-11T15:27:37.000Z | 2022-01-19T13:33:01.000Z | # --------------------------------------------------------
# Deep Feature Flow
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by Yuwen Xiong
# --------------------------------------------------------
# Based on:
# MX-RCNN
# Copyright (c) 2016 by Contributors
# Licence under The Apache 2.0 License
# https://github.com/ijkguo/mx-rcnn/
# --------------------------------------------------------
import mxnet as mx
import numpy as np
def get_rpn_names():
pred = ['rpn_cls_prob', 'rpn_bbox_loss']
label = ['rpn_label', 'rpn_bbox_target', 'rpn_bbox_weight']
return pred, label
def get_rcnn_names(cfg):
pred = ['rcnn_cls_prob', 'rcnn_bbox_loss']
label = ['rcnn_label', 'rcnn_bbox_target', 'rcnn_bbox_weight']
if cfg.TRAIN.ENABLE_OHEM or cfg.TRAIN.END2END:
pred.append('rcnn_label')
if cfg.TRAIN.END2END:
rpn_pred, rpn_label = get_rpn_names()
pred = rpn_pred + pred
label = rpn_label
return pred, label
class RPNAccMetric(mx.metric.EvalMetric):
def __init__(self):
super(RPNAccMetric, self).__init__('RPNAcc')
self.pred, self.label = get_rpn_names()
def update(self, labels, preds):
pred = preds[self.pred.index('rpn_cls_prob')]
label = labels[self.label.index('rpn_label')]
# pred (b, c, p) or (b, c, h, w)
pred_label = mx.ndarray.argmax_channel(pred).asnumpy().astype('int32')
pred_label = pred_label.reshape((pred_label.shape[0], -1))
# label (b, p)
label = label.asnumpy().astype('int32')
# filter with keep_inds
keep_inds = np.where(label != -1)
pred_label = pred_label[keep_inds]
label = label[keep_inds]
self.sum_metric += np.sum(pred_label.flat == label.flat)
self.num_inst += len(pred_label.flat)
class RCNNAccMetric(mx.metric.EvalMetric):
def __init__(self, cfg):
super(RCNNAccMetric, self).__init__('RCNNAcc')
self.e2e = cfg.TRAIN.END2END
self.ohem = cfg.TRAIN.ENABLE_OHEM
self.pred, self.label = get_rcnn_names(cfg)
def update(self, labels, preds):
pred = preds[self.pred.index('rcnn_cls_prob')]
if self.ohem or self.e2e:
label = preds[self.pred.index('rcnn_label')]
else:
label = labels[self.label.index('rcnn_label')]
last_dim = pred.shape[-1]
pred_label = pred.asnumpy().reshape(-1, last_dim).argmax(axis=1).astype('int32')
label = label.asnumpy().reshape(-1,).astype('int32')
# filter with keep_inds
keep_inds = np.where(label != -1)
pred_label = pred_label[keep_inds]
label = label[keep_inds]
self.sum_metric += np.sum(pred_label.flat == label.flat)
self.num_inst += len(pred_label.flat)
class RPNLogLossMetric(mx.metric.EvalMetric):
def __init__(self):
super(RPNLogLossMetric, self).__init__('RPNLogLoss')
self.pred, self.label = get_rpn_names()
def update(self, labels, preds):
pred = preds[self.pred.index('rpn_cls_prob')]
label = labels[self.label.index('rpn_label')]
# label (b, p)
label = label.asnumpy().astype('int32').reshape((-1))
# pred (b, c, p) or (b, c, h, w) --> (b, p, c) --> (b*p, c)
pred = pred.asnumpy().reshape((pred.shape[0], pred.shape[1], -1)).transpose((0, 2, 1))
pred = pred.reshape((label.shape[0], -1))
# filter with keep_inds
keep_inds = np.where(label != -1)[0]
label = label[keep_inds]
cls = pred[keep_inds, label]
cls += 1e-14
cls_loss = -1 * np.log(cls)
cls_loss = np.sum(cls_loss)
self.sum_metric += cls_loss
self.num_inst += label.shape[0]
class RCNNLogLossMetric(mx.metric.EvalMetric):
def __init__(self, cfg):
super(RCNNLogLossMetric, self).__init__('RCNNLogLoss')
self.e2e = cfg.TRAIN.END2END
self.ohem = cfg.TRAIN.ENABLE_OHEM
self.pred, self.label = get_rcnn_names(cfg)
def update(self, labels, preds):
pred = preds[self.pred.index('rcnn_cls_prob')]
if self.ohem or self.e2e:
label = preds[self.pred.index('rcnn_label')]
else:
label = labels[self.label.index('rcnn_label')]
last_dim = pred.shape[-1]
pred = pred.asnumpy().reshape(-1, last_dim)
label = label.asnumpy().reshape(-1,).astype('int32')
# filter with keep_inds
keep_inds = np.where(label != -1)[0]
label = label[keep_inds]
cls = pred[keep_inds, label]
cls += 1e-14
cls_loss = -1 * np.log(cls)
cls_loss = np.sum(cls_loss)
self.sum_metric += cls_loss
self.num_inst += label.shape[0]
class RPNL1LossMetric(mx.metric.EvalMetric):
def __init__(self):
super(RPNL1LossMetric, self).__init__('RPNL1Loss')
self.pred, self.label = get_rpn_names()
def update(self, labels, preds):
bbox_loss = preds[self.pred.index('rpn_bbox_loss')].asnumpy()
# calculate num_inst (average on those kept anchors)
label = labels[self.label.index('rpn_label')].asnumpy()
num_inst = np.sum(label != -1)
self.sum_metric += np.sum(bbox_loss)
self.num_inst += num_inst
class RCNNL1LossMetric(mx.metric.EvalMetric):
def __init__(self, cfg):
super(RCNNL1LossMetric, self).__init__('RCNNL1Loss')
self.e2e = cfg.TRAIN.END2END
self.ohem = cfg.TRAIN.ENABLE_OHEM
self.pred, self.label = get_rcnn_names(cfg)
def update(self, labels, preds):
bbox_loss = preds[self.pred.index('rcnn_bbox_loss')].asnumpy()
if self.ohem:
label = preds[self.pred.index('rcnn_label')].asnumpy()
else:
if self.e2e:
label = preds[self.pred.index('rcnn_label')].asnumpy()
else:
label = labels[self.label.index('rcnn_label')].asnumpy()
# calculate num_inst (average on those kept anchors)
num_inst = np.sum(label != -1)
self.sum_metric += np.sum(bbox_loss)
self.num_inst += num_inst
| 33.741758 | 94 | 0.599251 |
14e90e2552061fcb638082382b1572d5421bb760 | 1,427 | py | Python | .github/scripts/ensure_actions_will_cancel.py | kuronekodaisuki/pytorch | 459270ac01f1bfcbeaffb20f1c94622561af94e0 | [
"Intel"
] | 7 | 2021-05-29T16:31:51.000Z | 2022-02-21T18:52:25.000Z | .github/scripts/ensure_actions_will_cancel.py | kuronekodaisuki/pytorch | 459270ac01f1bfcbeaffb20f1c94622561af94e0 | [
"Intel"
] | 1 | 2021-05-31T02:20:29.000Z | 2021-05-31T02:20:29.000Z | .github/scripts/ensure_actions_will_cancel.py | kuronekodaisuki/pytorch | 459270ac01f1bfcbeaffb20f1c94622561af94e0 | [
"Intel"
] | null | null | null | #!/usr/bin/env python3
import argparse
import sys
import yaml
from pathlib import Path
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
WORKFLOWS = REPO_ROOT / ".github" / "workflows"
def concurrency_key(filename):
workflow_name = filename.with_suffix("").name.replace("_", "-")
return f"{workflow_name}-${{{{ github.event.pull_request.number || github.sha }}}}"
def should_check(filename):
with open(filename, "r") as f:
content = f.read()
data = yaml.safe_load(content)
on = data.get("on", data.get(True, {}))
return "pull_request" in on
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Ensure all relevant GitHub actions jobs will be cancelled based on a concurrency key"
)
args = parser.parse_args()
files = WORKFLOWS.glob("*.yml")
errors_found = False
files = [f for f in files if should_check(f)]
for filename in files:
with open(filename, "r") as f:
data = yaml.safe_load(f)
expected = {
"group": concurrency_key(filename),
"cancel-in-progress": True,
}
if data.get("concurrency", None) != expected:
print(
f"'concurrency' incorrect or not found in '{filename.relative_to(REPO_ROOT)}'",
file=sys.stderr,
)
errors_found = True
if errors_found:
sys.exit(1)
| 25.945455 | 106 | 0.615978 |
ccdfa6ac9991d857d1935930027ad40df26e9da4 | 11,708 | py | Python | payment_lib_examples/alipay-sdk-python/virtual_environment/lib/python3.4/site-packages/alipay/aop/api/domain/AntMerchantExpandIndirectModifyModel.py | cuhk-mobitec/S3KVetter | 9ae79a242afbe6edae27c17065a88feca2896cf6 | [
"Apache-2.0"
] | 32 | 2018-05-24T08:40:15.000Z | 2019-04-04T20:54:55.000Z | payment_lib_examples/alipay-sdk-python/virtual_environment/lib/python3.4/site-packages/alipay/aop/api/domain/AntMerchantExpandIndirectModifyModel.py | cuhk-mobitec/S3KVetter | 9ae79a242afbe6edae27c17065a88feca2896cf6 | [
"Apache-2.0"
] | 7 | 2018-05-24T08:42:59.000Z | 2020-09-06T23:18:46.000Z | payment_lib_examples/alipay-sdk-python/virtual_environment/lib/python3.4/site-packages/alipay/aop/api/domain/AntMerchantExpandIndirectModifyModel.py | cuhk-mobitec/S3KVetter | 9ae79a242afbe6edae27c17065a88feca2896cf6 | [
"Apache-2.0"
] | 13 | 2018-04-25T11:27:58.000Z | 2021-03-15T12:22:21.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AddressInfo import AddressInfo
from alipay.aop.api.domain.BankCardInfo import BankCardInfo
from alipay.aop.api.domain.ContactInfo import ContactInfo
class AntMerchantExpandIndirectModifyModel(object):
def __init__(self):
self._address_info = None
self._alias_name = None
self._bankcard_info = None
self._business_license = None
self._business_license_type = None
self._category_id = None
self._contact_info = None
self._external_id = None
self._logon_id = None
self._mcc = None
self._memo = None
self._name = None
self._org_pid = None
self._pay_code_info = None
self._service_phone = None
self._source = None
self._sub_merchant_id = None
@property
def address_info(self):
return self._address_info
@address_info.setter
def address_info(self, value):
if isinstance(value, list):
self._address_info = list()
for i in value:
if isinstance(i, AddressInfo):
self._address_info.append(i)
else:
self._address_info.append(AddressInfo.from_alipay_dict(i))
@property
def alias_name(self):
return self._alias_name
@alias_name.setter
def alias_name(self, value):
self._alias_name = value
@property
def bankcard_info(self):
return self._bankcard_info
@bankcard_info.setter
def bankcard_info(self, value):
if isinstance(value, list):
self._bankcard_info = list()
for i in value:
if isinstance(i, BankCardInfo):
self._bankcard_info.append(i)
else:
self._bankcard_info.append(BankCardInfo.from_alipay_dict(i))
@property
def business_license(self):
return self._business_license
@business_license.setter
def business_license(self, value):
self._business_license = value
@property
def business_license_type(self):
return self._business_license_type
@business_license_type.setter
def business_license_type(self, value):
self._business_license_type = value
@property
def category_id(self):
return self._category_id
@category_id.setter
def category_id(self, value):
self._category_id = value
@property
def contact_info(self):
return self._contact_info
@contact_info.setter
def contact_info(self, value):
if isinstance(value, list):
self._contact_info = list()
for i in value:
if isinstance(i, ContactInfo):
self._contact_info.append(i)
else:
self._contact_info.append(ContactInfo.from_alipay_dict(i))
@property
def external_id(self):
return self._external_id
@external_id.setter
def external_id(self, value):
self._external_id = value
@property
def logon_id(self):
return self._logon_id
@logon_id.setter
def logon_id(self, value):
if isinstance(value, list):
self._logon_id = list()
for i in value:
self._logon_id.append(i)
@property
def mcc(self):
return self._mcc
@mcc.setter
def mcc(self, value):
self._mcc = value
@property
def memo(self):
return self._memo
@memo.setter
def memo(self, value):
self._memo = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def org_pid(self):
return self._org_pid
@org_pid.setter
def org_pid(self, value):
self._org_pid = value
@property
def pay_code_info(self):
return self._pay_code_info
@pay_code_info.setter
def pay_code_info(self, value):
if isinstance(value, list):
self._pay_code_info = list()
for i in value:
self._pay_code_info.append(i)
@property
def service_phone(self):
return self._service_phone
@service_phone.setter
def service_phone(self, value):
self._service_phone = value
@property
def source(self):
return self._source
@source.setter
def source(self, value):
self._source = value
@property
def sub_merchant_id(self):
return self._sub_merchant_id
@sub_merchant_id.setter
def sub_merchant_id(self, value):
self._sub_merchant_id = value
def to_alipay_dict(self):
params = dict()
if self.address_info:
if isinstance(self.address_info, list):
for i in range(0, len(self.address_info)):
element = self.address_info[i]
if hasattr(element, 'to_alipay_dict'):
self.address_info[i] = element.to_alipay_dict()
if hasattr(self.address_info, 'to_alipay_dict'):
params['address_info'] = self.address_info.to_alipay_dict()
else:
params['address_info'] = self.address_info
if self.alias_name:
if hasattr(self.alias_name, 'to_alipay_dict'):
params['alias_name'] = self.alias_name.to_alipay_dict()
else:
params['alias_name'] = self.alias_name
if self.bankcard_info:
if isinstance(self.bankcard_info, list):
for i in range(0, len(self.bankcard_info)):
element = self.bankcard_info[i]
if hasattr(element, 'to_alipay_dict'):
self.bankcard_info[i] = element.to_alipay_dict()
if hasattr(self.bankcard_info, 'to_alipay_dict'):
params['bankcard_info'] = self.bankcard_info.to_alipay_dict()
else:
params['bankcard_info'] = self.bankcard_info
if self.business_license:
if hasattr(self.business_license, 'to_alipay_dict'):
params['business_license'] = self.business_license.to_alipay_dict()
else:
params['business_license'] = self.business_license
if self.business_license_type:
if hasattr(self.business_license_type, 'to_alipay_dict'):
params['business_license_type'] = self.business_license_type.to_alipay_dict()
else:
params['business_license_type'] = self.business_license_type
if self.category_id:
if hasattr(self.category_id, 'to_alipay_dict'):
params['category_id'] = self.category_id.to_alipay_dict()
else:
params['category_id'] = self.category_id
if self.contact_info:
if isinstance(self.contact_info, list):
for i in range(0, len(self.contact_info)):
element = self.contact_info[i]
if hasattr(element, 'to_alipay_dict'):
self.contact_info[i] = element.to_alipay_dict()
if hasattr(self.contact_info, 'to_alipay_dict'):
params['contact_info'] = self.contact_info.to_alipay_dict()
else:
params['contact_info'] = self.contact_info
if self.external_id:
if hasattr(self.external_id, 'to_alipay_dict'):
params['external_id'] = self.external_id.to_alipay_dict()
else:
params['external_id'] = self.external_id
if self.logon_id:
if isinstance(self.logon_id, list):
for i in range(0, len(self.logon_id)):
element = self.logon_id[i]
if hasattr(element, 'to_alipay_dict'):
self.logon_id[i] = element.to_alipay_dict()
if hasattr(self.logon_id, 'to_alipay_dict'):
params['logon_id'] = self.logon_id.to_alipay_dict()
else:
params['logon_id'] = self.logon_id
if self.mcc:
if hasattr(self.mcc, 'to_alipay_dict'):
params['mcc'] = self.mcc.to_alipay_dict()
else:
params['mcc'] = self.mcc
if self.memo:
if hasattr(self.memo, 'to_alipay_dict'):
params['memo'] = self.memo.to_alipay_dict()
else:
params['memo'] = self.memo
if self.name:
if hasattr(self.name, 'to_alipay_dict'):
params['name'] = self.name.to_alipay_dict()
else:
params['name'] = self.name
if self.org_pid:
if hasattr(self.org_pid, 'to_alipay_dict'):
params['org_pid'] = self.org_pid.to_alipay_dict()
else:
params['org_pid'] = self.org_pid
if self.pay_code_info:
if isinstance(self.pay_code_info, list):
for i in range(0, len(self.pay_code_info)):
element = self.pay_code_info[i]
if hasattr(element, 'to_alipay_dict'):
self.pay_code_info[i] = element.to_alipay_dict()
if hasattr(self.pay_code_info, 'to_alipay_dict'):
params['pay_code_info'] = self.pay_code_info.to_alipay_dict()
else:
params['pay_code_info'] = self.pay_code_info
if self.service_phone:
if hasattr(self.service_phone, 'to_alipay_dict'):
params['service_phone'] = self.service_phone.to_alipay_dict()
else:
params['service_phone'] = self.service_phone
if self.source:
if hasattr(self.source, 'to_alipay_dict'):
params['source'] = self.source.to_alipay_dict()
else:
params['source'] = self.source
if self.sub_merchant_id:
if hasattr(self.sub_merchant_id, 'to_alipay_dict'):
params['sub_merchant_id'] = self.sub_merchant_id.to_alipay_dict()
else:
params['sub_merchant_id'] = self.sub_merchant_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AntMerchantExpandIndirectModifyModel()
if 'address_info' in d:
o.address_info = d['address_info']
if 'alias_name' in d:
o.alias_name = d['alias_name']
if 'bankcard_info' in d:
o.bankcard_info = d['bankcard_info']
if 'business_license' in d:
o.business_license = d['business_license']
if 'business_license_type' in d:
o.business_license_type = d['business_license_type']
if 'category_id' in d:
o.category_id = d['category_id']
if 'contact_info' in d:
o.contact_info = d['contact_info']
if 'external_id' in d:
o.external_id = d['external_id']
if 'logon_id' in d:
o.logon_id = d['logon_id']
if 'mcc' in d:
o.mcc = d['mcc']
if 'memo' in d:
o.memo = d['memo']
if 'name' in d:
o.name = d['name']
if 'org_pid' in d:
o.org_pid = d['org_pid']
if 'pay_code_info' in d:
o.pay_code_info = d['pay_code_info']
if 'service_phone' in d:
o.service_phone = d['service_phone']
if 'source' in d:
o.source = d['source']
if 'sub_merchant_id' in d:
o.sub_merchant_id = d['sub_merchant_id']
return o
| 35.159159 | 93 | 0.579945 |
ee0cdd7298b7119b956761eb06c54c6c476cd5d7 | 6,683 | py | Python | ionoscloud/models/image.py | ionos-cloud/sdk-python | bb22b5b93505b25de6aebae97c523a6c2242ec2e | [
"Apache-2.0"
] | null | null | null | ionoscloud/models/image.py | ionos-cloud/sdk-python | bb22b5b93505b25de6aebae97c523a6c2242ec2e | [
"Apache-2.0"
] | 6 | 2021-11-26T16:18:51.000Z | 2022-02-18T10:08:49.000Z | ionoscloud/models/image.py | ionos-cloud/sdk-python | bb22b5b93505b25de6aebae97c523a6c2242ec2e | [
"Apache-2.0"
] | 1 | 2021-04-20T09:29:17.000Z | 2021-04-20T09:29:17.000Z | # coding: utf-8
"""
CLOUD API
An enterprise-grade Infrastructure is provided as a Service (IaaS) solution that can be managed through a browser-based \"Data Center Designer\" (DCD) tool or via an easy to use API. The API allows you to perform a variety of management tasks such as spinning up additional servers, adding volumes, adjusting networking, and so forth. It is designed to allow users to leverage the same power and flexibility found within the DCD visual tool. Both tools are consistent with their concepts and lend well to making the experience smooth and intuitive. # noqa: E501
The version of the OpenAPI document: 5.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ionoscloud.configuration import Configuration
class Image(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'type': 'Type',
'href': 'str',
'metadata': 'DatacenterElementMetadata',
'properties': 'ImageProperties',
}
attribute_map = {
'id': 'id',
'type': 'type',
'href': 'href',
'metadata': 'metadata',
'properties': 'properties',
}
def __init__(self, id=None, type=None, href=None, metadata=None, properties=None, local_vars_configuration=None): # noqa: E501
"""Image - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._type = None
self._href = None
self._metadata = None
self._properties = None
self.discriminator = None
if id is not None:
self.id = id
if type is not None:
self.type = type
if href is not None:
self.href = href
if metadata is not None:
self.metadata = metadata
self.properties = properties
@property
def id(self):
"""Gets the id of this Image. # noqa: E501
The resource's unique identifier # noqa: E501
:return: The id of this Image. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Image.
The resource's unique identifier # noqa: E501
:param id: The id of this Image. # noqa: E501
:type id: str
"""
self._id = id
@property
def type(self):
"""Gets the type of this Image. # noqa: E501
The type of object that has been created # noqa: E501
:return: The type of this Image. # noqa: E501
:rtype: Type
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Image.
The type of object that has been created # noqa: E501
:param type: The type of this Image. # noqa: E501
:type type: Type
"""
self._type = type
@property
def href(self):
"""Gets the href of this Image. # noqa: E501
URL to the object representation (absolute path) # noqa: E501
:return: The href of this Image. # noqa: E501
:rtype: str
"""
return self._href
@href.setter
def href(self, href):
"""Sets the href of this Image.
URL to the object representation (absolute path) # noqa: E501
:param href: The href of this Image. # noqa: E501
:type href: str
"""
self._href = href
@property
def metadata(self):
"""Gets the metadata of this Image. # noqa: E501
:return: The metadata of this Image. # noqa: E501
:rtype: DatacenterElementMetadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this Image.
:param metadata: The metadata of this Image. # noqa: E501
:type metadata: DatacenterElementMetadata
"""
self._metadata = metadata
@property
def properties(self):
"""Gets the properties of this Image. # noqa: E501
:return: The properties of this Image. # noqa: E501
:rtype: ImageProperties
"""
return self._properties
@properties.setter
def properties(self, properties):
"""Sets the properties of this Image.
:param properties: The properties of this Image. # noqa: E501
:type properties: ImageProperties
"""
if self.local_vars_configuration.client_side_validation and properties is None: # noqa: E501
raise ValueError("Invalid value for `properties`, must not be `None`") # noqa: E501
self._properties = properties
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Image):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Image):
return True
return self.to_dict() != other.to_dict()
| 27.615702 | 568 | 0.579979 |
6b775e3c4f7d4bb3fe490ae8a06075c67701a36a | 3,211 | py | Python | examples/mpm3d.py | JYLeeLYJ/taichi | c4c057ff68ca0fd0ce68cba665c1f65c95a08cdc | [
"MIT"
] | null | null | null | examples/mpm3d.py | JYLeeLYJ/taichi | c4c057ff68ca0fd0ce68cba665c1f65c95a08cdc | [
"MIT"
] | 3 | 2020-08-24T09:07:15.000Z | 2020-08-24T09:18:29.000Z | examples/mpm3d.py | JYLeeLYJ/taichi | c4c057ff68ca0fd0ce68cba665c1f65c95a08cdc | [
"MIT"
] | null | null | null | export_file = '/tmp/mpm3d.ply'
import taichi as ti
import numpy as np
ti.init(arch=ti.opengl)
dim = 3
n_grid = 32
n_particles = n_grid**dim // 2**(dim - 1)
dx = 1 / n_grid
dt = 4e-4
p_rho = 1
p_vol = (dx * 0.5)**2
p_mass = p_vol * p_rho
gravity = 9.8
bound = 3
E = 400
x = ti.Vector.field(dim, float, n_particles)
v = ti.Vector.field(dim, float, n_particles)
C = ti.Matrix.field(dim, dim, float, n_particles)
J = ti.field(float, n_particles)
grid_v = ti.Vector.field(dim, float, (n_grid, ) * dim)
grid_m = ti.field(float, (n_grid, ) * dim)
neighbour = (3, ) * dim
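# One MPM substep: clear the grid, scatter particle mass and momentum to the
# grid (P2G) with quadratic B-spline weights, apply gravity and boundary
# conditions on the grid, then gather velocities back to the particles (G2P)
# and advect them.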
@ti.kernel
def substep():
for I in ti.grouped(grid_m):
grid_v[I] = grid_v[I] * 0
grid_m[I] = 0
for p in x:
Xp = x[p] / dx
base = int(Xp - 0.5)
fx = Xp - base
w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]
stress = -dt * 4 * E * p_vol * (J[p] - 1) / dx**2
affine = ti.Matrix.identity(float, dim) * stress + p_mass * C[p]
for offset in ti.static(ti.grouped(ti.ndrange(*neighbour))):
dpos = (offset - fx) * dx
weight = 1.0
for i in ti.static(range(dim)):
weight *= w[offset[i]][i]
grid_v[base + offset] += weight * (p_mass * v[p] + affine @ dpos)
grid_m[base + offset] += weight * p_mass
for I in ti.grouped(grid_m):
if grid_m[I] > 0:
grid_v[I] /= grid_m[I]
grid_v[I][1] -= dt * gravity
cond = I < bound and grid_v[I] < 0 or I > n_grid - bound and grid_v[
I] > 0
grid_v[I] = 0 if cond else grid_v[I]
for p in x:
Xp = x[p] / dx
base = int(Xp - 0.5)
fx = Xp - base
w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]
new_v = v[p] * 0
new_C = C[p] * 0
for offset in ti.static(ti.grouped(ti.ndrange(*neighbour))):
dpos = (offset - fx) * dx
weight = 1.0
for i in ti.static(range(dim)):
weight *= w[offset[i]][i]
g_v = grid_v[base + offset]
new_v += weight * g_v
new_C += 4 * weight * g_v.outer_product(dpos) / dx**2
v[p] = new_v
x[p] += dt * v[p]
J[p] *= 1 + dt * new_C.trace()
C[p] = new_C
@ti.kernel
def init():
for i in range(n_particles):
x[i] = ti.Vector([ti.random() for i in range(dim)]) * 0.4 + 0.2
J[i] = 1
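# Project the 3D particle positions to 2D screen coordinates for the GUI by
# rotating around fixed azimuth/elevation angles (a no-op when dim == 2).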
def T(a):
if dim == 2:
return a
phi, theta = np.radians(28), np.radians(32)
a = a - 0.5
x, y, z = a[:, 0], a[:, 1], a[:, 2]
c, s = np.cos(phi), np.sin(phi)
C, S = np.cos(theta), np.sin(theta)
x, z = x * c + z * s, z * c - x * s
u, v = x, y * C + z * S
return np.array([u, v]).swapaxes(0, 1) + 0.5
init()
gui = ti.GUI('MPM3D', background_color=0x112F41)
while gui.running and not gui.get_event(gui.ESCAPE):
for s in range(15):
substep()
pos = x.to_numpy()
if export_file:
writer = ti.PLYWriter(num_vertices=n_particles)
writer.add_vertex_pos(pos[:, 0], pos[:, 1], pos[:, 2])
writer.export_frame(gui.frame, export_file)
gui.circles(T(pos), radius=2, color=0x66ccff)
gui.show()
| 28.415929 | 77 | 0.510433 |
01054c0434891bc47b9357f2013b5ddfb16ddb8a | 1,243 | py | Python | SmileScanner/main.py | Ctalk3r/NotTowerDefence | c3b7cf890fdd07482a5fb47768033f1a9739ab53 | [
"MIT"
] | null | null | null | SmileScanner/main.py | Ctalk3r/NotTowerDefence | c3b7cf890fdd07482a5fb47768033f1a9739ab53 | [
"MIT"
] | null | null | null | SmileScanner/main.py | Ctalk3r/NotTowerDefence | c3b7cf890fdd07482a5fb47768033f1a9739ab53 | [
"MIT"
] | 1 | 2019-05-07T00:41:48.000Z | 2019-05-07T00:41:48.000Z | import cv2
class Smile:
def __init__(self):
self.activated = True
self.faceCascade = cv2.CascadeClassifier('SmileScanner/Cascades/haarcascade_frontalface_default.xml')
self.smileCascade = cv2.CascadeClassifier('SmileScanner/Cascades/haarcascade_smile.xml')
self.cap = cv2.VideoCapture('http://192.168.0.101:4747/mjpegfeed')
self.cap.set(3, 640) # set Width
self.cap.set(4, 480) # set Height
def stop(self):
self.activated = False
def check_face(self):
        ret, img = self.cap.read()
        if img is None:
            # no frame could be read from the stream; skip this check
            return
        # Haar cascades expect a single-channel grayscale image
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = self.faceCascade.detectMultiScale(
gray,
scaleFactor=1.3,
minNeighbors=5,
minSize=(30, 30)
)
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
roi_gray = gray[y:y + h, x:x + w]
smile = self.smileCascade.detectMultiScale(
roi_gray,
scaleFactor=1.5,
minNeighbors=15,
minSize=(25, 25),
)
            if len(smile) > 0:  # detectMultiScale returns an empty sequence when nothing is found
self.stop()
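# A minimal usage sketch (illustrative only; it assumes the cascade files and
# the MJPEG stream configured above are reachable):
#
#     scanner = Smile()
#     while scanner.activated:
#         scanner.check_face()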
| 30.317073 | 109 | 0.540628 |
4a0220b9e920197af0cb4a90b5c4983898f5736c | 618 | py | Python | experiments/test_trafficAssignment.py | salomonw/mixed-traffic-amod-route-rebalance | 7f1edeb195a7bfab835e596ad84deead2957943e | [
"MIT"
] | 1 | 2022-03-07T16:15:56.000Z | 2022-03-07T16:15:56.000Z | experiments/test_trafficAssignment.py | salomonw/mixed-traffic-amod-route-rebalance | 7f1edeb195a7bfab835e596ad84deead2957943e | [
"MIT"
] | null | null | null | experiments/test_trafficAssignment.py | salomonw/mixed-traffic-amod-route-rebalance | 7f1edeb195a7bfab835e596ad84deead2957943e | [
"MIT"
] | null | null | null | import src.tnet as tnet
import src.CARS as cars
netFile, gFile, fcoeffs = tnet.get_network_parameters('Braess1')
tNet = tnet.tNet(netFile=netFile, gFile=gFile, fcoeffs=fcoeffs)
tNet.solveMSA()
print([(i,j, tNet.G[i][j]['flow']) for i,j in tNet.G.edges()])
tNet.build_supergraph()
tNet = cars.solve_CARS_noRebalancing(tNet, exogenous_G=0, fcoeffs=fcoeffs, xa=1)
print([(i,j, tNet.G_supergraph[i][j]['flow']) for i,j in tNet.G.edges()])
exogObj = tnet.get_totalTravelTime(tNet.G, fcoeffs)
amodObjNoRebalancing = cars.get_totalTravelTime(tNet)
priceOfAnarchy = exogObj / amodObjNoRebalancing
print(priceOfAnarchy) | 29.428571 | 80 | 0.755663 |
fc9d5f6b92324862c3c95d7546df67e501447223 | 3,777 | py | Python | imaging/ml/toolkit/hcls_imaging_ml_toolkit/pubsub_format_test.py | rczhang/healthcare | 0e66c5b70dd81b9449f2f603959e2cb4095b3eb9 | [
"Apache-2.0"
] | 310 | 2018-02-23T01:40:01.000Z | 2022-03-30T12:25:56.000Z | imaging/ml/toolkit/hcls_imaging_ml_toolkit/pubsub_format_test.py | rczhang/healthcare | 0e66c5b70dd81b9449f2f603959e2cb4095b3eb9 | [
"Apache-2.0"
] | 189 | 2018-06-19T15:32:10.000Z | 2022-03-11T23:48:14.000Z | imaging/ml/toolkit/hcls_imaging_ml_toolkit/pubsub_format_test.py | animesh/healthcare | 7d3d4dc9deb3d31eab99035780ccb9a44f00b687 | [
"Apache-2.0"
] | 165 | 2018-03-06T19:29:18.000Z | 2022-03-21T10:53:45.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pubsub_format.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import posixpath
from absl.testing import absltest
from absl.testing import parameterized
from google.rpc import code_pb2
from hcls_imaging_ml_toolkit import dicom_path
from hcls_imaging_ml_toolkit import exception
from hcls_imaging_ml_toolkit import pubsub_format
from hcls_imaging_ml_toolkit import test_dicom_path_util as tdpu
from hcls_imaging_ml_toolkit import test_pubsub_util as tpsu
_OUTPUT_STORE_ID = 'output_dicom_store_id'
_OUTPUT_DICOM_STORE_PATH = posixpath.join(tdpu.DATASET_PATH_STR, 'dicomStores',
_OUTPUT_STORE_ID)
_ACK_ID = 'ack_id'
_MESSAGE_ID = 'message_id'
# dicomstores instead of dicomStores.
_INVALID_STORE_PATH = ('projects/project_name/locations/us-central1/datasets/'
'dataset_name/dicomstores/store_id')
# sries instead of series.
_INVALID_SERIES_PATH = ('projects/project_name/locations/us-central1/datasets/'
'dataset_name/dicomStores/store_id/dicomWeb/studies/'
'1.2.3/sries/4.5.6')
class PubsubFormatTest(parameterized.TestCase):
@parameterized.parameters([{}], [{
'output_dicom_store_path': _OUTPUT_DICOM_STORE_PATH
}])
def testExpectedPath(self, attributes):
"""Pub/Sub messages with valid format are parsed."""
pubsub_message = tpsu.CreatePubsubReceivedMessage(_ACK_ID,
tdpu.SERIES_PATH_STR,
_MESSAGE_ID, attributes)
parsed_message = pubsub_format.ParseMessage(pubsub_message.message,
dicom_path.Type.SERIES)
self.assertEqual(str(parsed_message.input_path), tdpu.SERIES_PATH_STR)
if attributes.get('output_dicom_store_path'):
self.assertEqual(
str(parsed_message.output_dicom_store_path),
attributes.get('output_dicom_store_path'))
else:
self.assertIsNone(parsed_message.output_dicom_store_path)
def testInvalidInputPath(self):
"""Pub/Sub messages with invalid input paths throw exception."""
pubsub_message = tpsu.CreatePubsubReceivedMessage(_ACK_ID,
_INVALID_SERIES_PATH,
_MESSAGE_ID)
with self.assertRaises(exception.CustomExceptionError) as cee:
pubsub_format.ParseMessage(pubsub_message.message, dicom_path.Type.SERIES)
self.assertEqual(cee.exception.status_code, code_pb2.Code.INVALID_ARGUMENT)
def testInvalidOutputPath(self):
"""Pub/Sub messages with invalid output paths throw exception."""
pubsub_message = tpsu.CreatePubsubReceivedMessage(
_ACK_ID, tdpu.SERIES_PATH_STR, _MESSAGE_ID,
{'output_dicom_store_path': _INVALID_STORE_PATH})
with self.assertRaises(exception.CustomExceptionError) as cee:
pubsub_format.ParseMessage(pubsub_message.message, dicom_path.Type.SERIES)
self.assertEqual(cee.exception.status_code, code_pb2.Code.INVALID_ARGUMENT)
if __name__ == '__main__':
absltest.main()
| 43.918605 | 80 | 0.715118 |
bfc5730ee171d79bfc6425edde05fb130102f2e1 | 1,927 | py | Python | checkov/kubernetes/checks/resource/base_spec_check.py | cevoaustralia/checkov | 07b8d2c97fedaeb5db95f1688177246f49107e7c | [
"Apache-2.0"
] | 1 | 2021-02-13T15:24:42.000Z | 2021-02-13T15:24:42.000Z | checkov/kubernetes/checks/resource/base_spec_check.py | cevoaustralia/checkov | 07b8d2c97fedaeb5db95f1688177246f49107e7c | [
"Apache-2.0"
] | 7 | 2021-04-12T06:54:07.000Z | 2022-03-21T14:04:14.000Z | checkov/kubernetes/checks/resource/base_spec_check.py | metahertz/checkov | 3a179c5a2fda97c55e932a8cf72d69bd5ffdd711 | [
"Apache-2.0"
] | 1 | 2021-12-16T03:09:55.000Z | 2021-12-16T03:09:55.000Z | from abc import abstractmethod
from collections.abc import Iterable
from typing import Dict, Any, Optional, List
from checkov.common.checks.base_check import BaseCheck
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.common.multi_signature import multi_signature
from checkov.kubernetes.checks.resource.registry import registry
class BaseK8Check(BaseCheck):
def __init__(
self,
name: str,
id: str,
categories: List[CheckCategories],
supported_entities: "Iterable[str]",
guideline: Optional[str] = None,
) -> None:
super().__init__(
name=name,
id=id,
categories=categories,
supported_entities=supported_entities,
block_type="k8",
guideline=guideline
)
self.supported_specs = supported_entities
registry.register(self)
def scan_entity_conf(self, conf: Dict[str, Any], entity_type: str) -> CheckResult:
self.entity_type = entity_type
return self.scan_spec_conf(conf, entity_type)
@multi_signature()
@abstractmethod
def scan_spec_conf(self, conf: Dict[str, Any], entity_type: str) -> CheckResult:
"""Return result of Kubernetes object check."""
raise NotImplementedError()
@classmethod
@scan_spec_conf.add_signature(args=["self", "conf"])
def _scan_spec_conf_self_conf(cls, wrapped):
def wrapper(self, conf, entity_type=None):
# keep default argument for entity_type so old code, that doesn't set it, will work.
return wrapped(self, conf)
return wrapper
@staticmethod
def get_inner_entry(conf: Dict[str, Any], entry_name: str) -> Dict[str, Any]:
spec = {}
if conf.get("spec") and conf.get("spec").get("template"):
spec = conf.get("spec").get("template").get(entry_name, {})
return spec
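# A minimal sketch of how a concrete check could build on BaseK8Check; it is
# illustrative only (the id, name and supported entity below are invented and
# do not correspond to a real checkov policy).
class ExamplePodTemplateCheck(BaseK8Check):
    def __init__(self) -> None:
        super().__init__(
            name="Example: pod template defines at least one container",
            id="CKV_K8S_EXAMPLE",
            categories=[CheckCategories.GENERAL_SECURITY],
            supported_entities=["Deployment"],
        )
    def scan_spec_conf(self, conf: Dict[str, Any], entity_type: str) -> CheckResult:
        # get_inner_entry() returns spec.template.<entry_name> from the manifest
        containers = self.get_inner_entry(conf, "spec").get("containers")
        return CheckResult.PASSED if containers else CheckResult.FAILED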
| 34.410714 | 96 | 0.659574 |
1d18d99bb96b05a3aec41a1cda4386051555921c | 4,101 | py | Python | datawinners/messageprovider/errors_translation_processor.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | 1 | 2015-11-02T09:11:12.000Z | 2015-11-02T09:11:12.000Z | datawinners/messageprovider/errors_translation_processor.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | datawinners/messageprovider/errors_translation_processor.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | from collections import OrderedDict
import mangrove.errors.MangroveException as ex
from django.utils.translation import get_language, ugettext, activate
def data_object_not_found_formatter(exception, message):
return message % (exception.data[0].capitalize(), ugettext(exception.data[1]), exception.data[2])
def default_formatter(exception, message):
return message
def incorrect_date_formatter(exception, message):
return message % (exception.data[1], exception.data[0], exception.data[2])
def invalid_answer_formatter(exception, message):
return message % (exception.data[1], exception.data[0])
def datasender_not_linked_formatter(exception, message):
return message % (exception.data[0].capitalize(), exception.data[1])
messages_and_formatters = {
ex.DataObjectNotFound: (u"%s with %s = %s not found.", data_object_not_found_formatter),
ex.GeoCodeFormatException: (u"Incorrect GPS format. The GPS coordinates must be in the following format: xx.xxxx,yy.yyyy. Example -18.8665,47.5315", default_formatter),
ex.IncorrectDate: (u"Answer %s for question %s is invalid. Expected date in %s format", incorrect_date_formatter),
ex.AnswerWrongType: (u"Answer %s for question %s is of the wrong type.", invalid_answer_formatter),
ex.AnswerTooLongException: (u"Answer %s for question %s is longer than allowed.", invalid_answer_formatter),
ex.AnswerTooSmallException: (u"Answer %s for question %s is smaller than allowed.", invalid_answer_formatter),
ex.AnswerTooBigException: (u"Answer %s for question %s is greater than allowed.", invalid_answer_formatter),
ex.AnswerTooShortException: (u"Answer %s for question %s is shorter than allowed.", invalid_answer_formatter),
ex.LatitudeNotFloat: (u"Incorrect GPS format. The GPS coordinates must be in the following format: xx.xxxx,yy.yyyy. Example -18.8665,47.5315", default_formatter),
ex.LongitudeNotFloat: (u"Incorrect GPS format. The GPS coordinates must be in the following format: xx.xxxx,yy.yyyy. Example -18.8665,47.5315", default_formatter),
ex.LatitudeNotInRange: (u'Invalid GPS value.', default_formatter),
ex.LongitudeNotInRange: (u'Invalid GPS value.', default_formatter),
ex.AnswerHasTooManyValuesException: (u"Answer %s for question %s contains more than one value.", invalid_answer_formatter),
ex.DatasenderIsNotLinkedException: (u"The Data Sender %s (%s) is not linked to your Questionnaire.", datasender_not_linked_formatter),
ex.AnswerNotInListException: (u"Answer %s for question %s in not present in the allowed options.", invalid_answer_formatter)
}
class TranslationProcessor(object):
def __init__(self, form_model, response):
self.form_model = form_model
self.language_separator = '| |'
if response is not None:
self.validation_exception = [response.exception] + form_model.validation_exception
else:
self.validation_exception = form_model.validation_exception
def process(self):
error_msg_dict = OrderedDict()
current_language = get_language()
existing_language = ["en", "fr"]
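        # Build the message in every supported language; entries are keyed as
        # "<language><index>", and the last message of each language except the
        # final one gets the separator appended so the languages can be split later.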
for language in existing_language:
activate(language)
for index, e in enumerate(self.validation_exception):
message, formatter = messages_and_formatters.get(type(e), (None, None))
if message is None:
error_msg_dict.update({'%s%s' % (language, index +1): e.message})
continue
translated_message = ugettext(message)
formatted_message = formatter(e, translated_message)
if index == len(self.validation_exception) -1 and \
existing_language.index(language) != len(existing_language)-1:
formatted_message += self.language_separator
if not isinstance(formatted_message, unicode):
formatted_message = formatted_message.decode('utf-8')
error_msg_dict.update({'%s%s' % (language, index +1): formatted_message})
activate(current_language)
return error_msg_dict | 58.585714 | 170 | 0.719093 |
46ae4700823358c863c0a3162d11364e31ec2544 | 7,956 | py | Python | docs/conf.py | benwhalley/pytest-bdd | f878209f36cf8988336220dc7da3e561e6c871aa | [
"MIT"
] | 4 | 2021-03-26T07:56:05.000Z | 2022-03-17T07:45:52.000Z | docs/conf.py | benwhalley/pytest-bdd | f878209f36cf8988336220dc7da3e561e6c871aa | [
"MIT"
] | 3 | 2020-04-29T13:31:28.000Z | 2020-05-10T08:57:12.000Z | docs/conf.py | benwhalley/pytest-bdd | f878209f36cf8988336220dc7da3e561e6c871aa | [
"MIT"
] | 2 | 2020-01-23T22:24:42.000Z | 2020-05-25T10:16:05.000Z | # -*- coding: utf-8 -*-
#
# Pytest-BDD documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 7 21:07:56 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import sys, os
sys.path.insert(0, os.path.abspath(".."))
import pytest_bdd
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"Pytest-BDD"
copyright = u"2013, Oleg Pidsadnyi"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pytest_bdd.__version__
# The full version, including alpha/beta/rc tags.
release = pytest_bdd.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "Pytest-BDDdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [("index", "Pytest-BDD.tex", u"Pytest-BDD Documentation", u"Oleg Pidsadnyi", "manual")]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "pytest-bdd", u"Pytest-BDD Documentation", [u"Oleg Pidsadnyi"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"Pytest-BDD",
u"Pytest-BDD Documentation",
u"Oleg Pidsadnyi",
"Pytest-BDD",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 32.606557 | 105 | 0.707139 |
b1d93caa3f925f7c9b1330079384ca5a3c921591 | 1,080 | py | Python | oscar_ecomenv/Scripts/viewer.py | PamilerinId/Ecommerce-Boiler | 1d706f88c8c828e86309793cb33ea102f385bf2f | [
"Apache-2.0"
] | null | null | null | oscar_ecomenv/Scripts/viewer.py | PamilerinId/Ecommerce-Boiler | 1d706f88c8c828e86309793cb33ea102f385bf2f | [
"Apache-2.0"
] | null | null | null | oscar_ecomenv/Scripts/viewer.py | PamilerinId/Ecommerce-Boiler | 1d706f88c8c828e86309793cb33ea102f385bf2f | [
"Apache-2.0"
] | null | null | null | #!c:\users\pi\documents\batcave\web\ecommerce\oscar_ecomenv\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# an image viewer
class UI(tkinter.Label):
def __init__(self, master, im):
if im.mode == "1":
# bitmap image
self.image = ImageTk.BitmapImage(im, foreground="white")
tkinter.Label.__init__(self, master, image=self.image, bd=0,
bg="black")
else:
# photo image
self.image = ImageTk.PhotoImage(im)
tkinter.Label.__init__(self, master, image=self.image, bd=0)
#
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python viewer.py imagefile")
sys.exit(1)
filename = sys.argv[1]
root = tkinter.Tk()
root.title(filename)
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
| 19.285714 | 78 | 0.606481 |
8795afdb734ca6e58b38557bb2403f37e7a24369 | 2,882 | py | Python | examples/cuda_mpi.py | gdementen/numba | 78486e86ff9fbd343cac3dadbc63ec3bc66c75aa | [
"BSD-2-Clause"
] | null | null | null | examples/cuda_mpi.py | gdementen/numba | 78486e86ff9fbd343cac3dadbc63ec3bc66c75aa | [
"BSD-2-Clause"
] | null | null | null | examples/cuda_mpi.py | gdementen/numba | 78486e86ff9fbd343cac3dadbc63ec3bc66c75aa | [
"BSD-2-Clause"
] | null | null | null | # Demonstration of using MPI and Numba CUDA to perform parallel computation
# using GPUs in multiple nodes. This example requires MPI4py to be installed.
#
# The root process creates an input data array that is scattered to all nodes.
# Each node calls a CUDA jitted function on its portion of the input data.
# Output data is then gathered back to the master node.
#
# Notes/limitations:
#
# 1. It is generally more efficient to avoid initialising all data on the root
# node then scattering it out to all other nodes, and instead each node
# should initialise its own data, but initialisation is done on the root node
# here to keep the example simple.
# 2. If multiple GPUs are available to a single MPI process, additional code may
# need adding to ensure the correct GPU is used by each process - this will
# depend on the exact configuration of the MPI cluster.
#
# This example can be invoked with:
#
# $ mpirun -np <np> python cuda_mpi.py
#
# where np is the number of processes (e.g. 4). For demonstrating the code, this
# does work with a single node and a single GPU, since multiple processes can
# share a single GPU. However, in a production setting, it may be more
# appropriate to provide one GPU per MPI process.
from __future__ import print_function
from mpi4py import MPI
from numba import cuda
import numpy as np
mpi_comm = MPI.COMM_WORLD
# Input data size
total_n = 10
# Process 0 creates input data
if mpi_comm.rank == 0:
input_data = np.arange(total_n, dtype=np.int32)
print("Input:", input_data)
else:
input_data = None
# Compute partitioning of the input array
proc_n = [ total_n // mpi_comm.size + (total_n % mpi_comm.size > n)
for n in range(mpi_comm.size) ]
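# e.g. with total_n = 10 and 4 processes this gives proc_n = [3, 3, 2, 2]:
# the first (total_n % size) ranks each take one extra element.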
pos = 0
pos_n = []
for n in range(mpi_comm.size):
pos_n.append(pos)
pos += proc_n[n]
my_n = proc_n[mpi_comm.rank]
my_offset = pos_n[mpi_comm.rank]
print('Process %d, my_n = %d' % (mpi_comm.rank, my_n))
print('Process %d, my_offset = %d' % (mpi_comm.rank, my_offset))
# Distribute input data across processes
my_input_data = np.zeros(my_n, dtype=np.int32)
mpi_comm.Scatterv([input_data, proc_n, pos_n, MPI.INT], my_input_data)
print('Process %d, my_input_data = %s' % (mpi_comm.rank, my_input_data))
# Perform computation on local data
@cuda.jit
def sqplus2(input_data, output_data):
for i in range(len(input_data)):
d = input_data[i]
output_data[i] = d * d + 2
my_output_data = np.empty_like(my_input_data)
# launch with one block of one thread; the kernel body itself loops over the local array
sqplus2[1, 1](my_input_data, my_output_data)
print('Process %d, my_output_data = %s' % (mpi_comm.rank, my_output_data))
# Bring result back to root process
if mpi_comm.rank == 0:
output_data = np.empty_like(input_data)
else:
output_data = None
mpi_comm.Gatherv(my_output_data, [output_data, proc_n, pos_n, MPI.INT])
if mpi_comm.rank == 0:
print("Output:", output_data)
MPI.Finalize()
| 30.336842 | 80 | 0.725538 |
0837c0bceb43bb65d6deaa3b7f48befe10a77211 | 941 | py | Python | recipes/plog/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 562 | 2019-09-04T12:23:43.000Z | 2022-03-29T16:41:43.000Z | recipes/plog/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 9,799 | 2019-09-04T12:02:11.000Z | 2022-03-31T23:55:45.000Z | recipes/plog/all/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 1,126 | 2019-09-04T11:57:46.000Z | 2022-03-31T16:43:38.000Z | import os
from conans import ConanFile, tools
class PlogConan(ConanFile):
name = "plog"
description = "Pretty powerful logging library in about 1000 lines of code"
homepage = "https://github.com/SergiusTheBest/plog"
url = "https://github.com/conan-io/conan-center-index"
license = "MPL-2.0"
topics = ("logging", "header-only", "portable")
no_copy_source = True
@property
def _source_subfolder(self):
return "source_subfolder"
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = self.name + "-" + self.version
os.rename(extracted_dir, self._source_subfolder)
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
self.copy("*.h", src=os.path.join(self._source_subfolder, "include"), dst=os.path.join("include"))
def package_id(self):
self.info.header_only()
| 32.448276 | 106 | 0.668438 |
05f84d4920634242e08c53748ea8e1192b282ebc | 333 | py | Python | Workflows/BenchmarkReader/analysis.py | HiveTracker/bonsai-interface | 1f4a1300727347c1f207022899070f319ea2be2b | [
"MIT"
] | null | null | null | Workflows/BenchmarkReader/analysis.py | HiveTracker/bonsai-interface | 1f4a1300727347c1f207022899070f319ea2be2b | [
"MIT"
] | null | null | null | Workflows/BenchmarkReader/analysis.py | HiveTracker/bonsai-interface | 1f4a1300727347c1f207022899070f319ea2be2b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat May 19 15:56:18 2018
@author: gonca
"""
import numpy as np
import matplotlib.pyplot as plt
fname = r'C:/Users/gonca/Documents/Projects/github/hivetracker/bonsai-interface/BinaryReader/base0Axis0.bin'
data = np.fromfile(fname,dtype=np.float32)
data = data.reshape((-1,4))
plt.plot(data)
| 20.8125 | 108 | 0.726727 |
3778083c86219923b0683c5d7e7532daaa21e2a4 | 9,595 | py | Python | can4python/configuration.py | uincore/can4python | 4177bcf4368fd8484ec080e2d926cb999b14c6a5 | [
"BSD-3-Clause"
] | 36 | 2015-11-16T17:27:43.000Z | 2021-09-16T15:14:55.000Z | can4python/configuration.py | hexkDotCom/can4python | 4177bcf4368fd8484ec080e2d926cb999b14c6a5 | [
"BSD-3-Clause"
] | 6 | 2015-12-04T09:07:55.000Z | 2018-04-24T16:39:15.000Z | can4python/configuration.py | hexkDotCom/can4python | 4177bcf4368fd8484ec080e2d926cb999b14c6a5 | [
"BSD-3-Clause"
] | 17 | 2015-11-26T16:24:35.000Z | 2021-01-23T14:21:41.000Z | # -*- coding: utf-8 -*-
#
# Author: Jonas Berg
# Copyright (c) 2015, Semcon Sweden AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Semcon Sweden AB nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from . import exceptions
class Configuration():
"""
Configuration object for the things that happen on the CAN bus. It holds frame definitions (including
signal definitions), the busname etc. See below.
Attributes:
framedefinitions (dict): The keys are the frame_id (*int*) and the items are the corresponding :class:`.CanFrameDefinition` objects.
busname (str or None): Which bus name in the configuration file to use when reading. Defaults to :const:`None` (using first alphabetically).
"""
def __init__(self, framedefinitions=None, busname=None, ego_node_ids=None):
if framedefinitions is None:
self.framedefinitions = {}
else:
self.framedefinitions = framedefinitions
self.busname = busname
self.ego_node_ids = ego_node_ids
@property
def ego_node_ids(self):
"""
*set of strings* Set of nodes that this program will enact. You can pass it a list (it will convert to a set).
"""
return self._ego_node_ids
@ego_node_ids.setter
def ego_node_ids(self, value):
if value is None:
self._ego_node_ids = set()
elif isinstance(value, str):
raise exceptions.CanException("ego_node_ids should be a list/set of strings. Given: {!r}".format(value))
else:
try:
self._ego_node_ids = set(map(str, value))
except TypeError:
raise exceptions.CanException("ego_node_ids should be a list/set of strings. Given: {!r}".format(value))
def __repr__(self):
return "CAN configuration object. Busname '{}', having {} frameIDs defined. Enacts these node IDs: {}".format(
self.busname, len(self.framedefinitions), " ".join(sorted(self.ego_node_ids)))
def get_descriptive_ascii_art(self):
"""Display an overview of the :class:`.Configuration` object with frame definitions and signals.
Returns:
A multi-line string.
"""
text = repr(self) + "\n"
text += " Frame definitions:\n"
for frameID in sorted(self.framedefinitions.keys()):
text += "\n " + \
self.framedefinitions[frameID].get_descriptive_ascii_art().replace('\n', '\n ')
return text
def add_framedefinition(self, framedef):
""" Add a frame definition to the configutation.
Args:
framedef (:class:`.CanFrameDefinition` object): The frame definition to add.
This is a convenience function. These two alternatives are equal::
myconfig.add_framedefinition(framedef1)
myconfig.framedefinitions[framedef1.frame_id] = framedef1
"""
self.framedefinitions[framedef.frame_id] = framedef
def set_throttle_times(self, inputdict):
""" Set throttle_time for some of the framedefinitions in the configuration object.
Args:
inputdict (dict): The keys are the frame IDs (int) and the values are the throttle times (numerical or None) in milliseconds.
This is a convenience function. You can instead do like this for each frame::
myconfig.framedefinitions[myframe_id].throttle_time = mythrottletime
"""
try:
inputdict.items()
except AttributeError:
raise exceptions.CanException("The inputdict must be a dict. Given: {!r}". format(inputdict))
for frame_id, throttle_time in inputdict.items():
try:
self.framedefinitions[frame_id].throttle_time = throttle_time
except KeyError:
raise exceptions.CanException("The frame_id given is not found in the configuration: {}".
format(frame_id))
def set_throttle_times_from_signalnames(self, inputdict):
""" Set throttle_time for some of the framedefinitions in the configuration object (via signal names)
Args:
inputdict (dict): The keys are the signalnames (str) and the values are the throttle times (numerical or None) in milliseconds.
Note that the throttle_time is set on the framedefinition holding the signalname. It will also affect other
signals on the same frame. Setting different throttle_times to signals on the same frame will
give an undefined result.
This is a convenience function. You can instead do like this for each signalname::
(first find myframe_id for a given signalname)
myconfig.framedefinitions[myframe_id].throttle_time = mythrottletime
"""
output_dict = {}
# Sorting the keys to have consisting behavior in case
# of multiple values of throttle_time for a single framedefinition.
try:
signalnames = sorted(inputdict.keys())
except AttributeError:
raise exceptions.CanException("The inputdict must be a dict. Given: {!r}". format(inputdict))
for signalname in signalnames:
throttle_time = inputdict[signalname]
frame_id = self.find_frameid_from_signalname(signalname)
output_dict[frame_id] = throttle_time
self.set_throttle_times(output_dict)
def set_receive_on_change_only(self, inputlist):
"""Set receive_on_change_only for some of the framedefinitions in the configuration object.
Args:
inputlist (list of ints): The frame IDs that should be received only when the data has changed.
This is a convenience function. You can instead do like this for each frame ID::
myconfig.framedefinitions[myframe_id].receive_on_change_only = True
"""
try:
len(inputlist)
except TypeError:
raise exceptions.CanException("The inputlist must be a list. Given: {!r}". format(inputlist))
for frame_id in inputlist:
try:
self.framedefinitions[frame_id].receive_on_change_only = True
except KeyError:
raise exceptions.CanException("The frame_id given is not found in the configuration: {} ".format(frame_id))
def set_receive_on_change_only_from_signalnames(self, inputlist):
"""Set receive_on_change_only for some of the framedefinitions in the configuration object (via signal names).
Args:
inputlist (list of str): The signal names that should be received only when the data has changed.
Note that the receive_on_change_only is set on the framedefinition holding the signalname. It will
also affect other signals on the same frame.
This is a convenience function. You can instead do like this for each signalname::
(first find myframe_id for a given signalname)
myconfig.framedefinitions[myframe_id].receive_on_change_only = True
"""
try:
len(inputlist)
except TypeError:
raise exceptions.CanException("The inputlist must be a list. Given: {!r}". format(inputlist))
outputset = set()
for signalname in inputlist:
frame_id = self.find_frameid_from_signalname(signalname)
outputset.add(frame_id)
self.set_receive_on_change_only(outputset)
def find_frameid_from_signalname(self, input_signalname):
"""Find which frame_id a specific signal name belongs.
Args:
input_signalname (str): signal name to search for.
Returns: The frame_id (int) in which the signal is located.
Raises:
CanException when the given signal name not is found.
"""
for frame_id, framedef in self.framedefinitions.items():
for signaldef in framedef.signaldefinitions:
if signaldef.signalname == input_signalname:
return framedef.frame_id
raise exceptions.CanException("The signalname given is not found in the configuration: {}".
format(input_signalname))
| 44.21659 | 146 | 0.675039 |
f998e9a09d704349b996e20600a76f3d53c3aec9 | 1,079 | py | Python | ds.py | Taruni-Anand/Past-Air-Future | e796c86266c3a4ac452f96e5381a9ef43d84a3f5 | [
"CC0-1.0"
] | null | null | null | ds.py | Taruni-Anand/Past-Air-Future | e796c86266c3a4ac452f96e5381a9ef43d84a3f5 | [
"CC0-1.0"
] | 3 | 2019-12-31T07:09:08.000Z | 2022-03-25T18:44:14.000Z | ds.py | Taruni-Anand/Past-Air-Future | e796c86266c3a4ac452f96e5381a9ef43d84a3f5 | [
"CC0-1.0"
] | null | null | null | import openaq
import pandas as pd
from pandas.io.json import json_normalize
import statistics
api = openaq.OpenAQ()
status, resp = api.cities()
df = json_normalize(resp)
def get_lat_long(Input_City):
data = api.locations(df=True,parameters='pm25',city=Input_City)
    # keep one row per city (the original call discarded the deduplicated result)
    data = data.drop_duplicates(subset=['city'], keep='first')
return [data.iloc[0,1],data.iloc[0,2]]
def clean_data(data):
    data = data.sort_index()
    # treat the -999 sentinel readings as missing values, then drop them
    data = data.replace(-999, np.nan)
    data = data.dropna()
    return data
def get_current(city):
#Get Current PM25
df_current_time = api.latest(df = True, parameter = 'pm25', city = city)
df = df_current_time.sort_index().tail(1)
return df['value']
def predict(value, lam, city):
res = api.latest(city = city, parameter = 'pm25', limit=100, df=True)
    b = res.groupby([res.index.time])['value'].describe()
a = [x for x in b['mean']]
f = 0
c = 0
for x in range(len(a)):
        # only look ahead when a next reading exists, to avoid indexing past the end
        if (value - lam <= a[x] <= value + lam) and x + 1 < len(a):
            f += a[x + 1]
            c += 1
    # assumption: fall back to the current value when no historical reading matched
    return f // c if c else value | 26.317073 | 76 | 0.627433 |
f0aaa14c1c7f866d45f840fb4f8a35230a9cb990 | 205 | py | Python | user-config.py | PArangSae/pywikibot | caf1401e71a81d11e681a6d6adfdea907aa33b94 | [
"MIT"
] | null | null | null | user-config.py | PArangSae/pywikibot | caf1401e71a81d11e681a6d6adfdea907aa33b94 | [
"MIT"
] | null | null | null | user-config.py | PArangSae/pywikibot | caf1401e71a81d11e681a6d6adfdea907aa33b94 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This is a sample file. You should use generate_user_files.py
# to create your user-config.py file.
mylang = 'ko'
family = 'wikipedia'
usernames['wikipedia']['ko'] = 'PArangSae'
| 22.777778 | 62 | 0.682927 |
f4147ba0fdaade7c9cca9e8048e56cfd516710cd | 633 | py | Python | backend/manage.py | crowdbotics-apps/wesands-33463 | b53cd4c61132598de0380d3c718c4f0774180827 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/manage.py | crowdbotics-apps/wesands-33463 | b53cd4c61132598de0380d3c718c4f0774180827 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/manage.py | crowdbotics-apps/wesands-33463 | b53cd4c61132598de0380d3c718c4f0774180827 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wesands_33463.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.772727 | 77 | 0.685624 |
80024659c3d6b623bf79367790fedbf7aa6b4aaa | 9,199 | py | Python | cloudmesh/piazza/piazza_extractor.py | cloudmesh/piazza | aa603643fb8dd39cda6fc96fc3f13658aabe486e | [
"Apache-2.0"
] | 6 | 2017-04-21T20:20:08.000Z | 2021-01-12T16:59:54.000Z | cloudmesh/piazza/piazza_extractor.py | cloudmesh/piazza | aa603643fb8dd39cda6fc96fc3f13658aabe486e | [
"Apache-2.0"
] | null | null | null | cloudmesh/piazza/piazza_extractor.py | cloudmesh/piazza | aa603643fb8dd39cda6fc96fc3f13658aabe486e | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import getpass
import json
import sys
import time
import config
import grequests
import requests
from cloudmesh.piazza.piazza_mongo import PiazzaMongo
class PiazzaExtractor:
"""Piazza API interface for Piazza Miner
"""
api_url = 'https://piazza.com/logic/api'
    def __init__(self):
        """Read the config file and set the login email, password, and class id.
        """
self.email, self.password, self.class_id, update = self.get_piazza_info()
def get_piazza_info(self):
"""Open config file (default piazza.cfg)
Returns:
            email (string), password (string), class_id (string), update (string)
"""
email = config.get('login', 'email')
password = config.decrypt(config.get('login', 'password'))
class_id = config.get('network', 'id')
update = config.get('network', 'update')
return email, password, class_id, update
def login(self):
"""Login to Piazza and get cookie
        Uses the email and password loaded from the config file, prompting
        for whichever of them is missing.
"""
# get username and password
email = self.email if self.email else raw_input('Enter login email: ')
password = self.password if self.password else getpass.getpass('Enter your password')
# login request to get cookie
print('Logging in as {email}.'.format(email=email))
login_data = json.dumps({
'method': 'user.login',
'params': {
'email': email,
'pass': password
}})
r = requests.post(self.api_url, data=login_data)
# login error
if r.json()['error']:
sys.exit(
                ('Error logging in: {msg}. Please check your piazza.cfg file or run '
                 '"piazza setup" in the command line.').format(msg=r.json()['error']))
self.login_cookie = r.cookies
def get_folder_list(self):
"""Get list of folders for class (network)
Returns:
(list) -- folder names
"""
post_data = json.dumps({
'method': 'user.status',
'params': {
'nid': self.class_id
}
})
r = requests.post(self.api_url, data=post_data, cookies=self.login_cookie)
networks = r.json()['result']['networks']
# find correct network (class)
for network in networks:
if network['id'] == self.class_id:
return network['folders']
    def get_all_posts(self):
        """Fetch every post in the class feed, add author names, and store
        them in the MongoDB 'posts' collection (replacing its previous contents).
        """
print('Getting posts.')
data = json.dumps({
'method': 'network.get_my_feed',
'params': {
'nid': self.class_id
}
})
r = requests.post(self.api_url, data=data, cookies=self.login_cookie)
folder_feed = json.loads(r.content)['result']['feed']
m = PiazzaMongo()
m.update_one('meta', {}, {'$set': {'updated': time.time()}})
class Progress:
"""Class for tracking download progress
"""
def __init__(self, total):
self.count = 0
self.total = total
def update(self, r, **kwargs):
self.count += 1
sys.stdout.write('\r')
sys.stdout.write(str(self.count) + '/' + str(self.total))
sys.stdout.flush()
if self.count == self.total:
print()
return r
prog = Progress(len(folder_feed))
session = requests.Session()
reqs = []
for post in folder_feed:
data = json.dumps({'method': 'content.get', 'params': {'cid': post['id'], 'nid': self.class_id}})
req = grequests.post(self.api_url, data=data, callback=prog.update, cookies=self.login_cookie,
session=session)
reqs.append(req)
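        # issue the content.get requests concurrently, keeping at most two in flight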
responses = grequests.imap(reqs, size=2)
posts = []
for r in responses:
r = r.json()['result']
post = {
'created': r['created'],
'type': r['type'],
'tags': r['tags'],
'folders': r['folders'],
'id': r['id'],
'cid': r['nr'],
'num_favorites': r['num_favorites'],
'good_tags': len(r['tag_good']),
'answered': r['no_answer'] == 0 if 'no_answer' in r else True
}
original_post = r['history'][-1]
recent_post = r['history'][0]
post['author_id'] = original_post['uid'] if 'uid' in original_post else None
post['last_edited'] = recent_post['created']
post['subject'] = recent_post['subject']
post['content'] = recent_post['content']
def trim_children(c):
r = []
for child in c:
d = dict((key, value) for key, value in child.iteritems() if
key in ['uid', 'created', 'updated', 'type', 'children', 'subject'])
d['children'] = trim_children(d['children'])
r.append(d)
return r
post['children'] = trim_children(r['children'])
posts.append(post)
posts = self.add_names(posts)
m.drop('posts')
m.insert_many('posts', posts)
print('Update Complete.')
    def add_names(self, posts):
        """Add names to posts based on uid
        Args:
            posts (list) -- posts to add names to
        Returns:
            (list) -- posts with user names filled in
        """
# get all uids
        def get_uids(posts, uids=None):
            """recursive function to get all uids
            """
            # use None instead of a mutable default list so each call can start fresh
            if uids is None:
                uids = []
if hasattr(posts, 'iteritems'):
for key, value in posts.items():
if key == 'uid':
uids.append(value)
elif isinstance(value, dict):
get_uids(value, uids)
elif isinstance(value, list):
for i in value:
get_uids(i, uids)
return uids
        uids = []
        for post in posts:
            uids = get_uids(post, uids)
        uids = list(set(uids))
# get list of users
users = self.get_users(uids)
# insert names into posts where uid exists
        def insert_names(post):
            """recursive function to insert name where uid exists
            """
if hasattr(post, 'iteritems'):
for key, value in post.items():
if key == 'uid':
exists = False
for user in users:
if user['id'] == value:
exists = True
post['name'] = user['name']
break
if not exists:
                            # use the same 'name' key that is set when the user is found
                            if value:
                                post['name'] = '(deleted)'
                            else:
                                post['name'] = '(anonymous)'
elif key == 'author_id':
exists = False
for user in users:
if user['id'] == value:
exists = True
post['author'] = user['name']
break
if not exists:
if value:
post['author'] = '(deleted)'
else:
post['author'] = '(anonymous)'
if isinstance(value, dict):
insert_names(value)
elif isinstance(value, list):
for i in value:
insert_names(i)
return post
posts_with_names = []
for post in posts:
post = insert_names(post)
posts_with_names.append(post)
return posts_with_names
def get_users(self, uids):
"""Get list of users from Piazza, directly from Piazza API
Args:
uids (list) -- user ids
Returns:
(dict) -- users information
"""
post_data = json.dumps({
'method': 'network.get_users',
'params': {
'ids': uids,
'nid': self.class_id
}
})
print('Getting user names.')
r = requests.post(self.api_url, data=post_data, cookies=self.login_cookie)
# add to db
self.add_users(r.json()['result'])
return r.json()['result']
def add_users(self, users):
"""Add list of users to db
Args:
users (dict) -- user information
"""
m = PiazzaMongo()
for user in users:
m.insert('piazza_users', user)
| 31.503425 | 109 | 0.470486 |
69ce6dc3f34397b15f11a56f60ba53fbe05d6e15 | 721 | py | Python | LightUpAlarm/__main__.py | Mitender-Arya/ArduBlocky | 4604f257328b3768add68c82c644fbc816756952 | [
"MIT"
] | 37 | 2015-03-30T19:32:47.000Z | 2022-02-03T20:10:10.000Z | LightUpAlarm/__main__.py | Mitender-Arya/ArduBlocky | 4604f257328b3768add68c82c644fbc816756952 | [
"MIT"
] | 6 | 2016-04-14T22:46:07.000Z | 2020-05-18T19:42:40.000Z | LightUpAlarm/__main__.py | Mitender-Arya/ArduBlocky | 4604f257328b3768add68c82c644fbc816756952 | [
"MIT"
] | 14 | 2015-06-29T06:33:00.000Z | 2022-03-28T00:24:10.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Entry point for the LightUpAlarm package.
#
# Copyright (c) 2015 carlosperate http://carlosperate.github.io
#
# Licensed under The MIT License (MIT), a copy can be found in the LICENSE file
#
# Creates an instance of the AlarmCli class and runs it.
#
from __future__ import unicode_literals, absolute_import
import sys
try:
from LightUpAlarm.AlarmCli import AlarmCli
except ImportError:
from AlarmCli import AlarmCli
def main(argv=None):
# Checking command line arguments
if (argv is not None) and (len(argv) > 0):
AlarmCli().onecmd(' '.join(argv[0:]))
else:
AlarmCli().cmdloop()
if __name__ == '__main__':
main(sys.argv[1:])
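# Rough usage sketch (assuming the package is invoked as a module): running
# `python -m LightUpAlarm` with no extra arguments drops into the interactive
# AlarmCli.cmdloop(), while `python -m LightUpAlarm <command> [args]` joins the
# remaining argv into a single string and dispatches it once via onecmd().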
| 24.033333 | 79 | 0.693481 |
80a9d5d40e275fce664ef52e5d5413930432d683 | 269,665 | py | Python | tensorflow/python/feature_column/feature_column_v2_test.py | elielhojman/tensorflow | 163aae337c875efce2518c3cd0fecb61968fe408 | [
"Apache-2.0"
] | 8 | 2017-03-20T12:04:21.000Z | 2021-06-24T20:34:30.000Z | tensorflow/python/feature_column/feature_column_v2_test.py | AKIRA-MIYAKE/tensorflow | 89e06304aad35bfb019a8c10f39fc1ead83e0f99 | [
"Apache-2.0"
] | 4 | 2019-08-14T22:32:51.000Z | 2020-03-09T14:59:18.000Z | tensorflow/python/feature_column/feature_column_v2_test.py | AKIRA-MIYAKE/tensorflow | 89e06304aad35bfb019a8c10f39fc1ead83e0f99 | [
"Apache-2.0"
] | 4 | 2019-11-11T13:46:27.000Z | 2020-03-14T05:36:53.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column as fc_old
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.feature_column.feature_column_v2 import FeatureColumn
from tensorflow.python.feature_column.feature_column_v2 import FeatureTransformationCache
from tensorflow.python.feature_column.feature_column_v2 import InputLayer
from tensorflow.python.feature_column.feature_column_v2 import StateManager
from tensorflow.python.feature_column.feature_column_v2 import _LinearModel
from tensorflow.python.feature_column.feature_column_v2 import _transform_features
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
def _initialized_session(config=None):
sess = session.Session(config=config)
sess.run(variables_lib.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
return sess
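# The helper above is the pattern the tests below lean on: typically
# `with _initialized_session() as sess:` followed by `.eval()` / `sess.run()`
# calls, so global variables and lookup tables are initialized before any
# assertion reads them.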
class LazyColumnTest(test.TestCase):
def test_transformations_called_once(self):
class TransformCounter(FeatureColumn):
def __init__(self):
self.num_transform = 0
@property
def name(self):
return 'TransformCounter'
def transform_feature(self, transformation_cache, state_manager):
self.num_transform += 1 # Count transform calls.
return transformation_cache.get('a', state_manager)
@property
def parse_example_spec(self):
pass
transformation_cache = FeatureTransformationCache(
features={'a': [[2], [3.]]})
column = TransformCounter()
self.assertEqual(0, column.num_transform)
transformation_cache.get(column, None)
self.assertEqual(1, column.num_transform)
transformation_cache.get(column, None)
self.assertEqual(1, column.num_transform)
def test_returns_transform_output(self):
class Transformer(FeatureColumn):
@property
def name(self):
return 'Transformer'
def transform_feature(self, transformation_cache, state_manager):
return 'Output'
@property
def parse_example_spec(self):
pass
transformation_cache = FeatureTransformationCache(
features={'a': [[2], [3.]]})
column = Transformer()
self.assertEqual('Output', transformation_cache.get(column, None))
self.assertEqual('Output', transformation_cache.get(column, None))
def test_does_not_pollute_given_features_dict(self):
class Transformer(FeatureColumn):
@property
def name(self):
return 'Transformer'
def transform_feature(self, transformation_cache, state_manager):
return 'Output'
@property
def parse_example_spec(self):
pass
features = {'a': [[2], [3.]]}
transformation_cache = FeatureTransformationCache(features=features)
transformation_cache.get(Transformer(), None)
self.assertEqual(['a'], list(features.keys()))
def test_error_if_feature_is_not_found(self):
transformation_cache = FeatureTransformationCache(
features={'a': [[2], [3.]]})
with self.assertRaisesRegexp(ValueError,
'bbb is not in features dictionary'):
transformation_cache.get('bbb', None)
with self.assertRaisesRegexp(ValueError,
'bbb is not in features dictionary'):
transformation_cache.get(u'bbb', None)
def test_not_supported_feature_column(self):
class NotAProperColumn(FeatureColumn):
@property
def name(self):
return 'NotAProperColumn'
def transform_feature(self, transformation_cache, state_manager):
# It should return not None.
pass
@property
def parse_example_spec(self):
pass
transformation_cache = FeatureTransformationCache(
features={'a': [[2], [3.]]})
with self.assertRaisesRegexp(ValueError,
'NotAProperColumn is not supported'):
transformation_cache.get(NotAProperColumn(), None)
  def test_key_should_be_string_or_feature_column(self):
class NotAFeatureColumn(object):
pass
transformation_cache = FeatureTransformationCache(
features={'a': [[2], [3.]]})
with self.assertRaisesRegexp(
TypeError, '"key" must be either a "str" or "FeatureColumn".'):
transformation_cache.get(NotAFeatureColumn(), None)
class NumericColumnTest(test.TestCase):
def test_defaults(self):
a = fc.numeric_column('aaa')
self.assertEqual('aaa', a.key)
self.assertEqual('aaa', a.name)
self.assertEqual((1,), a.shape)
self.assertIsNone(a.default_value)
self.assertEqual(dtypes.float32, a.dtype)
self.assertIsNone(a.normalizer_fn)
def test_key_should_be_string(self):
with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
fc.numeric_column(key=('aaa',))
def test_shape_saved_as_tuple(self):
a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3, 2.]])
self.assertEqual((1, 2), a.shape)
def test_default_value_saved_as_tuple(self):
a = fc.numeric_column('aaa', default_value=4.)
self.assertEqual((4.,), a.default_value)
a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3, 2.]])
self.assertEqual(((3., 2.),), a.default_value)
def test_shape_and_default_value_compatibility(self):
fc.numeric_column('aaa', shape=[2], default_value=[1, 2.])
with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
fc.numeric_column('aaa', shape=[2], default_value=[1, 2, 3.])
fc.numeric_column(
'aaa', shape=[3, 2], default_value=[[2, 3], [1, 2], [2, 3.]])
with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
fc.numeric_column(
'aaa', shape=[3, 1], default_value=[[2, 3], [1, 2], [2, 3.]])
with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
fc.numeric_column(
'aaa', shape=[3, 3], default_value=[[2, 3], [1, 2], [2, 3.]])
def test_default_value_type_check(self):
fc.numeric_column(
'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.float32)
fc.numeric_column(
'aaa', shape=[2], default_value=[1, 2], dtype=dtypes.int32)
with self.assertRaisesRegexp(TypeError, 'must be compatible with dtype'):
fc.numeric_column(
'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.int32)
with self.assertRaisesRegexp(TypeError,
'default_value must be compatible with dtype'):
fc.numeric_column('aaa', default_value=['string'])
def test_shape_must_be_positive_integer(self):
with self.assertRaisesRegexp(TypeError, 'shape dimensions must be integer'):
fc.numeric_column(
'aaa', shape=[
1.0,
])
with self.assertRaisesRegexp(ValueError,
'shape dimensions must be greater than 0'):
fc.numeric_column(
'aaa', shape=[
0,
])
def test_dtype_is_convertible_to_float(self):
with self.assertRaisesRegexp(ValueError,
'dtype must be convertible to float'):
fc.numeric_column('aaa', dtype=dtypes.string)
def test_scalar_default_value_fills_the_shape(self):
a = fc.numeric_column('aaa', shape=[2, 3], default_value=2.)
self.assertEqual(((2., 2., 2.), (2., 2., 2.)), a.default_value)
def test_parse_spec(self):
a = fc.numeric_column('aaa', shape=[2, 3], dtype=dtypes.int32)
self.assertEqual({
'aaa': parsing_ops.FixedLenFeature((2, 3), dtype=dtypes.int32)
}, a.parse_example_spec)
def test_parse_example_no_default_value(self):
price = fc.numeric_column('price', shape=[2])
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'price':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=[20., 110.]))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec([price]))
self.assertIn('price', features)
with self.test_session():
self.assertAllEqual([[20., 110.]], features['price'].eval())
def test_parse_example_with_default_value(self):
price = fc.numeric_column('price', shape=[2], default_value=11.)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'price':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=[20., 110.]))
}))
no_data = example_pb2.Example(features=feature_pb2.Features(
feature={
'something_else':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=[20., 110.]))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString(),
no_data.SerializeToString()],
features=fc.make_parse_example_spec([price]))
self.assertIn('price', features)
with self.test_session():
self.assertAllEqual([[20., 110.], [11., 11.]], features['price'].eval())
def test_normalizer_fn_must_be_callable(self):
with self.assertRaisesRegexp(TypeError, 'must be a callable'):
fc.numeric_column('price', normalizer_fn='NotACallable')
def test_normalizer_fn_transform_feature(self):
def _increment_two(input_tensor):
return input_tensor + 2.
price = fc.numeric_column('price', shape=[2], normalizer_fn=_increment_two)
output = _transform_features({'price': [[1., 2.], [5., 6.]]}, [price], None)
with self.test_session():
self.assertAllEqual([[3., 4.], [7., 8.]], output[price].eval())
def test_get_dense_tensor(self):
def _increment_two(input_tensor):
return input_tensor + 2.
price = fc.numeric_column('price', shape=[2], normalizer_fn=_increment_two)
transformation_cache = FeatureTransformationCache({
'price': [[1., 2.], [5., 6.]]
})
self.assertEqual(
transformation_cache.get(price, None),
price.get_dense_tensor(transformation_cache, None))
def test_sparse_tensor_not_supported(self):
price = fc.numeric_column('price')
transformation_cache = FeatureTransformationCache({
'price':
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
})
with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
price.transform_feature(transformation_cache, None)
def test_deep_copy(self):
a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3., 2.]])
a_copy = copy.deepcopy(a)
self.assertEqual(a_copy.name, 'aaa')
self.assertEqual(a_copy.shape, (1, 2))
self.assertEqual(a_copy.default_value, ((3., 2.),))
def test_numpy_default_value(self):
a = fc.numeric_column(
'aaa', shape=[1, 2], default_value=np.array([[3., 2.]]))
self.assertEqual(a.default_value, ((3., 2.),))
def test_linear_model(self):
price = fc_old.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
predictions = fc.linear_model(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([0.], bias.eval())
self.assertAllClose([[0.]], price_var.eval())
self.assertAllClose([[0.], [0.]], predictions.eval())
sess.run(price_var.assign([[10.]]))
self.assertAllClose([[10.], [50.]], predictions.eval())
def test_keras_linear_model(self):
price = fc_old.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
predictions = get_keras_linear_model_predictions(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([0.], bias.eval())
self.assertAllClose([[0.]], price_var.eval())
self.assertAllClose([[0.], [0.]], predictions.eval())
sess.run(price_var.assign([[10.]]))
self.assertAllClose([[10.], [50.]], predictions.eval())
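# A compact recap of the normalizer_fn behaviour exercised above (the values
# are the ones used in test_normalizer_fn_transform_feature): with a
# normalizer that adds 2. on a shape=[2] numeric column, the raw input
# [[1., 2.], [5., 6.]] is transformed to [[3., 4.], [7., 8.]] before any model
# sees it.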
class BucketizedColumnTest(test.TestCase):
def test_invalid_source_column_type(self):
a = fc.categorical_column_with_hash_bucket('aaa', hash_bucket_size=10)
with self.assertRaisesRegexp(
ValueError,
'source_column must be a column generated with numeric_column'):
fc.bucketized_column(a, boundaries=[0, 1])
def test_invalid_source_column_shape(self):
a = fc.numeric_column('aaa', shape=[2, 3])
with self.assertRaisesRegexp(
ValueError, 'source_column must be one-dimensional column'):
fc.bucketized_column(a, boundaries=[0, 1])
def test_invalid_boundaries(self):
a = fc.numeric_column('aaa')
with self.assertRaisesRegexp(
ValueError, 'boundaries must be a sorted list'):
fc.bucketized_column(a, boundaries=None)
with self.assertRaisesRegexp(
ValueError, 'boundaries must be a sorted list'):
fc.bucketized_column(a, boundaries=1.)
with self.assertRaisesRegexp(
ValueError, 'boundaries must be a sorted list'):
fc.bucketized_column(a, boundaries=[1, 0])
with self.assertRaisesRegexp(
ValueError, 'boundaries must be a sorted list'):
fc.bucketized_column(a, boundaries=[1, 1])
def test_name(self):
a = fc.numeric_column('aaa', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
self.assertEqual('aaa_bucketized', b.name)
def test_parse_spec(self):
a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
self.assertEqual({
'aaa': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32)
}, b.parse_example_spec)
def test_variable_shape(self):
a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
    # Column 'aaa' has shape [2] times three buckets -> variable_shape=[2, 3].
self.assertAllEqual((2, 3), b.variable_shape)
def test_num_buckets(self):
a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
    # Column 'aaa' has shape [2] times three buckets -> num_buckets=6.
self.assertEqual(6, b.num_buckets)
def test_parse_example(self):
price = fc.numeric_column('price', shape=[2])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'price':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=[20., 110.]))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec([bucketized_price]))
self.assertIn('price', features)
with self.test_session():
self.assertAllEqual([[20., 110.]], features['price'].eval())
def test_transform_feature(self):
price = fc.numeric_column('price', shape=[2])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
transformed_tensor = _transform_features({
'price': [[-1., 1.], [5., 6.]]
}, [bucketized_price], None)
with _initialized_session():
self.assertAllEqual([[0, 1], [3, 4]],
transformed_tensor[bucketized_price].eval())
def test_get_dense_tensor_one_input_value(self):
"""Tests _get_dense_tensor() for input with shape=[1]."""
price = fc.numeric_column('price', shape=[1])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
transformation_cache = FeatureTransformationCache({
'price': [[-1.], [1.], [5.], [6.]]
})
with _initialized_session():
bucketized_price_tensor = bucketized_price.get_dense_tensor(
transformation_cache, None)
self.assertAllClose(
# One-hot tensor.
[[[1., 0., 0., 0., 0.]],
[[0., 1., 0., 0., 0.]],
[[0., 0., 0., 1., 0.]],
[[0., 0., 0., 0., 1.]]],
bucketized_price_tensor.eval())
def test_get_dense_tensor_two_input_values(self):
"""Tests _get_dense_tensor() for input with shape=[2]."""
price = fc.numeric_column('price', shape=[2])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
transformation_cache = FeatureTransformationCache({
'price': [[-1., 1.], [5., 6.]]
})
with _initialized_session():
bucketized_price_tensor = bucketized_price.get_dense_tensor(
transformation_cache, None)
self.assertAllClose(
# One-hot tensor.
[[[1., 0., 0., 0., 0.], [0., 1., 0., 0., 0.]],
[[0., 0., 0., 1., 0.], [0., 0., 0., 0., 1.]]],
bucketized_price_tensor.eval())
def test_get_sparse_tensors_one_input_value(self):
"""Tests _get_sparse_tensors() for input with shape=[1]."""
price = fc.numeric_column('price', shape=[1])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
transformation_cache = FeatureTransformationCache({
'price': [[-1.], [1.], [5.], [6.]]
})
with _initialized_session() as sess:
id_weight_pair = bucketized_price.get_sparse_tensors(
transformation_cache, None)
self.assertIsNone(id_weight_pair.weight_tensor)
id_tensor_value = sess.run(id_weight_pair.id_tensor)
self.assertAllEqual(
[[0, 0], [1, 0], [2, 0], [3, 0]], id_tensor_value.indices)
self.assertAllEqual([0, 1, 3, 4], id_tensor_value.values)
self.assertAllEqual([4, 1], id_tensor_value.dense_shape)
def test_get_sparse_tensors_two_input_values(self):
"""Tests _get_sparse_tensors() for input with shape=[2]."""
price = fc.numeric_column('price', shape=[2])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
transformation_cache = FeatureTransformationCache({
'price': [[-1., 1.], [5., 6.]]
})
with _initialized_session() as sess:
id_weight_pair = bucketized_price.get_sparse_tensors(
transformation_cache, None)
self.assertIsNone(id_weight_pair.weight_tensor)
id_tensor_value = sess.run(id_weight_pair.id_tensor)
self.assertAllEqual(
[[0, 0], [0, 1], [1, 0], [1, 1]], id_tensor_value.indices)
# Values 0-4 correspond to the first column of the input price.
# Values 5-9 correspond to the second column of the input price.
self.assertAllEqual([0, 6, 3, 9], id_tensor_value.values)
self.assertAllEqual([2, 2], id_tensor_value.dense_shape)
def test_sparse_tensor_input_not_supported(self):
price = fc.numeric_column('price')
bucketized_price = fc.bucketized_column(price, boundaries=[0, 1])
transformation_cache = FeatureTransformationCache({
'price':
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
})
with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
bucketized_price.transform_feature(transformation_cache, None)
def test_deep_copy(self):
a = fc.numeric_column('aaa', shape=[2])
a_bucketized = fc.bucketized_column(a, boundaries=[0, 1])
a_bucketized_copy = copy.deepcopy(a_bucketized)
self.assertEqual(a_bucketized_copy.name, 'aaa_bucketized')
self.assertAllEqual(a_bucketized_copy.variable_shape, (2, 3))
self.assertEqual(a_bucketized_copy.boundaries, (0, 1))
def test_linear_model_one_input_value(self):
"""Tests linear_model() for input with shape=[1]."""
price = fc_old.numeric_column('price', shape=[1])
bucketized_price = fc_old.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
features = {'price': [[-1.], [1.], [5.], [6.]]}
predictions = fc.linear_model(features, [bucketized_price])
bias = get_linear_model_bias()
bucketized_price_var = get_linear_model_column_var(bucketized_price)
with _initialized_session() as sess:
self.assertAllClose([0.], bias.eval())
# One weight variable per bucket, all initialized to zero.
self.assertAllClose(
[[0.], [0.], [0.], [0.], [0.]], bucketized_price_var.eval())
self.assertAllClose([[0.], [0.], [0.], [0.]], predictions.eval())
sess.run(bucketized_price_var.assign(
[[10.], [20.], [30.], [40.], [50.]]))
# price -1. is in the 0th bucket, whose weight is 10.
# price 1. is in the 1st bucket, whose weight is 20.
# price 5. is in the 3rd bucket, whose weight is 40.
# price 6. is in the 4th bucket, whose weight is 50.
self.assertAllClose([[10.], [20.], [40.], [50.]], predictions.eval())
sess.run(bias.assign([1.]))
self.assertAllClose([[11.], [21.], [41.], [51.]], predictions.eval())
def test_linear_model_two_input_values(self):
"""Tests linear_model() for input with shape=[2]."""
price = fc_old.numeric_column('price', shape=[2])
bucketized_price = fc_old.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
features = {'price': [[-1., 1.], [5., 6.]]}
predictions = fc.linear_model(features, [bucketized_price])
bias = get_linear_model_bias()
bucketized_price_var = get_linear_model_column_var(bucketized_price)
with _initialized_session() as sess:
self.assertAllClose([0.], bias.eval())
# One weight per bucket per input column, all initialized to zero.
self.assertAllClose(
[[0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.]],
bucketized_price_var.eval())
self.assertAllClose([[0.], [0.]], predictions.eval())
sess.run(bucketized_price_var.assign(
[[10.], [20.], [30.], [40.], [50.],
[60.], [70.], [80.], [90.], [100.]]))
# 1st example:
# price -1. is in the 0th bucket, whose weight is 10.
# price 1. is in the 6th bucket, whose weight is 70.
# 2nd example:
# price 5. is in the 3rd bucket, whose weight is 40.
# price 6. is in the 9th bucket, whose weight is 100.
self.assertAllClose([[80.], [140.]], predictions.eval())
sess.run(bias.assign([1.]))
self.assertAllClose([[81.], [141.]], predictions.eval())
def test_keras_linear_model_one_input_value(self):
"""Tests _LinearModel for input with shape=[1]."""
price = fc_old.numeric_column('price', shape=[1])
bucketized_price = fc_old.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
features = {'price': [[-1.], [1.], [5.], [6.]]}
predictions = get_keras_linear_model_predictions(features,
[bucketized_price])
bias = get_linear_model_bias()
bucketized_price_var = get_linear_model_column_var(bucketized_price)
with _initialized_session() as sess:
self.assertAllClose([0.], bias.eval())
# One weight variable per bucket, all initialized to zero.
self.assertAllClose([[0.], [0.], [0.], [0.], [0.]],
bucketized_price_var.eval())
self.assertAllClose([[0.], [0.], [0.], [0.]], predictions.eval())
sess.run(
bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.]]))
# price -1. is in the 0th bucket, whose weight is 10.
# price 1. is in the 1st bucket, whose weight is 20.
# price 5. is in the 3rd bucket, whose weight is 40.
# price 6. is in the 4th bucket, whose weight is 50.
self.assertAllClose([[10.], [20.], [40.], [50.]], predictions.eval())
sess.run(bias.assign([1.]))
self.assertAllClose([[11.], [21.], [41.], [51.]], predictions.eval())
def test_keras_linear_model_two_input_values(self):
"""Tests _LinearModel for input with shape=[2]."""
price = fc_old.numeric_column('price', shape=[2])
bucketized_price = fc_old.bucketized_column(price, boundaries=[0, 2, 4, 6])
with ops.Graph().as_default():
features = {'price': [[-1., 1.], [5., 6.]]}
predictions = get_keras_linear_model_predictions(features,
[bucketized_price])
bias = get_linear_model_bias()
bucketized_price_var = get_linear_model_column_var(bucketized_price)
with _initialized_session() as sess:
self.assertAllClose([0.], bias.eval())
# One weight per bucket per input column, all initialized to zero.
self.assertAllClose(
[[0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.]],
bucketized_price_var.eval())
self.assertAllClose([[0.], [0.]], predictions.eval())
sess.run(
bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.],
[60.], [70.], [80.], [90.], [100.]]))
# 1st example:
# price -1. is in the 0th bucket, whose weight is 10.
# price 1. is in the 6th bucket, whose weight is 70.
# 2nd example:
# price 5. is in the 3rd bucket, whose weight is 40.
# price 6. is in the 9th bucket, whose weight is 100.
self.assertAllClose([[80.], [140.]], predictions.eval())
sess.run(bias.assign([1.]))
self.assertAllClose([[81.], [141.]], predictions.eval())
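# Worked example of the bucket assignment used throughout this class: with
# boundaries [0, 2, 4, 6] there are five buckets, so a price of -1. falls in
# bucket 0, 1. in bucket 1, 5. in bucket 3 and 6. in bucket 4, which is exactly
# the one-hot rows and sparse ids asserted in the tests above.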
class HashedCategoricalColumnTest(test.TestCase):
def test_defaults(self):
a = fc.categorical_column_with_hash_bucket('aaa', 10)
self.assertEqual('aaa', a.name)
self.assertEqual('aaa', a.key)
self.assertEqual(10, a.hash_bucket_size)
self.assertEqual(dtypes.string, a.dtype)
def test_key_should_be_string(self):
with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
fc.categorical_column_with_hash_bucket(('key',), 10)
def test_bucket_size_should_be_given(self):
with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be set.'):
fc.categorical_column_with_hash_bucket('aaa', None)
def test_bucket_size_should_be_positive(self):
with self.assertRaisesRegexp(ValueError,
'hash_bucket_size must be at least 1'):
fc.categorical_column_with_hash_bucket('aaa', 0)
def test_dtype_should_be_string_or_integer(self):
fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.string)
fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.float32)
def test_deep_copy(self):
original = fc.categorical_column_with_hash_bucket('aaa', 10)
for column in (original, copy.deepcopy(original)):
self.assertEqual('aaa', column.name)
self.assertEqual(10, column.hash_bucket_size)
self.assertEqual(10, column.num_buckets)
self.assertEqual(dtypes.string, column.dtype)
def test_parse_spec_string(self):
a = fc.categorical_column_with_hash_bucket('aaa', 10)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.string)
}, a.parse_example_spec)
def test_parse_spec_int(self):
a = fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int32)
}, a.parse_example_spec)
def test_parse_example(self):
a = fc.categorical_column_with_hash_bucket('aaa', 10)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer']))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec([a]))
self.assertIn('aaa', features)
with self.test_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]),
features['aaa'].eval())
def test_strings_should_be_hashed(self):
hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
outputs = _transform_features({'wire': wire_tensor}, [hashed_sparse], None)
output = outputs[hashed_sparse]
# Check exact hashed output. If hashing changes this test will break.
expected_values = [6, 4, 1]
with self.test_session():
self.assertEqual(dtypes.int64, output.values.dtype)
self.assertAllEqual(expected_values, output.values.eval())
self.assertAllEqual(wire_tensor.indices.eval(), output.indices.eval())
self.assertAllEqual(wire_tensor.dense_shape.eval(),
output.dense_shape.eval())
def test_tensor_dtype_should_be_string_or_integer(self):
string_fc = fc.categorical_column_with_hash_bucket(
'a_string', 10, dtype=dtypes.string)
int_fc = fc.categorical_column_with_hash_bucket(
'a_int', 10, dtype=dtypes.int32)
float_fc = fc.categorical_column_with_hash_bucket(
'a_float', 10, dtype=dtypes.string)
int_tensor = sparse_tensor.SparseTensor(
values=[101],
indices=[[0, 0]],
dense_shape=[1, 1])
string_tensor = sparse_tensor.SparseTensor(
values=['101'],
indices=[[0, 0]],
dense_shape=[1, 1])
float_tensor = sparse_tensor.SparseTensor(
values=[101.],
indices=[[0, 0]],
dense_shape=[1, 1])
transformation_cache = FeatureTransformationCache({
'a_int': int_tensor,
'a_string': string_tensor,
'a_float': float_tensor
})
transformation_cache.get(string_fc, None)
transformation_cache.get(int_fc, None)
with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
transformation_cache.get(float_fc, None)
def test_dtype_should_match_with_tensor(self):
hashed_sparse = fc.categorical_column_with_hash_bucket(
'wire', 10, dtype=dtypes.int64)
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
transformation_cache = FeatureTransformationCache({'wire': wire_tensor})
with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
transformation_cache.get(hashed_sparse, None)
def test_ints_should_be_hashed(self):
hashed_sparse = fc.categorical_column_with_hash_bucket(
'wire', 10, dtype=dtypes.int64)
wire_tensor = sparse_tensor.SparseTensor(
values=[101, 201, 301],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
transformation_cache = FeatureTransformationCache({'wire': wire_tensor})
output = transformation_cache.get(hashed_sparse, None)
# Check exact hashed output. If hashing changes this test will break.
expected_values = [3, 7, 5]
with self.test_session():
self.assertAllEqual(expected_values, output.values.eval())
def test_int32_64_is_compatible(self):
hashed_sparse = fc.categorical_column_with_hash_bucket(
'wire', 10, dtype=dtypes.int64)
wire_tensor = sparse_tensor.SparseTensor(
values=constant_op.constant([101, 201, 301], dtype=dtypes.int32),
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
transformation_cache = FeatureTransformationCache({'wire': wire_tensor})
output = transformation_cache.get(hashed_sparse, None)
# Check exact hashed output. If hashing changes this test will break.
expected_values = [3, 7, 5]
with self.test_session():
self.assertAllEqual(expected_values, output.values.eval())
def test_get_sparse_tensors(self):
hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
transformation_cache = FeatureTransformationCache({
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
})
id_weight_pair = hashed_sparse.get_sparse_tensors(transformation_cache,
None)
self.assertIsNone(id_weight_pair.weight_tensor)
self.assertEqual(
transformation_cache.get(hashed_sparse, None), id_weight_pair.id_tensor)
def DISABLED_test_get_sparse_tensors_weight_collections(self):
column = fc.categorical_column_with_hash_bucket('aaa', 10)
inputs = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
column._get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}),
weight_collections=('my_weights',))
self.assertItemsEqual(
[], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
self.assertItemsEqual([], ops.get_collection('my_weights'))
def test_get_sparse_tensors_dense_input(self):
hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
transformation_cache = FeatureTransformationCache({
'wire': (('omar', ''), ('stringer', 'marlo'))
})
id_weight_pair = hashed_sparse.get_sparse_tensors(transformation_cache,
None)
self.assertIsNone(id_weight_pair.weight_tensor)
self.assertEqual(
transformation_cache.get(hashed_sparse, None), id_weight_pair.id_tensor)
def test_linear_model(self):
wire_column = fc_old.categorical_column_with_hash_bucket('wire', 4)
self.assertEqual(4, wire_column._num_buckets)
with ops.Graph().as_default():
predictions = fc.linear_model({
wire_column.name: sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
}, (wire_column,))
bias = get_linear_model_bias()
wire_var = get_linear_model_column_var(wire_column)
with _initialized_session():
self.assertAllClose((0.,), bias.eval())
self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
self.assertAllClose(((0.,), (0.,)), predictions.eval())
wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
# 'marlo' -> 3: wire_var[3] = 4
# 'skywalker' -> 2, 'omar' -> 2: wire_var[2] + wire_var[2] = 3+3 = 6
self.assertAllClose(((4.,), (6.,)), predictions.eval())
def test_keras_linear_model(self):
wire_column = fc_old.categorical_column_with_hash_bucket('wire', 4)
self.assertEqual(4, wire_column._num_buckets)
with ops.Graph().as_default():
predictions = get_keras_linear_model_predictions({
wire_column.name:
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
}, (wire_column,))
bias = get_linear_model_bias()
wire_var = get_linear_model_column_var(wire_column)
with _initialized_session():
self.assertAllClose((0.,), bias.eval())
self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
self.assertAllClose(((0.,), (0.,)), predictions.eval())
wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
# 'marlo' -> 3: wire_var[3] = 4
# 'skywalker' -> 2, 'omar' -> 2: wire_var[2] + wire_var[2] = 3+3 = 6
self.assertAllClose(((4.,), (6.,)), predictions.eval())
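# Note on the pinned values above: categorical_column_with_hash_bucket maps
# each string or integer value to an id in [0, hash_bucket_size) with a fixed
# hash, e.g. 'omar', 'stringer' and 'marlo' land on 6, 4 and 1 for a bucket
# size of 10. The exact ids are an implementation detail; the tests pin them
# only so that accidental changes to the hashing are caught.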
class CrossedColumnTest(test.TestCase):
def test_keys_empty(self):
with self.assertRaisesRegexp(
ValueError, 'keys must be a list with length > 1'):
fc.crossed_column([], 10)
def test_keys_length_one(self):
with self.assertRaisesRegexp(
ValueError, 'keys must be a list with length > 1'):
fc.crossed_column(['a'], 10)
def test_key_type_unsupported(self):
with self.assertRaisesRegexp(ValueError, 'Unsupported key type'):
fc.crossed_column(['a', fc.numeric_column('c')], 10)
with self.assertRaisesRegexp(
ValueError, 'categorical_column_with_hash_bucket is not supported'):
fc.crossed_column(
['a', fc.categorical_column_with_hash_bucket('c', 10)], 10)
def test_hash_bucket_size_negative(self):
with self.assertRaisesRegexp(
ValueError, 'hash_bucket_size must be > 1'):
fc.crossed_column(['a', 'c'], -1)
def test_hash_bucket_size_zero(self):
with self.assertRaisesRegexp(
ValueError, 'hash_bucket_size must be > 1'):
fc.crossed_column(['a', 'c'], 0)
def test_hash_bucket_size_none(self):
with self.assertRaisesRegexp(
ValueError, 'hash_bucket_size must be > 1'):
fc.crossed_column(['a', 'c'], None)
def test_name(self):
a = fc.numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
crossed2 = fc.crossed_column([b, 'c', crossed1], 10)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
def test_name_ordered_alphabetically(self):
"""Tests that the name does not depend on the order of given columns."""
a = fc.numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
crossed2 = fc.crossed_column([crossed1, 'c', b], 10)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
def test_name_leaf_keys_ordered_alphabetically(self):
"""Tests that the name does not depend on the order of given columns."""
a = fc.numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d2', 'c'], 10)
crossed2 = fc.crossed_column([crossed1, 'd1', b], 10)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
def test_parse_spec(self):
a = fc.numeric_column('a', shape=[2], dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed = fc.crossed_column([b, 'c'], 10)
self.assertEqual({
'a': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32),
'c': parsing_ops.VarLenFeature(dtypes.string),
}, crossed.parse_example_spec)
def test_num_buckets(self):
a = fc.numeric_column('a', shape=[2], dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed = fc.crossed_column([b, 'c'], 15)
self.assertEqual(15, crossed.num_buckets)
def test_deep_copy(self):
a = fc.numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
crossed2 = fc.crossed_column([b, 'c', crossed1], 15, hash_key=5)
crossed2_copy = copy.deepcopy(crossed2)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2_copy.name)
self.assertEqual(15, crossed2_copy.hash_bucket_size)
self.assertEqual(5, crossed2_copy.hash_key)
def test_parse_example(self):
price = fc.numeric_column('price', shape=[2])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
price_cross_wire = fc.crossed_column([bucketized_price, 'wire'], 10)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'price':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=[20., 110.])),
'wire':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer'])),
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec([price_cross_wire]))
self.assertIn('price', features)
self.assertIn('wire', features)
with self.test_session():
self.assertAllEqual([[20., 110.]], features['price'].eval())
wire_sparse = features['wire']
self.assertAllEqual([[0, 0], [0, 1]], wire_sparse.indices.eval())
# Use byte constants to pass the open-source test.
self.assertAllEqual([b'omar', b'stringer'], wire_sparse.values.eval())
self.assertAllEqual([1, 2], wire_sparse.dense_shape.eval())
def test_transform_feature(self):
price = fc.numeric_column('price', shape=[2])
bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
hash_bucket_size = 10
price_cross_wire = fc.crossed_column(
[bucketized_price, 'wire'], hash_bucket_size)
features = {
'price': constant_op.constant([[1., 2.], [5., 6.]]),
'wire': sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2]),
}
outputs = _transform_features(features, [price_cross_wire], None)
output = outputs[price_cross_wire]
with self.test_session() as sess:
output_val = sess.run(output)
self.assertAllEqual(
[[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [1, 3]], output_val.indices)
for val in output_val.values:
self.assertIn(val, list(range(hash_bucket_size)))
self.assertAllEqual([2, 4], output_val.dense_shape)
def test_get_sparse_tensors(self):
a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc.bucketized_column(a, boundaries=(0, 1))
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
crossed2 = fc.crossed_column([b, 'c', crossed1], 15, hash_key=5)
with ops.Graph().as_default():
transformation_cache = FeatureTransformationCache({
'a':
constant_op.constant(((-1., .5), (.5, 1.))),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
'd1':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['d1A', 'd1B', 'd1C'],
dense_shape=(2, 2)),
'd2':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['d2A', 'd2B', 'd2C'],
dense_shape=(2, 2)),
})
id_weight_pair = crossed2.get_sparse_tensors(transformation_cache, None)
with _initialized_session():
id_tensor_eval = id_weight_pair.id_tensor.eval()
self.assertAllEqual(
((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3), (1, 4), (1, 5),
(1, 6), (1, 7), (1, 8), (1, 9), (1, 10), (1, 11), (1, 12), (1, 13),
(1, 14), (1, 15)),
id_tensor_eval.indices)
# Check exact hashed output. If hashing changes this test will break.
# All values are within [0, hash_bucket_size).
expected_values = (
6, 14, 0, 13, 8, 8, 10, 12, 2, 0, 1, 9, 8, 12, 2, 0, 10, 11)
self.assertAllEqual(expected_values, id_tensor_eval.values)
self.assertAllEqual((2, 16), id_tensor_eval.dense_shape)
def test_get_sparse_tensors_simple(self):
"""Same as test_get_sparse_tensors, but with simpler values."""
a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc.bucketized_column(a, boundaries=(0, 1))
crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
transformation_cache = FeatureTransformationCache({
'a':
constant_op.constant(((-1., .5), (.5, 1.))),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
})
id_weight_pair = crossed.get_sparse_tensors(transformation_cache, None)
with _initialized_session():
id_tensor_eval = id_weight_pair.id_tensor.eval()
self.assertAllEqual(
((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3)),
id_tensor_eval.indices)
# Check exact hashed output. If hashing changes this test will break.
# All values are within [0, hash_bucket_size).
expected_values = (1, 0, 1, 3, 4, 2)
self.assertAllEqual(expected_values, id_tensor_eval.values)
self.assertAllEqual((2, 4), id_tensor_eval.dense_shape)
def test_linear_model(self):
"""Tests linear_model.
    Uses data from test_get_sparse_tensors_simple.
"""
a = fc_old.numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc_old.bucketized_column(a, boundaries=(0, 1))
crossed = fc_old.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
predictions = fc.linear_model({
'a': constant_op.constant(((-1., .5), (.5, 1.))),
'c': sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
}, (crossed,))
bias = get_linear_model_bias()
crossed_var = get_linear_model_column_var(crossed)
with _initialized_session() as sess:
self.assertAllClose((0.,), bias.eval())
self.assertAllClose(
((0.,), (0.,), (0.,), (0.,), (0.,)), crossed_var.eval())
self.assertAllClose(((0.,), (0.,)), predictions.eval())
sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
# Expected ids after cross = (1, 0, 1, 3, 4, 2)
self.assertAllClose(((3.,), (14.,)), predictions.eval())
sess.run(bias.assign((.1,)))
self.assertAllClose(((3.1,), (14.1,)), predictions.eval())
def test_linear_model_with_weights(self):
class _TestColumnWithWeights(fc_old._CategoricalColumn):
"""Produces sparse IDs and sparse weights."""
@property
def name(self):
return 'test_column'
@property
def _parse_example_spec(self):
return {
self.name: parsing_ops.VarLenFeature(dtypes.int32),
'{}_weights'.format(self.name): parsing_ops.VarLenFeature(
dtypes.float32),
}
@property
def _num_buckets(self):
return 5
def _transform_feature(self, inputs):
return (inputs.get(self.name),
inputs.get('{}_weights'.format(self.name)))
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
"""Populates both id_tensor and weight_tensor."""
ids_and_weights = inputs.get(self)
return fc_old._CategoricalColumn.IdWeightPair(
id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1])
t = _TestColumnWithWeights()
crossed = fc_old.crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
with self.assertRaisesRegexp(
ValueError,
'crossed_column does not support weight_tensor.*{}'.format(t.name)):
fc.linear_model({
t.name: sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=[0, 1, 2],
dense_shape=(2, 2)),
'{}_weights'.format(t.name): sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=[1., 10., 2.],
dense_shape=(2, 2)),
'c': sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
}, (crossed,))
def test_keras_linear_model(self):
"""Tests _LinearModel.
    Uses data from test_get_sparse_tensors_simple.
"""
a = fc_old.numeric_column('a', dtype=dtypes.int32, shape=(2,))
b = fc_old.bucketized_column(a, boundaries=(0, 1))
crossed = fc_old.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
predictions = get_keras_linear_model_predictions({
'a':
constant_op.constant(((-1., .5), (.5, 1.))),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
}, (crossed,))
bias = get_linear_model_bias()
crossed_var = get_linear_model_column_var(crossed)
with _initialized_session() as sess:
self.assertAllClose((0.,), bias.eval())
self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)),
crossed_var.eval())
self.assertAllClose(((0.,), (0.,)), predictions.eval())
sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
# Expected ids after cross = (1, 0, 1, 3, 4, 2)
self.assertAllClose(((3.,), (14.,)), predictions.eval())
sess.run(bias.assign((.1,)))
self.assertAllClose(((3.1,), (14.1,)), predictions.eval())
def test_keras_linear_model_with_weights(self):
class _TestColumnWithWeights(fc_old._CategoricalColumn):
"""Produces sparse IDs and sparse weights."""
@property
def name(self):
return 'test_column'
@property
def _parse_example_spec(self):
return {
self.name:
parsing_ops.VarLenFeature(dtypes.int32),
'{}_weights'.format(self.name):
parsing_ops.VarLenFeature(dtypes.float32),
}
@property
def _num_buckets(self):
return 5
def _transform_feature(self, inputs):
return (inputs.get(self.name),
inputs.get('{}_weights'.format(self.name)))
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
"""Populates both id_tensor and weight_tensor."""
ids_and_weights = inputs.get(self)
return fc_old._CategoricalColumn.IdWeightPair(
id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1])
t = _TestColumnWithWeights()
crossed = fc_old.crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
with ops.Graph().as_default():
with self.assertRaisesRegexp(
ValueError,
'crossed_column does not support weight_tensor.*{}'.format(t.name)):
get_keras_linear_model_predictions({
t.name:
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=[0, 1, 2],
dense_shape=(2, 2)),
'{}_weights'.format(t.name):
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=[1., 10., 2.],
dense_shape=(2, 2)),
'c':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=['cA', 'cB', 'cC'],
dense_shape=(2, 2)),
}, (crossed,))
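# Recap of the crossing behaviour exercised above: each input row is expanded
# to the cartesian product of its component values (bucketized ids, raw
# strings, other crossed ids), and every combination is hashed into
# [0, hash_bucket_size). In test_get_sparse_tensors_simple the second row has
# two bucket ids and two string values, hence the four crossed ids asserted
# for that row.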
def get_linear_model_bias(name='linear_model'):
with variable_scope.variable_scope(name, reuse=True):
return variable_scope.get_variable('bias_weights')
def get_linear_model_column_var(column, name='linear_model'):
return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
name + '/' + column.name)[0]
def get_keras_linear_model_predictions(features,
feature_columns,
units=1,
sparse_combiner='sum',
weight_collections=None,
trainable=True,
cols_to_vars=None):
keras_linear_model = _LinearModel(
feature_columns,
units,
sparse_combiner,
weight_collections,
trainable,
name='linear_model')
retval = keras_linear_model(features) # pylint: disable=not-callable
if cols_to_vars is not None:
cols_to_vars.update(keras_linear_model.cols_to_vars())
return retval
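# The helpers above are used symmetrically in the test classes below: a test
# builds predictions either with fc.linear_model(features, columns) or with
# get_keras_linear_model_predictions(features, columns), then reads the
# created variables back through get_linear_model_bias() and
# get_linear_model_column_var(column) to assign known weights before checking
# the predicted values.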
class LinearModelTest(test.TestCase):
def test_raises_if_empty_feature_columns(self):
with self.assertRaisesRegexp(ValueError,
'feature_columns must not be empty'):
fc.linear_model(features={}, feature_columns=[])
def test_should_be_feature_column(self):
with self.assertRaisesRegexp(ValueError, 'must be a _FeatureColumn'):
fc.linear_model(features={'a': [[0]]}, feature_columns='NotSupported')
def test_should_be_dense_or_categorical_column(self):
class NotSupportedColumn(fc_old._FeatureColumn):
@property
def name(self):
return 'NotSupportedColumn'
def _transform_feature(self, cache):
pass
@property
def _parse_example_spec(self):
pass
with self.assertRaisesRegexp(
ValueError, 'must be either a _DenseColumn or _CategoricalColumn'):
fc.linear_model(
features={'a': [[0]]}, feature_columns=[NotSupportedColumn()])
def test_does_not_support_dict_columns(self):
with self.assertRaisesRegexp(
ValueError, 'Expected feature_columns to be iterable, found dict.'):
fc.linear_model(
features={'a': [[0]]},
feature_columns={'a': fc_old.numeric_column('a')})
def test_raises_if_duplicate_name(self):
with self.assertRaisesRegexp(
ValueError, 'Duplicate feature column name found for columns'):
fc.linear_model(
features={'a': [[0]]},
feature_columns=[
fc_old.numeric_column('a'),
fc_old.numeric_column('a')
])
def test_dense_bias(self):
price = fc_old.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
predictions = fc.linear_model(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([0.], bias.eval())
sess.run(price_var.assign([[10.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[15.], [55.]], predictions.eval())
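  # The expected values above follow the linear form w * x + b with the
  # assigned weight 10. and bias 5.: 10. * 1. + 5. = 15. and 10. * 5. + 5. = 55.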
def test_sparse_bias(self):
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
predictions = fc.linear_model(features, [wire_cast])
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
self.assertAllClose([0.], bias.eval())
self.assertAllClose([[0.], [0.], [0.], [0.]], wire_cast_var.eval())
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[1005.], [10015.]], predictions.eval())
def test_dense_and_sparse_bias(self):
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
price = fc_old.numeric_column('price')
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor, 'price': [[1.], [5.]]}
predictions = fc.linear_model(features, [wire_cast, price])
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
sess.run(price_var.assign([[10.]]))
self.assertAllClose([[1015.], [10065.]], predictions.eval())
def test_dense_and_sparse_column(self):
"""When the column is both dense and sparse, uses sparse tensors."""
class _DenseAndSparseColumn(fc_old._DenseColumn, fc_old._CategoricalColumn):
@property
def name(self):
return 'dense_and_sparse_column'
@property
def _parse_example_spec(self):
return {self.name: parsing_ops.VarLenFeature(self.dtype)}
def _transform_feature(self, inputs):
return inputs.get(self.name)
@property
def _variable_shape(self):
raise ValueError('Should not use this method.')
def _get_dense_tensor(self, inputs, weight_collections=None,
trainable=None):
raise ValueError('Should not use this method.')
@property
def _num_buckets(self):
return 4
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
sp_tensor = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 0], [1, 1]],
values=[2, 0, 3],
dense_shape=[2, 2])
return fc_old._CategoricalColumn.IdWeightPair(sp_tensor, None)
dense_and_sparse_column = _DenseAndSparseColumn()
with ops.Graph().as_default():
sp_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {dense_and_sparse_column.name: sp_tensor}
predictions = fc.linear_model(features, [dense_and_sparse_column])
bias = get_linear_model_bias()
dense_and_sparse_column_var = get_linear_model_column_var(
dense_and_sparse_column)
with _initialized_session() as sess:
sess.run(dense_and_sparse_column_var.assign(
[[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[1005.], [10015.]], predictions.eval())
def test_dense_multi_output(self):
price = fc_old.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
predictions = fc.linear_model(features, [price], units=3)
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), bias.eval())
self.assertAllClose(np.zeros((1, 3)), price_var.eval())
sess.run(price_var.assign([[10., 100., 1000.]]))
sess.run(bias.assign([5., 6., 7.]))
self.assertAllClose([[15., 106., 1007.], [55., 506., 5007.]],
predictions.eval())
def test_sparse_multi_output(self):
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
predictions = fc.linear_model(features, [wire_cast], units=3)
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), bias.eval())
self.assertAllClose(np.zeros((4, 3)), wire_cast_var.eval())
sess.run(
wire_cast_var.assign([[10., 11., 12.], [100., 110., 120.], [
1000., 1100., 1200.
], [10000., 11000., 12000.]]))
sess.run(bias.assign([5., 6., 7.]))
self.assertAllClose([[1005., 1106., 1207.], [10015., 11017., 12019.]],
predictions.eval())
def test_dense_multi_dimension(self):
price = fc_old.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
predictions = fc.linear_model(features, [price])
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([[0.], [0.]], price_var.eval())
sess.run(price_var.assign([[10.], [100.]]))
self.assertAllClose([[210.], [650.]], predictions.eval())
def test_sparse_multi_rank(self):
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = array_ops.sparse_placeholder(dtypes.string)
wire_value = sparse_tensor.SparseTensorValue(
values=['omar', 'stringer', 'marlo', 'omar'], # hashed = [2, 0, 3, 2]
indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 1]],
dense_shape=[2, 2, 2])
features = {'wire_cast': wire_tensor}
predictions = fc.linear_model(features, [wire_cast])
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((4, 1)), wire_cast_var.eval())
self.assertAllClose(
np.zeros((2, 1)),
predictions.eval(feed_dict={wire_tensor: wire_value}))
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
self.assertAllClose(
[[1010.], [11000.]],
predictions.eval(feed_dict={wire_tensor: wire_value}))
def test_sparse_combiner(self):
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
predictions = fc.linear_model(
features, [wire_cast], sparse_combiner='mean')
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[1005.], [5010.]], predictions.eval())
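  # With sparse_combiner='mean' the weights of a row's active buckets are
  # averaged: the first example only activates bucket 2 (1000. + 5. = 1005.),
  # while the second averages buckets 0 and 3 ((10. + 10000.) / 2 + 5. = 5010.).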
def test_sparse_combiner_with_negative_weights(self):
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
wire_cast_weights = fc_old.weighted_categorical_column(wire_cast, 'weights')
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {
'wire_cast': wire_tensor,
'weights': constant_op.constant([[1., 1., -1.0]])
}
predictions = fc.linear_model(
features, [wire_cast_weights], sparse_combiner='sum')
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
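        # 'sum' applies the per-id weights directly: row 1 is
        # 1 * 10. + (-1) * 10000. = -9990., plus the bias of 5. -> -9985.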
self.assertAllClose([[1005.], [-9985.]], predictions.eval())
def test_dense_multi_dimension_multi_output(self):
price = fc_old.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
predictions = fc.linear_model(features, [price], units=3)
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), bias.eval())
self.assertAllClose(np.zeros((2, 3)), price_var.eval())
sess.run(price_var.assign([[1., 2., 3.], [10., 100., 1000.]]))
sess.run(bias.assign([2., 3., 4.]))
self.assertAllClose([[23., 205., 2007.], [67., 613., 6019.]],
predictions.eval())
def test_raises_if_shape_mismatch(self):
price = fc_old.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
with self.assertRaisesRegexp(
Exception,
r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
fc.linear_model(features, [price])
def test_dense_reshaping(self):
price = fc_old.numeric_column('price', shape=[1, 2])
with ops.Graph().as_default():
features = {'price': [[[1., 2.]], [[5., 6.]]]}
predictions = fc.linear_model(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([0.], bias.eval())
self.assertAllClose([[0.], [0.]], price_var.eval())
self.assertAllClose([[0.], [0.]], predictions.eval())
sess.run(price_var.assign([[10.], [100.]]))
self.assertAllClose([[210.], [650.]], predictions.eval())
def test_dense_multi_column(self):
price1 = fc_old.numeric_column('price1', shape=2)
price2 = fc_old.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1., 2.], [5., 6.]],
'price2': [[3.], [4.]]
}
predictions = fc.linear_model(features, [price1, price2])
bias = get_linear_model_bias()
price1_var = get_linear_model_column_var(price1)
price2_var = get_linear_model_column_var(price2)
with _initialized_session() as sess:
self.assertAllClose([0.], bias.eval())
self.assertAllClose([[0.], [0.]], price1_var.eval())
self.assertAllClose([[0.]], price2_var.eval())
self.assertAllClose([[0.], [0.]], predictions.eval())
sess.run(price1_var.assign([[10.], [100.]]))
sess.run(price2_var.assign([[1000.]]))
sess.run(bias.assign([7.]))
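        # Row 0: 1*10. + 2*100. + 3*1000. + 7. = 3217.
        # Row 1: 5*10. + 6*100. + 4*1000. + 7. = 4657.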
self.assertAllClose([[3217.], [4657.]], predictions.eval())
def test_fills_cols_to_vars(self):
price1 = fc_old.numeric_column('price1', shape=2)
price2 = fc_old.numeric_column('price2')
with ops.Graph().as_default():
features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
cols_to_vars = {}
fc.linear_model(features, [price1, price2], cols_to_vars=cols_to_vars)
bias = get_linear_model_bias()
price1_var = get_linear_model_column_var(price1)
price2_var = get_linear_model_column_var(price2)
self.assertAllEqual(cols_to_vars['bias'], [bias])
self.assertAllEqual(cols_to_vars[price1], [price1_var])
self.assertAllEqual(cols_to_vars[price2], [price2_var])
def test_fills_cols_to_vars_partitioned_variables(self):
price1 = fc_old.numeric_column('price1', shape=2)
price2 = fc_old.numeric_column('price2', shape=3)
with ops.Graph().as_default():
features = {
'price1': [[1., 2.], [6., 7.]],
'price2': [[3., 4., 5.], [8., 9., 10.]]
}
cols_to_vars = {}
with variable_scope.variable_scope(
'linear',
partitioner=partitioned_variables.fixed_size_partitioner(2, axis=0)):
fc.linear_model(features, [price1, price2], cols_to_vars=cols_to_vars)
with _initialized_session():
self.assertEqual([0.], cols_to_vars['bias'][0].eval())
# Partitioning shards the [2, 1] price1 var into 2 [1, 1] Variables.
self.assertAllEqual([[0.]], cols_to_vars[price1][0].eval())
self.assertAllEqual([[0.]], cols_to_vars[price1][1].eval())
# Partitioning shards the [3, 1] price2 var into a [2, 1] Variable and
# a [1, 1] Variable.
self.assertAllEqual([[0.], [0.]], cols_to_vars[price2][0].eval())
self.assertAllEqual([[0.]], cols_to_vars[price2][1].eval())
def test_dense_collection(self):
price = fc_old.numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
fc.linear_model(features, [price], weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
self.assertIn(bias, my_vars)
self.assertIn(price_var, my_vars)
def test_sparse_collection(self):
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
fc.linear_model(
features, [wire_cast], weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
self.assertIn(bias, my_vars)
self.assertIn(wire_cast_var, my_vars)
def test_dense_trainable_default(self):
price = fc_old.numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
fc.linear_model(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertIn(bias, trainable_vars)
self.assertIn(price_var, trainable_vars)
def test_sparse_trainable_default(self):
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
fc.linear_model(features, [wire_cast])
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
self.assertIn(bias, trainable_vars)
self.assertIn(wire_cast_var, trainable_vars)
def test_dense_trainable_false(self):
price = fc_old.numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
fc.linear_model(features, [price], trainable=False)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual([], trainable_vars)
def test_sparse_trainable_false(self):
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
fc.linear_model(features, [wire_cast], trainable=False)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual([], trainable_vars)
def test_column_order(self):
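    # The created variables come out in the same (name-sorted) order no
    # matter what order the columns are passed in; both orderings below
    # are expected to yield price_a, price_b, wire_cast.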
price_a = fc_old.numeric_column('price_a')
price_b = fc_old.numeric_column('price_b')
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
features = {
'price_a': [[1.]],
'price_b': [[3.]],
'wire_cast':
sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
}
fc.linear_model(
features, [price_a, wire_cast, price_b],
weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
self.assertIn('price_a', my_vars[0].name)
self.assertIn('price_b', my_vars[1].name)
self.assertIn('wire_cast', my_vars[2].name)
with ops.Graph().as_default() as g:
features = {
'price_a': [[1.]],
'price_b': [[3.]],
'wire_cast':
sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
}
fc.linear_model(
features, [wire_cast, price_b, price_a],
weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
self.assertIn('price_a', my_vars[0].name)
self.assertIn('price_b', my_vars[1].name)
self.assertIn('wire_cast', my_vars[2].name)
def test_static_batch_size_mismatch(self):
price1 = fc_old.numeric_column('price1')
price2 = fc_old.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1.], [5.], [7.]], # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
with self.assertRaisesRegexp(
ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):
fc.linear_model(features, [price1, price2])
def test_subset_of_static_batch_size_mismatch(self):
price1 = fc_old.numeric_column('price1')
price2 = fc_old.numeric_column('price2')
price3 = fc_old.numeric_column('price3')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]], # batchsize = 2
'price3': [[3.], [4.], [5.]] # batchsize = 3
}
with self.assertRaisesRegexp(
ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):
fc.linear_model(features, [price1, price2, price3])
def test_runtime_batch_size_mismatch(self):
price1 = fc_old.numeric_column('price1')
price2 = fc_old.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
predictions = fc.linear_model(features, [price1, price2])
with _initialized_session() as sess:
with self.assertRaisesRegexp(errors.OpError,
'must have the same size and shape'):
sess.run(
predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]})
def test_runtime_batch_size_matches(self):
price1 = fc_old.numeric_column('price1')
price2 = fc_old.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
}
predictions = fc.linear_model(features, [price1, price2])
with _initialized_session() as sess:
sess.run(
predictions,
feed_dict={
features['price1']: [[1.], [5.]],
features['price2']: [[1.], [5.]],
})
def test_with_numpy_input_fn(self):
price = fc_old.numeric_column('price')
price_buckets = fc_old.bucketized_column(
price, boundaries=[
0.,
10.,
100.,
])
body_style = fc_old.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
input_fn = numpy_io.numpy_input_fn(
x={
'price': np.array([-1., 2., 13., 104.]),
'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
},
batch_size=2,
shuffle=False)
features = input_fn()
net = fc.linear_model(features, [price_buckets, body_style])
# self.assertEqual(1 + 3 + 5, net.shape[1])
with _initialized_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
bias = get_linear_model_bias()
price_buckets_var = get_linear_model_column_var(price_buckets)
body_style_var = get_linear_model_column_var(body_style)
sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
sess.run(bias.assign([5.]))
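      # First batch of 2: prices -1. and 2. fall into buckets 0 and 1
      # (weights 10. and 100.); 'sedan' and 'hardtop' map to -1000. and
      # -10.; the bias is 5.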
self.assertAllClose([[10 - 1000 + 5.], [100 - 10 + 5.]], sess.run(net))
coord.request_stop()
coord.join(threads)
def test_with_1d_sparse_tensor(self):
price = fc_old.numeric_column('price')
price_buckets = fc_old.bucketized_column(
price, boundaries=[
0.,
10.,
100.,
])
body_style = fc_old.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
# Provides 1-dim tensor and dense tensor.
features = {
'price': constant_op.constant([-1., 12.,]),
'body-style': sparse_tensor.SparseTensor(
indices=((0,), (1,)),
values=('sedan', 'hardtop'),
dense_shape=(2,)),
}
self.assertEqual(1, features['price'].shape.ndims)
self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
net = fc.linear_model(features, [price_buckets, body_style])
with _initialized_session() as sess:
bias = get_linear_model_bias()
price_buckets_var = get_linear_model_column_var(price_buckets)
body_style_var = get_linear_model_column_var(body_style)
sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]], sess.run(net))
def test_with_1d_unknown_shape_sparse_tensor(self):
price = fc_old.numeric_column('price')
price_buckets = fc_old.bucketized_column(
price, boundaries=[
0.,
10.,
100.,
])
body_style = fc_old.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
country = fc_old.categorical_column_with_vocabulary_list(
'country', vocabulary_list=['US', 'JP', 'CA'])
# Provides 1-dim tensor and dense tensor.
features = {
'price': array_ops.placeholder(dtypes.float32),
'body-style': array_ops.sparse_placeholder(dtypes.string),
'country': array_ops.placeholder(dtypes.string),
}
self.assertIsNone(features['price'].shape.ndims)
self.assertIsNone(features['body-style'].get_shape().ndims)
price_data = np.array([-1., 12.])
body_style_data = sparse_tensor.SparseTensorValue(
indices=((0,), (1,)),
values=('sedan', 'hardtop'),
dense_shape=(2,))
country_data = np.array(['US', 'CA'])
net = fc.linear_model(features, [price_buckets, body_style, country])
bias = get_linear_model_bias()
price_buckets_var = get_linear_model_column_var(price_buckets)
body_style_var = get_linear_model_column_var(body_style)
with _initialized_session() as sess:
sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
sess.run(bias.assign([5.]))
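      # The country column's weights are never assigned, so they stay at
      # zero and contribute nothing to the predictions.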
self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
sess.run(
net,
feed_dict={
features['price']: price_data,
features['body-style']: body_style_data,
features['country']: country_data
}))
def test_with_rank_0_feature(self):
price = fc_old.numeric_column('price')
features = {
'price': constant_op.constant(0),
}
self.assertEqual(0, features['price'].shape.ndims)
# Static rank 0 should fail
with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
fc.linear_model(features, [price])
# Dynamic rank 0 should fail
features = {
'price': array_ops.placeholder(dtypes.float32),
}
net = fc.linear_model(features, [price])
self.assertEqual(1, net.shape[1])
with _initialized_session() as sess:
with self.assertRaisesOpError('Feature .* cannot have rank 0'):
sess.run(net, feed_dict={features['price']: np.array(1)})
def test_multiple_linear_models(self):
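    # Each call to fc.linear_model creates its own variable scope
    # ('linear_model', 'linear_model_1'), so the two models use
    # independent weights and biases.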
price = fc_old.numeric_column('price')
with ops.Graph().as_default():
features1 = {'price': [[1.], [5.]]}
features2 = {'price': [[2.], [10.]]}
predictions1 = fc.linear_model(features1, [price])
predictions2 = fc.linear_model(features2, [price])
bias1 = get_linear_model_bias(name='linear_model')
bias2 = get_linear_model_bias(name='linear_model_1')
price_var1 = get_linear_model_column_var(price, name='linear_model')
price_var2 = get_linear_model_column_var(price, name='linear_model_1')
with _initialized_session() as sess:
self.assertAllClose([0.], bias1.eval())
sess.run(price_var1.assign([[10.]]))
sess.run(bias1.assign([5.]))
self.assertAllClose([[15.], [55.]], predictions1.eval())
self.assertAllClose([0.], bias2.eval())
sess.run(price_var2.assign([[10.]]))
sess.run(bias2.assign([5.]))
self.assertAllClose([[25.], [105.]], predictions2.eval())
class _LinearModelTest(test.TestCase):
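  """Tests get_keras_linear_model_predictions (Keras linear model)."""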
def test_raises_if_empty_feature_columns(self):
with self.assertRaisesRegexp(ValueError,
'feature_columns must not be empty'):
get_keras_linear_model_predictions(features={}, feature_columns=[])
def test_should_be_feature_column(self):
with self.assertRaisesRegexp(ValueError, 'must be a _FeatureColumn'):
get_keras_linear_model_predictions(
features={'a': [[0]]}, feature_columns='NotSupported')
def test_should_be_dense_or_categorical_column(self):
class NotSupportedColumn(fc_old._FeatureColumn):
@property
def name(self):
return 'NotSupportedColumn'
def _transform_feature(self, cache):
pass
@property
def _parse_example_spec(self):
pass
with self.assertRaisesRegexp(
ValueError, 'must be either a _DenseColumn or _CategoricalColumn'):
get_keras_linear_model_predictions(
features={'a': [[0]]}, feature_columns=[NotSupportedColumn()])
def test_does_not_support_dict_columns(self):
with self.assertRaisesRegexp(
ValueError, 'Expected feature_columns to be iterable, found dict.'):
fc.linear_model(
features={'a': [[0]]},
feature_columns={'a': fc_old.numeric_column('a')})
def test_raises_if_duplicate_name(self):
with self.assertRaisesRegexp(
ValueError, 'Duplicate feature column name found for columns'):
get_keras_linear_model_predictions(
features={'a': [[0]]},
feature_columns=[
fc_old.numeric_column('a'),
fc_old.numeric_column('a')
])
def test_dense_bias(self):
price = fc_old.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
predictions = get_keras_linear_model_predictions(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([0.], bias.eval())
sess.run(price_var.assign([[10.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[15.], [55.]], predictions.eval())
def test_sparse_bias(self):
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
predictions = get_keras_linear_model_predictions(features, [wire_cast])
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
self.assertAllClose([0.], bias.eval())
self.assertAllClose([[0.], [0.], [0.], [0.]], wire_cast_var.eval())
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[1005.], [10015.]], predictions.eval())
def test_dense_and_sparse_bias(self):
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
price = fc_old.numeric_column('price')
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor, 'price': [[1.], [5.]]}
predictions = get_keras_linear_model_predictions(features,
[wire_cast, price])
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
sess.run(price_var.assign([[10.]]))
self.assertAllClose([[1015.], [10065.]], predictions.eval())
def test_dense_and_sparse_column(self):
"""When the column is both dense and sparse, uses sparse tensors."""
class _DenseAndSparseColumn(fc_old._DenseColumn, fc_old._CategoricalColumn):
@property
def name(self):
return 'dense_and_sparse_column'
@property
def _parse_example_spec(self):
return {self.name: parsing_ops.VarLenFeature(self.dtype)}
def _transform_feature(self, inputs):
return inputs.get(self.name)
@property
def _variable_shape(self):
raise ValueError('Should not use this method.')
def _get_dense_tensor(self,
inputs,
weight_collections=None,
trainable=None):
raise ValueError('Should not use this method.')
@property
def _num_buckets(self):
return 4
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
sp_tensor = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 0], [1, 1]],
values=[2, 0, 3],
dense_shape=[2, 2])
return fc_old._CategoricalColumn.IdWeightPair(sp_tensor, None)
dense_and_sparse_column = _DenseAndSparseColumn()
with ops.Graph().as_default():
sp_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {dense_and_sparse_column.name: sp_tensor}
predictions = get_keras_linear_model_predictions(
features, [dense_and_sparse_column])
bias = get_linear_model_bias()
dense_and_sparse_column_var = get_linear_model_column_var(
dense_and_sparse_column)
with _initialized_session() as sess:
sess.run(
dense_and_sparse_column_var.assign([[10.], [100.], [1000.],
[10000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[1005.], [10015.]], predictions.eval())
def test_dense_multi_output(self):
price = fc_old.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
predictions = get_keras_linear_model_predictions(
features, [price], units=3)
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), bias.eval())
self.assertAllClose(np.zeros((1, 3)), price_var.eval())
sess.run(price_var.assign([[10., 100., 1000.]]))
sess.run(bias.assign([5., 6., 7.]))
self.assertAllClose([[15., 106., 1007.], [55., 506., 5007.]],
predictions.eval())
def test_sparse_multi_output(self):
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
predictions = get_keras_linear_model_predictions(
features, [wire_cast], units=3)
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), bias.eval())
self.assertAllClose(np.zeros((4, 3)), wire_cast_var.eval())
sess.run(
wire_cast_var.assign([[10., 11., 12.], [100., 110., 120.],
[1000., 1100.,
1200.], [10000., 11000., 12000.]]))
sess.run(bias.assign([5., 6., 7.]))
self.assertAllClose([[1005., 1106., 1207.], [10015., 11017., 12019.]],
predictions.eval())
def test_dense_multi_dimension(self):
price = fc_old.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
predictions = get_keras_linear_model_predictions(features, [price])
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([[0.], [0.]], price_var.eval())
sess.run(price_var.assign([[10.], [100.]]))
self.assertAllClose([[210.], [650.]], predictions.eval())
def test_sparse_multi_rank(self):
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = array_ops.sparse_placeholder(dtypes.string)
wire_value = sparse_tensor.SparseTensorValue(
values=['omar', 'stringer', 'marlo', 'omar'], # hashed = [2, 0, 3, 2]
indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 1]],
dense_shape=[2, 2, 2])
features = {'wire_cast': wire_tensor}
predictions = get_keras_linear_model_predictions(features, [wire_cast])
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((4, 1)), wire_cast_var.eval())
self.assertAllClose(
np.zeros((2, 1)),
predictions.eval(feed_dict={wire_tensor: wire_value}))
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
self.assertAllClose(
[[1010.], [11000.]],
predictions.eval(feed_dict={wire_tensor: wire_value}))
def test_sparse_combiner(self):
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
predictions = get_keras_linear_model_predictions(
features, [wire_cast], sparse_combiner='mean')
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[1005.], [5010.]], predictions.eval())
def test_dense_multi_dimension_multi_output(self):
price = fc_old.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
predictions = get_keras_linear_model_predictions(
features, [price], units=3)
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), bias.eval())
self.assertAllClose(np.zeros((2, 3)), price_var.eval())
sess.run(price_var.assign([[1., 2., 3.], [10., 100., 1000.]]))
sess.run(bias.assign([2., 3., 4.]))
self.assertAllClose([[23., 205., 2007.], [67., 613., 6019.]],
predictions.eval())
def test_raises_if_shape_mismatch(self):
price = fc_old.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
with self.assertRaisesRegexp(
Exception,
r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
get_keras_linear_model_predictions(features, [price])
def test_dense_reshaping(self):
price = fc_old.numeric_column('price', shape=[1, 2])
with ops.Graph().as_default():
features = {'price': [[[1., 2.]], [[5., 6.]]]}
predictions = get_keras_linear_model_predictions(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([0.], bias.eval())
self.assertAllClose([[0.], [0.]], price_var.eval())
self.assertAllClose([[0.], [0.]], predictions.eval())
sess.run(price_var.assign([[10.], [100.]]))
self.assertAllClose([[210.], [650.]], predictions.eval())
def test_dense_multi_column(self):
price1 = fc_old.numeric_column('price1', shape=2)
price2 = fc_old.numeric_column('price2')
with ops.Graph().as_default():
features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
predictions = get_keras_linear_model_predictions(features,
[price1, price2])
bias = get_linear_model_bias()
price1_var = get_linear_model_column_var(price1)
price2_var = get_linear_model_column_var(price2)
with _initialized_session() as sess:
self.assertAllClose([0.], bias.eval())
self.assertAllClose([[0.], [0.]], price1_var.eval())
self.assertAllClose([[0.]], price2_var.eval())
self.assertAllClose([[0.], [0.]], predictions.eval())
sess.run(price1_var.assign([[10.], [100.]]))
sess.run(price2_var.assign([[1000.]]))
sess.run(bias.assign([7.]))
self.assertAllClose([[3217.], [4657.]], predictions.eval())
def test_fills_cols_to_vars(self):
price1 = fc_old.numeric_column('price1', shape=2)
price2 = fc_old.numeric_column('price2')
with ops.Graph().as_default():
features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
cols_to_vars = {}
get_keras_linear_model_predictions(
features, [price1, price2], cols_to_vars=cols_to_vars)
bias = get_linear_model_bias()
price1_var = get_linear_model_column_var(price1)
price2_var = get_linear_model_column_var(price2)
self.assertAllEqual(cols_to_vars['bias'], [bias])
self.assertAllEqual(cols_to_vars[price1], [price1_var])
self.assertAllEqual(cols_to_vars[price2], [price2_var])
def test_fills_cols_to_vars_partitioned_variables(self):
price1 = fc_old.numeric_column('price1', shape=2)
price2 = fc_old.numeric_column('price2', shape=3)
with ops.Graph().as_default():
features = {
'price1': [[1., 2.], [6., 7.]],
'price2': [[3., 4., 5.], [8., 9., 10.]]
}
cols_to_vars = {}
with variable_scope.variable_scope(
'linear',
partitioner=partitioned_variables.fixed_size_partitioner(2, axis=0)):
get_keras_linear_model_predictions(
features, [price1, price2], cols_to_vars=cols_to_vars)
with _initialized_session():
self.assertEqual([0.], cols_to_vars['bias'][0].eval())
# Partitioning shards the [2, 1] price1 var into 2 [1, 1] Variables.
self.assertAllEqual([[0.]], cols_to_vars[price1][0].eval())
self.assertAllEqual([[0.]], cols_to_vars[price1][1].eval())
# Partitioning shards the [3, 1] price2 var into a [2, 1] Variable and
# a [1, 1] Variable.
self.assertAllEqual([[0.], [0.]], cols_to_vars[price2][0].eval())
self.assertAllEqual([[0.]], cols_to_vars[price2][1].eval())
def test_dense_collection(self):
price = fc_old.numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
get_keras_linear_model_predictions(
features, [price], weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
self.assertIn(bias, my_vars)
self.assertIn(price_var, my_vars)
def test_sparse_collection(self):
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
get_keras_linear_model_predictions(
features, [wire_cast], weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
self.assertIn(bias, my_vars)
self.assertIn(wire_cast_var, my_vars)
def test_dense_trainable_default(self):
price = fc_old.numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
get_keras_linear_model_predictions(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertIn(bias, trainable_vars)
self.assertIn(price_var, trainable_vars)
def test_sparse_trainable_default(self):
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
get_keras_linear_model_predictions(features, [wire_cast])
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
self.assertIn(bias, trainable_vars)
self.assertIn(wire_cast_var, trainable_vars)
def test_dense_trainable_false(self):
price = fc_old.numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
get_keras_linear_model_predictions(features, [price], trainable=False)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual([], trainable_vars)
def test_sparse_trainable_false(self):
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
get_keras_linear_model_predictions(features, [wire_cast], trainable=False)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual([], trainable_vars)
def test_column_order(self):
price_a = fc_old.numeric_column('price_a')
price_b = fc_old.numeric_column('price_b')
wire_cast = fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
features = {
'price_a': [[1.]],
'price_b': [[3.]],
'wire_cast':
sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
}
get_keras_linear_model_predictions(
features, [price_a, wire_cast, price_b],
weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
self.assertIn('price_a', my_vars[0].name)
self.assertIn('price_b', my_vars[1].name)
self.assertIn('wire_cast', my_vars[2].name)
with ops.Graph().as_default() as g:
features = {
'price_a': [[1.]],
'price_b': [[3.]],
'wire_cast':
sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
}
get_keras_linear_model_predictions(
features, [wire_cast, price_b, price_a],
weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
self.assertIn('price_a', my_vars[0].name)
self.assertIn('price_b', my_vars[1].name)
self.assertIn('wire_cast', my_vars[2].name)
def test_static_batch_size_mismatch(self):
price1 = fc_old.numeric_column('price1')
price2 = fc_old.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1.], [5.], [7.]], # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
with self.assertRaisesRegexp(
ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):
get_keras_linear_model_predictions(features, [price1, price2])
def test_subset_of_static_batch_size_mismatch(self):
price1 = fc_old.numeric_column('price1')
price2 = fc_old.numeric_column('price2')
price3 = fc_old.numeric_column('price3')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]], # batchsize = 2
'price3': [[3.], [4.], [5.]] # batchsize = 3
}
with self.assertRaisesRegexp(
ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):
get_keras_linear_model_predictions(features, [price1, price2, price3])
def test_runtime_batch_size_mismatch(self):
price1 = fc_old.numeric_column('price1')
price2 = fc_old.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
predictions = get_keras_linear_model_predictions(features,
[price1, price2])
with _initialized_session() as sess:
with self.assertRaisesRegexp(errors.OpError,
'must have the same size and shape'):
sess.run(
predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]})
def test_runtime_batch_size_matches(self):
price1 = fc_old.numeric_column('price1')
price2 = fc_old.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
}
predictions = get_keras_linear_model_predictions(features,
[price1, price2])
with _initialized_session() as sess:
sess.run(
predictions,
feed_dict={
features['price1']: [[1.], [5.]],
features['price2']: [[1.], [5.]],
})
def test_with_numpy_input_fn(self):
price = fc_old.numeric_column('price')
price_buckets = fc_old.bucketized_column(
price, boundaries=[
0.,
10.,
100.,
])
body_style = fc_old.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
input_fn = numpy_io.numpy_input_fn(
x={
'price': np.array([-1., 2., 13., 104.]),
'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
},
batch_size=2,
shuffle=False)
features = input_fn()
net = get_keras_linear_model_predictions(features,
[price_buckets, body_style])
# self.assertEqual(1 + 3 + 5, net.shape[1])
with _initialized_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
bias = get_linear_model_bias()
price_buckets_var = get_linear_model_column_var(price_buckets)
body_style_var = get_linear_model_column_var(body_style)
sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[10 - 1000 + 5.], [100 - 10 + 5.]], sess.run(net))
coord.request_stop()
coord.join(threads)
def test_with_1d_sparse_tensor(self):
price = fc_old.numeric_column('price')
price_buckets = fc_old.bucketized_column(
price, boundaries=[
0.,
10.,
100.,
])
body_style = fc_old.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
# Provides 1-dim tensor and dense tensor.
features = {
'price':
constant_op.constant([
-1.,
12.,
]),
'body-style':
sparse_tensor.SparseTensor(
indices=((0,), (1,)),
values=('sedan', 'hardtop'),
dense_shape=(2,)),
}
self.assertEqual(1, features['price'].shape.ndims)
self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
net = get_keras_linear_model_predictions(features,
[price_buckets, body_style])
with _initialized_session() as sess:
bias = get_linear_model_bias()
price_buckets_var = get_linear_model_column_var(price_buckets)
body_style_var = get_linear_model_column_var(body_style)
sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]], sess.run(net))
def test_with_1d_unknown_shape_sparse_tensor(self):
price = fc_old.numeric_column('price')
price_buckets = fc_old.bucketized_column(
price, boundaries=[
0.,
10.,
100.,
])
body_style = fc_old.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
country = fc_old.categorical_column_with_vocabulary_list(
'country', vocabulary_list=['US', 'JP', 'CA'])
# Provides 1-dim tensor and dense tensor.
features = {
'price': array_ops.placeholder(dtypes.float32),
'body-style': array_ops.sparse_placeholder(dtypes.string),
'country': array_ops.placeholder(dtypes.string),
}
self.assertIsNone(features['price'].shape.ndims)
self.assertIsNone(features['body-style'].get_shape().ndims)
price_data = np.array([-1., 12.])
body_style_data = sparse_tensor.SparseTensorValue(
indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
country_data = np.array(['US', 'CA'])
net = get_keras_linear_model_predictions(
features, [price_buckets, body_style, country])
bias = get_linear_model_bias()
price_buckets_var = get_linear_model_column_var(price_buckets)
body_style_var = get_linear_model_column_var(body_style)
with _initialized_session() as sess:
sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
sess.run(
net,
feed_dict={
features['price']: price_data,
features['body-style']: body_style_data,
features['country']: country_data
}))
def test_with_rank_0_feature(self):
price = fc_old.numeric_column('price')
features = {
'price': constant_op.constant(0),
}
self.assertEqual(0, features['price'].shape.ndims)
# Static rank 0 should fail
with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
get_keras_linear_model_predictions(features, [price])
# Dynamic rank 0 should fail
features = {
'price': array_ops.placeholder(dtypes.float32),
}
net = get_keras_linear_model_predictions(features, [price])
self.assertEqual(1, net.shape[1])
with _initialized_session() as sess:
with self.assertRaisesOpError('Feature .* cannot have rank 0'):
sess.run(net, feed_dict={features['price']: np.array(1)})
class InputLayerTest(test.TestCase):
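  """Tests the InputLayer class in eager and graph modes."""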
@test_util.run_in_graph_and_eager_modes()
def test_retrieving_input(self):
features = {'a': [0.]}
input_layer = InputLayer(fc_old.numeric_column('a'))
inputs = self.evaluate(input_layer(features))
self.assertAllClose([[0.]], inputs)
def test_reuses_variables(self):
with context.eager_mode():
sparse_input = sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (2, 0)),
values=(0, 1, 2),
dense_shape=(3, 3))
# Create feature columns (categorical and embedding).
categorical_column = fc_old.categorical_column_with_identity(
key='a', num_buckets=3)
embedding_dimension = 2
def _embedding_column_initializer(shape, dtype, partition_info):
del shape # unused
del dtype # unused
del partition_info # unused
embedding_values = (
(1, 0), # id 0
(0, 1), # id 1
(1, 1)) # id 2
return embedding_values
embedding_column = fc_old.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_embedding_column_initializer)
input_layer = InputLayer([embedding_column])
features = {'a': sparse_input}
inputs = input_layer(features)
variables = input_layer.variables
# Sanity check: test that the inputs are correct.
self.assertAllEqual([[1, 0], [0, 1], [1, 1]], inputs)
# Check that only one variable was created.
self.assertEqual(1, len(variables))
# Check that invoking input_layer on the same features does not create
# additional variables
_ = input_layer(features)
self.assertEqual(1, len(variables))
self.assertEqual(variables[0], input_layer.variables[0])
def test_feature_column_input_layer_gradient(self):
with context.eager_mode():
sparse_input = sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (2, 0)),
values=(0, 1, 2),
dense_shape=(3, 3))
# Create feature columns (categorical and embedding).
categorical_column = fc_old.categorical_column_with_identity(
key='a', num_buckets=3)
embedding_dimension = 2
def _embedding_column_initializer(shape, dtype, partition_info):
del shape # unused
del dtype # unused
del partition_info # unused
embedding_values = (
(1, 0), # id 0
(0, 1), # id 1
(1, 1)) # id 2
return embedding_values
embedding_column = fc_old.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_embedding_column_initializer)
input_layer = InputLayer([embedding_column])
features = {'a': sparse_input}
def scale_matrix():
matrix = input_layer(features)
return 2 * matrix
# Sanity check: Verify that scale_matrix returns the correct output.
self.assertAllEqual([[2, 0], [0, 2], [2, 2]], scale_matrix())
# Check that the returned gradient is correct.
grad_function = backprop.implicit_grad(scale_matrix)
grads_and_vars = grad_function()
indexed_slice = grads_and_vars[0][0]
gradient = grads_and_vars[0][0].values
self.assertAllEqual([0, 1, 2], indexed_slice.indices)
self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient)
class FunctionalInputLayerTest(test.TestCase):
def test_raises_if_empty_feature_columns(self):
with self.assertRaisesRegexp(ValueError,
'feature_columns must not be empty'):
fc.input_layer(features={}, feature_columns=[])
def test_should_be_dense_column(self):
with self.assertRaisesRegexp(ValueError, 'must be a _DenseColumn'):
fc.input_layer(
features={'a': [[0]]},
feature_columns=[
fc_old.categorical_column_with_hash_bucket('wire_cast', 4)
])
def test_does_not_support_dict_columns(self):
with self.assertRaisesRegexp(
ValueError, 'Expected feature_columns to be iterable, found dict.'):
fc.input_layer(
features={'a': [[0]]},
feature_columns={'a': fc_old.numeric_column('a')})
def test_bare_column(self):
with ops.Graph().as_default():
      features = {'a': [0.]}
net = fc.input_layer(features, fc_old.numeric_column('a'))
with _initialized_session():
self.assertAllClose([[0.]], net.eval())
def test_column_generator(self):
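    # feature_columns may be any iterable, including a generator
    # expression, not just a list.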
with ops.Graph().as_default():
      features = {'a': [0.], 'b': [1.]}
columns = (fc_old.numeric_column(key) for key in features)
net = fc.input_layer(features, columns)
with _initialized_session():
self.assertAllClose([[0., 1.]], net.eval())
def test_raises_if_duplicate_name(self):
with self.assertRaisesRegexp(
ValueError, 'Duplicate feature column name found for columns'):
fc.input_layer(
features={'a': [[0]]},
feature_columns=[
fc_old.numeric_column('a'),
fc_old.numeric_column('a')
])
def test_one_column(self):
price = fc_old.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
net = fc.input_layer(features, [price])
with _initialized_session():
self.assertAllClose([[1.], [5.]], net.eval())
def test_multi_dimension(self):
price = fc_old.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
net = fc.input_layer(features, [price])
with _initialized_session():
self.assertAllClose([[1., 2.], [5., 6.]], net.eval())
def test_raises_if_shape_mismatch(self):
price = fc_old.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
with self.assertRaisesRegexp(
Exception,
r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
fc.input_layer(features, [price])
def test_reshaping(self):
price = fc_old.numeric_column('price', shape=[1, 2])
with ops.Graph().as_default():
features = {'price': [[[1., 2.]], [[5., 6.]]]}
net = fc.input_layer(features, [price])
with _initialized_session():
self.assertAllClose([[1., 2.], [5., 6.]], net.eval())
def test_multi_column(self):
price1 = fc_old.numeric_column('price1', shape=2)
price2 = fc_old.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1., 2.], [5., 6.]],
'price2': [[3.], [4.]]
}
net = fc.input_layer(features, [price1, price2])
with _initialized_session():
self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], net.eval())
def test_fills_cols_to_vars(self):
# Provide three _DenseColumn's to input_layer: a _NumericColumn, a
# _BucketizedColumn, and an _EmbeddingColumn. Only the _EmbeddingColumn
# creates a Variable.
price1 = fc_old.numeric_column('price1')
dense_feature = fc_old.numeric_column('dense_feature')
dense_feature_bucketized = fc_old.bucketized_column(
dense_feature, boundaries=[0.])
some_sparse_column = fc_old.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5)
some_embedding_column = fc_old.embedding_column(
some_sparse_column, dimension=10)
with ops.Graph().as_default():
features = {
'price1': [[3.], [4.]],
'dense_feature': [[-1.], [4.]],
'sparse_feature': [['a'], ['x']],
}
cols_to_vars = {}
all_cols = [price1, dense_feature_bucketized, some_embedding_column]
fc.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
self.assertItemsEqual(list(cols_to_vars.keys()), all_cols)
self.assertEqual(0, len(cols_to_vars[price1]))
self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
self.assertEqual(1, len(cols_to_vars[some_embedding_column]))
self.assertIsInstance(cols_to_vars[some_embedding_column][0],
variables_lib.Variable)
self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [5, 10])
def test_fills_cols_to_vars_partitioned_variables(self):
price1 = fc_old.numeric_column('price1')
dense_feature = fc_old.numeric_column('dense_feature')
dense_feature_bucketized = fc_old.bucketized_column(
dense_feature, boundaries=[0.])
some_sparse_column = fc_old.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5)
some_embedding_column = fc_old.embedding_column(
some_sparse_column, dimension=10)
with ops.Graph().as_default():
features = {
'price1': [[3.], [4.]],
'dense_feature': [[-1.], [4.]],
'sparse_feature': [['a'], ['x']],
}
cols_to_vars = {}
all_cols = [price1, dense_feature_bucketized, some_embedding_column]
with variable_scope.variable_scope(
'input_from_feature_columns',
partitioner=partitioned_variables.fixed_size_partitioner(3, axis=0)):
fc.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
self.assertItemsEqual(list(cols_to_vars.keys()), all_cols)
self.assertEqual(0, len(cols_to_vars[price1]))
self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
self.assertEqual(3, len(cols_to_vars[some_embedding_column]))
self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [2, 10])
self.assertAllEqual(cols_to_vars[some_embedding_column][1].shape, [2, 10])
self.assertAllEqual(cols_to_vars[some_embedding_column][2].shape, [1, 10])
def test_column_order(self):
price_a = fc_old.numeric_column('price_a')
price_b = fc_old.numeric_column('price_b')
with ops.Graph().as_default():
features = {
'price_a': [[1.]],
'price_b': [[3.]],
}
net1 = fc.input_layer(features, [price_a, price_b])
net2 = fc.input_layer(features, [price_b, price_a])
with _initialized_session():
self.assertAllClose([[1., 3.]], net1.eval())
self.assertAllClose([[1., 3.]], net2.eval())
def test_fails_for_categorical_column(self):
animal = fc_old.categorical_column_with_identity('animal', num_buckets=4)
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
with self.assertRaisesRegexp(Exception, 'must be a _DenseColumn'):
fc.input_layer(features, [animal])
def test_static_batch_size_mismatch(self):
price1 = fc_old.numeric_column('price1')
price2 = fc_old.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1.], [5.], [7.]], # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
with self.assertRaisesRegexp(
ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):
fc.input_layer(features, [price1, price2])
def test_subset_of_static_batch_size_mismatch(self):
price1 = fc_old.numeric_column('price1')
price2 = fc_old.numeric_column('price2')
price3 = fc_old.numeric_column('price3')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]], # batchsize = 2
'price3': [[3.], [4.], [5.]] # batchsize = 3
}
with self.assertRaisesRegexp(
ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):
fc.input_layer(features, [price1, price2, price3])
def test_runtime_batch_size_mismatch(self):
price1 = fc_old.numeric_column('price1')
price2 = fc_old.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
net = fc.input_layer(features, [price1, price2])
with _initialized_session() as sess:
with self.assertRaisesRegexp(errors.OpError,
'Dimensions of inputs should match'):
sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]})
def test_runtime_batch_size_matches(self):
price1 = fc_old.numeric_column('price1')
price2 = fc_old.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
}
net = fc.input_layer(features, [price1, price2])
with _initialized_session() as sess:
sess.run(
net,
feed_dict={
features['price1']: [[1.], [5.]],
features['price2']: [[1.], [5.]],
})
def test_multiple_layers_with_same_embedding_column(self):
some_sparse_column = fc_old.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5)
some_embedding_column = fc_old.embedding_column(
some_sparse_column, dimension=10)
with ops.Graph().as_default():
features = {
'sparse_feature': [['a'], ['x']],
}
all_cols = [some_embedding_column]
fc.input_layer(features, all_cols)
fc.input_layer(features, all_cols)
# Make sure that 2 variables get created in this case.
self.assertEqual(2, len(
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
expected_var_names = [
'input_layer/sparse_feature_embedding/embedding_weights:0',
'input_layer_1/sparse_feature_embedding/embedding_weights:0'
]
self.assertItemsEqual(
expected_var_names,
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
def test_multiple_layers_with_same_shared_embedding_column(self):
categorical_column_a = fc_old.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc_old.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
embedding_column_b, embedding_column_a = fc_old.shared_embedding_columns(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension)
with ops.Graph().as_default():
features = {
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
all_cols = [embedding_column_a, embedding_column_b]
fc.input_layer(features, all_cols)
fc.input_layer(features, all_cols)
# Make sure that only 1 variable gets created in this case.
self.assertEqual(1, len(
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
self.assertItemsEqual(
['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'],
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
def test_multiple_layers_with_same_shared_embedding_column_diff_graphs(self):
categorical_column_a = fc_old.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc_old.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
embedding_column_b, embedding_column_a = fc_old.shared_embedding_columns(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension)
all_cols = [embedding_column_a, embedding_column_b]
with ops.Graph().as_default():
features = {
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
fc.input_layer(features, all_cols)
# Make sure that only 1 variable gets created in this case.
self.assertEqual(1, len(
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
with ops.Graph().as_default():
features1 = {
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
fc.input_layer(features1, all_cols)
# Make sure that only 1 variable gets created in this case.
self.assertEqual(1, len(
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
self.assertItemsEqual(
['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'],
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
def test_with_numpy_input_fn(self):
embedding_values = (
(1., 2., 3., 4., 5.), # id 0
(6., 7., 8., 9., 10.), # id 1
(11., 12., 13., 14., 15.) # id 2
)
def _initializer(shape, dtype, partition_info):
del shape, dtype, partition_info
return embedding_values
# price has 1 dimension in input_layer
price = fc_old.numeric_column('price')
body_style = fc_old.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
# one_hot_body_style has 3 dims in input_layer.
one_hot_body_style = fc_old.indicator_column(body_style)
# embedded_body_style has 5 dims in input_layer.
embedded_body_style = fc_old.embedding_column(
body_style, dimension=5, initializer=_initializer)
input_fn = numpy_io.numpy_input_fn(
x={
'price': np.array([11., 12., 13., 14.]),
'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
},
batch_size=2,
shuffle=False)
features = input_fn()
net = fc.input_layer(features,
[price, one_hot_body_style, embedded_body_style])
self.assertEqual(1 + 3 + 5, net.shape[1])
with _initialized_session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
# Each row is formed by concatenating `embedded_body_style`,
# `one_hot_body_style`, and `price` in order.
self.assertAllEqual(
[[11., 12., 13., 14., 15., 0., 0., 1., 11.],
[1., 2., 3., 4., 5., 1., 0., 0., 12]],
sess.run(net))
coord.request_stop()
coord.join(threads)
def test_with_1d_sparse_tensor(self):
embedding_values = (
(1., 2., 3., 4., 5.), # id 0
(6., 7., 8., 9., 10.), # id 1
(11., 12., 13., 14., 15.) # id 2
)
def _initializer(shape, dtype, partition_info):
del shape, dtype, partition_info
return embedding_values
# price has 1 dimension in input_layer
price = fc_old.numeric_column('price')
# one_hot_body_style has 3 dims in input_layer.
body_style = fc_old.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
one_hot_body_style = fc_old.indicator_column(body_style)
# embedded_body_style has 5 dims in input_layer.
country = fc_old.categorical_column_with_vocabulary_list(
'country', vocabulary_list=['US', 'JP', 'CA'])
embedded_country = fc_old.embedding_column(
country, dimension=5, initializer=_initializer)
# Provides 1-dim tensor and dense tensor.
features = {
'price': constant_op.constant([11., 12.,]),
'body-style': sparse_tensor.SparseTensor(
indices=((0,), (1,)),
values=('sedan', 'hardtop'),
dense_shape=(2,)),
# This is dense tensor for the categorical_column.
'country': constant_op.constant(['CA', 'US']),
}
self.assertEqual(1, features['price'].shape.ndims)
self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
self.assertEqual(1, features['country'].shape.ndims)
net = fc.input_layer(features,
[price, one_hot_body_style, embedded_country])
self.assertEqual(1 + 3 + 5, net.shape[1])
with _initialized_session() as sess:
# Each row is formed by concatenating `embedded_body_style`,
# `one_hot_body_style`, and `price` in order.
self.assertAllEqual(
[[0., 0., 1., 11., 12., 13., 14., 15., 11.],
[1., 0., 0., 1., 2., 3., 4., 5., 12.]],
sess.run(net))
def test_with_1d_unknown_shape_sparse_tensor(self):
embedding_values = (
(1., 2.), # id 0
(6., 7.), # id 1
(11., 12.) # id 2
)
def _initializer(shape, dtype, partition_info):
del shape, dtype, partition_info
return embedding_values
# price has 1 dimension in input_layer
price = fc_old.numeric_column('price')
# one_hot_body_style has 3 dims in input_layer.
body_style = fc_old.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
one_hot_body_style = fc_old.indicator_column(body_style)
# embedded_body_style has 5 dims in input_layer.
country = fc_old.categorical_column_with_vocabulary_list(
'country', vocabulary_list=['US', 'JP', 'CA'])
embedded_country = fc_old.embedding_column(
country, dimension=2, initializer=_initializer)
# Provides 1-dim tensor and dense tensor.
features = {
'price': array_ops.placeholder(dtypes.float32),
'body-style': array_ops.sparse_placeholder(dtypes.string),
        # This is a dense tensor for the categorical_column.
'country': array_ops.placeholder(dtypes.string),
}
self.assertIsNone(features['price'].shape.ndims)
self.assertIsNone(features['body-style'].get_shape().ndims)
self.assertIsNone(features['country'].shape.ndims)
price_data = np.array([11., 12.])
body_style_data = sparse_tensor.SparseTensorValue(
indices=((0,), (1,)),
values=('sedan', 'hardtop'),
dense_shape=(2,))
country_data = np.array([['US'], ['CA']])
net = fc.input_layer(features,
[price, one_hot_body_style, embedded_country])
self.assertEqual(1 + 3 + 2, net.shape[1])
with _initialized_session() as sess:
      # Each row is formed by concatenating `one_hot_body_style`,
      # `embedded_country`, and `price` in order.
self.assertAllEqual(
[[0., 0., 1., 1., 2., 11.], [1., 0., 0., 11., 12., 12.]],
sess.run(
net,
feed_dict={
features['price']: price_data,
features['body-style']: body_style_data,
features['country']: country_data
}))
def test_with_rank_0_feature(self):
# price has 1 dimension in input_layer
price = fc_old.numeric_column('price')
features = {
'price': constant_op.constant(0),
}
self.assertEqual(0, features['price'].shape.ndims)
# Static rank 0 should fail
with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
fc.input_layer(features, [price])
# Dynamic rank 0 should fail
features = {
'price': array_ops.placeholder(dtypes.float32),
}
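    # With an unknown-rank placeholder the rank check cannot run at graph
    # construction time, so building the layer succeeds and the error only
    # surfaces when the rank-0 value is fed in sess.run below.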
net = fc.input_layer(features, [price])
self.assertEqual(1, net.shape[1])
with _initialized_session() as sess:
with self.assertRaisesOpError('Feature .* cannot have rank 0'):
sess.run(net, feed_dict={features['price']: np.array(1)})
class MakeParseExampleSpecTest(test.TestCase):
class _TestFeatureColumn(FeatureColumn,
collections.namedtuple('_TestFeatureColumn',
('parse_spec'))):
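    """Fake FeatureColumn with a fixed parse_spec, for spec-merging tests."""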
@property
def name(self):
return "_TestFeatureColumn"
def transform_feature(self, transformation_cache, state_manager):
pass
@property
def parse_example_spec(self):
return self.parse_spec
def test_no_feature_columns(self):
actual = fc.make_parse_example_spec([])
self.assertDictEqual({}, actual)
def test_invalid_type(self):
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
with self.assertRaisesRegexp(
ValueError,
'All feature_columns must be FeatureColumn instances.*invalid_column'):
fc.make_parse_example_spec(
(self._TestFeatureColumn({key1: parse_spec1}), 'invalid_column'))
def test_one_feature_column(self):
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
actual = fc.make_parse_example_spec(
(self._TestFeatureColumn({key1: parse_spec1}),))
self.assertDictEqual({key1: parse_spec1}, actual)
def test_two_feature_columns(self):
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
key2 = 'key2'
parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
actual = fc.make_parse_example_spec(
(self._TestFeatureColumn({key1: parse_spec1}),
self._TestFeatureColumn({key2: parse_spec2})))
self.assertDictEqual({key1: parse_spec1, key2: parse_spec2}, actual)
def test_equal_keys_different_parse_spec(self):
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
with self.assertRaisesRegexp(
ValueError,
'feature_columns contain different parse_spec for key key1'):
fc.make_parse_example_spec(
(self._TestFeatureColumn({key1: parse_spec1}),
self._TestFeatureColumn({key1: parse_spec2})))
def test_equal_keys_equal_parse_spec(self):
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
actual = fc.make_parse_example_spec(
(self._TestFeatureColumn({key1: parse_spec1}),
self._TestFeatureColumn({key1: parse_spec1})))
self.assertDictEqual({key1: parse_spec1}, actual)
def test_multiple_features_dict(self):
"""parse_spc for one column is a dict with length > 1."""
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
key2 = 'key2'
parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
key3 = 'key3'
parse_spec3 = parsing_ops.VarLenFeature(dtype=dtypes.int32)
actual = fc.make_parse_example_spec(
(self._TestFeatureColumn({key1: parse_spec1}),
self._TestFeatureColumn({key2: parse_spec2, key3: parse_spec3})))
self.assertDictEqual(
{key1: parse_spec1, key2: parse_spec2, key3: parse_spec3}, actual)
def _assert_sparse_tensor_value(test_case, expected, actual):
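  """Asserts `actual` matches `expected` in indices, values, dtype and shape."""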
test_case.assertEqual(np.int64, np.array(actual.indices).dtype)
test_case.assertAllEqual(expected.indices, actual.indices)
test_case.assertEqual(
np.array(expected.values).dtype, np.array(actual.values).dtype)
test_case.assertAllEqual(expected.values, actual.values)
test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)
class VocabularyFileCategoricalColumnTest(test.TestCase):
def setUp(self):
super(VocabularyFileCategoricalColumnTest, self).setUp()
# Contains ints, Golden State Warriors jersey numbers: 30, 35, 11, 23, 22
self._warriors_vocabulary_file_name = test.test_src_dir_path(
'python/feature_column/testdata/warriors_vocabulary.txt')
self._warriors_vocabulary_size = 5
# Contains strings, character names from 'The Wire': omar, stringer, marlo
self._wire_vocabulary_file_name = test.test_src_dir_path(
'python/feature_column/testdata/wire_vocabulary.txt')
self._wire_vocabulary_size = 3
def test_defaults(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
self.assertEqual('aaa', column.name)
self.assertEqual('aaa', column.key)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.string)
}, column.parse_example_spec)
def test_key_should_be_string(self):
with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
fc.categorical_column_with_vocabulary_file(
key=('aaa',), vocabulary_file='path_to_file', vocabulary_size=3)
def test_all_constructor_args(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file='path_to_file', vocabulary_size=3,
num_oov_buckets=4, dtype=dtypes.int32)
self.assertEqual(7, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int32)
}, column.parse_example_spec)
def test_deep_copy(self):
original = fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file='path_to_file', vocabulary_size=3,
num_oov_buckets=4, dtype=dtypes.int32)
for column in (original, copy.deepcopy(original)):
self.assertEqual('aaa', column.name)
self.assertEqual(7, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int32)
}, column.parse_example_spec)
def test_vocabulary_file_none(self):
with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'):
fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file=None, vocabulary_size=3)
def test_vocabulary_file_empty_string(self):
with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'):
fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file='', vocabulary_size=3)
def test_invalid_vocabulary_file(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file='file_does_not_exist', vocabulary_size=10)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
column.get_sparse_tensors(FeatureTransformationCache({'aaa': inputs}), None)
with self.assertRaisesRegexp(errors.OpError, 'file_does_not_exist'):
with self.test_session():
lookup_ops.tables_initializer().run()
def test_invalid_vocabulary_size(self):
with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'):
fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=-1)
with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'):
fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=0)
def test_too_large_vocabulary_size(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size + 1)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
column.get_sparse_tensors(FeatureTransformationCache({'aaa': inputs}), None)
with self.assertRaisesRegexp(errors.OpError, 'Invalid vocab_size'):
with self.test_session():
lookup_ops.tables_initializer().run()
def test_invalid_num_oov_buckets(self):
with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'):
fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file='path', vocabulary_size=3,
num_oov_buckets=-1)
def test_invalid_dtype(self):
with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file='path', vocabulary_size=3,
dtype=dtypes.float64)
def test_invalid_buckets_and_default_value(self):
with self.assertRaisesRegexp(
ValueError, 'both num_oov_buckets and default_value'):
fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
num_oov_buckets=100,
default_value=2)
def test_invalid_input_dtype_int32(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
dtype=dtypes.string)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(12, 24, 36),
dense_shape=(2, 2))
with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
def test_invalid_input_dtype_string(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._warriors_vocabulary_file_name,
vocabulary_size=self._warriors_vocabulary_size,
dtype=dtypes.int32)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('omar', 'stringer', 'marlo'),
dense_shape=(2, 2))
with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
def test_parse_example(self):
a = fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer']))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec([a]))
self.assertIn('aaa', features)
with self.test_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]),
features['aaa'].eval())
def test_get_sparse_tensors(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_none_vocabulary_size(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa', vocabulary_file=self._wire_vocabulary_file_name)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array(
(2, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_transform_feature(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_tensor = _transform_features({'aaa': inputs}, [column], None)[column]
with _initialized_session():
_assert_sparse_tensor_value(self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array(
(2, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_tensor.eval())
def DISABLED_test_get_sparse_tensors_weight_collections(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
inputs = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}),
weight_collections=('my_weights',))
self.assertItemsEqual(
[], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
self.assertItemsEqual([], ops.get_collection('my_weights'))
def test_get_sparse_tensors_dense_input(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': (('marlo', ''), ('skywalker', 'omar'))
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=(2, 2)),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_default_value_in_vocabulary(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
default_value=2)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 2, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_with_oov_buckets(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
num_oov_buckets=100)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (1, 2)),
values=('marlo', 'skywalker', 'omar', 'heisenberg'),
dense_shape=(2, 3))
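    # 'marlo' and 'omar' are in the vocabulary (ids 2 and 0); the OOV values
    # 'skywalker' and 'heisenberg' are hashed into one of the 100 extra
    # buckets, giving ids 33 and 62 in the range [3, 103).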
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 33, 0, 62), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_small_vocabulary_size(self):
    # 'marlo' is the last entry in our vocabulary file, so by setting
    # `vocabulary_size` to 1 less than the number of entries in the file, we
    # take 'marlo' out of the vocabulary.
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size - 1)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((-1, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_int32(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._warriors_vocabulary_file_name,
vocabulary_size=self._warriors_vocabulary_size,
dtype=dtypes.int32)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=(11, 100, 30, 22),
dense_shape=(3, 3))
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0, 4), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_int32_dense_input(self):
default_value = -100
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._warriors_vocabulary_file_name,
vocabulary_size=self._warriors_vocabulary_size,
dtype=dtypes.int32,
default_value=default_value)
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': ((11, -1, -1), (100, 30, -1), (-1, -1, 22))
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=np.array((2, default_value, 0, 4), dtype=np.int64),
dense_shape=(3, 3)),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_int32_with_oov_buckets(self):
column = fc.categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._warriors_vocabulary_file_name,
vocabulary_size=self._warriors_vocabulary_size,
dtype=dtypes.int32,
num_oov_buckets=100)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=(11, 100, 30, 22),
dense_shape=(3, 3))
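    # 11, 30 and 22 are in the vocabulary (ids 2, 0 and 4); 100 is OOV and is
    # hashed into one of the 100 extra buckets, which gives id 60 here.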
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 60, 0, 4), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_linear_model(self):
wire_column = fc_old.categorical_column_with_vocabulary_file(
key='wire',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
num_oov_buckets=1)
self.assertEqual(4, wire_column._num_buckets)
with ops.Graph().as_default():
predictions = fc.linear_model({
wire_column.name: sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
}, (wire_column,))
bias = get_linear_model_bias()
wire_var = get_linear_model_column_var(wire_column)
with _initialized_session():
self.assertAllClose((0.,), bias.eval())
self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
self.assertAllClose(((0.,), (0.,)), predictions.eval())
wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
# 'marlo' -> 2: wire_var[2] = 3
# 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
self.assertAllClose(((3.,), (5.,)), predictions.eval())
def test_keras_linear_model(self):
wire_column = fc_old.categorical_column_with_vocabulary_file(
key='wire',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size,
num_oov_buckets=1)
self.assertEqual(4, wire_column._num_buckets)
with ops.Graph().as_default():
predictions = get_keras_linear_model_predictions({
wire_column.name:
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
}, (wire_column,))
bias = get_linear_model_bias()
wire_var = get_linear_model_column_var(wire_column)
with _initialized_session():
self.assertAllClose((0.,), bias.eval())
self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
self.assertAllClose(((0.,), (0.,)), predictions.eval())
wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
# 'marlo' -> 2: wire_var[2] = 3
# 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
self.assertAllClose(((3.,), (5.,)), predictions.eval())
class VocabularyListCategoricalColumnTest(test.TestCase):
def test_defaults_string(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
self.assertEqual('aaa', column.name)
self.assertEqual('aaa', column.key)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.string)
}, column.parse_example_spec)
def test_key_should_be_string(self):
with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
fc.categorical_column_with_vocabulary_list(
key=('aaa',), vocabulary_list=('omar', 'stringer', 'marlo'))
def test_defaults_int(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36))
self.assertEqual('aaa', column.name)
self.assertEqual('aaa', column.key)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, column.parse_example_spec)
def test_all_constructor_args(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.int32,
default_value=-99)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int32)
}, column.parse_example_spec)
def test_deep_copy(self):
original = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.int32)
for column in (original, copy.deepcopy(original)):
self.assertEqual('aaa', column.name)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int32)
}, column.parse_example_spec)
def test_invalid_dtype(self):
with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'),
dtype=dtypes.float32)
def test_invalid_mapping_dtype(self):
with self.assertRaisesRegexp(
ValueError, r'vocabulary dtype must be string or integer'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12., 24., 36.))
def test_mismatched_int_dtype(self):
with self.assertRaisesRegexp(
ValueError, r'dtype.*and vocabulary dtype.*do not match'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'),
dtype=dtypes.int32)
def test_mismatched_string_dtype(self):
with self.assertRaisesRegexp(
ValueError, r'dtype.*and vocabulary dtype.*do not match'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.string)
def test_none_mapping(self):
with self.assertRaisesRegexp(
ValueError, r'vocabulary_list.*must be non-empty'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=None)
def test_empty_mapping(self):
with self.assertRaisesRegexp(
ValueError, r'vocabulary_list.*must be non-empty'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=tuple([]))
def test_duplicate_mapping(self):
with self.assertRaisesRegexp(ValueError, 'Duplicate keys'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 12))
def test_invalid_num_oov_buckets(self):
with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36),
num_oov_buckets=-1)
def test_invalid_buckets_and_default_value(self):
with self.assertRaisesRegexp(
ValueError, 'both num_oov_buckets and default_value'):
fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=(12, 24, 36),
num_oov_buckets=100,
default_value=2)
def test_invalid_input_dtype_int32(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(12, 24, 36),
dense_shape=(2, 2))
with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
def test_invalid_input_dtype_string(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=(12, 24, 36))
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('omar', 'stringer', 'marlo'),
dense_shape=(2, 2))
with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
def test_parse_example_string(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer']))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec([a]))
self.assertIn('aaa', features)
with self.test_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]),
features['aaa'].eval())
def test_parse_example_int(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(11, 21, 31))
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[11, 21]))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec([a]))
self.assertIn('aaa', features)
with self.test_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=[11, 21],
dense_shape=[1, 2]),
features['aaa'].eval())
def test_get_sparse_tensors(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_transform_feature(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_tensor = _transform_features({'aaa': inputs}, [column], None)[column]
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_tensor.eval())
def DISABLED_test_get_sparse_tensors_weight_collections(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
inputs = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}),
weight_collections=('my_weights',))
self.assertItemsEqual(
[], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
self.assertItemsEqual([], ops.get_collection('my_weights'))
def test_get_sparse_tensors_dense_input(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': (('marlo', ''), ('skywalker', 'omar'))
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=(2, 2)),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_default_value_in_vocabulary(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'),
default_value=2)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 2, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_with_oov_buckets(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'),
num_oov_buckets=100)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (1, 2)),
values=('marlo', 'skywalker', 'omar', 'heisenberg'),
dense_shape=(2, 3))
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 33, 0, 62), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_int32(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
dtype=dtypes.int32)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=np.array((11, 100, 30, 22), dtype=np.int32),
dense_shape=(3, 3))
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0, 4), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_int32_dense_input(self):
default_value = -100
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
dtype=dtypes.int32,
default_value=default_value)
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa':
np.array(
((11, -1, -1), (100, 30, -1), (-1, -1, 22)), dtype=np.int32)
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=np.array((2, default_value, 0, 4), dtype=np.int64),
dense_shape=(3, 3)),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_int32_with_oov_buckets(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
dtype=dtypes.int32,
num_oov_buckets=100)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=(11, 100, 30, 22),
dense_shape=(3, 3))
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 60, 0, 4), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_linear_model(self):
wire_column = fc_old.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'),
num_oov_buckets=1)
self.assertEqual(4, wire_column._num_buckets)
with ops.Graph().as_default():
predictions = fc.linear_model({
wire_column.name: sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
}, (wire_column,))
bias = get_linear_model_bias()
wire_var = get_linear_model_column_var(wire_column)
with _initialized_session():
self.assertAllClose((0.,), bias.eval())
self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
self.assertAllClose(((0.,), (0.,)), predictions.eval())
wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
# 'marlo' -> 2: wire_var[2] = 3
# 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
self.assertAllClose(((3.,), (5.,)), predictions.eval())
def test_keras_linear_model(self):
wire_column = fc_old.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'),
num_oov_buckets=1)
self.assertEqual(4, wire_column._num_buckets)
with ops.Graph().as_default():
predictions = get_keras_linear_model_predictions({
wire_column.name:
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
}, (wire_column,))
bias = get_linear_model_bias()
wire_var = get_linear_model_column_var(wire_column)
with _initialized_session():
self.assertAllClose((0.,), bias.eval())
self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
self.assertAllClose(((0.,), (0.,)), predictions.eval())
wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
# 'marlo' -> 2: wire_var[2] = 3
# 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
self.assertAllClose(((3.,), (5.,)), predictions.eval())
class IdentityCategoricalColumnTest(test.TestCase):
def test_constructor(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
self.assertEqual('aaa', column.name)
self.assertEqual('aaa', column.key)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, column.parse_example_spec)
def test_key_should_be_string(self):
with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
fc.categorical_column_with_identity(key=('aaa',), num_buckets=3)
def test_deep_copy(self):
original = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
for column in (original, copy.deepcopy(original)):
self.assertEqual('aaa', column.name)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, column.parse_example_spec)
def test_invalid_num_buckets_zero(self):
with self.assertRaisesRegexp(ValueError, 'num_buckets 0 < 1'):
fc.categorical_column_with_identity(key='aaa', num_buckets=0)
def test_invalid_num_buckets_negative(self):
with self.assertRaisesRegexp(ValueError, 'num_buckets -1 < 1'):
fc.categorical_column_with_identity(key='aaa', num_buckets=-1)
def test_invalid_default_value_too_small(self):
with self.assertRaisesRegexp(ValueError, 'default_value -1 not in range'):
fc.categorical_column_with_identity(
key='aaa', num_buckets=3, default_value=-1)
def test_invalid_default_value_too_big(self):
with self.assertRaisesRegexp(ValueError, 'default_value 3 not in range'):
fc.categorical_column_with_identity(
key='aaa', num_buckets=3, default_value=3)
def test_invalid_input_dtype(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('omar', 'stringer', 'marlo'),
dense_shape=(2, 2))
with self.assertRaisesRegexp(ValueError, 'Invalid input, not integer'):
column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
def test_parse_example(self):
a = fc.categorical_column_with_identity(key='aaa', num_buckets=30)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[11, 21]))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec([a]))
self.assertIn('aaa', features)
with self.test_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([11, 21], dtype=np.int64),
dense_shape=[1, 2]),
features['aaa'].eval())
def test_get_sparse_tensors(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((0, 1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_transform_feature(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2))
id_tensor = _transform_features({'aaa': inputs}, [column], None)[column]
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((0, 1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_tensor.eval())
def DISABLED_test_get_sparse_tensors_weight_collections(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2))
column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}),
weight_collections=('my_weights',))
self.assertItemsEqual(
[], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
self.assertItemsEqual([], ops.get_collection('my_weights'))
def test_get_sparse_tensors_dense_input(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': ((0, -1), (1, 0))
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=np.array((0, 1, 0), dtype=np.int64),
dense_shape=(2, 2)),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_with_inputs_too_small(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, -1, 0),
dense_shape=(2, 2))
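    # Without a default_value, the identity column asserts at run time that
    # every id is >= 0, so the -1 above raises an OpError on evaluation.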
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
with self.assertRaisesRegexp(
errors.OpError, 'assert_greater_or_equal_0'):
id_weight_pair.id_tensor.eval()
def test_get_sparse_tensors_with_inputs_too_big(self):
column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 99, 0),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
with self.assertRaisesRegexp(
errors.OpError, 'assert_less_than_num_buckets'):
id_weight_pair.id_tensor.eval()
def test_get_sparse_tensors_with_default_value(self):
column = fc.categorical_column_with_identity(
key='aaa', num_buckets=4, default_value=3)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, -1, 99),
dense_shape=(2, 2))
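    # With default_value=3, out-of-range ids (-1 and 99 here) are replaced by
    # 3 instead of tripping the range assertions used in the tests above.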
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((1, 3, 3), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_with_default_value_and_placeholder_inputs(self):
column = fc.categorical_column_with_identity(
key='aaa', num_buckets=4, default_value=3)
input_indices = array_ops.placeholder(dtype=dtypes.int64)
input_values = array_ops.placeholder(dtype=dtypes.int32)
input_shape = array_ops.placeholder(dtype=dtypes.int64)
inputs = sparse_tensor.SparseTensorValue(
indices=input_indices,
values=input_values,
dense_shape=input_shape)
id_weight_pair = column.get_sparse_tensors(
FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=np.array(((0, 0), (1, 0), (1, 1)), dtype=np.int64),
values=np.array((1, 3, 3), dtype=np.int64),
dense_shape=np.array((2, 2), dtype=np.int64)),
id_weight_pair.id_tensor.eval(feed_dict={
input_indices: ((0, 0), (1, 0), (1, 1)),
input_values: (1, -1, 99),
input_shape: (2, 2),
}))
def test_linear_model(self):
column = fc_old.categorical_column_with_identity(key='aaa', num_buckets=3)
self.assertEqual(3, column.num_buckets)
with ops.Graph().as_default():
predictions = fc.linear_model({
column.name: sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2))
}, (column,))
bias = get_linear_model_bias()
weight_var = get_linear_model_column_var(column)
with _initialized_session():
self.assertAllClose((0.,), bias.eval())
self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
self.assertAllClose(((0.,), (0.,)), predictions.eval())
weight_var.assign(((1.,), (2.,), (3.,))).eval()
# weight_var[0] = 1
# weight_var[2] + weight_var[1] = 3+2 = 5
self.assertAllClose(((1.,), (5.,)), predictions.eval())
def test_keras_linear_model(self):
column = fc_old.categorical_column_with_identity(key='aaa', num_buckets=3)
self.assertEqual(3, column.num_buckets)
with ops.Graph().as_default():
predictions = get_keras_linear_model_predictions({
column.name:
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2))
}, (column,))
bias = get_linear_model_bias()
weight_var = get_linear_model_column_var(column)
with _initialized_session():
self.assertAllClose((0.,), bias.eval())
self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
self.assertAllClose(((0.,), (0.,)), predictions.eval())
weight_var.assign(((1.,), (2.,), (3.,))).eval()
# weight_var[0] = 1
# weight_var[2] + weight_var[1] = 3+2 = 5
self.assertAllClose(((1.,), (5.,)), predictions.eval())
class TransformFeaturesTest(test.TestCase):
  # All transform tests are distributed across the column tests.
  # Here we only test the multi-column case and naming.
  def test_transform_multi_column(self):
bucketized_price = fc.bucketized_column(
fc.numeric_column('price'), boundaries=[0, 2, 4, 6])
hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
with ops.Graph().as_default():
features = {
'price': [[-1.], [5.]],
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
}
transformed = _transform_features(features,
[bucketized_price, hashed_sparse], None)
with _initialized_session():
self.assertIn(bucketized_price.name, transformed[bucketized_price].name)
self.assertAllEqual([[0], [3]], transformed[bucketized_price].eval())
self.assertIn(hashed_sparse.name, transformed[hashed_sparse].name)
self.assertAllEqual([6, 4, 1], transformed[hashed_sparse].values.eval())
def test_column_order(self):
"""When the column is both dense and sparse, uses sparse tensors."""
class _LoggerColumn(FeatureColumn):
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
def transform_feature(self, transformation_cache, state_manager):
self.call_order = call_logger['count']
call_logger['count'] += 1
return 'Anything'
@property
def parse_example_spec(self):
pass
with ops.Graph().as_default():
column1 = _LoggerColumn('1')
column2 = _LoggerColumn('2')
call_logger = {'count': 0}
_transform_features({}, [column1, column2], None)
self.assertEqual(0, column1.call_order)
self.assertEqual(1, column2.call_order)
call_logger = {'count': 0}
_transform_features({}, [column2, column1], None)
self.assertEqual(0, column1.call_order)
self.assertEqual(1, column2.call_order)
class IndicatorColumnTest(test.TestCase):
def test_indicator_column(self):
a = fc.categorical_column_with_hash_bucket('a', 4)
indicator_a = fc.indicator_column(a)
self.assertEqual(indicator_a.categorical_column.name, 'a')
self.assertEqual(indicator_a.name, 'a_indicator')
self.assertEqual(indicator_a.variable_shape, [1, 4])
b = fc.categorical_column_with_hash_bucket('b', hash_bucket_size=100)
indicator_b = fc.indicator_column(b)
self.assertEqual(indicator_b.categorical_column.name, 'b')
self.assertEqual(indicator_b.name, 'b_indicator')
self.assertEqual(indicator_b.variable_shape, [1, 100])
def test_1D_shape_succeeds(self):
animal = fc.indicator_column(
fc.categorical_column_with_hash_bucket('animal', 4))
transformation_cache = FeatureTransformationCache({
'animal': ['fox', 'fox']
})
output = transformation_cache.get(animal, None)
with self.test_session():
self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]], output.eval())
def test_2D_shape_succeeds(self):
    # TODO(ispir/cassandrax): Switch to categorical_column_with_keys when ready.
animal = fc.indicator_column(
fc.categorical_column_with_hash_bucket('animal', 4))
transformation_cache = FeatureTransformationCache({
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 0]],
values=['fox', 'fox'],
dense_shape=[2, 1])
})
output = transformation_cache.get(animal, None)
with self.test_session():
self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]], output.eval())
def test_multi_hot(self):
animal = fc.indicator_column(
fc.categorical_column_with_identity('animal', num_buckets=4))
transformation_cache = FeatureTransformationCache({
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 1], dense_shape=[1, 2])
})
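    # indicator_column sums the one-hot encodings within an example, so the
    # two occurrences of id 1 produce a count of 2 in that position.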
output = transformation_cache.get(animal, None)
with self.test_session():
self.assertAllEqual([[0., 2., 0., 0.]], output.eval())
def test_multi_hot2(self):
animal = fc.indicator_column(
fc.categorical_column_with_identity('animal', num_buckets=4))
transformation_cache = FeatureTransformationCache({
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
})
output = transformation_cache.get(animal, None)
with self.test_session():
self.assertAllEqual([[0., 1., 1., 0.]], output.eval())
def test_deep_copy(self):
a = fc.categorical_column_with_hash_bucket('a', 4)
column = fc.indicator_column(a)
column_copy = copy.deepcopy(column)
self.assertEqual(column_copy.categorical_column.name, 'a')
self.assertEqual(column.name, 'a_indicator')
self.assertEqual(column.variable_shape, [1, 4])
def test_parse_example(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
a_indicator = fc.indicator_column(a)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer']))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec([a_indicator]))
self.assertIn('aaa', features)
with self.test_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]),
features['aaa'].eval())
def test_transform(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
a_indicator = fc.indicator_column(a)
features = {
'aaa': sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
}
indicator_tensor = _transform_features(features, [a_indicator],
None)[a_indicator]
with _initialized_session():
self.assertAllEqual([[0, 0, 1], [1, 0, 0]], indicator_tensor.eval())
def test_transform_with_weighted_column(self):
# Github issue 12557
ids = fc.categorical_column_with_vocabulary_list(
key='ids', vocabulary_list=('a', 'b', 'c'))
weights = fc.weighted_categorical_column(ids, 'weights')
indicator = fc.indicator_column(weights)
features = {
'ids': constant_op.constant([['c', 'b', 'a']]),
'weights': constant_op.constant([[2., 4., 6.]])
}
indicator_tensor = _transform_features(features, [indicator],
None)[indicator]
with _initialized_session():
self.assertAllEqual([[6., 4., 2.]], indicator_tensor.eval())
def test_transform_with_missing_value_in_weighted_column(self):
# Github issue 12583
ids = fc.categorical_column_with_vocabulary_list(
key='ids', vocabulary_list=('a', 'b', 'c'))
weights = fc.weighted_categorical_column(ids, 'weights')
indicator = fc.indicator_column(weights)
features = {
'ids': constant_op.constant([['c', 'b', 'unknown']]),
'weights': constant_op.constant([[2., 4., 6.]])
}
indicator_tensor = _transform_features(features, [indicator],
None)[indicator]
with _initialized_session():
self.assertAllEqual([[0., 4., 2.]], indicator_tensor.eval())
def test_transform_with_missing_value_in_categorical_column(self):
# Github issue 12583
ids = fc.categorical_column_with_vocabulary_list(
key='ids', vocabulary_list=('a', 'b', 'c'))
indicator = fc.indicator_column(ids)
features = {
'ids': constant_op.constant([['c', 'b', 'unknown']]),
}
indicator_tensor = _transform_features(features, [indicator],
None)[indicator]
with _initialized_session():
self.assertAllEqual([[0., 1., 1.]], indicator_tensor.eval())
def test_linear_model(self):
animal = fc_old.indicator_column(
fc_old.categorical_column_with_identity('animal', num_buckets=4))
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
predictions = fc.linear_model(features, [animal])
weight_var = get_linear_model_column_var(animal)
with _initialized_session():
# All should be zero-initialized.
self.assertAllClose([[0.], [0.], [0.], [0.]], weight_var.eval())
self.assertAllClose([[0.]], predictions.eval())
weight_var.assign([[1.], [2.], [3.], [4.]]).eval()
self.assertAllClose([[2. + 3.]], predictions.eval())
def test_keras_linear_model(self):
animal = fc_old.indicator_column(
fc_old.categorical_column_with_identity('animal', num_buckets=4))
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
predictions = get_keras_linear_model_predictions(features, [animal])
weight_var = get_linear_model_column_var(animal)
with _initialized_session():
# All should be zero-initialized.
self.assertAllClose([[0.], [0.], [0.], [0.]], weight_var.eval())
self.assertAllClose([[0.]], predictions.eval())
weight_var.assign([[1.], [2.], [3.], [4.]]).eval()
self.assertAllClose([[2. + 3.]], predictions.eval())
def test_input_layer(self):
animal = fc_old.indicator_column(
fc_old.categorical_column_with_identity('animal', num_buckets=4))
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
net = fc.input_layer(features, [animal])
with _initialized_session():
self.assertAllClose([[0., 1., 1., 0.]], net.eval())
class _TestStateManager(StateManager):
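  """Minimal StateManager that caches variables per feature column."""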
def __init__(self, trainable=True):
# Dict of feature_column to a dict of variables.
self._all_variables = {}
self._trainable = trainable
def get_variable(self,
feature_column,
name,
shape,
dtype=None,
initializer=None):
if feature_column not in self._all_variables:
self._all_variables[feature_column] = {}
var_dict = self._all_variables[feature_column]
if name in var_dict:
return var_dict[name]
else:
var = variable_scope.get_variable(
name=name,
shape=shape,
initializer=initializer,
trainable=self._trainable)
var_dict[name] = var
return var
class EmbeddingColumnTest(test.TestCase):
def test_defaults(self):
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = fc.embedding_column(
categorical_column, dimension=embedding_dimension)
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('mean', embedding_column.combiner)
self.assertIsNone(embedding_column.ckpt_to_load_from)
self.assertIsNone(embedding_column.tensor_name_in_ckpt)
self.assertIsNone(embedding_column.max_norm)
self.assertTrue(embedding_column.trainable)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column.parse_example_spec)
def test_all_constructor_args(self):
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = fc.embedding_column(
categorical_column, dimension=embedding_dimension,
combiner='my_combiner', initializer=lambda: 'my_initializer',
ckpt_to_load_from='my_ckpt', tensor_name_in_ckpt='my_ckpt_tensor',
max_norm=42., trainable=False)
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('my_combiner', embedding_column.combiner)
self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt)
self.assertEqual(42., embedding_column.max_norm)
self.assertFalse(embedding_column.trainable)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column.parse_example_spec)
def test_deep_copy(self):
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
original = fc.embedding_column(
categorical_column, dimension=embedding_dimension,
combiner='my_combiner', initializer=lambda: 'my_initializer',
ckpt_to_load_from='my_ckpt', tensor_name_in_ckpt='my_ckpt_tensor',
max_norm=42., trainable=False)
for embedding_column in (original, copy.deepcopy(original)):
self.assertEqual('aaa', embedding_column.categorical_column.name)
self.assertEqual(3, embedding_column.categorical_column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column.categorical_column.parse_example_spec)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('my_combiner', embedding_column.combiner)
self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt)
self.assertEqual(42., embedding_column.max_norm)
self.assertFalse(embedding_column.trainable)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column.parse_example_spec)
def test_invalid_initializer(self):
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
with self.assertRaisesRegexp(ValueError, 'initializer must be callable'):
fc.embedding_column(categorical_column, dimension=2, initializer='not_fn')
def test_parse_example(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
a_embedded = fc.embedding_column(a, dimension=2)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer']))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec([a_embedded]))
self.assertIn('aaa', features)
with self.test_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]),
features['aaa'].eval())
def test_transform_feature(self):
a = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
a_embedded = fc.embedding_column(a, dimension=2)
features = {
'aaa': sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2))
}
outputs = _transform_features(features, [a, a_embedded], None)
output_a = outputs[a]
output_embedded = outputs[a_embedded]
with _initialized_session():
_assert_sparse_tensor_value(
self, output_a.eval(), output_embedded.eval())
def test_get_dense_tensor(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column, dimension=embedding_dimension,
initializer=_initializer)
state_manager = _TestStateManager()
# Provide sparse input and get dense result.
embedding_lookup = embedding_column.get_dense_tensor(
FeatureTransformationCache({
'aaa': sparse_input
}), state_manager)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, embedding_lookup.eval())
def test_get_dense_tensor_3d(self):
# Inputs.
vocabulary_size = 4
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0, 0), (1, 1, 0), (1, 1, 4), (3, 0, 0), (3, 1, 2)),
values=(2, 0, 1, 1, 2),
dense_shape=(4, 2, 5))
# Embedding variable.
embedding_dimension = 3
embedding_values = (
(1., 2., 4.), # id 0
(3., 5., 1.), # id 1
(7., 11., 2.), # id 2
(2., 7., 12.) # id 3
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [[2], []], embedding = [[7, 11, 2], [0, 0, 0]]
((7., 11., 2.), (0., 0., 0.)),
# example 1, ids [[], [0, 1]], embedding
# = mean([[], [1, 2, 4] + [3, 5, 1]]) = [[0, 0, 0], [2, 3.5, 2.5]]
((0., 0., 0.), (2., 3.5, 2.5)),
# example 2, ids [[], []], embedding = [[0, 0, 0], [0, 0, 0]]
((0., 0., 0.), (0., 0., 0.)),
# example 3, ids [[1], [2]], embedding = [[3, 5, 1], [7, 11, 2]]
((3., 5., 1.), (7., 11., 2.)),
)
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column, dimension=embedding_dimension,
initializer=_initializer)
state_manager = _TestStateManager()
# Provide sparse input and get dense result.
embedding_lookup = embedding_column.get_dense_tensor(
FeatureTransformationCache({
'aaa': sparse_input
}), state_manager)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, embedding_lookup.eval())
def DISABLED_test_get_dense_tensor_weight_collections(self):
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_column = fc.embedding_column(categorical_column, dimension=2)
# Provide sparse input and get dense result.
embedding_column.get_dense_tensor(
FeatureTransformationCache({
'aaa': sparse_input
}),
weight_collections=('my_vars',))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
my_vars = ops.get_collection('my_vars')
self.assertItemsEqual(
('embedding_weights:0',), tuple([v.name for v in my_vars]))
def test_get_dense_tensor_placeholder_inputs(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column, dimension=embedding_dimension,
initializer=_initializer)
state_manager = _TestStateManager()
# Provide sparse input and get dense result.
input_indices = array_ops.placeholder(dtype=dtypes.int64)
input_values = array_ops.placeholder(dtype=dtypes.int64)
input_shape = array_ops.placeholder(dtype=dtypes.int64)
embedding_lookup = embedding_column.get_dense_tensor(
FeatureTransformationCache({
'aaa':
sparse_tensor.SparseTensorValue(
indices=input_indices,
values=input_values,
dense_shape=input_shape)
}), state_manager)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('embedding_weights:0',), tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, embedding_lookup.eval(
feed_dict={
input_indices: sparse_input.indices,
input_values: sparse_input.values,
input_shape: sparse_input.dense_shape,
}))
def test_get_dense_tensor_restore_from_ckpt(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable. The checkpoint file contains _embedding_values.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
ckpt_path = test.test_src_dir_path(
'python/feature_column/testdata/embedding.ckpt')
ckpt_tensor = 'my_embedding'
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column, dimension=embedding_dimension,
ckpt_to_load_from=ckpt_path,
tensor_name_in_ckpt=ckpt_tensor)
state_manager = _TestStateManager()
# Provide sparse input and get dense result.
embedding_lookup = embedding_column.get_dense_tensor(
FeatureTransformationCache({
'aaa': sparse_input
}), state_manager)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('embedding_weights:0',), tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, embedding_lookup.eval())
def test_linear_model(self):
# Inputs.
batch_size = 4
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(batch_size, 5))
# Embedding variable.
embedding_dimension = 2
embedding_shape = (vocabulary_size, embedding_dimension)
zeros_embedding_values = np.zeros(embedding_shape)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual(embedding_shape, shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return zeros_embedding_values
# Build columns.
categorical_column = fc_old.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc_old.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
with ops.Graph().as_default():
predictions = fc.linear_model({
categorical_column.name: sparse_input
}, (embedding_column,))
expected_var_names = (
'linear_model/bias_weights:0',
'linear_model/aaa_embedding/weights:0',
'linear_model/aaa_embedding/embedding_weights:0',
)
self.assertItemsEqual(
expected_var_names,
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
trainable_vars = {
v.name: v for v in ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
}
self.assertItemsEqual(expected_var_names, trainable_vars.keys())
bias = trainable_vars['linear_model/bias_weights:0']
embedding_weights = trainable_vars[
'linear_model/aaa_embedding/embedding_weights:0']
linear_weights = trainable_vars[
'linear_model/aaa_embedding/weights:0']
with _initialized_session():
# Predictions with all zero weights.
self.assertAllClose(np.zeros((1,)), bias.eval())
self.assertAllClose(zeros_embedding_values, embedding_weights.eval())
self.assertAllClose(
np.zeros((embedding_dimension, 1)), linear_weights.eval())
self.assertAllClose(np.zeros((batch_size, 1)), predictions.eval())
# Predictions with all non-zero weights.
embedding_weights.assign((
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)).eval()
linear_weights.assign(((4.,), (6.,))).eval()
# example 0, ids [2], embedding[0] = [7, 11]
# example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
# example 2, ids [], embedding[2] = [0, 0]
# example 3, ids [1], embedding[3] = [3, 5]
# sum(embeddings * linear_weights)
# = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
self.assertAllClose(((94.,), (29.,), (0.,), (42.,)), predictions.eval())
def test_keras_linear_model(self):
# Inputs.
batch_size = 4
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(batch_size, 5))
# Embedding variable.
embedding_dimension = 2
embedding_shape = (vocabulary_size, embedding_dimension)
zeros_embedding_values = np.zeros(embedding_shape)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual(embedding_shape, shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return zeros_embedding_values
# Build columns.
categorical_column = fc_old.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc_old.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
with ops.Graph().as_default():
predictions = get_keras_linear_model_predictions({
categorical_column.name: sparse_input
}, (embedding_column,))
expected_var_names = (
'linear_model/bias_weights:0',
'linear_model/aaa_embedding/weights:0',
'linear_model/aaa_embedding/embedding_weights:0',
)
self.assertItemsEqual(
expected_var_names,
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
trainable_vars = {
v.name: v
for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
}
self.assertItemsEqual(expected_var_names, trainable_vars.keys())
bias = trainable_vars['linear_model/bias_weights:0']
embedding_weights = trainable_vars[
'linear_model/aaa_embedding/embedding_weights:0']
linear_weights = trainable_vars['linear_model/aaa_embedding/weights:0']
with _initialized_session():
# Predictions with all zero weights.
self.assertAllClose(np.zeros((1,)), bias.eval())
self.assertAllClose(zeros_embedding_values, embedding_weights.eval())
self.assertAllClose(
np.zeros((embedding_dimension, 1)), linear_weights.eval())
self.assertAllClose(np.zeros((batch_size, 1)), predictions.eval())
# Predictions with all non-zero weights.
embedding_weights.assign((
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)).eval()
linear_weights.assign(((4.,), (6.,))).eval()
# example 0, ids [2], embedding[0] = [7, 11]
# example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
# example 2, ids [], embedding[2] = [0, 0]
# example 3, ids [1], embedding[3] = [3, 5]
# sum(embeddings * linear_weights)
# = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
self.assertAllClose(((94.,), (29.,), (0.,), (42.,)), predictions.eval())
def test_input_layer(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc_old.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc_old.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer)
# Provide sparse input and get dense result.
input_layer = fc.input_layer({'aaa': sparse_input}, (embedding_column,))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('input_layer/aaa_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(
('input_layer/aaa_embedding/embedding_weights:0',),
tuple([v.name for v in trainable_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, trainable_vars[0].eval())
self.assertAllEqual(expected_lookups, input_layer.eval())
def test_input_layer_not_trainable(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc_old.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc_old.embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer,
trainable=False)
# Provide sparse input and get dense result.
input_layer = fc.input_layer({'aaa': sparse_input}, (embedding_column,))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('input_layer/aaa_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
self.assertItemsEqual(
[], ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, input_layer.eval())
class _TestSharedEmbeddingStateManager(StateManager):
"""Manages the state for shared embedding columns.
This can handle multiple groups of shared embedding columns.
"""
def __init__(self, trainable=True):
# Dict of shared_embedding_collection_name to a dict of variables.
self._all_variables = {}
self._trainable = trainable
def get_variable(self,
feature_column,
name,
shape,
dtype=None,
initializer=None):
if not isinstance(feature_column, fc.SharedEmbeddingColumn):
raise ValueError(
'SharedEmbeddingStateManager can only handle SharedEmbeddingColumns. '
'Given type: {} '.format(type(feature_column)))
collection_name = feature_column.shared_collection_name
if collection_name not in self._all_variables:
self._all_variables[collection_name] = {}
var_dict = self._all_variables[collection_name]
if name in var_dict:
return var_dict[name]
else:
var = variable_scope.get_variable(
name=name,
shape=shape,
initializer=initializer,
trainable=self._trainable)
var_dict[name] = var
return var
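# Illustrative sketch (added; not part of the original test file): how a state
# manager like the one above is meant to be driven. The column and size names
# below are assumptions for illustration, and the snippet is kept in comments
# so it creates no graph variables and does not disturb the tests that follow.
#
#   manager = _TestSharedEmbeddingStateManager()
#   weights = manager.get_variable(
#       shared_embedding_column, name='embedding_weights',
#       shape=(vocabulary_size, embedding_dimension),
#       initializer=init_ops.zeros_initializer())
#   # A second shared embedding column from the same shared collection asking
#   # for 'embedding_weights' gets back the identical variable object, which
#   # is what makes the embedding weights shared across columns.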
class SharedEmbeddingColumnTest(test.TestCase):
def test_defaults(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
embedding_column_b, embedding_column_a = fc.shared_embedding_columns(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension)
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertEqual(embedding_dimension, embedding_column_a.dimension)
self.assertEqual(embedding_dimension, embedding_column_b.dimension)
self.assertEqual('mean', embedding_column_a.combiner)
self.assertEqual('mean', embedding_column_b.combiner)
self.assertIsNone(embedding_column_a.ckpt_to_load_from)
self.assertIsNone(embedding_column_b.ckpt_to_load_from)
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_a.shared_collection_name)
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_b.shared_collection_name)
self.assertIsNone(embedding_column_a.tensor_name_in_ckpt)
self.assertIsNone(embedding_column_b.tensor_name_in_ckpt)
self.assertIsNone(embedding_column_a.max_norm)
self.assertIsNone(embedding_column_b.max_norm)
self.assertTrue(embedding_column_a.trainable)
self.assertTrue(embedding_column_b.trainable)
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape)
self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a.parse_example_spec)
self.assertEqual({
'bbb': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_b.parse_example_spec)
def test_all_constructor_args(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
embedding_column_a, embedding_column_b = fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer',
shared_embedding_collection_name='shared_embedding_collection_name',
ckpt_to_load_from='my_ckpt',
tensor_name_in_ckpt='my_ckpt_tensor',
max_norm=42.,
trainable=False)
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertEqual(embedding_dimension, embedding_column_a.dimension)
self.assertEqual(embedding_dimension, embedding_column_b.dimension)
self.assertEqual('my_combiner', embedding_column_a.combiner)
self.assertEqual('my_combiner', embedding_column_b.combiner)
self.assertEqual('shared_embedding_collection_name',
embedding_column_a.shared_collection_name)
self.assertEqual('shared_embedding_collection_name',
embedding_column_b.shared_collection_name)
self.assertEqual('my_ckpt', embedding_column_a.ckpt_to_load_from)
self.assertEqual('my_ckpt', embedding_column_b.ckpt_to_load_from)
self.assertEqual('my_ckpt_tensor', embedding_column_a.tensor_name_in_ckpt)
self.assertEqual('my_ckpt_tensor', embedding_column_b.tensor_name_in_ckpt)
self.assertEqual(42., embedding_column_a.max_norm)
self.assertEqual(42., embedding_column_b.max_norm)
self.assertFalse(embedding_column_a.trainable)
self.assertFalse(embedding_column_b.trainable)
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape)
self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a.parse_example_spec)
self.assertEqual({
'bbb': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_b.parse_example_spec)
def test_deep_copy(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
original_a, _ = fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer',
shared_embedding_collection_name='shared_embedding_collection_name',
ckpt_to_load_from='my_ckpt',
tensor_name_in_ckpt='my_ckpt_tensor',
max_norm=42., trainable=False)
for embedding_column_a in (original_a, copy.deepcopy(original_a)):
self.assertEqual('aaa', embedding_column_a.categorical_column.name)
self.assertEqual(3, embedding_column_a.categorical_column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a.categorical_column.parse_example_spec)
self.assertEqual(embedding_dimension, embedding_column_a.dimension)
self.assertEqual('my_combiner', embedding_column_a.combiner)
self.assertEqual('shared_embedding_collection_name',
embedding_column_a.shared_collection_name)
self.assertEqual('my_ckpt', embedding_column_a.ckpt_to_load_from)
self.assertEqual('my_ckpt_tensor', embedding_column_a.tensor_name_in_ckpt)
self.assertEqual(42., embedding_column_a.max_norm)
self.assertFalse(embedding_column_a.trainable)
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual((embedding_dimension,),
embedding_column_a.variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a.parse_example_spec)
def test_invalid_initializer(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
with self.assertRaisesRegexp(ValueError, 'initializer must be callable'):
fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b], dimension=2,
initializer='not_fn')
def test_incompatible_column_type(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
categorical_column_c = fc.categorical_column_with_hash_bucket(
key='ccc', hash_bucket_size=3)
with self.assertRaisesRegexp(
ValueError, 'all categorical_columns must have the same type.*'
'IdentityCategoricalColumn.*HashedCategoricalColumn'):
fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b, categorical_column_c],
dimension=2)
def test_weighted_categorical_column_ok(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
weighted_categorical_column_a = fc.weighted_categorical_column(
categorical_column_a, weight_feature_key='aaa_weights')
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
weighted_categorical_column_b = fc.weighted_categorical_column(
categorical_column_b, weight_feature_key='bbb_weights')
fc.shared_embedding_columns(
[weighted_categorical_column_a, categorical_column_b], dimension=2)
fc.shared_embedding_columns(
[categorical_column_a, weighted_categorical_column_b], dimension=2)
fc.shared_embedding_columns(
[weighted_categorical_column_a, weighted_categorical_column_b],
dimension=2)
def test_parse_example(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
b = fc.categorical_column_with_vocabulary_list(
key='bbb', vocabulary_list=('omar', 'stringer', 'marlo'))
a_embedded, b_embedded = fc.shared_embedding_columns(
[a, b], dimension=2)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer'])),
'bbb':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'stringer', b'marlo'])),
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec([a_embedded, b_embedded]))
self.assertIn('aaa', features)
self.assertIn('bbb', features)
with self.test_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]),
features['aaa'].eval())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'stringer', b'marlo'], dtype=np.object_),
dense_shape=[1, 2]),
features['bbb'].eval())
def test_transform_feature(self):
a = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
b = fc.categorical_column_with_identity(key='bbb', num_buckets=3)
a_embedded, b_embedded = fc.shared_embedding_columns(
[a, b], dimension=2)
features = {
'aaa': sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb': sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
outputs = _transform_features(features, [a, a_embedded, b, b_embedded],
None)
output_a = outputs[a]
output_a_embedded = outputs[a_embedded]
output_b = outputs[b]
output_b_embedded = outputs[b_embedded]
with _initialized_session():
_assert_sparse_tensor_value(
self, output_a.eval(), output_a_embedded.eval())
_assert_sparse_tensor_value(
self, output_b.eval(), output_b_embedded.eval())
def test_get_dense_tensor(self):
# Inputs.
vocabulary_size = 3
# -1 values are ignored.
input_a = np.array(
[[2, -1, -1], # example 0, ids [2]
[0, 1, -1]]) # example 1, ids [0, 1]
input_b = np.array(
[[0, -1, -1], # example 0, ids [0]
[-1, -1, -1]]) # example 1, ids []
input_features = {
'aaa': input_a,
'bbb': input_b
}
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups_a = (
# example 0:
(7., 11.), # ids [2], embedding = [7, 11]
# example 1:
(2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
)
expected_lookups_b = (
# example 0:
(1., 2.), # ids [0], embedding = [1, 2]
# example 1:
(0., 0.), # ids [], embedding = [0, 0]
)
# Build columns.
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_a, embedding_column_b = fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension, initializer=_initializer)
state_manager = _TestSharedEmbeddingStateManager()
# Provide sparse input and get dense result.
embedding_lookup_a = embedding_column_a.get_dense_tensor(
FeatureTransformationCache(input_features), state_manager)
embedding_lookup_b = embedding_column_b.get_dense_tensor(
FeatureTransformationCache(input_features), state_manager)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
embedding_var = global_vars[0]
with _initialized_session():
self.assertAllEqual(embedding_values, embedding_var.eval())
self.assertAllEqual(expected_lookups_a, embedding_lookup_a.eval())
self.assertAllEqual(expected_lookups_b, embedding_lookup_b.eval())
def DISABLED_test_get_dense_tensor_weight_collections(self):
# Inputs.
vocabulary_size = 3
# -1 values are ignored.
input_a = np.array([
[2, -1, -1], # example 0, ids [2]
[0, 1, -1]
]) # example 1, ids [0, 1]
input_b = np.array([
[0, -1, -1], # example 0, ids [0]
[-1, -1, -1]
]) # example 1, ids []
input_features = {'aaa': input_a, 'bbb': input_b}
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Build columns.
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_a, embedding_column_b = fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer)
fc.input_layer(
input_features, [embedding_column_a, embedding_column_b],
weight_collections=('my_vars',))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('input_layer/aaa_bbb_shared_embedding/embedding_weights:0',),
tuple(v.name for v in global_vars))
my_vars = ops.get_collection('my_vars')
self.assertItemsEqual(
('input_layer/aaa_bbb_shared_embedding/embedding_weights:0',),
tuple(v.name for v in my_vars))
def test_get_dense_tensor_placeholder_inputs(self):
# Inputs.
vocabulary_size = 3
# -1 values are ignored.
input_a = np.array(
[[2, -1, -1], # example 0, ids [2]
[0, 1, -1]]) # example 1, ids [0, 1]
input_b = np.array(
[[0, -1, -1], # example 0, ids [0]
[-1, -1, -1]]) # example 1, ids []
# Specify shape, because dense input must have rank specified.
input_a_placeholder = array_ops.placeholder(
dtype=dtypes.int64, shape=[None, 3])
input_b_placeholder = array_ops.placeholder(
dtype=dtypes.int64, shape=[None, 3])
input_features = {
'aaa': input_a_placeholder,
'bbb': input_b_placeholder,
}
feed_dict = {
input_a_placeholder: input_a,
input_b_placeholder: input_b,
}
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Build columns.
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_a, embedding_column_b = fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension, initializer=_initializer)
state_manager = _TestSharedEmbeddingStateManager()
# Provide sparse input and get dense result.
embedding_lookup_a = embedding_column_a.get_dense_tensor(
FeatureTransformationCache(input_features), state_manager)
embedding_lookup_b = embedding_column_b.get_dense_tensor(
FeatureTransformationCache(input_features), state_manager)
with _initialized_session() as sess:
sess.run([embedding_lookup_a, embedding_lookup_b], feed_dict=feed_dict)
def test_linear_model(self):
# Inputs.
batch_size = 2
vocabulary_size = 3
# -1 values are ignored.
input_a = np.array(
[[2, -1, -1], # example 0, ids [2]
[0, 1, -1]]) # example 1, ids [0, 1]
input_b = np.array(
[[0, -1, -1], # example 0, ids [0]
[-1, -1, -1]]) # example 1, ids []
# Embedding variable.
embedding_dimension = 2
embedding_shape = (vocabulary_size, embedding_dimension)
zeros_embedding_values = np.zeros(embedding_shape)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual(embedding_shape, shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return zeros_embedding_values
# Build columns.
categorical_column_a = fc_old.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_old.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_a, embedding_column_b = fc_old.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer)
with ops.Graph().as_default():
predictions = fc.linear_model({
categorical_column_a.name: input_a,
categorical_column_b.name: input_b,
}, (embedding_column_a, embedding_column_b))
# Linear weights do not follow the column name. But this is a rare use
# case, and fixing it would add too much complexity to the code.
expected_var_names = (
'linear_model/bias_weights:0',
'linear_model/aaa_bbb_shared_embedding/weights:0',
'linear_model/aaa_bbb_shared_embedding/embedding_weights:0',
'linear_model/aaa_bbb_shared_embedding_1/weights:0',
)
self.assertItemsEqual(
expected_var_names,
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
trainable_vars = {
v.name: v for v in ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
}
self.assertItemsEqual(expected_var_names, trainable_vars.keys())
bias = trainable_vars['linear_model/bias_weights:0']
embedding_weights = trainable_vars[
'linear_model/aaa_bbb_shared_embedding/embedding_weights:0']
linear_weights_a = trainable_vars[
'linear_model/aaa_bbb_shared_embedding/weights:0']
linear_weights_b = trainable_vars[
'linear_model/aaa_bbb_shared_embedding_1/weights:0']
with _initialized_session():
# Predictions with all zero weights.
self.assertAllClose(np.zeros((1,)), bias.eval())
self.assertAllClose(zeros_embedding_values, embedding_weights.eval())
self.assertAllClose(
np.zeros((embedding_dimension, 1)), linear_weights_a.eval())
self.assertAllClose(
np.zeros((embedding_dimension, 1)), linear_weights_b.eval())
self.assertAllClose(np.zeros((batch_size, 1)), predictions.eval())
# Predictions with all non-zero weights.
embedding_weights.assign((
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)).eval()
linear_weights_a.assign(((4.,), (6.,))).eval()
# example 0, ids [2], embedding[0] = [7, 11]
# example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
# sum(embeddings * linear_weights)
# = [4*7 + 6*11, 4*2 + 6*3.5] = [94, 29]
linear_weights_b.assign(((3.,), (5.,))).eval()
# example 0, ids [0], embedding[0] = [1, 2]
# example 1, ids [], embedding[1] = [0, 0]
# sum(embeddings * linear_weights)
# = [3*1 + 5*2, 3*0 + 5*0] = [13, 0]
self.assertAllClose([[94. + 13.], [29.]], predictions.eval())
def test_keras_linear_model(self):
# Inputs.
batch_size = 2
vocabulary_size = 3
# -1 values are ignored.
input_a = np.array([
[2, -1, -1], # example 0, ids [2]
[0, 1, -1]
]) # example 1, ids [0, 1]
input_b = np.array([
[0, -1, -1], # example 0, ids [0]
[-1, -1, -1]
]) # example 1, ids []
# Embedding variable.
embedding_dimension = 2
embedding_shape = (vocabulary_size, embedding_dimension)
zeros_embedding_values = np.zeros(embedding_shape)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual(embedding_shape, shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return zeros_embedding_values
# Build columns.
categorical_column_a = fc_old.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_old.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_a, embedding_column_b = fc_old.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer)
with ops.Graph().as_default():
predictions = get_keras_linear_model_predictions({
categorical_column_a.name: input_a,
categorical_column_b.name: input_b,
}, (embedding_column_a, embedding_column_b))
# Linear weights do not follow the column name. But this is a rare use
# case, and fixing it would add too much complexity to the code.
expected_var_names = (
'linear_model/bias_weights:0',
'linear_model/aaa_bbb_shared_embedding/weights:0',
'linear_model/aaa_bbb_shared_embedding/embedding_weights:0',
'linear_model/aaa_bbb_shared_embedding_1/weights:0',
)
self.assertItemsEqual(
expected_var_names,
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
trainable_vars = {
v.name: v
for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
}
self.assertItemsEqual(expected_var_names, trainable_vars.keys())
bias = trainable_vars['linear_model/bias_weights:0']
embedding_weights = trainable_vars[
'linear_model/aaa_bbb_shared_embedding/embedding_weights:0']
linear_weights_a = trainable_vars[
'linear_model/aaa_bbb_shared_embedding/weights:0']
linear_weights_b = trainable_vars[
'linear_model/aaa_bbb_shared_embedding_1/weights:0']
with _initialized_session():
# Predictions with all zero weights.
self.assertAllClose(np.zeros((1,)), bias.eval())
self.assertAllClose(zeros_embedding_values, embedding_weights.eval())
self.assertAllClose(
np.zeros((embedding_dimension, 1)), linear_weights_a.eval())
self.assertAllClose(
np.zeros((embedding_dimension, 1)), linear_weights_b.eval())
self.assertAllClose(np.zeros((batch_size, 1)), predictions.eval())
# Predictions with all non-zero weights.
embedding_weights.assign((
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)).eval()
linear_weights_a.assign(((4.,), (6.,))).eval()
# example 0, ids [2], embedding[0] = [7, 11]
# example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
# sum(embeddings * linear_weights)
# = [4*7 + 6*11, 4*2 + 6*3.5] = [94, 29]
linear_weights_b.assign(((3.,), (5.,))).eval()
# example 0, ids [0], embedding[0] = [1, 2]
# example 1, ids [], embedding[1] = [0, 0]
# sum(embeddings * linear_weights)
# = [3*1 + 5*2, 3*0 + 5*0] = [13, 0]
self.assertAllClose([[94. + 13.], [29.]], predictions.eval())
def _test_input_layer(self, trainable=True):
# Inputs.
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 4)),
values=(2, 0, 1),
dense_shape=(2, 5))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [0]
# example 1, ids []
indices=((0, 0),),
values=(0,),
dense_shape=(2, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0:
# A ids [2], embedding = [7, 11]
# B ids [0], embedding = [1, 2]
(7., 11., 1., 2.),
# example 1:
# A ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
# B ids [], embedding = [0, 0]
(2., 3.5, 0., 0.),
)
# Build columns.
categorical_column_a = fc_old.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_old.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_a, embedding_column_b = fc_old.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer,
trainable=trainable)
# Provide sparse input and get dense result.
input_layer = fc.input_layer(
features={'aaa': sparse_input_a, 'bbb': sparse_input_b},
feature_columns=(embedding_column_b, embedding_column_a))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'],
tuple([v.name for v in global_vars]))
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
if trainable:
self.assertItemsEqual(
['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'],
tuple([v.name for v in trainable_vars]))
else:
self.assertItemsEqual([], tuple([v.name for v in trainable_vars]))
shared_embedding_vars = global_vars
with _initialized_session():
self.assertAllEqual(embedding_values, shared_embedding_vars[0].eval())
self.assertAllEqual(expected_lookups, input_layer.eval())
def test_input_layer(self):
self._test_input_layer()
def test_input_layer_no_trainable(self):
self._test_input_layer(trainable=False)
class WeightedCategoricalColumnTest(test.TestCase):
def test_defaults(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
self.assertEqual('ids_weighted_by_values', column.name)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'ids': parsing_ops.VarLenFeature(dtypes.int64),
'values': parsing_ops.VarLenFeature(dtypes.float32)
}, column.parse_example_spec)
def test_deep_copy(self):
"""Tests deepcopy of categorical_column_with_hash_bucket."""
original = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
for column in (original, copy.deepcopy(original)):
self.assertEqual('ids_weighted_by_values', column.name)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'ids': parsing_ops.VarLenFeature(dtypes.int64),
'values': parsing_ops.VarLenFeature(dtypes.float32)
}, column.parse_example_spec)
def test_invalid_dtype_none(self):
with self.assertRaisesRegexp(ValueError, 'is not convertible to float'):
fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values',
dtype=None)
def test_invalid_dtype_string(self):
with self.assertRaisesRegexp(ValueError, 'is not convertible to float'):
fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values',
dtype=dtypes.string)
def test_invalid_input_dtype(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
strings = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('omar', 'stringer', 'marlo'),
dense_shape=(2, 2))
with self.assertRaisesRegexp(ValueError, 'Bad dtype'):
_transform_features({'ids': strings, 'values': strings}, (column,), None)
def test_column_name_collision(self):
with self.assertRaisesRegexp(ValueError, r'Parse config.*already exists'):
fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='aaa', num_buckets=3),
weight_feature_key='aaa').parse_example_spec()
def test_missing_weights(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('omar', 'stringer', 'marlo'),
dense_shape=(2, 2))
with self.assertRaisesRegexp(
ValueError, 'values is not in features dictionary'):
_transform_features({'ids': inputs}, (column,), None)
def test_parse_example(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
a_weighted = fc.weighted_categorical_column(a, weight_feature_key='weights')
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer'])),
'weights':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=[1., 10.]))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec([a_weighted]))
self.assertIn('aaa', features)
self.assertIn('weights', features)
with self.test_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]),
features['aaa'].eval())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([1., 10.], dtype=np.float32),
dense_shape=[1, 2]),
features['weights'].eval())
def test_transform_features(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2))
weights = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0.5, 1.0, 0.1),
dense_shape=(2, 2))
id_tensor, weight_tensor = _transform_features({
'ids': inputs,
'values': weights,
}, (column,), None)[column]
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array(inputs.values, dtype=np.int64),
dense_shape=inputs.dense_shape),
id_tensor.eval())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=weights.indices,
values=np.array(weights.values, dtype=np.float32),
dense_shape=weights.dense_shape),
weight_tensor.eval())
def test_transform_features_dense_input(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
weights = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0.5, 1.0, 0.1),
dense_shape=(2, 2))
id_tensor, weight_tensor = _transform_features({
'ids': ((0, -1), (1, 0)),
'values': weights,
}, (column,), None)[column]
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=np.array((0, 1, 0), dtype=np.int64),
dense_shape=(2, 2)),
id_tensor.eval())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=weights.indices,
values=np.array(weights.values, dtype=np.float32),
dense_shape=weights.dense_shape),
weight_tensor.eval())
def test_transform_features_dense_weights(self):
column = fc.weighted_categorical_column(
categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 1, 0),
dense_shape=(2, 2))
id_tensor, weight_tensor = _transform_features({
'ids': inputs,
'values': ((.5, 0.), (1., .1)),
}, (column,), None)[column]
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array(inputs.values, dtype=np.int64),
dense_shape=inputs.dense_shape),
id_tensor.eval())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=np.array((.5, 1., .1), dtype=np.float32),
dense_shape=(2, 2)),
weight_tensor.eval())
def test_keras_linear_model(self):
column = fc_old.weighted_categorical_column(
categorical_column=fc_old.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
predictions = get_keras_linear_model_predictions({
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(.5, 1., .1),
dense_shape=(2, 2))
}, (column,))
bias = get_linear_model_bias()
weight_var = get_linear_model_column_var(column)
with _initialized_session():
self.assertAllClose((0.,), bias.eval())
self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
self.assertAllClose(((0.,), (0.,)), predictions.eval())
weight_var.assign(((1.,), (2.,), (3.,))).eval()
# weight_var[0] * weights[0, 0] = 1 * .5 = .5
# weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
# = 3*1 + 2*.1 = 3+.2 = 3.2
self.assertAllClose(((.5,), (3.2,)), predictions.eval())
def test_keras_linear_model_mismatched_shape(self):
column = fc_old.weighted_categorical_column(
categorical_column=fc_old.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError,
r'Dimensions.*are not compatible'):
get_keras_linear_model_predictions({
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (0, 1), (1, 0), (1, 1)),
values=(.5, 11., 1., .1),
dense_shape=(2, 2))
}, (column,))
def test_keras_linear_model_mismatched_dense_values(self):
column = fc_old.weighted_categorical_column(
categorical_column=fc_old.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
predictions = get_keras_linear_model_predictions(
{
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values': ((.5,), (1.,))
}, (column,),
sparse_combiner='mean')
# Disabling the constant folding optimizer here since it changes the
# error message differently on CPU and GPU.
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
with _initialized_session(config):
with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'):
predictions.eval()
def test_keras_linear_model_mismatched_dense_shape(self):
column = fc_old.weighted_categorical_column(
categorical_column=fc_old.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
predictions = get_keras_linear_model_predictions({
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values': ((.5,), (1.,), (.1,))
}, (column,))
bias = get_linear_model_bias()
weight_var = get_linear_model_column_var(column)
with _initialized_session():
self.assertAllClose((0.,), bias.eval())
self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
self.assertAllClose(((0.,), (0.,)), predictions.eval())
weight_var.assign(((1.,), (2.,), (3.,))).eval()
# weight_var[0] * weights[0, 0] = 1 * .5 = .5
# weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
# = 3*1 + 2*.1 = 3+.2 = 3.2
self.assertAllClose(((.5,), (3.2,)), predictions.eval())
def test_linear_model(self):
column = fc_old.weighted_categorical_column(
categorical_column=fc_old.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
predictions = fc.linear_model({
'ids': sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values': sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(.5, 1., .1),
dense_shape=(2, 2))
}, (column,))
bias = get_linear_model_bias()
weight_var = get_linear_model_column_var(column)
with _initialized_session():
self.assertAllClose((0.,), bias.eval())
self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
self.assertAllClose(((0.,), (0.,)), predictions.eval())
weight_var.assign(((1.,), (2.,), (3.,))).eval()
# weight_var[0] * weights[0, 0] = 1 * .5 = .5
# weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
# = 3*1 + 2*.1 = 3+.2 = 3.2
self.assertAllClose(((.5,), (3.2,)), predictions.eval())
def test_linear_model_mismatched_shape(self):
column = fc_old.weighted_categorical_column(
categorical_column=fc_old.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
with self.assertRaisesRegexp(
ValueError, r'Dimensions.*are not compatible'):
fc.linear_model({
'ids': sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values': sparse_tensor.SparseTensorValue(
indices=((0, 0), (0, 1), (1, 0), (1, 1)),
values=(.5, 11., 1., .1),
dense_shape=(2, 2))
}, (column,))
def test_linear_model_mismatched_dense_values(self):
column = fc_old.weighted_categorical_column(
categorical_column=fc_old.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
predictions = fc.linear_model(
{
'ids':
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values': ((.5,), (1.,))
}, (column,),
sparse_combiner='mean')
# Disabling the constant folding optimizer here since it changes the
# error message differently on CPU and GPU.
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
with _initialized_session(config):
with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'):
predictions.eval()
def test_linear_model_mismatched_dense_shape(self):
column = fc_old.weighted_categorical_column(
categorical_column=fc_old.categorical_column_with_identity(
key='ids', num_buckets=3),
weight_feature_key='values')
with ops.Graph().as_default():
predictions = fc.linear_model({
'ids': sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 2, 1),
dense_shape=(2, 2)),
'values': ((.5,), (1.,), (.1,))
}, (column,))
bias = get_linear_model_bias()
weight_var = get_linear_model_column_var(column)
with _initialized_session():
self.assertAllClose((0.,), bias.eval())
self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
self.assertAllClose(((0.,), (0.,)), predictions.eval())
weight_var.assign(((1.,), (2.,), (3.,))).eval()
# weight_var[0] * weights[0, 0] = 1 * .5 = .5
# weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
# = 3*1 + 2*.1 = 3+.2 = 3.2
self.assertAllClose(((.5,), (3.2,)), predictions.eval())
# TODO(ptucker): Add test with embedding of weighted categorical.
if __name__ == '__main__':
test.main()
| 40.957625 | 123 | 0.632919 |
dc877209e8d4fa89e4273fbd63d7662c2657b1d1 | 55,488 | py | Python | python/testData/MockSdk3.7/python_stubs/_io.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 52 | 2019-01-11T22:51:59.000Z | 2021-12-12T13:28:21.000Z | python/testData/MockSdk3.7/python_stubs/_io.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 417 | 2019-01-11T19:02:48.000Z | 2022-03-28T14:52:04.000Z | python/testData/MockSdk3.7/python_stubs/_io.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 10 | 2019-05-17T08:10:52.000Z | 2021-07-26T18:20:03.000Z | # encoding: utf-8
# module _io calls itself io
# from (built-in)
# by generator 1.145
"""
The io module provides the Python interfaces to stream handling. The
builtin open function is defined in this module.
At the top of the I/O hierarchy is the abstract base class IOBase. It
defines the basic interface to a stream. Note, however, that there is no
separation between reading and writing to streams; implementations are
allowed to raise an OSError if they do not support a given operation.
Extending IOBase is RawIOBase which deals simply with the reading and
writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide
an interface to OS files.
BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its
subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer
streams that are readable, writable, and both respectively.
BufferedRandom provides a buffered interface to random access
streams. BytesIO is a simple stream of in-memory bytes.
Another IOBase subclass, TextIOBase, deals with the encoding and decoding
of streams into text. TextIOWrapper, which extends it, is a buffered text
interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO
is an in-memory stream for text.
Argument names are not part of the specification, and only the arguments
of open() are intended to be used as keyword arguments.
data:
DEFAULT_BUFFER_SIZE
An int containing the default buffer size used by the module's buffered
I/O classes. open() uses the file's blksize (as obtained by os.stat) if
possible.
"""
# no imports
# Variables with simple values
DEFAULT_BUFFER_SIZE = 8192
# functions
def open(name, mode=None, buffering=None): # known case of _io.open
"""
Open file and return a stream. Raise OSError upon failure.
file is either a text or byte string giving the name (and the path
if the file isn't in the current working directory) of the file to
be opened or an integer file descriptor of the file to be
wrapped. (If a file descriptor is given, it is closed when the
returned I/O object is closed, unless closefd is set to False.)
mode is an optional string that specifies the mode in which the file
is opened. It defaults to 'r' which means open for reading in text
mode. Other common values are 'w' for writing (truncating the file if
it already exists), 'x' for creating and writing to a new file, and
'a' for appending (which on some Unix systems, means that all writes
append to the end of the file regardless of the current seek position).
In text mode, if encoding is not specified the encoding used is platform
dependent: locale.getpreferredencoding(False) is called to get the
current locale encoding. (For reading and writing raw bytes use binary
mode and leave encoding unspecified.) The available modes are:
========= ===============================================================
Character Meaning
--------- ---------------------------------------------------------------
'r' open for reading (default)
'w' open for writing, truncating the file first
'x' create a new file and open it for writing
'a' open for writing, appending to the end of the file if it exists
'b' binary mode
't' text mode (default)
'+' open a disk file for updating (reading and writing)
'U' universal newline mode (deprecated)
========= ===============================================================
The default mode is 'rt' (open for reading text). For binary random
access, the mode 'w+b' opens and truncates the file to 0 bytes, while
'r+b' opens the file without truncation. The 'x' mode implies 'w' and
raises an `FileExistsError` if the file already exists.
Python distinguishes between files opened in binary and text modes,
even when the underlying operating system doesn't. Files opened in
binary mode (appending 'b' to the mode argument) return contents as
bytes objects without any decoding. In text mode (the default, or when
't' is appended to the mode argument), the contents of the file are
returned as strings, the bytes having been first decoded using a
platform-dependent encoding or using the specified encoding if given.
'U' mode is deprecated and will raise an exception in future versions
of Python. It has no effect in Python 3. Use newline to control
universal newlines mode.
buffering is an optional integer used to set the buffering policy.
Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
line buffering (only usable in text mode), and an integer > 1 to indicate
the size of a fixed-size chunk buffer. When no buffering argument is
given, the default buffering policy works as follows:
* Binary files are buffered in fixed-size chunks; the size of the buffer
is chosen using a heuristic trying to determine the underlying device's
"block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
On many systems, the buffer will typically be 4096 or 8192 bytes long.
* "Interactive" text files (files for which isatty() returns True)
use line buffering. Other text files use the policy described above
for binary files.
encoding is the name of the encoding used to decode or encode the
file. This should only be used in text mode. The default encoding is
platform dependent, but any encoding supported by Python can be
passed. See the codecs module for the list of supported encodings.
errors is an optional string that specifies how encoding errors are to
be handled---this argument should not be used in binary mode. Pass
'strict' to raise a ValueError exception if there is an encoding error
(the default of None has the same effect), or pass 'ignore' to ignore
errors. (Note that ignoring encoding errors can lead to data loss.)
See the documentation for codecs.register or run 'help(codecs.Codec)'
for a list of the permitted encoding error strings.
newline controls how universal newlines works (it only applies to text
mode). It can be None, '', '\n', '\r', and '\r\n'. It works as
follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '' or '\n', no translation takes place. If newline is any
of the other legal values, any '\n' characters written are translated
to the given string.
If closefd is False, the underlying file descriptor will be kept open
when the file is closed. This does not work when a file name is given
and must be True in that case.
A custom opener can be used by passing a callable as *opener*. The
underlying file descriptor for the file object is then obtained by
calling *opener* with (*file*, *flags*). *opener* must return an open
file descriptor (passing os.open as *opener* results in functionality
similar to passing None).
open() returns a file object whose type depends on the mode, and
through which the standard file operations such as reading and writing
are performed. When open() is used to open a file in a text mode ('w',
'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
a file in a binary mode, the returned class varies: in read binary
mode, it returns a BufferedReader; in write binary and append binary
modes, it returns a BufferedWriter, and in read/write mode, it returns
a BufferedRandom.
It is also possible to use a string or bytearray as a file for both
reading and writing. For strings StringIO can be used like a file
opened in a text mode, and for bytes a BytesIO can be used like a file
opened in a binary mode.
"""
return file('/dev/null')
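# Illustrative use of open() -- kept as comments because the bodies in this
# generated stub are placeholders, so the calls below refer to the real
# builtin open(), not to the stub definition above. The file name and its
# contents are made up for the example.
#
#     with open('notes.txt', 'w', encoding='utf-8', newline='\n') as fh:
#         fh.write('first line\nsecond line\n')    # text mode -> TextIOWrapper
#     with open('notes.txt', 'rb') as fh:
#         data = fh.read()                         # binary mode -> bytes
#     with open('notes.txt', 'r', encoding='utf-8') as fh:
#         lines = fh.readlines()                   # ['first line\n', 'second line\n']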
# classes
class BlockingIOError(OSError):
""" I/O operation would block. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
class _IOBase(object):
"""
The abstract base class for all I/O classes, acting on streams of
bytes. There is no public constructor.
This class provides dummy implementations for many methods that
derived classes can override selectively; the default implementations
represent a file that cannot be read, written or seeked.
Even though IOBase does not declare read, readinto, or write because
their signatures will vary, implementations and clients should
consider those methods part of the interface. Also, implementations
may raise UnsupportedOperation when operations they do not support are
called.
The basic type used for binary data read from or written to a file is
bytes. Other bytes-like objects are accepted as method arguments too.
In some cases (such as readinto), a writable object is required. Text
I/O classes work with str data.
Note that calling any method (except additional calls to close(),
which are ignored) on a closed stream should raise a ValueError.
IOBase (and its subclasses) support the iterator protocol, meaning
that an IOBase object can be iterated over yielding the lines in a
stream.
IOBase also supports the :keyword:`with` statement. In this example,
fp is closed after the suite of the with statement is complete:
with open('spam.txt', 'r') as fp:
fp.write('Spam and eggs!')
"""
def close(self, *args, **kwargs): # real signature unknown
"""
Flush and close the IO object.
This method has no effect if the file is already closed.
"""
pass
def fileno(self, *args, **kwargs): # real signature unknown
"""
Returns underlying file descriptor if one exists.
OSError is raised if the IO object does not use a file descriptor.
"""
pass
def flush(self, *args, **kwargs): # real signature unknown
"""
Flush write buffers, if applicable.
This is not implemented for read-only and non-blocking streams.
"""
pass
def isatty(self, *args, **kwargs): # real signature unknown
"""
Return whether this is an 'interactive' stream.
Return False if it can't be determined.
"""
pass
def readable(self, *args, **kwargs): # real signature unknown
"""
Return whether object was opened for reading.
If False, read() will raise OSError.
"""
pass
def readline(self, *args, **kwargs): # real signature unknown
"""
Read and return a line from the stream.
If size is specified, at most size bytes will be read.
The line terminator is always b'\n' for binary files; for text
files, the newlines argument to open can be used to select the line
terminator(s) recognized.
"""
pass
def readlines(self, *args, **kwargs): # real signature unknown
"""
Return a list of lines from the stream.
hint can be specified to control the number of lines read: no more
lines will be read if the total size (in bytes/characters) of all
lines so far exceeds hint.
"""
pass
def seek(self, *args, **kwargs): # real signature unknown
"""
Change stream position.
Change the stream position to the given byte offset. The offset is
interpreted relative to the position indicated by whence. Values
for whence are:
* 0 -- start of stream (the default); offset should be zero or positive
* 1 -- current stream position; offset may be negative
* 2 -- end of stream; offset is usually negative
Return the new absolute position.
"""
pass
def seekable(self, *args, **kwargs): # real signature unknown
"""
Return whether object supports random access.
If False, seek(), tell() and truncate() will raise OSError.
This method may need to do a test seek().
"""
pass
def tell(self, *args, **kwargs): # real signature unknown
""" Return current stream position. """
pass
def truncate(self, *args, **kwargs): # real signature unknown
"""
Truncate file to size bytes.
File pointer is left unchanged. Size defaults to the current IO
position as reported by tell(). Returns the new size.
"""
pass
def writable(self, *args, **kwargs): # real signature unknown
"""
Return whether object was opened for writing.
If False, write() will raise OSError.
"""
pass
def writelines(self, *args, **kwargs): # real signature unknown
pass
def _checkClosed(self, *args, **kwargs): # real signature unknown
pass
def _checkReadable(self, *args, **kwargs): # real signature unknown
pass
def _checkSeekable(self, *args, **kwargs): # real signature unknown
pass
def _checkWritable(self, *args, **kwargs): # real signature unknown
pass
def __del__(self, *args, **kwargs): # real signature unknown
pass
def __enter__(self, *args, **kwargs): # real signature unknown
pass
def __exit__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __iter__(self, *args, **kwargs): # real signature unknown
""" Implement iter(self). """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __next__(self, *args, **kwargs): # real signature unknown
""" Implement next(self). """
pass
closed = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__dict__ = None # (!) real value is ''
class _BufferedIOBase(_IOBase):
"""
Base class for buffered IO objects.
The main difference with RawIOBase is that the read() method
supports omitting the size argument, and does not have a default
implementation that defers to readinto().
In addition, read(), readinto() and write() may raise
BlockingIOError if the underlying raw stream is in non-blocking
mode and not ready; unlike their raw counterparts, they will never
return None.
A typical implementation should not inherit from a RawIOBase
implementation, but wrap one.
"""
def detach(self, *args, **kwargs): # real signature unknown
"""
Disconnect this buffer from its underlying raw stream and return it.
After the raw stream has been detached, the buffer is in an unusable
state.
"""
pass
def read(self, *args, **kwargs): # real signature unknown
"""
Read and return up to n bytes.
If the argument is omitted, None, or negative, reads and
returns all data until EOF.
If the argument is positive, and the underlying raw stream is
not 'interactive', multiple raw reads may be issued to satisfy
the byte count (unless EOF is reached first). But for
interactive raw streams (as well as sockets and pipes), at most
one raw read will be issued, and a short result does not imply
that EOF is imminent.
Returns an empty bytes object on EOF.
Returns None if the underlying raw stream was open in non-blocking
mode and no data is available at the moment.
"""
pass
def read1(self, *args, **kwargs): # real signature unknown
"""
Read and return up to n bytes, with at most one read() call
to the underlying raw stream. A short result does not imply
that EOF is imminent.
Returns an empty bytes object on EOF.
"""
pass
def readinto(self, *args, **kwargs): # real signature unknown
pass
def readinto1(self, *args, **kwargs): # real signature unknown
pass
def write(self, *args, **kwargs): # real signature unknown
"""
Write the given buffer to the IO stream.
Returns the number of bytes written, which is always the length of b
in bytes.
Raises BlockingIOError if the buffer is full and the
underlying raw stream cannot accept more data at the moment.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
class BufferedRandom(_BufferedIOBase):
"""
A buffered interface to random access streams.
The constructor creates a reader and writer for a seekable stream,
raw, given in the first argument. If the buffer_size is omitted it
defaults to DEFAULT_BUFFER_SIZE.
"""
def close(self, *args, **kwargs): # real signature unknown
pass
def detach(self, *args, **kwargs): # real signature unknown
pass
def fileno(self, *args, **kwargs): # real signature unknown
pass
def flush(self, *args, **kwargs): # real signature unknown
pass
def isatty(self, *args, **kwargs): # real signature unknown
pass
def peek(self, *args, **kwargs): # real signature unknown
pass
def read(self, *args, **kwargs): # real signature unknown
pass
def read1(self, *args, **kwargs): # real signature unknown
pass
def readable(self, *args, **kwargs): # real signature unknown
pass
def readinto(self, *args, **kwargs): # real signature unknown
pass
def readinto1(self, *args, **kwargs): # real signature unknown
pass
def readline(self, *args, **kwargs): # real signature unknown
pass
def seek(self, *args, **kwargs): # real signature unknown
pass
def seekable(self, *args, **kwargs): # real signature unknown
pass
def tell(self, *args, **kwargs): # real signature unknown
pass
def truncate(self, *args, **kwargs): # real signature unknown
pass
def writable(self, *args, **kwargs): # real signature unknown
pass
def write(self, *args, **kwargs): # real signature unknown
pass
def _dealloc_warn(self, *args, **kwargs): # real signature unknown
pass
def __getstate__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __next__(self, *args, **kwargs): # real signature unknown
""" Implement next(self). """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
pass
closed = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
mode = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
raw = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_finalizing = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class BufferedReader(_BufferedIOBase):
""" Create a new buffered reader using the given readable raw IO object. """
def close(self, *args, **kwargs): # real signature unknown
pass
def detach(self, *args, **kwargs): # real signature unknown
pass
def fileno(self, *args, **kwargs): # real signature unknown
pass
def flush(self, *args, **kwargs): # real signature unknown
pass
def isatty(self, *args, **kwargs): # real signature unknown
pass
def peek(self, *args, **kwargs): # real signature unknown
pass
def read(self, *args, **kwargs): # real signature unknown
pass
def read1(self, *args, **kwargs): # real signature unknown
pass
def readable(self, *args, **kwargs): # real signature unknown
pass
def readinto(self, *args, **kwargs): # real signature unknown
pass
def readinto1(self, *args, **kwargs): # real signature unknown
pass
def readline(self, *args, **kwargs): # real signature unknown
pass
def seek(self, *args, **kwargs): # real signature unknown
pass
def seekable(self, *args, **kwargs): # real signature unknown
pass
def tell(self, *args, **kwargs): # real signature unknown
pass
def truncate(self, *args, **kwargs): # real signature unknown
pass
def _dealloc_warn(self, *args, **kwargs): # real signature unknown
pass
def __getstate__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __next__(self, *args, **kwargs): # real signature unknown
""" Implement next(self). """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
pass
closed = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
mode = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
raw = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_finalizing = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class BufferedRWPair(_BufferedIOBase):
"""
A buffered reader and writer object together.
A buffered reader object and buffered writer object put together to
form a sequential IO object that can read and write. This is typically
used with a socket or two-way pipe.
reader and writer are RawIOBase objects that are readable and
writeable respectively. If the buffer_size is omitted it defaults to
DEFAULT_BUFFER_SIZE.
"""
def close(self, *args, **kwargs): # real signature unknown
pass
def flush(self, *args, **kwargs): # real signature unknown
pass
def isatty(self, *args, **kwargs): # real signature unknown
pass
def peek(self, *args, **kwargs): # real signature unknown
pass
def read(self, *args, **kwargs): # real signature unknown
pass
def read1(self, *args, **kwargs): # real signature unknown
pass
def readable(self, *args, **kwargs): # real signature unknown
pass
def readinto(self, *args, **kwargs): # real signature unknown
pass
def readinto1(self, *args, **kwargs): # real signature unknown
pass
def writable(self, *args, **kwargs): # real signature unknown
pass
def write(self, *args, **kwargs): # real signature unknown
pass
def __getstate__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
closed = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class BufferedWriter(_BufferedIOBase):
"""
A buffer for a writeable sequential RawIO object.
The constructor creates a BufferedWriter for the given writeable raw
stream. If the buffer_size is not given, it defaults to
DEFAULT_BUFFER_SIZE.
"""
def close(self, *args, **kwargs): # real signature unknown
pass
def detach(self, *args, **kwargs): # real signature unknown
pass
def fileno(self, *args, **kwargs): # real signature unknown
pass
def flush(self, *args, **kwargs): # real signature unknown
pass
def isatty(self, *args, **kwargs): # real signature unknown
pass
def seek(self, *args, **kwargs): # real signature unknown
pass
def seekable(self, *args, **kwargs): # real signature unknown
pass
def tell(self, *args, **kwargs): # real signature unknown
pass
def truncate(self, *args, **kwargs): # real signature unknown
pass
def writable(self, *args, **kwargs): # real signature unknown
pass
def write(self, *args, **kwargs): # real signature unknown
pass
def _dealloc_warn(self, *args, **kwargs): # real signature unknown
pass
def __getstate__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
pass
closed = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
mode = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
raw = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_finalizing = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class BytesIO(_BufferedIOBase):
""" Buffered I/O implementation using an in-memory bytes buffer. """
def close(self, *args, **kwargs): # real signature unknown
""" Disable all I/O operations. """
pass
def flush(self, *args, **kwargs): # real signature unknown
""" Does nothing. """
pass
def getbuffer(self, *args, **kwargs): # real signature unknown
""" Get a read-write view over the contents of the BytesIO object. """
pass
def getvalue(self, *args, **kwargs): # real signature unknown
""" Retrieve the entire contents of the BytesIO object. """
pass
def isatty(self, *args, **kwargs): # real signature unknown
"""
Always returns False.
BytesIO objects are not connected to a TTY-like device.
"""
pass
def read(self, *args, **kwargs): # real signature unknown
"""
Read at most size bytes, returned as a bytes object.
If the size argument is negative, read until EOF is reached.
Return an empty bytes object at EOF.
"""
pass
def read1(self, *args, **kwargs): # real signature unknown
"""
Read at most size bytes, returned as a bytes object.
If the size argument is negative or omitted, read until EOF is reached.
Return an empty bytes object at EOF.
"""
pass
def readable(self, *args, **kwargs): # real signature unknown
""" Returns True if the IO object can be read. """
pass
def readinto(self, *args, **kwargs): # real signature unknown
"""
Read bytes into buffer.
Returns number of bytes read (0 for EOF), or None if the object
is set not to block and has no data to read.
"""
pass
def readline(self, *args, **kwargs): # real signature unknown
"""
Next line from the file, as a bytes object.
Retain newline. A non-negative size argument limits the maximum
number of bytes to return (an incomplete line may be returned then).
Return an empty bytes object at EOF.
"""
pass
def readlines(self, *args, **kwargs): # real signature unknown
"""
List of bytes objects, each a line from the file.
Call readline() repeatedly and return a list of the lines so read.
The optional size argument, if given, is an approximate bound on the
total number of bytes in the lines returned.
"""
pass
def seek(self, *args, **kwargs): # real signature unknown
"""
Change stream position.
Seek to byte offset pos relative to position indicated by whence:
0 Start of stream (the default). pos should be >= 0;
1 Current position - pos may be negative;
2 End of stream - pos usually negative.
Returns the new absolute position.
"""
pass
def seekable(self, *args, **kwargs): # real signature unknown
""" Returns True if the IO object can be seeked. """
pass
def tell(self, *args, **kwargs): # real signature unknown
""" Current file position, an integer. """
pass
def truncate(self, *args, **kwargs): # real signature unknown
"""
Truncate the file to at most size bytes.
Size defaults to the current file position, as returned by tell().
The current file position is unchanged. Returns the new size.
"""
pass
def writable(self, *args, **kwargs): # real signature unknown
""" Returns True if the IO object can be written. """
pass
def write(self, *args, **kwargs): # real signature unknown
"""
Write bytes to file.
Return the number of bytes written.
"""
pass
def writelines(self, *args, **kwargs): # real signature unknown
"""
Write lines to the file.
Note that newlines are not added. lines can be any iterable object
producing bytes-like objects. This is equivalent to calling write() for
each element.
"""
pass
def __getstate__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __iter__(self, *args, **kwargs): # real signature unknown
""" Implement iter(self). """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __next__(self, *args, **kwargs): # real signature unknown
""" Implement next(self). """
pass
def __setstate__(self, *args, **kwargs): # real signature unknown
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
pass
closed = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""True if the file is closed."""
class _RawIOBase(_IOBase):
""" Base class for raw binary I/O. """
def read(self, *args, **kwargs): # real signature unknown
pass
def readall(self, *args, **kwargs): # real signature unknown
""" Read until EOF, using multiple read() call. """
pass
def readinto(self, *args, **kwargs): # real signature unknown
pass
def write(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
class FileIO(_RawIOBase):
"""
Open a file.
The mode can be 'r' (default), 'w', 'x' or 'a' for reading,
writing, exclusive creation or appending. The file will be created if it
doesn't exist when opened for writing or appending; it will be truncated
when opened for writing. A FileExistsError will be raised if it already
exists when opened for creating. Opening a file for creating implies
    writing so this mode behaves in a similar way to 'w'. Add a '+' to the mode
to allow simultaneous reading and writing. A custom opener can be used by
passing a callable as *opener*. The underlying file descriptor for the file
object is then obtained by calling opener with (*name*, *flags*).
*opener* must return an open file descriptor (passing os.open as *opener*
results in functionality similar to passing None).
"""
def close(self): # real signature unknown; restored from __doc__
"""
Close the file.
A closed file cannot be used for further I/O operations. close() may be
called more than once without error.
"""
pass
def fileno(self, *args, **kwargs): # real signature unknown
""" Return the underlying file descriptor (an integer). """
pass
def isatty(self, *args, **kwargs): # real signature unknown
""" True if the file is connected to a TTY device. """
pass
def read(self, size=-1): # known case of _io.FileIO.read
"""
Read at most size bytes, returned as bytes.
Only makes one system call, so less data may be returned than requested.
In non-blocking mode, returns None if no data is available.
Return an empty bytes object at EOF.
"""
return ""
def readable(self, *args, **kwargs): # real signature unknown
""" True if file was opened in a read mode. """
pass
def readall(self, *args, **kwargs): # real signature unknown
"""
Read all data from the file, returned as bytes.
In non-blocking mode, returns as much as is immediately available,
or None if no data is available. Return an empty bytes object at EOF.
"""
pass
def readinto(self): # real signature unknown; restored from __doc__
""" Same as RawIOBase.readinto(). """
pass
def seek(self, *args, **kwargs): # real signature unknown
"""
Move to new file position and return the file position.
Argument offset is a byte count. Optional argument whence defaults to
SEEK_SET or 0 (offset from start of file, offset should be >= 0); other values
are SEEK_CUR or 1 (move relative to current position, positive or negative),
and SEEK_END or 2 (move relative to end of file, usually negative, although
many platforms allow seeking beyond the end of a file).
Note that not all file objects are seekable.
"""
pass
def seekable(self, *args, **kwargs): # real signature unknown
""" True if file supports random-access. """
pass
def tell(self, *args, **kwargs): # real signature unknown
"""
Current file position.
Can raise OSError for non seekable files.
"""
pass
def truncate(self, *args, **kwargs): # real signature unknown
"""
Truncate the file to at most size bytes and return the truncated size.
Size defaults to the current file position, as returned by tell().
The current file position is changed to the value of size.
"""
pass
def writable(self, *args, **kwargs): # real signature unknown
""" True if file was opened in a write mode. """
pass
def write(self, *args, **kwargs): # real signature unknown
"""
Write buffer b to file, return number of bytes written.
Only makes one system call, so not all of the data may be written.
The number of bytes actually written is returned. In non-blocking mode,
returns None if the write would block.
"""
pass
def _dealloc_warn(self, *args, **kwargs): # real signature unknown
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __getstate__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
closed = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""True if the file is closed"""
closefd = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""True if the file descriptor will be closed by close()."""
mode = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""String giving the file mode"""
_blksize = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_finalizing = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class IncrementalNewlineDecoder(object):
"""
Codec used when reading a file in universal newlines mode.
It wraps another incremental decoder, translating \r\n and \r into \n.
It also records the types of newlines encountered. When used with
translate=False, it ensures that the newline sequence is returned in
one piece. When used with decoder=None, it expects unicode strings as
decode input and translates newlines without first invoking an external
decoder.
"""
def decode(self, *args, **kwargs): # real signature unknown
pass
def getstate(self, *args, **kwargs): # real signature unknown
pass
def reset(self, *args, **kwargs): # real signature unknown
pass
def setstate(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
newlines = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class _TextIOBase(_IOBase):
"""
Base class for text I/O.
This class provides a character and line based interface to stream
I/O. There is no readinto method because Python's character strings
are immutable. There is no public constructor.
"""
def detach(self, *args, **kwargs): # real signature unknown
"""
Separate the underlying buffer from the TextIOBase and return it.
After the underlying buffer has been detached, the TextIO is in an
unusable state.
"""
pass
def read(self, *args, **kwargs): # real signature unknown
"""
Read at most n characters from stream.
Read from underlying buffer until we have n characters or we hit EOF.
If n is negative or omitted, read until EOF.
"""
pass
def readline(self, *args, **kwargs): # real signature unknown
"""
Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
pass
def write(self, *args, **kwargs): # real signature unknown
"""
Write string to stream.
Returns the number of characters written (which is always equal to
the length of the string).
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
encoding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Encoding of the text stream.
Subclasses should override.
"""
errors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The error setting of the decoder or encoder.
Subclasses should override.
"""
newlines = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Line endings translated so far.
Only line endings translated during reading are considered.
Subclasses should override.
"""
class StringIO(_TextIOBase):
"""
Text I/O implementation using an in-memory buffer.
The initial_value argument sets the value of object. The newline
argument is like the one of TextIOWrapper's constructor.
"""
def close(self, *args, **kwargs): # real signature unknown
"""
Close the IO object.
Attempting any further operation after the object is closed
will raise a ValueError.
This method has no effect if the file is already closed.
"""
pass
def getvalue(self, *args, **kwargs): # real signature unknown
""" Retrieve the entire contents of the object. """
pass
def read(self, *args, **kwargs): # real signature unknown
"""
Read at most size characters, returned as a string.
If the argument is negative or omitted, read until EOF
is reached. Return an empty string at EOF.
"""
pass
def readable(self, *args, **kwargs): # real signature unknown
""" Returns True if the IO object can be read. """
pass
def readline(self, *args, **kwargs): # real signature unknown
"""
Read until newline or EOF.
Returns an empty string if EOF is hit immediately.
"""
pass
def seek(self, *args, **kwargs): # real signature unknown
"""
Change stream position.
Seek to character offset pos relative to position indicated by whence:
0 Start of stream (the default). pos should be >= 0;
1 Current position - pos must be 0;
2 End of stream - pos must be 0.
Returns the new absolute position.
"""
pass
def seekable(self, *args, **kwargs): # real signature unknown
""" Returns True if the IO object can be seeked. """
pass
def tell(self, *args, **kwargs): # real signature unknown
""" Tell the current file position. """
pass
def truncate(self, *args, **kwargs): # real signature unknown
"""
Truncate size to pos.
The pos argument defaults to the current file position, as
returned by tell(). The current file position is unchanged.
Returns the new absolute position.
"""
pass
def writable(self, *args, **kwargs): # real signature unknown
""" Returns True if the IO object can be written. """
pass
def write(self, *args, **kwargs): # real signature unknown
"""
Write string to file.
Returns the number of characters written, which is always equal to
the length of the string.
"""
pass
def __getstate__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __next__(self, *args, **kwargs): # real signature unknown
""" Implement next(self). """
pass
def __setstate__(self, *args, **kwargs): # real signature unknown
pass
closed = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
line_buffering = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
newlines = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
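    # Illustrative use of the real io.StringIO (comments only; stub bodies are
    # placeholders). Passing newline='' keeps '\r\n' endings untranslated:
    #
    #     s = StringIO('a\r\nb\r\n', newline='')
    #     s.readline()       # 'a\r\n'
    #     out = StringIO()
    #     out.write('spam')  # returns 4
    #     out.getvalue()     # 'spam'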
class TextIOWrapper(_TextIOBase):
"""
Character and line based layer over a BufferedIOBase object, buffer.
encoding gives the name of the encoding that the stream will be
decoded or encoded with. It defaults to locale.getpreferredencoding(False).
errors determines the strictness of encoding and decoding (see
help(codecs.Codec) or the documentation for codecs.register) and
defaults to "strict".
newline controls how line endings are handled. It can be None, '',
'\n', '\r', and '\r\n'. It works as follows:
* On input, if newline is None, universal newlines mode is
enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
these are translated into '\n' before being returned to the
caller. If it is '', universal newline mode is enabled, but line
endings are returned to the caller untranslated. If it has any of
the other legal values, input lines are only terminated by the given
string, and the line ending is returned to the caller untranslated.
* On output, if newline is None, any '\n' characters written are
translated to the system default line separator, os.linesep. If
newline is '' or '\n', no translation takes place. If newline is any
of the other legal values, any '\n' characters written are translated
to the given string.
If line_buffering is True, a call to flush is implied when a call to
write contains a newline character.
"""
def close(self, *args, **kwargs): # real signature unknown
pass
def detach(self, *args, **kwargs): # real signature unknown
pass
def fileno(self, *args, **kwargs): # real signature unknown
pass
def flush(self, *args, **kwargs): # real signature unknown
pass
def isatty(self, *args, **kwargs): # real signature unknown
pass
def read(self, *args, **kwargs): # real signature unknown
pass
def readable(self, *args, **kwargs): # real signature unknown
pass
def readline(self, *args, **kwargs): # real signature unknown
pass
def reconfigure(self, *args, **kwargs): # real signature unknown
"""
Reconfigure the text stream with new parameters.
This also does an implicit stream flush.
"""
pass
def seek(self, *args, **kwargs): # real signature unknown
pass
def seekable(self, *args, **kwargs): # real signature unknown
pass
def tell(self, *args, **kwargs): # real signature unknown
pass
def truncate(self, *args, **kwargs): # real signature unknown
pass
def writable(self, *args, **kwargs): # real signature unknown
pass
def write(self, *args, **kwargs): # real signature unknown
pass
def __getstate__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __next__(self, *args, **kwargs): # real signature unknown
""" Implement next(self). """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
buffer = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
closed = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
encoding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
errors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
line_buffering = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
newlines = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
write_through = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_CHUNK_SIZE = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_finalizing = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
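    # Illustrative wrapping of a byte stream with the real io.TextIOWrapper
    # (comments only; stub bodies are placeholders). With newline=None the
    # '\r\n' endings are translated to '\n' and the bytes are decoded:
    #
    #     raw = BytesIO(b'hi\r\nthere\r\n')
    #     text = TextIOWrapper(raw, encoding='utf-8', newline=None)
    #     text.readline()    # 'hi\n'
    #     text.detach()      # returns the underlying BytesIO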
class UnsupportedOperation(OSError, ValueError):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
class _WindowsConsoleIO(_RawIOBase):
"""
Open a console buffer by file descriptor.
The mode can be 'rb' (default), or 'wb' for reading or writing bytes. All
other mode characters will be ignored. Mode 'b' will be assumed if it is
omitted. The *opener* parameter is always ignored.
"""
def close(self): # real signature unknown; restored from __doc__
"""
Close the handle.
A closed handle cannot be used for further I/O operations. close() may be
called more than once without error.
"""
pass
def fileno(self, *args, **kwargs): # real signature unknown
"""
Return the underlying file descriptor (an integer).
fileno is only set when a file descriptor is used to open
one of the standard streams.
"""
pass
def isatty(self, *args, **kwargs): # real signature unknown
""" Always True. """
pass
def read(self, *args, **kwargs): # real signature unknown
"""
Read at most size bytes, returned as bytes.
Only makes one system call when size is a positive integer,
so less data may be returned than requested.
Return an empty bytes object at EOF.
"""
pass
def readable(self, *args, **kwargs): # real signature unknown
""" True if console is an input buffer. """
pass
def readall(self, *args, **kwargs): # real signature unknown
"""
Read all data from the console, returned as bytes.
Return an empty bytes object at EOF.
"""
pass
def readinto(self): # real signature unknown; restored from __doc__
""" Same as RawIOBase.readinto(). """
pass
def writable(self, *args, **kwargs): # real signature unknown
""" True if console is an output buffer. """
pass
def write(self, *args, **kwargs): # real signature unknown
"""
Write buffer b to file, return number of bytes written.
Only makes one system call, so not all of the data may be written.
The number of bytes actually written is returned.
"""
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __getstate__(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
closed = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""True if the file is closed"""
closefd = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""True if the file descriptor will be closed by close()."""
mode = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""String giving the file mode"""
_blksize = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_finalizing = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def create_module(cls, *args, **kwargs): # real signature unknown
""" Create a built-in module """
pass
@classmethod
def exec_module(cls, *args, **kwargs): # real signature unknown
""" Exec a built-in module """
pass
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
"""
Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is ''
# variables with complex values
__spec__ = None # (!) real value is ''
| 34.379182 | 104 | 0.633434 |
cd2844bb361ee1f8720c29542d1e7d9b1595b420 | 4,824 | py | Python | astroML/plotting/tools.py | arjunsavel/astroML | 361cadc56360ca35c760405a341e35ecab6dd585 | [
"BSD-2-Clause"
] | 1 | 2020-10-28T14:45:48.000Z | 2020-10-28T14:45:48.000Z | astroML/plotting/tools.py | awesomemachinelearning/astroML | d378ca41565d1aa39997191d13d46d09d104ff1d | [
"BSD-2-Clause"
] | 1 | 2018-05-18T19:32:15.000Z | 2018-05-18T19:32:15.000Z | astroML/plotting/tools.py | DinoBektesevic/astroML | b4e699bf45a65e233b40d60323c05eafa1d4955e | [
"BSD-2-Clause"
] | 1 | 2019-06-13T00:19:11.000Z | 2019-06-13T00:19:11.000Z | import numpy as np
from io import BytesIO
from matplotlib import pyplot as plt
from scipy import interpolate
from matplotlib import image
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.transforms import Bbox
from matplotlib.patches import Ellipse
def devectorize_axes(ax=None, dpi=None, transparent=True):
"""Convert axes contents to a png.
This is useful when plotting many points, as the size of the saved file
can become very large otherwise.
Parameters
----------
ax : Axes instance (optional)
Axes to de-vectorize. If None, this uses the current active axes
(plt.gca())
dpi: int (optional)
resolution of the png image. If not specified, the default from
'savefig.dpi' in rcParams will be used
transparent : bool (optional)
if True (default) then the PNG will be made transparent
Returns
-------
ax : Axes instance
the in-place modified Axes instance
Examples
--------
The code can be used in the following way::
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from astroML.plotting.tools import devectorize_axes
>>> fig, ax = plt.subplots()
>>> x, y = np.random.random((2, 10000))
>>> ax.scatter(x, y) # doctest: +IGNORE_OUTPUT
>>> devectorize_axes(ax) # doctest: +IGNORE_OUTPUT
The resulting figure will be much smaller than the vectorized version.
"""
if ax is None:
ax = plt.gca()
fig = ax.figure
axlim = ax.axis()
# setup: make all visible spines (axes & ticks) & text invisible
# we need to set these back later, so we save their current state
_sp = {}
_txt_vis = [t.get_visible() for t in ax.texts]
for k in ax.spines:
_sp[k] = ax.spines[k].get_visible()
ax.spines[k].set_visible(False)
for t in ax.texts:
t.set_visible(False)
_xax = ax.xaxis.get_visible()
_yax = ax.yaxis.get_visible()
_patch = ax.patch.get_visible()
ax.patch.set_visible(False)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
# convert canvas to PNG
extents = ax.bbox.extents / fig.dpi
output = BytesIO()
plt.savefig(output, format='png', dpi=dpi,
transparent=transparent,
bbox_inches=Bbox([extents[:2], extents[2:]]))
output.seek(0)
im = image.imread(output)
# clear everything on axis (but not text)
ax.lines = []
ax.patches = []
ax.tables = []
ax.artists = []
ax.images = []
ax.collections = []
# Show the image
ax.imshow(im, extent=axlim, aspect='auto', interpolation='nearest')
# restore all the spines & text
for k in ax.spines:
ax.spines[k].set_visible(_sp[k])
for t, v in zip(ax.texts, _txt_vis):
t.set_visible(v)
ax.patch.set_visible(_patch)
ax.xaxis.set_visible(_xax)
ax.yaxis.set_visible(_yax)
if plt.isinteractive():
plt.draw()
return ax
def discretize_cmap(cmap, N):
"""Return a discrete colormap from the continuous colormap cmap.
Parameters
----------
cmap : colormap instance, eg. cm.jet.
N : Number of colors.
Returns
-------
cmap_d : discretized colormap
Example
-------
>>> from matplotlib import cm
>>> djet = discretize_cmap(cm.jet, 5)
"""
cdict = cmap._segmentdata.copy()
# N colors
colors_i = np.linspace(0, 1., N)
# N+1 indices
indices = np.linspace(0, 1., N + 1)
for key in ('red', 'green', 'blue'):
# Find the N colors
D = np.array(cdict[key])
I = interpolate.interp1d(D[:, 0], D[:, 1])
colors = I(colors_i)
# Place these colors at the correct indices.
A = np.zeros((N + 1, 3), float)
A[:, 0] = indices
A[1:, 1] = colors
A[:-1, 2] = colors
# Create a tuple for the dictionary.
L = []
for l in A:
L.append(tuple(l))
cdict[key] = tuple(L)
# Return colormap object.
return LinearSegmentedColormap('colormap', cdict, 1024)
def draw_ellipse(mu, C, scales=[1, 2, 3], ax=None, **kwargs):
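    """Draw error ellipses for a 2D Gaussian with mean mu and covariance C.

    The principal axes and rotation angle of the ellipses are derived from
    the covariance matrix, and one Ellipse patch is added to the axes per
    entry in ``scales`` (the 1-, 2- and 3-sigma contours by default).

    Parameters
    ----------
    mu : array-like, shape (2,)
        Center of the ellipses.
    C : array-like, shape (2, 2)
        Covariance matrix.
    scales : list of numbers (optional)
        Sigma multiples at which to draw the ellipses.
    ax : Axes instance (optional)
        Axes to draw into. If None, the current active axes (plt.gca())
        is used.
    **kwargs :
        Additional keyword arguments are passed to matplotlib's Ellipse patch.
    """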
if ax is None:
ax = plt.gca()
# find principal components and rotation angle of ellipse
sigma_x2 = C[0, 0]
sigma_y2 = C[1, 1]
sigma_xy = C[0, 1]
alpha = 0.5 * np.arctan2(2 * sigma_xy,
(sigma_x2 - sigma_y2))
tmp1 = 0.5 * (sigma_x2 + sigma_y2)
tmp2 = np.sqrt(0.25 * (sigma_x2 - sigma_y2) ** 2 + sigma_xy ** 2)
sigma1 = np.sqrt(tmp1 + tmp2)
sigma2 = np.sqrt(tmp1 - tmp2)
for scale in scales:
ax.add_patch(Ellipse((mu[0], mu[1]),
2 * scale * sigma1, 2 * scale * sigma2,
alpha * 180. / np.pi,
**kwargs))
| 28.376471 | 75 | 0.593284 |
b1fb062319ad2bf5755079d03df9894f8ec4d067 | 393 | py | Python | 11numoccur.py | arunkumarang/python | 1960e285dfe2ef54d2e3ab37584bfef8b24ecca9 | [
"Apache-2.0"
] | null | null | null | 11numoccur.py | arunkumarang/python | 1960e285dfe2ef54d2e3ab37584bfef8b24ecca9 | [
"Apache-2.0"
] | null | null | null | 11numoccur.py | arunkumarang/python | 1960e285dfe2ef54d2e3ab37584bfef8b24ecca9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import sys
import math
def main():
print('enter the line: ')
line = str(input())
words = line.split()
wordcnt = dict()
for word in words:
if word in wordcnt:
wordcnt[word] = wordcnt[word] + 1
else:
            wordcnt[word] = 1  # first occurrence of the word
print(wordcnt[word], end=' ')
if __name__ == '__main__':
main()
sys.exit(0)
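# Example run:
#
#     enter the line:
#     the cat and the hat
#     1 1 1 2 1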
| 15.72 | 45 | 0.531807 |
e546e25fa8e0d16be1363cd4de92b69a418a5c40 | 2,550 | py | Python | docs/doxygen/doxyxml/__init__.py | vt-gs/gr-vcc | 8f29e6448ad6d5e37789fadca96fa8c46edd8f7d | [
"MIT"
] | 1 | 2021-04-28T03:52:20.000Z | 2021-04-28T03:52:20.000Z | docs/doxygen/doxyxml/__init__.py | vt-gs/gr-vcc | 8f29e6448ad6d5e37789fadca96fa8c46edd8f7d | [
"MIT"
] | null | null | null | docs/doxygen/doxyxml/__init__.py | vt-gs/gr-vcc | 8f29e6448ad6d5e37789fadca96fa8c46edd8f7d | [
"MIT"
] | null | null | null | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file was generated by gr_modtool, a tool from the GNU Radio framework
# This file is a part of gr-vcc
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Python interface to contents of doxygen xml documentation.
Example use:
See the contents of the example folder for the C++ and
doxygen-generated xml used in this example.
>>> # Parse the doxygen docs.
>>> import os
>>> this_dir = os.path.dirname(globals()['__file__'])
>>> xml_path = this_dir + "/example/xml/"
>>> di = DoxyIndex(xml_path)
Get a list of all top-level objects.
>>> print([mem.name() for mem in di.members()])
[u'Aadvark', u'aadvarky_enough', u'main']
Get all functions.
>>> print([mem.name() for mem in di.in_category(DoxyFunction)])
[u'aadvarky_enough', u'main']
Check if an object is present.
>>> di.has_member(u'Aadvark')
True
>>> di.has_member(u'Fish')
False
Get an item by name and check its properties.
>>> aad = di.get_member(u'Aadvark')
>>> print(aad.brief_description)
Models the mammal Aadvark.
>>> print(aad.detailed_description)
Sadly the model is incomplete and cannot capture all aspects of an aadvark yet.
<BLANKLINE>
This line is uninformative and is only to test line breaks in the comments.
>>> [mem.name() for mem in aad.members()]
[u'aadvarkness', u'print', u'Aadvark', u'get_aadvarkness']
>>> aad.get_member(u'print').brief_description
u'Outputs the vital aadvark statistics.'
"""
from doxyindex import DoxyIndex, DoxyFunction, DoxyParam, DoxyClass, DoxyFile, DoxyNamespace, DoxyGroup, DoxyFriend, DoxyOther
def _test():
import os
this_dir = os.path.dirname(globals()['__file__'])
xml_path = this_dir + "/example/xml/"
di = DoxyIndex(xml_path)
# Get the Aadvark class
aad = di.get_member('Aadvark')
aad.brief_description
import doctest
return doctest.testmod()
if __name__ == "__main__":
_test()
| 30.357143 | 126 | 0.728627 |
5e5657655bed13b60c710a46eb4d7bdaeb6b3f37 | 6,791 | py | Python | script/json/BlueEgg/blueegg/model/auth.py | Zex/Starter | 40196fe2ef93301a889d40217b92355e51bf2f5d | [
"MIT"
] | null | null | null | script/json/BlueEgg/blueegg/model/auth.py | Zex/Starter | 40196fe2ef93301a889d40217b92355e51bf2f5d | [
"MIT"
] | null | null | null | script/json/BlueEgg/blueegg/model/auth.py | Zex/Starter | 40196fe2ef93301a889d40217b92355e51bf2f5d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Auth* related model.
This is where the models used by :mod:`repoze.who` and :mod:`repoze.what` are
defined.
It's perfectly fine to re-use this definition in the BlueEgg application,
though.
"""
import os
from datetime import datetime
import sys
try:
from hashlib import sha1
except ImportError:
sys.exit('ImportError: No module named hashlib\n'
'If you are on python2.4 this library is not part of python. '
'Please install it. Example: easy_install hashlib')
from sqlalchemy import Table, ForeignKey, Column
from sqlalchemy.types import Unicode, Integer, DateTime
from sqlalchemy.orm import relation, synonym
from blueegg.model import DeclarativeBase, metadata, DBSession
__all__ = ['User', 'Group', 'Permission']
#{ Association tables
# This is the association table for the many-to-many relationship between
# groups and permissions. This is required by repoze.what.
group_permission_table = Table('tg_group_permission', metadata,
Column('group_id', Integer, ForeignKey('tg_group.group_id',
onupdate="CASCADE", ondelete="CASCADE")),
Column('permission_id', Integer, ForeignKey('tg_permission.permission_id',
onupdate="CASCADE", ondelete="CASCADE"))
)
# This is the association table for the many-to-many relationship between
# groups and members - that is, the memberships. It's required by repoze.what.
user_group_table = Table('tg_user_group', metadata,
Column('user_id', Integer, ForeignKey('tg_user.user_id',
onupdate="CASCADE", ondelete="CASCADE")),
Column('group_id', Integer, ForeignKey('tg_group.group_id',
onupdate="CASCADE", ondelete="CASCADE"))
)
#{ The auth* model itself
class Group(DeclarativeBase):
"""
Group definition for :mod:`repoze.what`.
Only the ``group_name`` column is required by :mod:`repoze.what`.
"""
__tablename__ = 'tg_group'
#{ Columns
group_id = Column(Integer, autoincrement=True, primary_key=True)
group_name = Column(Unicode(16), unique=True, nullable=False)
display_name = Column(Unicode(255))
created = Column(DateTime, default=datetime.now)
#{ Relations
users = relation('User', secondary=user_group_table, backref='groups')
#{ Special methods
def __repr__(self):
return '<Group: name=%s>' % self.group_name
def __unicode__(self):
return self.group_name
#}
# The 'info' argument we're passing to the email_address and password columns
# contains metadata that Rum (http://python-rum.org/) can use to generate an
# admin interface for your models.
class User(DeclarativeBase):
"""
User definition.
This is the user definition used by :mod:`repoze.who`, which requires at
least the ``user_name`` column.
"""
__tablename__ = 'tg_user'
#{ Columns
user_id = Column(Integer, autoincrement=True, primary_key=True)
user_name = Column(Unicode(16), unique=True, nullable=False)
email_address = Column(Unicode(255), unique=True, nullable=False,
info={'rum': {'field':'Email'}})
display_name = Column(Unicode(255))
_password = Column('password', Unicode(80),
info={'rum': {'field':'Password'}})
created = Column(DateTime, default=datetime.now)
#{ Special methods
def __repr__(self):
return '<User: email="%s", display name="%s">' % (
self.email_address, self.display_name)
def __unicode__(self):
return self.display_name or self.user_name
#{ Getters and setters
@property
def permissions(self):
"""Return a set of strings for the permissions granted."""
perms = set()
for g in self.groups:
perms = perms | set(g.permissions)
return perms
@classmethod
def by_email_address(cls, email):
"""Return the user object whose email address is ``email``."""
return DBSession.query(cls).filter(cls.email_address==email).first()
@classmethod
def by_user_name(cls, username):
"""Return the user object whose user name is ``username``."""
return DBSession.query(cls).filter(cls.user_name==username).first()
def _set_password(self, password):
"""Hash ``password`` on the fly and store its hashed version."""
hashed_password = password
if isinstance(password, unicode):
password_8bit = password.encode('UTF-8')
else:
password_8bit = password
salt = sha1()
salt.update(os.urandom(60))
hash = sha1()
hash.update(password_8bit + salt.hexdigest())
hashed_password = salt.hexdigest() + hash.hexdigest()
        # Make sure the hashed password is a UTF-8 object at the end of the
        # process because SQLAlchemy _wants_ a unicode object for Unicode
        # columns
if not isinstance(hashed_password, unicode):
hashed_password = hashed_password.decode('UTF-8')
self._password = hashed_password
def _get_password(self):
"""Return the hashed version of the password."""
return self._password
password = synonym('_password', descriptor=property(_get_password,
_set_password))
#}
def validate_password(self, password):
"""
Check the password against existing credentials.
:param password: the password that was provided by the user to
try and authenticate. This is the clear text version that we will
need to match against the hashed one in the database.
:type password: unicode object.
:return: Whether the password is valid.
:rtype: bool
"""
hashed_pass = sha1()
hashed_pass.update(password + self.password[:40])
return self.password[40:] == hashed_pass.hexdigest()
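# --- Illustrative note (added; not part of the original model) ---
# The scheme above stores the salt and the hash concatenated: the first 40 hex
# characters are the salt digest, the last 40 are sha1(password + salt).  A
# minimal round-trip, assuming an in-memory User instance (no database session
# is needed just to hash and check a password):
#
#     user = User()
#     user.password = u'secret'          # hashed via _set_password
#     assert len(user.password) == 80    # 40-char salt digest + 40-char hash
#     assert user.validate_password(u'secret')
#     assert not user.validate_password(u'wrong')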
class Permission(DeclarativeBase):
"""
Permission definition for :mod:`repoze.what`.
Only the ``permission_name`` column is required by :mod:`repoze.what`.
"""
__tablename__ = 'tg_permission'
#{ Columns
permission_id = Column(Integer, autoincrement=True, primary_key=True)
permission_name = Column(Unicode(16), unique=True, nullable=False)
description = Column(Unicode(255))
#{ Relations
groups = relation(Group, secondary=group_permission_table,
backref='permissions')
#{ Special methods
def __repr__(self):
return '<Permission: name=%s>' % self.permission_name
def __unicode__(self):
return self.permission_name
#}
#}
| 29.145923 | 78 | 0.641143 |
a64387f9176b43bac205224261f0c212c4fb6a82 | 3,911 | py | Python | huaweicloud-sdk-servicestage/huaweicloudsdkservicestage/v2/model/list_hooks_request.py | githubmilesma/huaweicloud-sdk-python-v3 | 9d9449ed68a609ca65f0aa50b5b2a1c28445bf03 | [
"Apache-2.0"
] | 1 | 2021-04-16T07:59:28.000Z | 2021-04-16T07:59:28.000Z | huaweicloud-sdk-servicestage/huaweicloudsdkservicestage/v2/model/list_hooks_request.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-servicestage/huaweicloudsdkservicestage/v2/model/list_hooks_request.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | 1 | 2022-01-17T02:24:18.000Z | 2022-01-17T02:24:18.000Z | # coding: utf-8
import pprint
import re
import six
class ListHooksRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_repo_auth': 'str',
'namespace': 'str',
'project': 'str'
}
attribute_map = {
'x_repo_auth': 'X-Repo-Auth',
'namespace': 'namespace',
'project': 'project'
}
def __init__(self, x_repo_auth=None, namespace=None, project=None):
"""ListHooksRequest - a model defined in huaweicloud sdk"""
self._x_repo_auth = None
self._namespace = None
self._project = None
self.discriminator = None
self.x_repo_auth = x_repo_auth
self.namespace = namespace
self.project = project
@property
def x_repo_auth(self):
"""Gets the x_repo_auth of this ListHooksRequest.
:return: The x_repo_auth of this ListHooksRequest.
:rtype: str
"""
return self._x_repo_auth
@x_repo_auth.setter
def x_repo_auth(self, x_repo_auth):
"""Sets the x_repo_auth of this ListHooksRequest.
:param x_repo_auth: The x_repo_auth of this ListHooksRequest.
:type: str
"""
self._x_repo_auth = x_repo_auth
@property
def namespace(self):
"""Gets the namespace of this ListHooksRequest.
:return: The namespace of this ListHooksRequest.
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this ListHooksRequest.
:param namespace: The namespace of this ListHooksRequest.
:type: str
"""
self._namespace = namespace
@property
def project(self):
"""Gets the project of this ListHooksRequest.
:return: The project of this ListHooksRequest.
:rtype: str
"""
return self._project
@project.setter
def project(self, project):
"""Sets the project of this ListHooksRequest.
:param project: The project of this ListHooksRequest.
:type: str
"""
self._project = project
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListHooksRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 25.070513 | 74 | 0.555612 |
dbbc760b300b14469efa722586d46c1ce9450ef5 | 8,874 | py | Python | mkt/developers/urls.py | acidburn0zzz/zamboni | 780fbeb99e240a569a72a1c15410f49b76b3807c | [
"BSD-3-Clause"
] | 1 | 2017-07-14T19:22:39.000Z | 2017-07-14T19:22:39.000Z | mkt/developers/urls.py | Acidburn0zzz/zamboni | 780fbeb99e240a569a72a1c15410f49b76b3807c | [
"BSD-3-Clause"
] | 6 | 2021-02-02T23:08:48.000Z | 2021-09-08T02:47:17.000Z | mkt/developers/urls.py | Acidburn0zzz/zamboni | 780fbeb99e240a569a72a1c15410f49b76b3807c | [
"BSD-3-Clause"
] | null | null | null | from django import http
from django.conf.urls import include, patterns, url
from rest_framework.routers import SimpleRouter
from lib.misc.urlconf_decorator import decorate
import amo
from amo.decorators import write
from amo.urlresolvers import reverse
from mkt.api.base import SubRouter
from mkt.developers.api import ContentRatingList, ContentRatingsPingback
from mkt.developers.api_payments import (
AddonPaymentAccountViewSet, PaymentAccountViewSet, PaymentCheckViewSet,
PaymentDebugViewSet, UpsellViewSet)
from mkt.developers.decorators import use_apps
from mkt.inapp.views import InAppProductViewSet
from mkt.receipts.urls import test_patterns
from . import views
from . import views_payments
def provider_patterns(prefix):
return patterns('',
url('^accounts$', views_payments.payment_accounts,
name='mkt.developers.%s.payment_accounts' % prefix),
url('^accounts/form$', views_payments.payment_accounts_form,
name='mkt.developers.%s.payment_accounts_form' % prefix),
url('^accounts/add$', views_payments.payments_accounts_add,
name='mkt.developers.%s.add_payment_account' % prefix),
url('^accounts/(?P<id>\d+)/delete$',
views_payments.payments_accounts_delete,
name='mkt.developers.%s.delete_payment_account' % prefix),
url('^accounts/(?P<id>\d+)$',
views_payments.payments_account,
name='mkt.developers.%s.payment_account' % prefix),
url('^accounts/(?P<id>\d+)/agreement/$', views_payments.agreement,
name='mkt.developers.%s.agreement' % prefix)
)
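# Illustrative note (added): provider_patterns('provider') is included further down
# under '^provider/', so - relative to wherever this urlconf itself is mounted - it
# exposes URLs such as:
#   provider/accounts                 -> payment_accounts      ('mkt.developers.provider.payment_accounts')
#   provider/accounts/add             -> payments_accounts_add ('mkt.developers.provider.add_payment_account')
#   provider/accounts/<id>/agreement/ -> agreement             ('mkt.developers.provider.agreement')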
# These will all start with /app/<app_slug>/
app_detail_patterns = patterns('',
# Redirect people who go to / instead of /edit.
('^$', lambda r, app_slug: http.HttpResponseRedirect(
reverse('mkt.developers.apps.edit', args=[app_slug]))),
url('^edit$', views.edit, name='mkt.developers.apps.edit'),
url('^edit_(?P<section>[^/]+)(?:/(?P<editable>[^/]+))?$',
views.addons_section, name='mkt.developers.apps.section'),
url('^refresh_manifest$', views.refresh_manifest,
name='mkt.developers.apps.refresh_manifest'),
url('^ownership$', views.ownership, name='mkt.developers.apps.owner'),
url('^enable$', views.enable, name='mkt.developers.apps.enable'),
url('^delete$', views.delete, name='mkt.developers.apps.delete'),
url('^disable$', views.disable, name='mkt.developers.apps.disable'),
url('^publicise$', views.publicise, name='mkt.developers.apps.publicise'),
url('^status$', views.status, name='mkt.developers.apps.versions'),
url('^blocklist$', views.blocklist, name='mkt.developers.apps.blocklist'),
# IARC content ratings.
url('^content_ratings$', views.content_ratings,
name='mkt.developers.apps.ratings'),
url('^content_ratings/edit$', views.content_ratings_edit,
name='mkt.developers.apps.ratings_edit'),
url('^status/preload$', views.preload_home,
name='mkt.developers.apps.preload_home'),
url('^status/preload/submit$', views.preload_submit,
name='mkt.developers.apps.preload_submit'),
# TODO: '^versions/$'
url('^versions/(?P<version_id>\d+)$', views.version_edit,
name='mkt.developers.apps.versions.edit'),
url('^versions/delete$', views.version_delete,
name='mkt.developers.apps.versions.delete'),
url('^versions/publicise$', views.version_publicise,
name='mkt.developers.apps.versions.publicise'),
url('^payments/$', views_payments.payments,
name='mkt.developers.apps.payments'),
url('^payments/disable$', views_payments.disable_payments,
name='mkt.developers.apps.payments.disable'),
url('^payments/bango-portal$', views_payments.bango_portal_from_addon,
name='mkt.developers.apps.payments.bango_portal_from_addon'),
# in-app payments.
url('^in-app-config/$', views_payments.in_app_config,
name='mkt.developers.apps.in_app_config'),
url('^in-app-products/$', views_payments.in_app_products,
name='mkt.developers.apps.in_app_products'),
url('^in-app-secret/$', views_payments.in_app_secret,
name='mkt.developers.apps.in_app_secret'),
# Old stuff.
url('^upload_preview$', views.upload_media, {'upload_type': 'preview'},
name='mkt.developers.apps.upload_preview'),
url('^upload_icon$', views.upload_media, {'upload_type': 'icon'},
name='mkt.developers.apps.upload_icon'),
url('^upload_image$', views.upload_media, {'upload_type': 'image'},
name='mkt.developers.apps.upload_image'),
url('^rmlocale$', views.remove_locale,
name='mkt.developers.apps.remove-locale'),
# Not apps-specific (yet).
url('^file/(?P<file_id>[^/]+)/validation$', views.file_validation,
name='mkt.developers.apps.file_validation'),
url('^file/(?P<file_id>[^/]+)/validation.json$',
views.json_file_validation,
name='mkt.developers.apps.json_file_validation'),
url('^upload$', views.upload_for_addon,
name='mkt.developers.upload_for_addon'),
url('^upload/(?P<uuid>[^/]+)$', views.upload_detail_for_addon,
name='mkt.developers.upload_detail_for_addon'),
)
# These will all start with /ajax/app/<app_slug>/
ajax_patterns = patterns('',
url('^image/status$', views.image_status,
name='mkt.developers.apps.ajax.image.status'),
)
urlpatterns = decorate(write, patterns('',
# Redirect people who have /apps/ instead of /app/.
('^apps/\d+/.*',
lambda r: http.HttpResponseRedirect(r.path.replace('apps', 'app', 1))),
# Standalone validator:
url('^validator/?$', views.validate_addon,
name='mkt.developers.validate_addon'),
# Redirect to /addons/ at the base.
url('^submissions$', use_apps(views.dashboard),
name='mkt.developers.apps'),
url('^upload$', views.upload_new, name='mkt.developers.upload'),
url('^upload/([^/]+)(?:/([^/]+))?$', views.upload_detail,
name='mkt.developers.upload_detail'),
url('^standalone-hosted-upload$', views.standalone_hosted_upload,
name='mkt.developers.standalone_hosted_upload'),
url('^standalone-packaged-upload$', views.standalone_packaged_upload,
name='mkt.developers.standalone_packaged_upload'),
url('^standalone-(hosted|packaged)-upload/([^/]+)$',
views.standalone_upload_detail,
name='mkt.developers.standalone_upload_detail'),
# Standalone tools.
url('^upload-manifest$', views.upload_manifest,
name='mkt.developers.upload_manifest'),
url('^in-app-keys/$', views_payments.in_app_keys,
name='mkt.developers.apps.in_app_keys'),
url('^in-app-key-secret/([^/]+)$', views_payments.in_app_key_secret,
name='mkt.developers.apps.in_app_key_secret'),
# URLs for a single app.
url('^app/%s/' % amo.APP_SLUG, include(app_detail_patterns)),
url('^ajax/app/%s/' % amo.APP_SLUG, include(ajax_patterns)),
url('^terms$', views.terms, name='mkt.developers.apps.terms'),
url('^api$', views.api, name='mkt.developers.apps.api'),
# Developer docs
url('docs/(?P<doc_name>[-_\w]+)?$',
views.docs, name='mkt.developers.docs'),
url('docs/(?P<doc_name>[-_\w]+)/(?P<doc_page>[-_\w]+)',
views.docs, name='mkt.developers.docs'),
url('^transactions/', views.transactions,
name='mkt.developers.transactions'),
# Bango-specific stuff.
url('^provider/', include(provider_patterns('provider'))),
url('^test/$', views.testing, name='mkt.developers.apps.testing'),
url('^test/receipts/', include(test_patterns)),
))
api_payments = SimpleRouter()
api_payments.register(r'account', PaymentAccountViewSet,
base_name='payment-account')
api_payments.register(r'upsell', UpsellViewSet, base_name='app-upsell')
api_payments.register(r'app', AddonPaymentAccountViewSet,
base_name='app-payment-account')
in_app_products = SimpleRouter()
in_app_products.register(r'in-app', InAppProductViewSet,
base_name='in-app-products')
app_payments = SubRouter()
app_payments.register(r'payments/status', PaymentCheckViewSet,
base_name='app-payments-status')
app_payments.register(r'payments/debug', PaymentDebugViewSet,
base_name='app-payments-debug')
payments_api_patterns = patterns('',
url(r'^payments/', include(api_payments.urls)),
url(r'^payments/(?P<app_slug>[^\/]+)/', include(in_app_products.urls)),
url(r'^apps/app/', include(app_payments.urls)),
)
dev_api_patterns = patterns('',
url(r'^apps/app/(?P<pk>[^/<>"\']+)/content-ratings/pingback/',
ContentRatingsPingback.as_view(), name='content-ratings-pingback'),
url(r'^apps/app/(?P<pk>[^/<>"\']+)/content-ratings/',
ContentRatingList.as_view(), name='content-ratings-list'),
)
| 41.858491 | 78 | 0.676133 |
b4bffc9607b9a0d8830f1969394cb51d47d0760f | 1,247 | py | Python | kds.py | wangzishuo111/bk_zhangdan | 30be7d92c53de4f18d90c00aba1ee73073f47029 | [
"MIT"
] | null | null | null | kds.py | wangzishuo111/bk_zhangdan | 30be7d92c53de4f18d90c00aba1ee73073f47029 | [
"MIT"
] | null | null | null | kds.py | wangzishuo111/bk_zhangdan | 30be7d92c53de4f18d90c00aba1ee73073f47029 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from base.log import logger
import base.opt as opt
from base.timer import Timer
import hbase_util_thrift as hbase_util
import kds_rowkey
import params
def get_table_name():
env = opt.option().env
return 'kds_data_' + env
def kds_save(batch, data):
rowkey = kds_rowkey.gen(batch)
table = get_table_name()
cols = []
cols.append(('kds', 'data', data))
logger().info('save data for row[%s], table[%s]', rowkey, table)
if not hbase_util.put_cols(table, rowkey, cols):
return False
return True
def kds_get(batch):
rowkey = kds_rowkey.gen(batch)
table = get_table_name()
ret = hbase_util.get_col(table, rowkey, 'kds', 'data')
logger().info('seek data for row[%s]', rowkey)
if ret:
return ret['kds:data']
return None
def kds_del(batch):
rowkey = kds_rowkey.gen(batch)
logger().info('delete rowkey[%s]', rowkey)
table = get_table_name()
return hbase_util.delete(table, rowkey)
def kds_test():
print 'save:', kds_save('test', '11')
print 'get:', kds_get('test')
print 'delete:', kds_del('test')
print 'get:', kds_get('test')
def kds_main():
kds_test()
if __name__ == '__main__':
kds_main()
| 23.980769 | 68 | 0.646351 |
0987bdf7055b487b90a9a91751cb9f5d118ce510 | 10,512 | py | Python | make_planes/make_planes.py | qianyeqiang/AVOD-testing | aa4e2903f52f3a9d15b9b607a08c3bf8504919be | [
"MIT"
] | 3 | 2018-12-18T08:19:40.000Z | 2020-05-18T15:48:43.000Z | make_planes/make_planes.py | qianyeqiang/AVOD-testing | aa4e2903f52f3a9d15b9b607a08c3bf8504919be | [
"MIT"
] | 2 | 2019-03-05T01:50:08.000Z | 2020-06-03T08:42:57.000Z | make_planes/make_planes.py | qianyeqiang/AVOD-testing | aa4e2903f52f3a9d15b9b607a08c3bf8504919be | [
"MIT"
] | 1 | 2020-05-19T10:47:31.000Z | 2020-05-19T10:47:31.000Z | from pyntcloud import PyntCloud
import numpy as np
import os
import time
import scipy.linalg
import matplotlib.pyplot as plt
path_in = "/home/jackqian/avod/make_planes/"
path_kitti_training = "/home/jackqian/KITTI/training/velodyne/"
path_kitti_testing = "/home/jackqian/KITTI/testing/velodyne/"
path_save = "/media/jackqian/新加卷/Ubuntu/avod/make_planes/"
file1 = "000303.bin"
file2 = "0.bin"
def lidar4to3():
"""
    Convert the lidar points from Nx4 shape to Nx3 shape, i.e., remove the reflectivity.
:return:
"""
filename = path_in + file1
print("Processing: ", filename)
scan = np.fromfile(filename, dtype=np.float32)
print(np.shape(scan))
scan = scan.reshape((-1, 4))
scan = scan[:, :3]
scan = scan.reshape(-1)
#calib = calib_at("000000")
# scan input: nx3; scan output: nx3;
#scan = lidar_point_to_img(scan, calib[3], calib[2], calib[0])
#scan = scan.astype(np.float32)
#np.save(str(0)+ ".txt", scan)
scan.tofile(file2)
def lidar4to3_kitti():
"""
    Convert the KITTI lidar points from Nx4 shape to Nx3 shape, i.e., remove the reflectivity.
:return:
"""
for i in range(7481):
filename = path_kitti_training + str(i).zfill(6) + ".bin"
print("Processing: ", filename)
scan = np.fromfile(filename, dtype=np.float32)
#print(np.shape(scan))
scan = scan.reshape((-1, 4))
scan = scan[:, :3]
calib = calib_at(str(i).zfill(6))
# scan input: nx3; scan output: nx3;
scan = lidar_point_to_img_calib2(scan, calib[3], calib[2], calib[0])
scan = scan.astype(np.float32)
file2 = path_save + "kittilidar_training_qyqmake_calib2/" + str(i).zfill(6) + ".bin"
scan.tofile(file2)
def cau_planes():
"""
    Use RANSAC in PyntCloud to find the ground plane.
    Note the lidar points have already been transformed to the camera coordinate frame.
    :return: ground plane parameters (A, B, C, D) for Ax+By+Cz+D=0.
"""
last_time = time.time()
cloud = PyntCloud.from_file(path_save + "kittilidar_training_qyqmake_calib2/" + file1)
#cloud = PyntCloud.from_file(path_in + file2)
#cloud.plot()
cloud.points = cloud.points[cloud.points["y"] > 1]
# cloud.points = cloud.points[cloud.points["x"] > -2]
# cloud.points = cloud.points[cloud.points["x"] < 2]
# cloud.points = cloud.points[cloud.points["z"] > -20]
# cloud.points = cloud.points[cloud.points["z"] < 20]
data_raw = np.array(cloud.points)
is_floor = cloud.add_scalar_field("plane_fit", n_inliers_to_stop=len(cloud.points) / 30, max_dist=0.001, max_iterations=100)
#cloud.plot(use_as_color=is_floor, cmap = "cool")
cloud.points = cloud.points[cloud.points[is_floor] > 0]
data = np.array(cloud.points)
mn = np.min(data, axis=0)
mx = np.max(data, axis=0)
X, Y = np.meshgrid(np.linspace(mn[0], mx[0], 20), np.linspace(mn[1], mx[1], 20))
X_avod, Y_avod = np.meshgrid(np.linspace(mn[0], mx[0], 20), np.linspace(mn[1], mx[1], 20))
X_flat, Z_flat = np.meshgrid(np.arange(-5, 5, 1), np.arange(-5, 5, 1))
#### best-fit linear plane
#### Z = C[0] * X + C[1] * Y + C[2]
A = np.c_[data[:, 0], data[:, 1], np.ones(data.shape[0])]
C, _, _, _ = scipy.linalg.lstsq(A, data[:, 2]) # coefficients
Z = C[0] * X + C[1] * Y + C[2]
Z_avod = (8.587492e-03*X_avod + 9.995657e-01*Y_avod - 1.519515e+00)/2.818885e-02
Z_avod = (1.316190e-02 * X_avod + 9.997416e-01 * Y_avod - 1.543552e+00) / 1.853603e-02
Y_flat = 1.65
normal = np.array([C[0], C[1], 1, C[2]])
normal = - normal / normal[1]
print(normal)
# fig = plt.figure()
# ax = fig.gca(projection='3d')
#
# #ax.plot_surface(X, Y, Z, rstride=1, cstride=1, alpha=0.2)
# ax.plot_surface(X_avod, Y_avod, Z_avod, rstride=1, cstride=1, alpha=0.2)
# ax.plot_surface(X_flat, Y_flat, Z_flat, rstride=1, cstride=1, alpha=0.2)
#
# ax.scatter(data[:, 0], data[:, 1], data[:, 2], c='r', s=1)
# #ax.scatter(data_raw[:, 0], data_raw[:, 1], data_raw[:, 2], c='g', s=0.1)
# plt.xlabel('X')
# plt.ylabel('Y')
# plt.ylabel('Z')
# #ax.set_zlabel('Z')
# ax.axis('equal')
# ax.axis('tight')
#
# ax.axis([-5, 5, -5, 5])
# ax.set_zlim(-5, 5)
# #ax.zaxis.set_major_locator(LinearLocator(20))
#
# plt.show()
current_time = time.time()
print("cost_time: ", current_time - last_time)
#print("normal:", normal_final)
#print("normal_normalized:", normal_normalized)
def cau_planes_kitti():
"""
    Use RANSAC in PyntCloud to find the ground plane in KITTI.
    Note the lidar points have already been transformed to the camera coordinate frame.
    :return: ground plane parameters (A, B, C, D) for Ax+By+Cz+D=0.
"""
# regular grid covering the domain of the data
last_time = time.time()
k = 0
while k != 7481:
print(path_save + "kittilidar_training_qyqmake_calib2/" + str(k).zfill(6)+ ".bin")
cloud = PyntCloud.from_file(path_save + "kittilidar_training_qyqmake_calib2/" + str(k).zfill(6)+ ".bin")
cloud.points = cloud.points[cloud.points["y"] > 1]
is_floor = cloud.add_scalar_field("plane_fit", n_inliers_to_stop=len(cloud.points) / 30, max_dist=0.001, max_iterations=100)
cloud.points = cloud.points[cloud.points[is_floor] > 0]
data = np.array(cloud.points)
#### best-fit linear plane
#### Z = C[0] * X + C[1] * Y + C[2]
A = np.c_[data[:, 0], data[:, 1], np.ones(data.shape[0])]
C, _, _, _ = scipy.linalg.lstsq(A, data[:, 2]) # coefficients
normal = np.array([C[0], C[1], 1, C[2]])
normal = - normal / normal[1]
print(normal)
        # Check whether the result is plausibly the ground plane.
        # After normalization B is -1, so if the fit is right D should be close to the
        # camera height (~1.65 m); hence the 1.4-2.0 sanity range below.
if (normal[3] > 2.0 or normal[3] < 1.4) :
print("error_result")
continue
# elif (normal[3] > 2.0 or normal[3] < 1.4) and count >= 10:
# txtname = path_save + "kittilidar_training_planes_qyqmake_calib2_fit/" + str(k).zfill(6) + ".txt"
# f = open(txtname, "a")
# f.write("# Plane\n")
# f.write("Width 4\n")
# f.write("Height 1\n")
# str_normal = "0" + " " + "-1" + " " + "0" + " " + "1.65"
# f.write(str_normal)
# f.close()
#
# k = k + 1
# count = 0
# continue
txtname = path_save + "kittilidar_training_planes_qyqmake_calib2_fit/" + str(k).zfill(6) + ".txt"
f = open(txtname, "a")
f.write("# Plane\n")
f.write("Width 4\n")
f.write("Height 1\n")
str_normal = str(normal[0]) + " " + str(normal[1]) + " " + str(normal[2]) + " " + str(normal[3])
f.write(str_normal)
f.close()
k = k + 1
# normal_normalized = normal / np.linalg.norm(normal)
# print("normal_normalized:", normal_normalized)
# current_time = time.time()
# print("cost_time: ", current_time - last_time)
# print("normal:", normal)
def lidar_point_to_img_calib2(point, Tr, R0, P2):
"""
    rewritten by jackqian
    Convert lidar points to the camera coordinate system.
    Input: points with shape Nx3; output: points with shape Nx3 (N is the number of points).
    This variant applies only Tr (velodyne -> reference camera), i.e. output = Tr * point.
    To rectify, additionally apply R0; to project onto the image, additionally apply P2.
"""
P2 = P2.reshape((3, 4))
R0 = R0.reshape((4, 3))
Tr = Tr.reshape((3, 4))
T = np.zeros((1,4))
T[0,3] = 1
P2 = np.vstack((P2, T))
Tr = np.vstack((Tr, T))
T2 = np.zeros((4,1))
T2[3,0] = 1
R0 = np.hstack((R0, T2))
assert Tr.shape == (4, 4)
assert R0.shape == (4, 4)
assert P2.shape == (4, 4)
point = point.transpose((1, 0))
point = np.vstack((point, np.ones(point.shape[1])))
# mat1 = np.dot(P2, R0)
# mat2 = np.dot(mat1, Tr)
# img_cor = np.dot(mat2, point)
#mat = np.dot(R0, Tr)
img_cor = np.dot(Tr, point)
#img_cor = img_cor/img_cor[2]
img_cor = img_cor.transpose((1, 0))
img_cor = img_cor[:, :3]
return img_cor
def lidar_point_to_img(point, Tr, R0, P2):
"""
    rewritten by jackqian
    Convert lidar points to the camera coordinate system.
    Input: points with shape Nx3; output: points with shape Nx3 (N is the number of points).
    output = R0 * Tr * point
    To project the lidar points onto the image plane: output = P2 * R0 * Tr * point.
"""
P2 = P2.reshape((3, 4))
R0 = R0.reshape((4, 3))
Tr = Tr.reshape((3, 4))
T = np.zeros((1,4))
T[0,3] = 1
P2 = np.vstack((P2, T))
Tr = np.vstack((Tr, T))
T2 = np.zeros((4,1))
T2[3,0] = 1
R0 = np.hstack((R0, T2))
assert Tr.shape == (4, 4)
assert R0.shape == (4, 4)
assert P2.shape == (4, 4)
point = point.transpose((1, 0))
point = np.vstack((point, np.ones(point.shape[1])))
# mat1 = np.dot(P2, R0)
# mat2 = np.dot(mat1, Tr)
# img_cor = np.dot(mat2, point)
mat = np.dot(R0, Tr)
img_cor = np.dot(mat, point)
#img_cor = img_cor/img_cor[2]
img_cor = img_cor.transpose((1, 0))
img_cor = img_cor[:, :3]
return img_cor
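# --- Illustrative sketch (added; not part of the original pipeline) ---
# Both helpers above work in homogeneous coordinates: the Nx3 array is transposed,
# a row of ones is appended (4xN), the 4x4 calibration matrices are applied, and the
# result is cut back to Nx3.  A minimal usage sketch, assuming a KITTI-style calib
# row as returned by calib_at() below:
#
#     calib = calib_at("000303")                  # rows: P2, P3, R0, Tr_velo2cam
#     pts_velo = np.fromfile(path_kitti_training + "000303.bin",
#                            dtype=np.float32).reshape(-1, 4)[:, :3]
#     pts_cam = lidar_point_to_img(pts_velo, calib[3], calib[2], calib[0])
#     # pts_cam is Nx3 in the rectified camera frame (x right, y down, z forward);
#     # projecting to pixels would additionally need P2 and a divide by depth.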
def calib_at(index):
"""
Return the calib sequence.
"""
calib_ori = load_kitti_calib(index)
calib = np.zeros((4, 12))
calib[0,:] = calib_ori['P2'].reshape(12)
calib[1,:] = calib_ori['P3'].reshape(12)
calib[2,:9] = calib_ori['R0'].reshape(9)
calib[3,:] = calib_ori['Tr_velo2cam'].reshape(12)
return calib
def load_kitti_calib(index):
"""
load projection matrix
"""
data_path = '/home/jackqian//KITTI/'
prefix = 'training/calib'
#prefix = 'testing/calib'
calib_dir = os.path.join(data_path, prefix, index + '.txt')
with open(calib_dir) as fi:
lines = fi.readlines()
obj = lines[2].strip().split(' ')[1:]
P2 = np.array(obj, dtype=np.float32)
obj = lines[3].strip().split(' ')[1:]
P3 = np.array(obj, dtype=np.float32)
obj = lines[4].strip().split(' ')[1:]
R0 = np.array(obj, dtype=np.float32)
obj = lines[5].strip().split(' ')[1:]
Tr_velo_to_cam = np.array(obj, dtype=np.float32)
return {'P2': P2.reshape(3, 4),
'P3': P3.reshape(3, 4),
'R0': R0.reshape(3, 3),
'Tr_velo2cam': Tr_velo_to_cam.reshape(3, 4)}
def main():
#lidar4to3()
#lidar4to3_kitti()
cau_planes()
pass
if __name__ == '__main__':
main() | 30.206897 | 132 | 0.582382 |
f35685b50bd05b4e65265fa9ceab149f402107ea | 584 | py | Python | tut1-mainwindow/ex1-mainWindow.py | JeffHoogland/qtdesigner-pyside-tutorial | c212a4f96ea8a0032b0b302e100ee222e68ce187 | [
"BSD-3-Clause"
] | 5 | 2015-03-14T11:20:40.000Z | 2020-09-15T19:33:00.000Z | tut1-mainwindow/ex1-mainWindow.py | JeffHoogland/qtdesigner-pyside-tutorial | c212a4f96ea8a0032b0b302e100ee222e68ce187 | [
"BSD-3-Clause"
] | null | null | null | tut1-mainwindow/ex1-mainWindow.py | JeffHoogland/qtdesigner-pyside-tutorial | c212a4f96ea8a0032b0b302e100ee222e68ce187 | [
"BSD-3-Clause"
] | 4 | 2015-03-14T11:20:22.000Z | 2020-03-07T17:40:12.000Z | import sys
from PySide.QtGui import *
from PySide.QtCore import *
from ui_mainWindow import Ui_mainWindow
class MainWindow(QMainWindow, Ui_mainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.setupUi(self)
self.assignWidgets()
self.show()
def assignWidgets(self):
self.goButton.clicked.connect(self.goPushed)
def goPushed(self):
self.goText.append("Go, Go, Go!")
if __name__ == '__main__':
app = QApplication(sys.argv)
mainWin = MainWindow()
ret = app.exec_()
sys.exit( ret )
| 24.333333 | 52 | 0.65411 |
f878a70cf50c78e2edd52e00c76c000ebd7cdf23 | 1,588 | py | Python | read_from_pdf.py | neha-duggirala/DataScience-Interview-Preparation-Bot | 41757cd193b15b0651b1b6ebf580fe86cec1ee35 | [
"MIT"
] | null | null | null | read_from_pdf.py | neha-duggirala/DataScience-Interview-Preparation-Bot | 41757cd193b15b0651b1b6ebf580fe86cec1ee35 | [
"MIT"
] | null | null | null | read_from_pdf.py | neha-duggirala/DataScience-Interview-Preparation-Bot | 41757cd193b15b0651b1b6ebf580fe86cec1ee35 | [
"MIT"
] | null | null | null | """Downloads PDF as a text file
Source: https://stackoverflow.com/questions/11087795/whitespace-gone-from-pdf-extraction-and-strange-word-interpretation
"""
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from io import StringIO
class PdfConverter:
def __init__(self, file_path):
self.file_path = file_path
    # convert pdf file to a string that has spaces between words
def convert_pdf_to_txt(self):
rsrcmgr = PDFResourceManager()
retstr = StringIO()
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, laparams=laparams)
fp = open(self.file_path, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
password = ""
maxpages = 0
caching = True
pagenos = set()
for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password, caching=caching, check_extractable=True):
interpreter.process_page(page)
fp.close()
device.close()
str = retstr.getvalue()
retstr.close()
return str
    # convert pdf file text to a string and save it to a .txt file ('Q&A.txt')
def save_convert_pdf_to_txt(self):
content = self.convert_pdf_to_txt()
txt_pdf = open('Q&A.txt', 'wb')
txt_pdf.write(content.encode('utf-8'))
txt_pdf.close()
if __name__ == '__main__':
pdfConverter = PdfConverter(file_path='Q&A.pdf')
pdfConverter.save_convert_pdf_to_txt() | 38.731707 | 130 | 0.688917 |
d7990c83388aa959790a7f41bacd3f56883ab7f0 | 1,023 | py | Python | assets/templatetags/assets.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | null | null | null | assets/templatetags/assets.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | 11 | 2020-03-24T15:29:46.000Z | 2022-03-11T23:14:48.000Z | assets/templatetags/assets.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | null | null | null | from django import template
register = template.Library()
@register.inclusion_tag('assets/metadocument_inline_form.html', takes_context=True)
def include_metadocument_inline_form(context, **kwargs):
response = {'context': context}
for k in kwargs.keys():
response[k] = kwargs[k]
return response
@register.assignment_tag()
def form_field_has_data(form_field, result_if_has_value=True, result_if_no_value=''):
'''
Can use this to set an element class depending on whether the form field has any
data in it or not --- used in setting expanded/collapsed collection items.
:param form_field:
:param result_if_has_value:
:param result_if_no_value:
:return:
'''
try:
if form_field.form.initial[form_field.name]:
return result_if_has_value
except KeyError:
pass
return result_if_no_value
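# Illustrative template usage (added; the form and class names are placeholders).
# Because this is an assignment tag, the result is stored in a variable and is
# typically used to drive a CSS class on the collection item:
#
#     {% load assets %}
#     {% form_field_has_data form.title "active" "" as item_class %}
#     <li class="collection-item {{ item_class }}">...</li>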
@register.assignment_tag()
def get_child_asset(asset_instance):
return asset_instance.__class__.objects.get_subclass(pk=asset_instance.pk)
| 29.228571 | 85 | 0.731183 |
95ab9d08949692a32e42f979dddadc71f945672e | 9,486 | py | Python | PSENet/dataset/psenet/psenet_synth.py | PaddleEdu/OCR-models-PaddlePaddle | 1a62dcf4b647310b505fa5e4a18bbd8d27c39dfd | [
"Apache-2.0"
] | 12 | 2021-05-10T13:47:32.000Z | 2021-07-30T08:59:53.000Z | PSENet/dataset/psenet/psenet_synth.py | maxpark/OCR-models-PaddlePaddle | 1a62dcf4b647310b505fa5e4a18bbd8d27c39dfd | [
"Apache-2.0"
] | 4 | 2021-05-16T11:28:32.000Z | 2021-07-23T07:41:44.000Z | PSENet/dataset/psenet/psenet_synth.py | maxpark/OCR-models-PaddlePaddle | 1a62dcf4b647310b505fa5e4a18bbd8d27c39dfd | [
"Apache-2.0"
] | 4 | 2021-05-12T16:32:03.000Z | 2021-11-17T23:18:39.000Z | import numpy as np
from PIL import Image
import cv2
import random
import paddle
from paddle.io import Dataset
import paddle.vision.transforms as transforms
import pyclipper
import Polygon as plg
import math
import string
import scipy.io as scio
synth_root_dir = './data/SynthText/'
synth_train_data_dir = synth_root_dir
synth_train_gt_path = synth_root_dir + 'gt.mat'
def get_img(img_path, read_type='pil'):
try:
if read_type == 'cv2':
img = cv2.imread(img_path)
img = img[:, :, [2, 1, 0]]
elif read_type == 'pil':
img = np.array(Image.open(img_path))
except Exception as e:
print(img_path)
raise
return img
def get_ann(img, gts, texts, index):
bboxes = np.array(gts[index])
bboxes = np.reshape(bboxes, (bboxes.shape[0], bboxes.shape[1], -1))
bboxes = bboxes.transpose(2, 1, 0)
bboxes = np.reshape(bboxes, (bboxes.shape[0], -1)) / ([img.shape[1], img.shape[0]] * 4)
words = []
for text in texts[index]:
text = text.replace('\n', ' ').replace('\r', ' ')
words.extend([w for w in text.split(' ') if len(w) > 0])
return bboxes, words
def random_horizontal_flip(imgs):
if random.random() < 0.5:
for i in range(len(imgs)):
imgs[i] = np.flip(imgs[i], axis=1).copy()
return imgs
def random_rotate(imgs):
max_angle = 10
angle = random.random() * 2 * max_angle - max_angle
for i in range(len(imgs)):
img = imgs[i]
w, h = img.shape[:2]
rotation_matrix = cv2.getRotationMatrix2D((h / 2, w / 2), angle, 1)
img_rotation = cv2.warpAffine(img, rotation_matrix, (h, w), flags=cv2.INTER_NEAREST)
imgs[i] = img_rotation
return imgs
def scale_aligned(img, h_scale, w_scale):
h, w = img.shape[0:2]
h = int(h * h_scale + 0.5)
w = int(w * w_scale + 0.5)
if h % 32 != 0:
h = h + (32 - h % 32)
if w % 32 != 0:
w = w + (32 - w % 32)
img = cv2.resize(img, dsize=(w, h))
return img
def random_scale(img, short_size=736):
h, w = img.shape[0:2]
scale = np.random.choice(np.array([0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3]))
scale = (scale * short_size) / min(h, w)
aspect = np.random.choice(np.array([0.9, 0.95, 1.0, 1.05, 1.1]))
h_scale = scale * math.sqrt(aspect)
w_scale = scale / math.sqrt(aspect)
img = scale_aligned(img, h_scale, w_scale)
return img
def random_crop_padding(imgs, target_size):
h, w = imgs[0].shape[0:2]
t_w, t_h = target_size
p_w, p_h = target_size
if w == t_w and h == t_h:
return imgs
t_h = t_h if t_h < h else h
t_w = t_w if t_w < w else w
if random.random() > 3.0 / 8.0 and np.max(imgs[1]) > 0:
# make sure to crop the text region
tl = np.min(np.where(imgs[1] > 0), axis=1) - (t_h, t_w)
tl[tl < 0] = 0
br = np.max(np.where(imgs[1] > 0), axis=1) - (t_h, t_w)
br[br < 0] = 0
br[0] = min(br[0], h - t_h)
br[1] = min(br[1], w - t_w)
i = random.randint(tl[0], br[0]) if tl[0] < br[0] else 0
j = random.randint(tl[1], br[1]) if tl[1] < br[1] else 0
else:
i = random.randint(0, h - t_h) if h - t_h > 0 else 0
j = random.randint(0, w - t_w) if w - t_w > 0 else 0
n_imgs = []
for idx in range(len(imgs)):
if len(imgs[idx].shape) == 3:
s3_length = int(imgs[idx].shape[-1])
img = imgs[idx][i:i + t_h, j:j + t_w, :]
img_p = cv2.copyMakeBorder(img, 0, p_h - t_h, 0, p_w - t_w, borderType=cv2.BORDER_CONSTANT,
value=tuple(0 for i in range(s3_length)))
else:
img = imgs[idx][i:i + t_h, j:j + t_w]
img_p = cv2.copyMakeBorder(img, 0, p_h - t_h, 0, p_w - t_w, borderType=cv2.BORDER_CONSTANT, value=(0,))
n_imgs.append(img_p)
return n_imgs
def update_word_mask(instance, instance_before_crop, word_mask):
labels = np.unique(instance)
for label in labels:
if label == 0:
continue
ind = instance == label
if np.sum(ind) == 0:
word_mask[label] = 0
continue
ind_before_crop = instance_before_crop == label
# print(np.sum(ind), np.sum(ind_before_crop))
if float(np.sum(ind)) / np.sum(ind_before_crop) > 0.9:
continue
word_mask[label] = 0
return word_mask
def dist(a, b):
return np.linalg.norm((a - b), ord=2, axis=0)
def perimeter(bbox):
peri = 0.0
for i in range(bbox.shape[0]):
peri += dist(bbox[i], bbox[(i + 1) % bbox.shape[0]])
return peri
def shrink(bboxes, rate, max_shr=20):
rate = rate * rate
shrinked_bboxes = []
for bbox in bboxes:
area = plg.Polygon(bbox).area()
peri = perimeter(bbox)
try:
pco = pyclipper.PyclipperOffset()
pco.AddPath(bbox, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
offset = min(int(area * (1 - rate) / (peri + 0.001) + 0.5), max_shr)
shrinked_bbox = pco.Execute(-offset)
if len(shrinked_bbox) == 0:
shrinked_bboxes.append(bbox)
continue
shrinked_bbox = np.array(shrinked_bbox[0])
if shrinked_bbox.shape[0] <= 2:
shrinked_bboxes.append(bbox)
continue
shrinked_bboxes.append(shrinked_bbox)
except Exception as e:
print('area:', area, 'peri:', peri)
shrinked_bboxes.append(bbox)
return shrinked_bboxes
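# Note added for clarity: shrink() implements the PSENet-style kernel shrinking.
# Because the rate is squared at the top of the function, the clipping offset per
# polygon is
#     offset = Area * (1 - rate^2) / Perimeter    (capped at max_shr)
# and pyclipper erodes each text polygon inward by that many pixels.  The dataset
# class below calls it with rate = 1 - (1 - min_scale) / (kernel_num - 1) * i to
# build the progressively smaller ground-truth kernels.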
class PSENET_Synth(Dataset):
def __init__(self,
is_transform=False,
img_size=None,
short_size=736,
kernel_num=7,
min_scale=0.7,
read_type='pil'):
self.is_transform = is_transform
self.img_size = img_size if (img_size is None or isinstance(img_size, tuple)) else (img_size, img_size)
self.kernel_num = kernel_num
self.min_scale = min_scale
self.short_size = short_size
self.read_type = read_type
data = scio.loadmat(synth_train_gt_path)
self.img_paths = data['imnames'][0]
self.gts = data['wordBB'][0]
self.texts = data['txt'][0]
self.max_word_num = 200
def __len__(self):
return len(self.img_paths)
def __getitem__(self, index):
img_path = synth_train_data_dir + self.img_paths[index][0]
img = get_img(img_path, read_type=self.read_type)
bboxes, words = get_ann(img, self.gts, self.texts, index)
if bboxes.shape[0] > self.max_word_num:
bboxes = bboxes[:self.max_word_num]
words = words[:self.max_word_num]
if self.is_transform:
img = random_scale(img, self.short_size)
gt_instance = np.zeros(img.shape[0:2], dtype='uint8')
training_mask = np.ones(img.shape[0:2], dtype='uint8')
if bboxes.shape[0] > 0:
bboxes = np.reshape(bboxes * ([img.shape[1], img.shape[0]] * 4),
(bboxes.shape[0], -1, 2)).astype('int32')
for i in range(bboxes.shape[0]):
cv2.drawContours(gt_instance, [bboxes[i]], -1, i + 1, -1)
if words[i] == '###':
cv2.drawContours(training_mask, [bboxes[i]], -1, 0, -1)
gt_kernels = []
for i in range(1, self.kernel_num):
rate = 1.0 - (1.0 - self.min_scale) / (self.kernel_num - 1) * i
gt_kernel = np.zeros(img.shape[0:2], dtype='uint8')
kernel_bboxes = shrink(bboxes, rate)
for i in range(len(bboxes)):
cv2.drawContours(gt_kernel, [kernel_bboxes[i].astype(int)], -1, 1, -1)
gt_kernels.append(gt_kernel)
if self.is_transform:
imgs = [img, gt_instance, training_mask]
imgs.extend(gt_kernels)
imgs = random_horizontal_flip(imgs)
imgs = random_rotate(imgs)
imgs = random_crop_padding(imgs, self.img_size)
img, gt_instance, training_mask, gt_kernels = imgs[0], imgs[1], imgs[2], imgs[3:]
gt_text = gt_instance.copy()
gt_text[gt_text > 0] = 1
gt_kernels = np.array(gt_kernels)
max_instance = np.max(gt_instance)
gt_bboxes = np.zeros((self.max_word_num, 4), dtype=np.int32)
for i in range(1, max_instance + 1):
ind = gt_instance == i
if np.sum(ind) == 0:
continue
points = np.array(np.where(ind)).transpose((1, 0))
tl = np.min(points, axis=0)
br = np.max(points, axis=0) + 1
gt_bboxes[i] = (tl[0], tl[1], br[0], br[1])
img = Image.fromarray(img)
img = img.convert('RGB')
if self.is_transform:
img = transforms.ColorJitter(brightness=32.0 / 255, saturation=0.5)(img)
img = transforms.ToTensor()(img)
img = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(img)
gt_text = paddle.to_tensor(gt_text,dtype="int64")
gt_kernels = paddle.to_tensor(gt_kernels,dtype="int64")
training_mask = paddle.to_tensor(training_mask,dtype="int64")
data = dict(
imgs=img,
gt_texts=gt_text,
gt_kernels=gt_kernels,
training_masks=training_mask,
)
return img.numpy(),gt_text.numpy(),gt_kernels.numpy(),training_mask.numpy()
| 32.375427 | 115 | 0.564938 |
fba0862a8d0def6affa9616148dbc847e17527dd | 164 | py | Python | apps/shop/forms.py | Zomba4okk/DjangoOnlinerLikeShop | 6f2bc8ca8072c6b3c69a79c33e448aa6c5be0f81 | [
"MIT"
] | null | null | null | apps/shop/forms.py | Zomba4okk/DjangoOnlinerLikeShop | 6f2bc8ca8072c6b3c69a79c33e448aa6c5be0f81 | [
"MIT"
] | 7 | 2021-02-20T13:17:16.000Z | 2021-04-17T18:43:25.000Z | apps/shop/forms.py | Zomba4okk/DjangoOnlinerLikeShop | 6f2bc8ca8072c6b3c69a79c33e448aa6c5be0f81 | [
"MIT"
] | null | null | null | from django import forms
from .models import Order
class OrderForm(forms.ModelForm):
class Meta:
model = Order
fields = ('user', 'products')
| 16.4 | 37 | 0.652439 |
9ee2d2b70ab48a0a3ed65ce7377bf919d8cd88c4 | 259 | py | Python | twisted/web/_version.py | linxuping/twisted | f0ca2e1844da1fe524d7bac66f371c1235ce7201 | [
"Unlicense",
"MIT"
] | 3 | 2018-11-25T01:09:55.000Z | 2021-08-24T01:56:36.000Z | twisted/web/_version.py | linxuping/twisted | f0ca2e1844da1fe524d7bac66f371c1235ce7201 | [
"Unlicense",
"MIT"
] | null | null | null | twisted/web/_version.py | linxuping/twisted | f0ca2e1844da1fe524d7bac66f371c1235ce7201 | [
"Unlicense",
"MIT"
] | 3 | 2018-11-09T03:38:09.000Z | 2020-02-24T06:26:10.000Z | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
# This is an auto-generated file. Do not edit it.
"""
Provides Twisted version information.
"""
from twisted.python import versions
version = versions.Version('twisted.web', 15, 0, 0)
| 21.583333 | 51 | 0.737452 |
481f95f63263090e0416ede4b8631d0aa0af4315 | 2,462 | py | Python | TD/q_learning/simple_q_learning.py | hadleyhzy34/reinforcement_learning | 14371756c2ff8225dc800d146452b7956875410c | [
"MIT"
] | null | null | null | TD/q_learning/simple_q_learning.py | hadleyhzy34/reinforcement_learning | 14371756c2ff8225dc800d146452b7956875410c | [
"MIT"
] | null | null | null | TD/q_learning/simple_q_learning.py | hadleyhzy34/reinforcement_learning | 14371756c2ff8225dc800d146452b7956875410c | [
"MIT"
] | null | null | null | #http://mnemstudio.org/path-finding-q-learning-tutorial.htm
#provide two different criterion for q-learning
import numpy as np
#initialize q function
q = np.matrix(np.zeros([6,6]))
# r is the tabular representation for rewards
r = np.matrix([[-1,-1,-1,-1,0,-1],
[-1,-1,-1,0,-1,100],
[-1,-1,-1,0,-1,-1],
[-1,0,0,-1,0,-1],
[0,-1,-1,0,-1,100],
[-1,0,-1,-1,0,100]])
#hyperparameter
gamma = 0.8
epsilon = 0.4
alpha = 0.1 #learning rate
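# Note added for clarity: the loop below contains two alternative update rules.
# method1 (commented out) is the deterministic full-backup update
#     Q(s, a) = r(s, a) + gamma * max_a' Q(s', a')
# method2 (the one actually applied) is the standard temporal-difference update
#     Q(s, a) += alpha * (r(s, a) + gamma * max_a' Q(s', a') - Q(s, a))
# With alpha = 1 the two coincide; a smaller alpha blends each new estimate into
# the old one instead of overwriting it.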
#the main training loop
for episode in range(101):
#random initial state
state = np.random.randint(0,6)
while (state!=5):
possible_actions = []
possible_q = []
for action in range(6):
#loop through all actions, choose rules-allowed actions
if r[state,action] >= 0:
possible_actions.append(action)
possible_q.append(q[state,action])
#step next state, here we use epsilon-greedy algo
action = -1
if np.random.random() < epsilon:
#choose random action
action = possible_actions[np.random.randint(0,len(possible_actions))]
else:
#greedy
action = possible_actions[np.argmax(possible_q)]
#update q value
#method1
#q[state,action] = r[state,action] + gamma * q[action].max()
#method2
rs = r[state,action]
re = gamma * q[action].max()
cur_r = q[state, action]
td = rs+re-cur_r
print(f'next immediate reward is: {r[action,np.argmax(q[action])]}')
print(f'future reward is: {gamma*q[action].max()}')
print(f'current future reward is: {q[state,action]}')
#q[state,action] += alpha * [ rs + re - cur_r
#q[state,action] += alpha * (r[action,np.argmax(q[action])] + gamma * q[action].max() - q[state,action])
q[state,action] += alpha * td
#go to the next state
state = action
if episode % 10 ==0:
print('_________________________________________')
print('training episode: %d' % episode)
print(q)
for i in range(10):
print('episode: %d' %i)
#random initial state
state = np.random.randint(0,6)
print(f'robot starts at {state}')
for _ in range(20):
if state == 5:
break
action = np.argmax(q[state])
print(f'the robot goes to {action}')
state = action
| 29.309524 | 112 | 0.557271 |
6010e271ecbccb4b8c9660028d88a499c9da2ef4 | 156 | py | Python | lab3project/street/apps/dictionaries/apps.py | danylott/labs_oop_2course | 7ebfd4ff5bd4604bf1cc7fce7046d7506fa8aa3f | [
"Apache-2.0"
] | null | null | null | lab3project/street/apps/dictionaries/apps.py | danylott/labs_oop_2course | 7ebfd4ff5bd4604bf1cc7fce7046d7506fa8aa3f | [
"Apache-2.0"
] | 13 | 2021-03-19T04:13:54.000Z | 2022-03-12T00:19:34.000Z | lab3project/street/apps/dictionaries/apps.py | Neknu/labs_oop_2course | 7ebfd4ff5bd4604bf1cc7fce7046d7506fa8aa3f | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
class DictionariesConfig(AppConfig):
name = 'dictionaries'
    verbose_name = 'Довідники типів вулиць та сегментів'  # "Dictionaries of street and segment types"
| 22.285714 | 56 | 0.769231 |
36aead21a1f2f82a3bfae280f71df2da572f5c87 | 520 | py | Python | snake_block.py | Phaugt/PyGame-Snake | e91e63e5fa0067fba03691dba9e08a5abb8cd249 | [
"MIT"
] | null | null | null | snake_block.py | Phaugt/PyGame-Snake | e91e63e5fa0067fba03691dba9e08a5abb8cd249 | [
"MIT"
] | null | null | null | snake_block.py | Phaugt/PyGame-Snake | e91e63e5fa0067fba03691dba9e08a5abb8cd249 | [
"MIT"
] | null | null | null | import pygame
pygame.init()
white = (255, 255, 255)
game_y = 500
game_x = 500
playboard = pygame.display.set_mode((game_x, game_y))
pygame.display.set_caption('Snake by pythonexplainedto.me')
game_over = False
x1 = game_x/2
y1 = game_y/2
snake_block=10
while not game_over:
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_over = True
pygame.draw.rect(playboard, white, [x1, y1, snake_block, snake_block])
pygame.display.update()
pygame.quit()
quit() | 16.774194 | 74 | 0.678846 |
6b089a0bd996982df5323f49c9a8bfca397b1d77 | 7,787 | py | Python | fetcher/utils.py | COVID19Tracking/covid19-datafetcher | d7e35a977149d8c06ed340b20a17479cdab0b76b | [
"Apache-2.0"
] | 31 | 2020-06-17T11:16:47.000Z | 2022-01-11T17:24:52.000Z | fetcher/utils.py | pablocaviglia-baires/covid19-datafetcher | d7e35a977149d8c06ed340b20a17479cdab0b76b | [
"Apache-2.0"
] | 24 | 2020-06-18T20:09:40.000Z | 2021-03-30T18:34:33.000Z | fetcher/utils.py | space-buzzer/covid19-datafetcher | b974583937bcb7dee2685b1b9b36e4a90042e056 | [
"Apache-2.0"
] | 15 | 2020-06-25T20:30:22.000Z | 2021-09-20T04:31:10.000Z | """
This is the utils module that handles making requests and parsing responses.
Utils methods are used both by the main library and by the state-specific
extras modules.
"""
from datetime import datetime
from enum import Enum
from io import StringIO
import csv
import json
import logging
import ssl
import typing
import urllib
import urllib.request
import pandas as pd
from bs4 import BeautifulSoup
from tableauscraper import TableauScraper
# TODO: It's not used as an effective enum
# TODO: move this to a yaml somewhere
class Fields(Enum):
STATE = "state"
# time
FETCH_TIMESTAMP = "fetch_timestamp"
TIMESTAMP = "timestamp"
DATE = "date"
# Tests
POSITIVE = "positive"
NEGATIVE = "negative"
CONFIRMED = "positiveCasesViral"
TOTAL = "total" # totalTestsPeopleViral"
INCONCLUSIVE = "inconclusive"
PROBABLE = "probableCases"
PENDING = "pending"
ANTIBODY_POS = "positiveTestsAntibody"
ANTIBODY_NEG = "negativeTestsAntibody"
ANTIBODY_TOTAL = "totalTestsAntibody"
ANTIBODY_POS_PEOPLE = "positiveTestsPeopleAntibody"
ANTIBODY_NEG_PEOPLE = "negativeTestsPeopleAntibody"
ANTIBODY_TOTAL_PEOPLE = "totalTestsPeopleAntibody"
SPECIMENS = "totalTestsViral"
SPECIMENS_POS = "positiveTestsViral"
SPECIMENS_NEG = "negativeTestsViral"
PCR_TEST_ENCOUNTERS = "totalTestEncountersViral"
# Death
DEATH = "death"
DEATH_CONFIRMED = "deathConfirmed"
DEATH_PROBABLE = "deathProbable"
# Holpitalization
HOSP = "hospitalizedCumulative"
ICU = "inIcuCumulative"
VENT = "onVentilatorCumulative"
CURR_HOSP = "hospitalizedCurrently"
CURR_ICU = "inIcuCurrently"
CURR_VENT = "onVentilatorCurrently"
# Recovered
RECOVERED = "recovered"
ANTIGEN_TOTAL = "totalTestsAntigen"
ANTIGEN_POS = "positiveTestsAntigen"
ANTIGEN_NEG = "negativeTestsAntigen"
ANTIGEN_TOTAL_PEOPLE = "totalTestsPeopleAntigen"
ANTIGEN_POS_PEOPLE = "positiveTestsPeopleAntigen"
ANTIGEN_NEG_PEOPLE = "negativeTestsPeopleAntigen"
# Meta
WINDOW = "window"
PPR = "ppr"
UNITS = "units"
SID = "sid"
DATE_USED = "date_used"
@property
def value(self):
supervalue = super().value
if not isinstance(supervalue, str):
return self.name
return supervalue
def __repr__(self):
return self.value
@classmethod
def map(cls):
return {f.name: f.value for f in Fields}
def request(url, query=None, encoding=None, method=None):
if not encoding:
encoding = 'utf-8'
if not method:
method = 'GET'
if query:
url = "{}?{}".format(url, urllib.parse.urlencode(query))
res = {}
req = urllib.request.Request(url, method=method, headers={
'user-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:79.0) Gecko/20100101 Firefox/79.0'
})
with urllib.request.urlopen(req) as f:
res = f.read().decode(encoding)
return res
def request_pandas(query):
url = query.url
params = {} if not query.params else query.params
# Use params as **kwargs for pandas call
if query.type in ['xlsx', 'xls']:
df = pd.read_excel(url, **params)
else:
# assume csv
df = pd.read_csv(url, **params)
return df
def request_soup(url, query=None, encoding=None):
res = request(url, query, encoding)
return BeautifulSoup(res, 'html.parser')
def request_and_parse(url, query=None, method=None):
# skip cert verification for PR (because who knows why)
if getattr(ssl, '_create_unverified_context', None):
ssl._create_default_https_context = ssl._create_unverified_context
res = request(url, query, method=method)
res = json.loads(res)
return res
def request_csv(url, query=None, dialect=None, header=True, encoding=None):
res = request(url, query, encoding)
if not dialect:
dialect = 'unix'
if header:
reader = csv.DictReader(StringIO(res, newline=None), dialect='unix')
else:
reader = csv.reader(StringIO(res, newline=None), dialect='unix')
res = list(reader)
return res
def request_tableau_scraper(query):
ts = TableauScraper()
ts.loads(query.url)
dashboard = ts.getWorkbook()
dfs = []
prefixes = [] if not query.params else query.params.get('worksheet', [])
if isinstance(prefixes, str):
prefixes = [prefixes]
for ws in dashboard.worksheets:
if prefixes is None:
dfs.append(ws.data)
elif any([ws.name.startswith(n) for n in prefixes]):
dfs.append(ws.data)
return dfs
def map_attributes(original, mapping, debug_state=None):
tagged_attributes = {}
for k, v in original.items():
if k.strip() in mapping:
tagged_attributes[mapping[k.strip()]] = v
else:
# report value without mapping
logging.debug("[{}] Field {} has no mapping".format(debug_state, k))
# Date special casing: handle dates here
if Fields.TIMESTAMP.name not in tagged_attributes \
and Fields.DATE.name in tagged_attributes \
and '__strptime' in mapping:
# If we don't have a timestamp, but have a date and ways to parse it
# parse it now
d = tagged_attributes[Fields.DATE.name]
if d:
tagged_attributes[Fields.TIMESTAMP.name] = \
datetime.strptime(d, mapping['__strptime']).timestamp()
return tagged_attributes
def extract_arcgis_attributes(dict_result, mapping, debug_state=None):
path = ['features', [], 'attributes']
return extract_attributes(dict_result, path, mapping, debug_state)
def extract_attributes(dict_result, path, mapping, debug_state=None):
res = _extract_attributes(dict_result, path, mapping, debug_state)
if isinstance(res, typing.List) and len(res) > 1:
return res
if isinstance(res, typing.List) and len(res) == 1:
return res[0]
return res
def _extract_attributes(dict_result, path, mapping, debug_state=None):
'''Uses mapping to extract attributes from dict_result
    Returns tagged attributes
    dict_result: the object we get from making a call to an api/url
path: the path in the result dict where all the mappings should apply
e.g., {"state": {"results": [{"name": value}, {...}}]}
path would be: ['state', 'results', 0]
This is like the cheap version of a `jq` expression
mapping: the mapping from the given tags/terms to our common field names
'''
res = dict_result
mapped = []
for i, step in enumerate(path):
# need to distinguish between list index and dict key:
if isinstance(res, typing.List) and step == []:
for item in res:
mapped.append(extract_attributes(item, path[i+1:], mapping, debug_state))
return mapped
if isinstance(res, typing.List) and isinstance(step, int):
res = res[step]
elif isinstance(res, typing.Dict) and not isinstance(step, list) and step in res:
res = res[step]
# now that res is the correct place in the result object, we can map the values
mapped = map_attributes(res, mapping, debug_state)
return mapped
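# Illustrative sketch (added; the payload and field names below are made up):
#
#     result = {"state": {"results": [{"Positive": 10, "Negative": 200}]}}
#     mapping = {"Positive": Fields.POSITIVE.name, "Negative": Fields.NEGATIVE.name}
#     extract_attributes(result, ["state", "results", 0], mapping, "XX")
#     # -> {"POSITIVE": 10, "NEGATIVE": 200}
#
# A [] step in the path (as used by extract_arcgis_attributes) means "apply the rest
# of the path to every element of this list", yielding one mapped dict per item.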
def csv_sum(data, columns=None):
    '''Expects a DictReader-style CSV: a list of dict-like rows.
    Cells that cannot be summed (e.g. dates) are avoided by only summing the
    columns named in the `columns` hint; without the hint nothing is summed.
    TODO: heuristic to decide whether a column is numeric for summation
    Returns a dictionary of sums.
'''
if columns is None or not columns:
return {}
sums = {x: 0 for x in columns}
for row in data:
for k, v in row.items():
if k in sums:
sums[k] += v if isinstance(v, int) else int(float(v.replace(',', '')))
return sums
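# Illustrative sketch (added): only the named columns are summed, and string cells
# with thousands separators are coerced via float():
#
#     rows = [{"positive": "1,200", "date": "20200101"},
#             {"positive": 34, "date": "20200102"}]
#     csv_sum(rows, columns=["positive"])   # -> {"positive": 1234}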
| 30.778656 | 100 | 0.663157 |
807630ea4cf13a0914a523656f3ae30e300c3031 | 1,441 | py | Python | pushpluck/color.py | ejconlon/pushpluck | 4e5b8bcff6fe3955e8f25638268569f901815b5a | [
"MIT"
] | null | null | null | pushpluck/color.py | ejconlon/pushpluck | 4e5b8bcff6fe3955e8f25638268569f901815b5a | [
"MIT"
] | 2 | 2021-04-02T03:54:12.000Z | 2021-04-23T18:23:03.000Z | pushpluck/color.py | ejconlon/pushpluck | 4e5b8bcff6fe3955e8f25638268569f901815b5a | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import Dict, Generator
@dataclass(frozen=True)
class Color:
red: int
green: int
blue: int
def __iter__(self) -> Generator[int, None, None]:
yield self.red
yield self.green
yield self.blue
def to_code(self) -> str:
nums = ''.join(f'{x:02x}' for x in self)
return f'#{nums.upper()}'
@classmethod
def from_code(cls, code: str) -> 'Color':
assert code[0] == '#'
red = int(code[1:3], 16)
green = int(code[3:5], 16)
blue = int(code[5:7], 16)
return cls(red, green, blue)
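# Illustrative round-trip (added; the colour value is arbitrary):
#
#     c = Color.from_code('#FFA500')   # -> Color(red=255, green=165, blue=0)
#     list(c)                          # -> [255, 165, 0], via __iter__
#     c.to_code()                      # -> '#FFA500'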
COLORS: Dict[str, Color] = {
'Black': Color.from_code('#000000'),
'DarkGrey': Color.from_code('#A9A9A9'),
'Gray': Color.from_code('#808080'),
'White': Color.from_code('#FFFFFF'),
'Red': Color.from_code('#FF0000'),
'Yellow': Color.from_code('#FFFF00'),
'Lime': Color.from_code('#00FF00'),
'Green': Color.from_code('#008000'),
'Spring': Color.from_code('#00FF7F'),
'Turquoise': Color.from_code('#40E0D0'),
'Cyan': Color.from_code('#00FFFF'),
'Sky': Color.from_code('#87CEEB'),
'Blue': Color.from_code('#0000FF'),
'Orchid': Color.from_code('#DA70D6'),
'Magenta': Color.from_code('#FF00FF'),
'Pink': Color.from_code('#FFC0CB'),
'Orange': Color.from_code('#FFA580'),
'Indigo': Color.from_code('#4B0082'),
'Violet': Color.from_code('#EE82EE')
}
| 28.82 | 53 | 0.59195 |
9295f31efd2561c0972500db46b597a92e0ae767 | 3,631 | py | Python | magnum/conductor/api.py | mail2nsrajesh/magnum | 2e7e5a77967028c961337177ce577eb936c3845c | [
"Apache-2.0"
] | null | null | null | magnum/conductor/api.py | mail2nsrajesh/magnum | 2e7e5a77967028c961337177ce577eb936c3845c | [
"Apache-2.0"
] | null | null | null | magnum/conductor/api.py | mail2nsrajesh/magnum | 2e7e5a77967028c961337177ce577eb936c3845c | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for interfacing with Magnum Backend."""
from magnum.common import profiler
from magnum.common import rpc_service
import magnum.conf
CONF = magnum.conf.CONF
# The Backend API class serves as an AMQP client for communicating
# on a topic exchange specific to the conductors. This allows the ReST
# API to trigger operations on the conductors
@profiler.trace_cls("rpc")
class API(rpc_service.API):
def __init__(self, transport=None, context=None, topic=None):
super(API, self).__init__(transport, context,
topic=CONF.conductor.topic)
# Cluster Operations
def cluster_create(self, cluster, create_timeout):
return self._call('cluster_create', cluster=cluster,
create_timeout=create_timeout)
def cluster_create_async(self, cluster, create_timeout):
self._cast('cluster_create', cluster=cluster,
create_timeout=create_timeout)
def cluster_delete(self, uuid):
return self._call('cluster_delete', uuid=uuid)
def cluster_delete_async(self, uuid):
self._cast('cluster_delete', uuid=uuid)
def cluster_update(self, cluster):
return self._call('cluster_update', cluster=cluster)
def cluster_update_async(self, cluster, rollback=False):
self._cast('cluster_update', cluster=cluster, rollback=rollback)
# CA operations
def sign_certificate(self, cluster, certificate):
return self._call('sign_certificate', cluster=cluster,
certificate=certificate)
def get_ca_certificate(self, cluster):
return self._call('get_ca_certificate', cluster=cluster)
def rotate_ca_certificate(self, cluster):
return self._call('rotate_ca_certificate', cluster=cluster)
# Versioned Objects indirection API
def object_class_action(self, context, objname, objmethod, objver,
args, kwargs):
"Indirection API callback"
return self._client.call(context, 'object_class_action',
objname=objname, objmethod=objmethod,
objver=objver, args=args, kwargs=kwargs)
def object_action(self, context, objinst, objmethod, args, kwargs):
"Indirection API callback"
return self._client.call(context, 'object_action', objinst=objinst,
objmethod=objmethod, args=args, kwargs=kwargs)
def object_backport(self, context, objinst, target_version):
"Indirection API callback"
return self._client.call(context, 'object_backport', objinst=objinst,
target_version=target_version)
@profiler.trace_cls("rpc")
class ListenerAPI(rpc_service.API):
def __init__(self, context=None, topic=None, server=None, timeout=None):
super(ListenerAPI, self).__init__(context=context, topic=topic,
server=server, timeout=timeout)
def ping_conductor(self):
return self._call('ping_conductor')
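# Hedged usage sketch (the context and cluster objects are illustrative, not
# defined in this module): callers construct the proxy and invoke the matching
# method, e.g.
#   api = API(context=admin_context)
#   api.cluster_create_async(cluster, create_timeout=60)
# which casts 'cluster_create' onto the conductor topic instead of running the
# operation in the API process.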
| 38.62766 | 79 | 0.675021 |
880198db928ce16cee4f265c646d79d8fc3c0f7e | 14,178 | py | Python | tentacle/dnn3.py | splendor-kill/ml-five | 4da5c192bbdc9175542833a86f5ec65fc955dc10 | [
"MIT"
] | 72 | 2016-10-20T13:01:30.000Z | 2021-12-16T09:17:32.000Z | tentacle/dnn3.py | splendor-kill/ml-five | 4da5c192bbdc9175542833a86f5ec65fc955dc10 | [
"MIT"
] | null | null | null | tentacle/dnn3.py | splendor-kill/ml-five | 4da5c192bbdc9175542833a86f5ec65fc955dc10 | [
"MIT"
] | 16 | 2016-11-25T10:43:59.000Z | 2018-07-12T16:12:03.000Z | from builtins import (super)
from datetime import datetime
import os
import time
import numpy as np
import tensorflow as tf
from tentacle.board import Board
from tentacle.config import cfg
from tentacle.dnn import Pre
class DCNN3(Pre):
def __init__(self, is_train=True, is_revive=False, is_rl=False):
super().__init__(is_train, is_revive, is_rl)
self.test_stat = None
def placeholder_inputs(self):
h, w, c = self.get_input_shape()
states = tf.placeholder(tf.float32, [None, h, w, c]) # NHWC
actions = tf.placeholder(tf.float32, [None, Pre.NUM_ACTIONS])
return states, actions
def model(self, states_pl, actions_pl):
with tf.variable_scope("policy_net"):
conv, conv_out_dim = self.create_conv_net(states_pl)
raw_predictions = self.create_policy_net(conv, conv_out_dim, states_pl)
legal_filter = tf.reshape(tf.slice(states_pl, [0, 0, 0, 2], [-1, -1, -1, 1]), [-1, Pre.NUM_ACTIONS])
self.predictions = (raw_predictions - tf.reduce_min(raw_predictions) + 0.1 / Pre.NUM_ACTIONS) * legal_filter
with tf.variable_scope("value_net"):
# conv, conv_out_dim = self.create_conv_net(states_pl)
self.value_outputs = self.create_value_net(conv, conv_out_dim, states_pl)
self.policy_net_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="policy_net")
sl_pg_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=actions_pl, logits=self.predictions))
# reg_loss = tf.reduce_sum([tf.reduce_sum(tf.square(x)) for x in self.policy_net_vars])
reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope="policy_net"))
# illegal_penalty = tf.reduce_sum(raw_predictions * (1. - legal_filter))
self.loss = sl_pg_loss + 0.001 * reg_loss # + 0.1 * illegal_penalty
tf.summary.scalar("raw_policy_loss", sl_pg_loss)
tf.summary.scalar("reg_policy_loss", reg_loss)
tf.summary.scalar("all_policy_loss", self.loss)
# tf.summary.scalar("illegal_penalty", illegal_penalty)
self.optimizer = tf.train.AdamOptimizer(0.0001)
# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# with tf.control_dependencies(update_ops):
# self.opt_op = self.optimizer.minimize(self.loss)
self.opt_op = self.optimizer.minimize(self.loss)
self.predict_probs = tf.nn.softmax(self.predictions)
eq = tf.equal(tf.argmax(self.predict_probs, 1), tf.argmax(actions_pl, 1))
# best_move = tf.argmax(actions_pl, 1)
# eq = tf.nn.in_top_k(self.predict_probs, best_move, 3)
self.eval_correct = tf.reduce_sum(tf.cast(eq, tf.int32))
self.rl_op(actions_pl)
def bn_conv(self, conv, offset, scale, convolutional=True):
axes = [0, 1, 2] if convolutional else [0]
mean, variance = tf.nn.moments(conv, axes)
return tf.nn.batch_normalization(conv, mean, variance, offset, scale, 1e-5)
def create_conv_net(self, states_pl):
inputs = states_pl
inputs = tf.layers.conv2d(inputs=inputs,
filters=46,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu,
kernel_regularizer=tf.nn.l2_loss)
for _ in range(5):
conv = tf.layers.conv2d(inputs=inputs,
filters=192,
kernel_size=[3, 3],
padding="same",
activation=tf.nn.relu,
kernel_regularizer=tf.nn.l2_loss
)
inputs = conv
conv_out_dim = inputs.get_shape()[1:].num_elements()
conv_out = tf.reshape(inputs, [-1, conv_out_dim])
return conv_out, conv_out_dim
def create_policy_net(self, conv, conv_out_dim, states_pl):
conv = tf.identity(conv, 'policy_net_conv')
dense = tf.layers.dense(inputs=conv,
units=Pre.NUM_ACTIONS,
kernel_regularizer=tf.nn.l2_loss)
return dense
def create_value_net(self, conv, conv_out_dim, states_pl):
conv = tf.identity(conv, 'value_net_conv')
dense = tf.layers.dense(inputs=conv,
units=128,
kernel_regularizer=tf.nn.l2_loss,
activation=tf.nn.relu)
dense = tf.layers.dense(inputs=dense,
units=1,
kernel_regularizer=tf.nn.l2_loss,
activation=tf.nn.tanh)
return dense
def get_input_shape(self):
return Board.BOARD_SIZE, Board.BOARD_SIZE, Pre.NUM_CHANNELS
def ready_for_input_from_tfrecords(self, files, batch_size, num_epochs=None, capacity=2000):
filename_queue = tf.train.string_input_producer(files, num_epochs=num_epochs)
if cfg.DATA_SET_SUFFIX[3:7] == 'dist':
read_features = {'state': tf.FixedLenFeature([Board.BOARD_SIZE_SQ], tf.int64),
'actions': tf.FixedLenFeature([Board.BOARD_SIZE_SQ], tf.float32)}
else:
read_features = {'state': tf.FixedLenFeature([Board.BOARD_SIZE_SQ], tf.int64),
'action': tf.FixedLenFeature([], tf.int64)}
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(serialized_example, features=read_features)
s = features['state']
s = tf.cast(s, tf.float32)
if 'actions' in features:
a_dist = features['actions']
elif 'action' in features:
a_dist = tf.one_hot(features['action'], Board.BOARD_SIZE_SQ, dtype=tf.float32)
else:
raise ValueError('unknown feature')
min_after_dequeue = capacity
capacity = min_after_dequeue + 3 * batch_size
s, a_dist = tf.train.shuffle_batch([s, a_dist],
batch_size=batch_size,
num_threads=2,
capacity=capacity,
min_after_dequeue=min_after_dequeue)
return s, a_dist
def do_eval(self, eval_correct, states_pl, actions_pl, data_set):
true_count = 0
batch_size = cfg.FEED_BATCH_SIZE
num_examples = cfg.SAMPLE_BATCH_NUM * batch_size
for _ in range(cfg.SAMPLE_BATCH_NUM):
feed_dict = self.fill_feed_dict(data_set, states_pl, actions_pl, batch_size)
true_count += self.sess.run(eval_correct, feed_dict=feed_dict)
accuracy = true_count / (num_examples or 1)
return accuracy
def fill_feed_dict(self, data_set, states_pl, actions_pl, batch_size=None):
batch_size = batch_size or Pre.BATCH_SIZE
if data_set == 'train':
states_feed, actions_feed = self.sess.run([self.state_batch_train, self.action_batch_train])
elif data_set == 'validation':
states_feed, actions_feed = self.sess.run([self.state_batch_validation, self.action_batch_validation])
elif data_set == 'test':
states_feed, actions_feed = self.sess.run([self.state_batch_test, self.action_batch_test])
else:
raise ValueError('unknown data_set')
reshaped_states = []
for s in states_feed:
s1, _ = self.adapt_state(s)
reshaped_states.append(s1)
states_feed = np.array(reshaped_states)
h, w, c = self.get_input_shape()
states_feed = states_feed.reshape((-1, h, w, c))
if self.sparse_labels:
actions_feed = actions_feed.ravel()
feed_dict = {
states_pl: states_feed,
actions_pl: actions_feed
}
return feed_dict
def prepare(self):
with tf.Graph().as_default():
self.states_pl, self.actions_pl = self.placeholder_inputs()
self.model(self.states_pl, self.actions_pl)
with tf.name_scope('train_queue_runner'):
ds_file = os.path.join(cfg.DATA_SET_DIR, 'train' + cfg.DATA_SET_SUFFIX)
self.state_batch_train, self.action_batch_train = self.ready_for_input_from_tfrecords([ds_file],
cfg.FEED_BATCH_SIZE,
num_epochs=cfg.TRAIN_EPOCHS,
capacity=cfg.TRAIN_QUEUE_CAPACITY)
ds_file = os.path.join(cfg.DATA_SET_DIR, 'validation' + cfg.DATA_SET_SUFFIX)
self.state_batch_validation, self.action_batch_validation = self.ready_for_input_from_tfrecords([ds_file],
cfg.FEED_BATCH_SIZE,
capacity=cfg.VALIDATE_QUEUE_CAPACITY)
with tf.name_scope('test_queue_runner'):
ds_file = os.path.join(cfg.DATA_SET_DIR, 'test' + cfg.DATA_SET_SUFFIX)
self.state_batch_test, self.action_batch_test = self.ready_for_input_from_tfrecords([ds_file],
cfg.FEED_BATCH_SIZE,
num_epochs=1,
capacity=cfg.VALIDATE_QUEUE_CAPACITY)
for i in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS, scope='train_queue_runner'):
tf.train.add_queue_runner(i, collection='train_q_runner')
for i in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS, scope='test_queue_runner'):
tf.train.add_queue_runner(i, collection='test_q_runner')
self.summary_op = tf.summary.merge_all()
self.saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="policy_net")) # tf.trainable_variables())
self.saver_all = tf.train.Saver(tf.trainable_variables(), max_to_keep=100)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
now = datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = os.path.join(Pre.SUMMARY_DIR, "run-{}".format(now,))
self.summary_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
self.sess = tf.Session()
self.sess.run(init_op)
print('Initialized')
def run(self, from_file=None, part_vars=True):
self.prepare()
if self.is_revive:
self.load_from_vat(from_file, part_vars)
if not self.is_train:
return
prev_time = 0
def work1(coord, cnt):
nonlocal prev_time
if cnt == 0:
prev_time = time.time()
feed_dict = self.fill_feed_dict('train', self.states_pl, self.actions_pl)
_, loss = self.sess.run([self.opt_op, self.loss], feed_dict=feed_dict)
self.loss_window.extend(loss)
self.gstep += 1
if cnt % 1000 == 0:
summary_str = self.sess.run(self.summary_op, feed_dict=feed_dict)
self.summary_writer.add_summary(summary_str, self.gstep)
self.summary_writer.flush()
if cnt != 0 and cnt % 1000 == 0:
self.saver.save(self.sess, Pre.BRAIN_CHECKPOINT_FILE, global_step=self.gstep)
train_accuracy = self.do_eval(self.eval_correct, self.states_pl, self.actions_pl, 'train')
validation_accuracy = self.do_eval(self.eval_correct, self.states_pl, self.actions_pl, 'validation')
self.stat.append((self.gstep, train_accuracy, validation_accuracy, 0.))
self.gap = train_accuracy - validation_accuracy
now = time.time()
duration = now - prev_time
prev_time = now
avg_loss = self.loss_window.get_average()
print('iter: %d, loss: %.3f, acc_train: %.3f, acc_valid: %.3f, time cost: %.3f sec' %
(cnt, avg_loss, train_accuracy, validation_accuracy, duration))
if avg_loss < 1.0:
coord.request_stop()
self.work_work('train_q_runner', work1)
print('testing...')
self.test_stat = [0, 0] # [correct, total]
def work2(coord, cnt):
feed_dict = self.fill_feed_dict('test', self.states_pl, self.actions_pl, batch_size=cfg.FEED_BATCH_SIZE)
self.test_stat[0] += self.sess.run(self.eval_correct, feed_dict=feed_dict)
self.test_stat[1] += cfg.FEED_BATCH_SIZE
self.work_work('test_q_runner', work2)
def work_work(self, qs, func):
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(self.sess, coord=coord, collection=qs)
try:
start_time = time.time()
cnt = 0
while not coord.should_stop():
func(coord, cnt)
cnt += 1
except tf.errors.OutOfRangeError:
duration = time.time() - start_time
print('count: %d (%.3f sec)' % (cnt, duration))
if self.test_stat is not None:
print('test accuracy: %.3f' % (self.test_stat[0] / self.test_stat[1],))
coord.request_stop()
finally:
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
n = DCNN3(is_revive=False)
n.run()
| 45.442308 | 149 | 0.570955 |
62d6ca0ed70a1981e4ebf0ac47f56435e563d7b0 | 11,999 | py | Python | octavia/network/drivers/noop_driver/driver.py | BeaconFramework/Distributor | c9f8737063263ca69365679c8b76331766d63191 | [
"Apache-2.0"
] | 1 | 2019-01-11T06:20:25.000Z | 2019-01-11T06:20:25.000Z | octavia/network/drivers/noop_driver/driver.py | BeaconFramework/Distributor | c9f8737063263ca69365679c8b76331766d63191 | [
"Apache-2.0"
] | null | null | null | octavia/network/drivers/noop_driver/driver.py | BeaconFramework/Distributor | c9f8737063263ca69365679c8b76331766d63191 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import uuidutils
from octavia.common import data_models
from octavia.network import base as driver_base
from octavia.network import data_models as network_models
LOG = logging.getLogger(__name__)
class NoopManager(object):
def __init__(self):
super(NoopManager, self).__init__()
self.networkconfigconfig = {}
def allocate_vip(self, loadbalancer):
LOG.debug("Network %s no-op, allocate_vip loadbalancer %s",
self.__class__.__name__, loadbalancer)
self.networkconfigconfig[loadbalancer.id] = (
loadbalancer, 'allocate_vip')
return data_models.Vip(ip_address='198.51.100.1',
subnet_id=uuidutils.generate_uuid(),
port_id=uuidutils.generate_uuid(),
load_balancer_id=loadbalancer.id)
def deallocate_vip(self, vip):
LOG.debug("Network %s no-op, deallocate_vip vip %s",
self.__class__.__name__, vip.ip_address)
self.networkconfigconfig[vip.ip_address] = (vip,
'deallocate_vip')
def plug_vip(self, loadbalancer, vip):
LOG.debug("Network %s no-op, plug_vip loadbalancer %s, vip %s",
self.__class__.__name__,
loadbalancer.id, vip.ip_address)
self.networkconfigconfig[(loadbalancer.id,
vip.ip_address)] = (loadbalancer, vip,
'plug_vip')
amps = []
for amphora in loadbalancer.amphorae:
amps.append(data_models.Amphora(
id=amphora.id,
compute_id=amphora.compute_id,
vrrp_ip='198.51.100.1',
ha_ip='198.51.100.1',
vrrp_port_id=uuidutils.generate_uuid(),
ha_port_id=uuidutils.generate_uuid()
))
return amps
def plug_distributor_vip(self, load_balancer, distributor, vip):
LOG.debug("Network %s no-op, plug_distributor_vip "
"load_balancer %s, vip %s, distributor %s",
self.__class__.__name__,
load_balancer.id, vip.ip_address, distributor.id)
self.networkconfigconfig[(load_balancer.id,
distributor.id,
vip.ip_address,)] = (load_balancer,
distributor,
vip,
'plug_distributor_vip')
def unplug_vip(self, load_balancer, vip):
LOG.debug("Network %s no-op, unplug_vip load_balancer %s, vip %s",
self.__class__.__name__,
load_balancer.id, vip.ip_address)
self.networkconfigconfig[(load_balancer.id,
vip.ip_address)] = (load_balancer, vip,
'unplug_vip')
def unplug_distributor_vip(self, load_balancer, distributor, vip):
LOG.debug("Network %s no-op, unplug_distributor_vip "
"load_balancer %s, vip %s, distributor %s",
self.__class__.__name__,
load_balancer.id, vip.ip_address, distributor.id)
self.networkconfigconfig[(load_balancer.id,
distributor.id,
vip.ip_address)] = (load_balancer,
distributor,
vip,
'unplug_distributor_vip')
def plug_network(self, compute_id, network_id, ip_address=None):
LOG.debug("Network %s no-op, plug_network compute_id %s, network_id "
"%s, ip_address %s", self.__class__.__name__, compute_id,
network_id, ip_address)
self.networkconfigconfig[(compute_id, network_id, ip_address)] = (
compute_id, network_id, ip_address, 'plug_network')
return network_models.Interface(
id=uuidutils.generate_uuid(),
compute_id=compute_id,
network_id=network_id,
fixed_ips=[],
port_id=uuidutils.generate_uuid()
)
def unplug_network(self, compute_id, network_id, ip_address=None):
LOG.debug("Network %s no-op, unplug_network compute_id %s, "
"network_id %s",
self.__class__.__name__, compute_id, network_id)
self.networkconfigconfig[(compute_id, network_id, ip_address)] = (
compute_id, network_id, ip_address, 'unplug_network')
def get_plugged_networks(self, compute_id):
LOG.debug("Network %s no-op, get_plugged_networks amphora_id %s",
self.__class__.__name__, compute_id)
self.networkconfigconfig[compute_id] = (
compute_id, 'get_plugged_networks')
return []
def update_vip(self, loadbalancer):
LOG.debug("Network %s no-op, update_vip loadbalancer %s",
self.__class__.__name__, loadbalancer)
self.networkconfigconfig[loadbalancer.id] = (
loadbalancer, 'update_vip')
def get_network(self, network_id):
LOG.debug("Network %s no-op, get_network network_id %s",
self.__class__.__name__, network_id)
self.networkconfigconfig[network_id] = (network_id, 'get_network')
return network_models.Network(id=uuidutils.generate_uuid())
def get_subnet(self, subnet_id):
LOG.debug("Subnet %s no-op, get_subnet subnet_id %s",
self.__class__.__name__, subnet_id)
self.networkconfigconfig[subnet_id] = (subnet_id, 'get_subnet')
return network_models.Subnet(id=uuidutils.generate_uuid())
def get_port(self, port_id):
LOG.debug("Port %s no-op, get_port port_id %s",
self.__class__.__name__, port_id)
self.networkconfigconfig[port_id] = (port_id, 'get_port')
return network_models.Port(id=uuidutils.generate_uuid())
def get_network_by_name(self, network_name):
LOG.debug("Network %s no-op, get_network_by_name network_name %s",
self.__class__.__name__, network_name)
self.networkconfigconfig[network_name] = (network_name,
'get_network_by_name')
return network_models.Network(id=uuidutils.generate_uuid())
def get_subnet_by_name(self, subnet_name):
LOG.debug("Subnet %s no-op, get_subnet_by_name subnet_name %s",
self.__class__.__name__, subnet_name)
self.networkconfigconfig[subnet_name] = (subnet_name,
'get_subnet_by_name')
return network_models.Subnet(id=uuidutils.generate_uuid())
def get_port_by_name(self, port_name):
LOG.debug("Port %s no-op, get_port_by_name port_name %s",
self.__class__.__name__, port_name)
self.networkconfigconfig[port_name] = (port_name, 'get_port_by_name')
return network_models.Port(id=uuidutils.generate_uuid())
def get_port_by_net_id_device_id(self, network_id, device_id):
LOG.debug("Port %s no-op, get_port_by_net_id_device_id network_id %s"
" device_id %s",
self.__class__.__name__, network_id, device_id)
self.networkconfigconfig[(network_id, device_id)] = (
network_id, device_id, 'get_port_by_net_id_device_id')
return network_models.Port(id=uuidutils.generate_uuid())
def failover_preparation(self, amphora):
LOG.debug("failover %s no-op, failover_preparation, amphora id %s",
self.__class__.__name__, amphora.id)
def plug_port(self, compute_id, port):
LOG.debug("Network %s no-op, plug_port compute_id %s, port_id "
"%s", self.__class__.__name__, compute_id, port.id)
self.networkconfigconfig[(compute_id, port)] = (
compute_id, port, 'plug_port')
def get_network_configs(self, loadbalancer):
LOG.debug("Network %s no-op, get_network_configs loadbalancer id %s ",
self.__class__.__name__, loadbalancer.id)
self.networkconfigconfig[(loadbalancer.id)] = (
loadbalancer, 'get_network_configs')
def wait_for_port_detach(self, amphora):
LOG.debug("failover %s no-op, wait_for_port_detach, amphora id %s",
self.__class__.__name__, amphora.id)
class NoopNetworkDriver(driver_base.AbstractNetworkDriver):
def __init__(self):
super(NoopNetworkDriver, self).__init__()
self.driver = NoopManager()
def allocate_vip(self, loadbalancer):
return self.driver.allocate_vip(loadbalancer)
def allocate_amphora_vip(self, load_balancer):
return self.driver.allocate_vip(load_balancer)
def deallocate_vip(self, vip):
self.driver.deallocate_vip(vip)
def plug_vip(self, loadbalancer, vip):
return self.driver.plug_vip(loadbalancer, vip)
def plug_distributor_vip(self, load_balancer, distributor, vip):
return self.driver.plug_distributor_vip(load_balancer,
distributor, vip)
def unplug_vip(self, load_balancer, vip):
self.driver.unplug_vip(load_balancer, vip)
def unplug_distributor_vip(self, load_balancer, distributor, vip):
return self.driver.unplug_distributor_vip(load_balancer,
distributor, vip)
def plug_network(self, amphora_id, network_id, ip_address=None):
return self.driver.plug_network(amphora_id, network_id, ip_address)
def unplug_network(self, amphora_id, network_id, ip_address=None):
self.driver.unplug_network(amphora_id, network_id,
ip_address=ip_address)
def get_plugged_networks(self, amphora_id):
return self.driver.get_plugged_networks(amphora_id)
def update_vip(self, loadbalancer):
self.driver.update_vip(loadbalancer)
def get_network(self, network_id):
return self.driver.get_network(network_id)
def get_subnet(self, subnet_id):
return self.driver.get_subnet(subnet_id)
def get_port(self, port_id):
return self.driver.get_port(port_id)
def get_network_by_name(self, network_name):
return self.driver.get_network_by_name(network_name)
def get_subnet_by_name(self, subnet_name):
return self.driver.get_subnet_by_name(subnet_name)
def get_port_by_name(self, port_name):
return self.driver.get_port_by_name(port_name)
def get_port_by_net_id_device_id(self, network_id, device_id):
return self.driver.get_port_by_net_id_device_id(network_id, device_id)
def failover_preparation(self, amphora):
self.driver.failover_preparation(amphora)
def plug_port(self, compute_id, port):
return self.driver.plug_port(compute_id, port)
def get_network_configs(self, loadbalancer):
return self.driver.get_network_configs(loadbalancer)
def wait_for_port_detach(self, amphora):
self.driver.wait_for_port_detach(amphora)
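# Hedged sketch of how this no-op driver is exercised in tests (not part of the
# original module): every call is recorded in NoopManager.networkconfigconfig,
# so assertions can check what the controller asked the network driver to do.
#   driver = NoopNetworkDriver()
#   driver.unplug_network('compute-1', 'net-1')
#   assert driver.driver.networkconfigconfig[('compute-1', 'net-1', None)] == (
#       'compute-1', 'net-1', None, 'unplug_network')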
| 44.113971 | 79 | 0.625135 |
23c01a86de38bcce38fc6331cb45380399485775 | 12,740 | py | Python | bbp/utils/batch/build_xml.py | kevinmilner/bbp | d9ba291b123be4e85f76317ef23600a339b2354d | [
"Apache-2.0"
] | 28 | 2017-10-31T09:16:30.000Z | 2022-02-28T23:44:29.000Z | bbp/utils/batch/build_xml.py | kevinmilner/bbp | d9ba291b123be4e85f76317ef23600a339b2354d | [
"Apache-2.0"
] | 37 | 2017-05-23T15:15:35.000Z | 2022-02-05T09:13:18.000Z | bbp/utils/batch/build_xml.py | kevinmilner/bbp | d9ba291b123be4e85f76317ef23600a339b2354d | [
"Apache-2.0"
] | 26 | 2017-09-21T17:43:33.000Z | 2021-11-29T06:34:30.000Z | #!/usr/bin/env python
#build_xml.py
#v1.1, 20110817
#Generate XML files formatted for BBP
#Input: Run description files (txt)
#Output: BBP XML file
#Sample input run description file:
#m6.00_d20_r90_z0_0100.txt
#RUN_TAG = 0010100
#VALIDATION_RUN = n
#SOURCE_DESCRIPTION_FILE = /home/scec-00/rgraves/NgaW2/FwHw/FaultInfo/Inputs/m6.00_d20_r90_z0.src
#STATION_LIST_FILE = /home/scec-00/rgraves/NgaW2/FwHw/StatInfo/rv01-m6.00_stats.stl
#RUPTURE_GENERATOR = URS
#LOW_FREQUENCY_MODULE = URS
#HIGH_FREQUENCY_MODULE = URS
#SITE_RESPONSE_MODULE = URS
#PLOT_VEL = y
#PLOT_ACC = y
#RUN_GOF = n
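#
#Hedged usage sketch (paths are placeholders): given a folder of such run
#description files the script is invoked as
#  ./build_xml.py -i /path/to/run_descriptions -x /path/to/xml_output
#and it writes one BBP XML file per valid description into the -x folder,
#using the -i/--in-dir and -x/--xml-dir options defined below.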
import os
import shutil
import sys
import time
import optparse
from install_cfg import InstallCfg
import bband_utils
import velocity_models
def main():
parser = optparse.OptionParser()
parser.add_option("-x", "--xml-dir", dest="xmlDir", help="Output folder for XML files", metavar="XML_DIR")
parser.add_option("-i", "--in-dir", dest="inDir", help="Input folder with run description files", metavar="IN_DIR")
(options, args) = parser.parse_args()
if not options.inDir:
parser.error("Folder with run description files is required!")
if os.path.exists(options.inDir):
files = os.listdir(options.inDir)
else:
print "Invalid input dir: %s" % options.inDir
sys.exit(1)
if not files:
print "No run description files were found in input dir: %s" % inDir
sys.exit(1)
if not options.xmlDir:
cur_dir= os.getcwd()
options.xmlDir = os.path.join(cur_dir, "run_xml")
print ("Note: Output XML directory was not specified!\n"
"XMLs will be written to %s" % options.xmlDir)
if not os.path.exists(options.xmlDir):
os.mkdir(options.xmlDir)
required_keys = ["RUN_TAG", "VALIDATION_RUN", "VMODEL_NAME",
"SOURCE_DESCRIPTION_FILE", "STATION_LIST_FILE",
"RUPTURE_GENERATOR", "LOW_FREQUENCY_MODULE",
"HIGH_FREQUENCY_MODULE", "SITE_RESPONSE_MODULE",
"PLOT_VEL", "PLOT_ACC", "RUN_GOF", "GEN_HTML"]
cfg = InstallCfg()
valid_event = ['y', 'n']
available_vmodels = velocity_models.get_all_names()
rupture_gen = ['y', 'n']
rup = ['GP', 'UCSB']
lf = ['GP', 'UCSB']
hf = ['GP', 'UCSB', 'SDSU']
site = ['GP', 'UCSB', 'SDSU', "None"]
plotVel = ['y', 'n']
plotAcc = ['y', 'n']
doGof = ['y', 'n']
gof = ['GP', 'SDSU']
genHtml = ['y', 'n']
# Move out all the files we don't need from start dir
for file in os.listdir(cfg.A_USER_DATA_DIR):
if file.endswith(".srf") or file.endswith(".stl") or file.endswith(".src"):
if not os.path.exists("%s/tmp" % cfg.A_USER_DATA_DIR):
os.mkdir("%s/tmp" % cfg.A_USER_DATA_DIR)
shutil.move("%s/%s" % (cfg.A_USER_DATA_DIR, file), "%s/tmp/%s" % (cfg.A_USER_DATA_DIR, file))
filecount = 0
for file in files:
if file.endswith(".txt"):
file_base = file[0:file.find(".txt")]
#pieces = file_base.split("_")
input_config = {}
output_files = {}
in_file = "%s/%s" % (options.inDir, file)
fn = open(in_file, 'r')
lines = fn.readlines()
if not lines:
print "Warning: Skipping empty input file: %s"% file
continue
for line in lines:
key,value = line.strip().split("=")
input_config[key.strip()] = value.strip()
if not input_config:
print "Warning: Skipping malformed input file: %s" % file
continue
input_keys = input_config.keys()
if not set(input_keys) == set(required_keys):
print "Warning: Skipping malformed input file: %s" % file
print " Missing keys:", list(set(input_keys).symmetric_difference(set(required_keys)))
continue
valid = True
try:
simID = input_config["RUN_TAG"]
if simID[0] == "0":
print "Warning: RUN_TAG cannot start with '0'! RUN_TAG will be left-padded with '1'!"
simID ="1%s" % simID
simID = int(simID)
except ValueError:
print "Invalid value for RUN_TAG: %s, Integer value expected!" % input_config["RUN_TAG"]
valid = False
validation = input_config["VALIDATION_RUN"]
if not validation in valid_event:
print ("Invalid option for VALIDATION_RUN: %s, Expected: %s" %
(validation, str(valid_event)))
valid = False
vel_model = input_config["VMODEL_NAME"]
if not vel_model in available_vmodels:
print ("Velocity model %s not available in the platform!" %
(vel_model))
print available_vmodels
valid = False
if validation =='n':
r_gen = input_config["RUPTURE_GENERATOR"]
if not r_gen in rup:
print "Invalid option for RUPTURE_GENERATOR: %s, Expected: %s" % (r_gen, str(rup))
valid = False
rupture_generator = 'n'
else:
r_gen = rup.index(r_gen) +1
rupture_generator = 'y'
lfm = input_config["LOW_FREQUENCY_MODULE"]
if not lfm in lf:
print "Invalid option for LOW_FREQUENCY_MODULE: %s, Expected: %s" % (lfm, str(lf))
valid = False
else:
lfm = lf.index(lfm) +1
hfm = input_config["HIGH_FREQUENCY_MODULE"]
if not hfm in hf:
print "Invalid option for HIGH_FREQUENCY_MODULE: %s, Expected: %s" % (hfm, str(hf))
valid = False
else:
hfm = hf.index(hfm) +1
srm = input_config["SITE_RESPONSE_MODULE"]
if not srm in site:
print "Invalid option for SITE_RESPONSE_MODULE: %s, Expected: %s"%(srm, str(site))
valid = False
else:
srm = site.index(srm) +1
plt_vel = input_config["PLOT_VEL"]
if not plt_vel in plotVel:
print "Invalid option for PLOT_VEL: %s, Expected: %s"% (plt_vel, str(plotVel))
valid = False
plt_acc= input_config["PLOT_ACC"]
if not plt_acc in plotAcc:
print ("Invalid option for PLOT_ACC: %s, Expected: %s" %
(plt_acc, str(plotAcc)))
valid = False
gof_opt = input_config["RUN_GOF"]
if not gof_opt in doGof:
print ("Invalid option for RUN_GOF: %s, Expected: %s" %
(gof_opt, str(doGof)))
valid = False
gen_html = input_config["GEN_HTML"]
if not gen_html in genHtml:
print ("Invalid option for GEN_HTML: %s, Expected: %s" %
(gen_html, str(genHtml)))
valid = False
src_file = os.path.abspath(input_config["SOURCE_DESCRIPTION_FILE"])
if not os.path.exists(src_file):
print "Unable to locate specified source file: %s" % src_file
valid = False
stat_file = os.path.abspath(input_config["STATION_LIST_FILE"])
if not os.path.exists(stat_file):
print "Unable to locate stations file: %s" % stat_file
valid = False
if not valid:
print "Skipping input file %s due to previous errors!" % file
continue
# Generate an options file
optfilename = "%s/%s_%s.txt" % (options.xmlDir, str(simID), file_base)
print "Generating options file %s" % optfilename
fp = open(optfilename, 'w')
fp.write("%s\n" % validation)
fp.write("%s\n" % vel_model)
fp.write("%s\n" % rupture_generator)
if rupture_generator == 'y':
fp.write("%d\n" % r_gen)
fp.write("2\n") # Enter path to source file
fp.write("%s\n" % (src_file))
else:
# Need to write a path to a srf file
pass
fp.write("%d\n" % lfm)
fp.write("2\n") # Enter path to station list
fp.write("%s\n" % (stat_file))
fp.write("%d\n" % hfm)
fp.write("%d\n" % srm)
fp.write("%s\n" % plt_vel)
fp.write("%s\n" % plt_acc)
fp.write("%s\n" % gof_opt)
if gof_opt == 'y':
fp.write("1\n") #defualt to GP GOF module for now!
fp.write("%s\n" % gen_html)
fp.flush()
fp.close()
#copy src and stl files to start dir
#shutil.copy2(src_file, "%s" % cfg.A_USER_DATA_DIR)
#shutil.copy2(stat_file, "%s" % cfg.A_USER_DATA_DIR)
#move bpp dirs with simID
indatadir = "%s/%d" % (cfg.A_IN_DATA_DIR, simID)
outdatadir = "%s/%d" % (cfg.A_OUT_DATA_DIR, simID)
tmpdatadir = "%s/%d" % (cfg.A_TMP_DATA_DIR, simID)
logdir = "%s/%d" % (cfg.A_OUT_LOG_DIR, simID)
if os.path.exists(indatadir):
shutil.move(indatadir, "%s_%s" % (indatadir, "bkp"))
if os.path.exists(tmpdatadir):
shutil.move(tmpdatadir, "%s_%s" % (tmpdatadir, "bkp"))
if os.path.exists(outdatadir):
shutil.move(outdatadir, "%s_%s" % (outdatadir, "bkp"))
if os.path.exists(logdir):
shutil.move(logdir, "%s_%s" % (logdir, "bkp"))
#Generate XML
#bband_utils.runprog("%s/run_bbp.py -o %s -s %d" % (cfg.A_COMP_DIR, filename, simID))
bband_utils.runprog("%s/run_bbp.py -o %s -s %d -g" % (cfg.A_COMP_DIR, optfilename, simID))
outdir = "%s/%d" % (cfg.A_OUT_DATA_DIR, simID)
#Remove option file
os.remove(optfilename)
#Remove src and stl files from start dir
#os.remove("%s/%s" % (cfg.A_USER_DATA_DIR, os.path.basename(src_file)))
#os.remove("%s/%s" % (cfg.A_USER_DATA_DIR, os.path.basename(stat_file)))
#move back bpp dirs with simID
if os.path.exists("%s_%s" % (indatadir, "bkp")):
shutil.move("%s_%s" % (indatadir, "bkp"), indatadir)
else:
shutil.rmtree(indatadir)
if os.path.exists("%s_%s" % (tmpdatadir, "bkp")):
shutil.move("%s_%s" % (tmpdatadir, "bkp"), tmpdatadir)
else:
shutil.rmtree(tmpdatadir)
if os.path.exists("%s_%s" % (outdatadir, "bkp")):
shutil.move("%s_%s" % (outdatadir, "bkp"), outdatadir)
else:
shutil.rmtree(outdatadir)
if os.path.exists("%s_%s" % (logdir, "bkp")):
shutil.move("%s_%s" % (logdir, "bkp"), logdir)
else:
shutil.rmtree(logdir)
#Copy xml to xmlDir
xmlfilename = "%s/%d_%s.xml" % (options.xmlDir, simID, file_base)
if not os.path.exists("%s/%d.xml" % (cfg.A_XML_DIR, simID)):
print "Failed to generate XML file for %s" % file
continue
shutil.copy2("%s/%d.xml" % (cfg.A_XML_DIR, simID), xmlfilename)
#Fix path in XML file
# xfn = open(xmlfilename, 'r')
# xlines = xfn.readlines()
# xfn.close()
#
# xfn=open(xmlfilename, 'w')
# src_file_base = os.path.basename(src_file)
# stat_file_base = os.path.basename(stat_file)
# old_src_line = "$BBP_INSTALL/start/%s" % src_file_base
# old_stat_line = "$BBP_INSTALL/start/%s" % stat_file_base
# for xline in xlines:
# if xline.strip().startswith(old_src_line):
# xfn.write(xline.replace(old_src_line,src_file))
# elif xline.strip().startswith(old_stat_line):
# xfn.write(xline.replace(old_stat_line,stat_file))
# else:
# xfn.write(xline)
#
# xfn.flush()
# xfn.close()
filecount +=1
print "Processed %d files, skipped %d files!" % (filecount, len(files) - filecount)
if __name__ == "__main__":
main()
| 39.2 | 119 | 0.526452 |
f0e94f3a9363f449e882e2e4a037bec5497a4af2 | 5,717 | py | Python | resources/lib/state.py | ghostface/hyperion.kodi | 2b38eb121ab9099ac7ca04007d32d9c42f136e09 | [
"MIT"
] | null | null | null | resources/lib/state.py | ghostface/hyperion.kodi | 2b38eb121ab9099ac7ca04007d32d9c42f136e09 | [
"MIT"
] | null | null | null | resources/lib/state.py | ghostface/hyperion.kodi | 2b38eb121ab9099ac7ca04007d32d9c42f136e09 | [
"MIT"
] | null | null | null | import math
import xbmc
import xbmcaddon
import binascii
from hyperion.Hyperion import Hyperion
from misc import log
from misc import notify
class DisconnectedState:
'''
Default state class when disconnected from the Hyperion server
'''
def __init__(self, settings):
'''Constructor
- settings: Settings structure
'''
log("Entering disconnected state")
self.__settings = settings
def execute(self):
'''Execute the state
- return: The new state to execute
'''
# check if we are enabled
if not self.__settings.grabbing():
xbmc.sleep(500)
return self
# we are enabled and want to advance to the connected state
try:
nextState = ConnectedState(self.__settings)
return nextState
except Exception as e:
# unable to connect. notify and go to the error state
if self.__settings.showErrorMessage:
notify(xbmcaddon.Addon().getLocalizedString(32100))
self.__settings.showErrorMessage = False
# continue in the error state
return ErrorState(self.__settings)
class ConnectedState:
'''
State class when connected to Hyperion and grabbing video
'''
def __init__(self, settings):
'''Constructor
- settings: Settings structure
'''
log("Entering connected state")
self.__settings = settings
self.__hyperion = None
self.__capture = None
self.__captureState = None
self.__data = None
# try to connect to hyperion
self.__hyperion = Hyperion(self.__settings.address, self.__settings.port)
# Force clearing of priority (mainly for Orbs)
self.clear_priority()
# create the capture object
self.__capture = xbmc.RenderCapture()
self.__capture.capture(self.__settings.capture_width, self.__settings.capture_height)
def __del__(self):
'''Destructor
'''
del self.__hyperion
del self.__capture
del self.__captureState
del self.__data
def clear_priority(self):
# Force clearing of priority (mainly for Orbs)
xbmc.sleep(1000)
self.__hyperion.clear(self.__settings.priority)
xbmc.sleep(1000)
self.__hyperion.clear(self.__settings.priority)
def execute(self):
'''Execute the state
- return: The new state to execute
'''
# check if we still need to grab
if not self.__settings.grabbing():
self.clear_priority()
# return to the disconnected state
return DisconnectedState(self.__settings)
# capture an image
startReadOut = False
self.__data = self.__capture.getImage()
hexdata = binascii.b2a_hex(self.__data)
if len(self.__data) > 0 and not hexdata.startswith(b'0000000000000000') and not hexdata.startswith(b'000000ff000000ff'):
startReadOut = True
if startReadOut:
# retrieve image data and reformat into rgb format
if self.__capture.getImageFormat() == 'BGRA':
del self.__data[3::4]
self.__data[0::3], self.__data[2::3] = self.__data[2::3], self.__data[0::3]
try:
# send image to hyperion
self.__hyperion.sendImage(self.__capture.getWidth(), self.__capture.getHeight(), str(self.__data),
self.__settings.priority, -1)
except Exception as e:
# unable to send image. notify and go to the error state
notify(xbmcaddon.Addon().getLocalizedString(32101))
return ErrorState(self.__settings)
else:
# Force clearing of priority (mainly for Orbs)
self.clear_priority()
# Sleep if any delay is configured
sleeptime = self.__settings.delay
if self.__settings.useDefaultDelay == False:
try:
videofps = math.ceil(float(xbmc.getInfoLabel('Player.Process(VideoFPS)')))
if videofps == 24:
sleeptime = self.__settings.delay24
if videofps == 25:
sleeptime = self.__settings.delay25
if videofps == 50:
sleeptime = self.__settings.delay50
if videofps == 59:
sleeptime = self.__settings.delay59
if videofps == 60:
sleeptime = self.__settings.delay60
except ValueError:
pass
# log('Sleeping for (ms): %d :: %.3f' % (sleeptime, videofps))
xbmc.sleep(sleeptime)
return self
class ErrorState:
'''
State class which is activated upon an error
'''
def __init__(self, settings):
'''Constructor
- settings: Settings structure
'''
log("Entering error state")
self.__settings = settings
def execute(self):
'''Execute the state
- return: The new state to execute
'''
# take note of the current revision of the settings
rev = self.__settings.rev
# stay in error state for the specified timeout or until the settings have been changed
i = 0
while (i < self.__settings.timeout) and (rev == self.__settings.rev):
if self.__settings.abort:
return self
else:
xbmc.sleep(1000)
i += 1
# continue in the disconnected state
return DisconnectedState(self.__settings)
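# Hedged sketch of the loop these states assume (the settings/monitor objects
# are illustrative, not defined in this module): each execute() call returns
# the next state, so the add-on simply keeps swapping its current state object.
#   state = DisconnectedState(settings)
#   while not monitor.abortRequested():
#       state = state.execute()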
| 32.299435 | 156 | 0.587196 |
526ff59ab9db93e0ef7cd23da922d61e15f36fe9 | 1,499 | py | Python | kapitan/errors.py | gruzewski/kapitan | f9cb7f90cc4f6e5065ed1e2eb965953004c6d54f | [
"Apache-2.0"
] | null | null | null | kapitan/errors.py | gruzewski/kapitan | f9cb7f90cc4f6e5065ed1e2eb965953004c6d54f | [
"Apache-2.0"
] | null | null | null | kapitan/errors.py | gruzewski/kapitan | f9cb7f90cc4f6e5065ed1e2eb965953004c6d54f | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Kapitan Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"kapitan error classes"
class KapitanError(Exception):
"""generic kapitan error"""
pass
class CompileError(KapitanError):
"""compile error"""
pass
class InventoryError(KapitanError):
"""inventory error"""
pass
class SecretError(KapitanError):
"""secrets error"""
pass
class RefError(KapitanError):
"""ref error"""
pass
class RefBackendError(KapitanError):
"""ref backend error"""
pass
class RefFromFuncError(KapitanError):
"""ref from func error"""
pass
class RefHashMismatchError(KapitanError):
"""ref has mismatch error"""
pass
class GitSubdirNotFoundError(KapitanError):
"""git dependency subdir not found error"""
pass
class RequestUnsuccessfulError(KapitanError):
"""request error"""
pass
class KubernetesManifestValidationError(KapitanError):
"""kubernetes manifest schema validation error"""
pass
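# Hedged usage sketch (the compile call is illustrative, not defined here):
# because every exception above derives from KapitanError, callers can catch
# the whole family in one handler, e.g.
#   try:
#       compile_targets(...)
#   except KapitanError as e:
#       logger.error("kapitan failed: %s", e)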
| 21.112676 | 74 | 0.719146 |
f1da9bc2d9a1c4d1658299deeb916540b9eb5d29 | 8,371 | py | Python | tools/plain_train_net.py | JaninaMattes/detectron2_dla | da9b0925eb280a208e7837986f7cf79779d3ca61 | [
"Apache-2.0"
] | null | null | null | tools/plain_train_net.py | JaninaMattes/detectron2_dla | da9b0925eb280a208e7837986f7cf79779d3ca61 | [
"Apache-2.0"
] | null | null | null | tools/plain_train_net.py | JaninaMattes/detectron2_dla | da9b0925eb280a208e7837986f7cf79779d3ca61 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Detectron2 training script with a plain training loop.
This script reads a given config file and runs the training or evaluation.
It is an entry point that is able to train standard models in detectron2.
In order to let one script support training of many models,
this script contains logic that is specific to these built-in models and therefore
may not be suitable for your own project.
For example, your research project perhaps only needs a single "evaluator".
Therefore, we recommend that you use detectron2 as a library and take
this file as an example of how to use the library.
You may want to write your own script with your datasets and other customizations.
Compared to "train_net.py", this script supports fewer features, and also
includes less abstraction.
"""
import logging
import os
from collections import OrderedDict
import torch
from torch.nn.parallel import DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
from detectron2.config import get_cfg
from detectron2.data import (
MetadataCatalog,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.engine import default_argument_parser, default_setup, launch
from detectron2.evaluation import (
CityscapesEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
inference_on_dataset,
print_csv_format,
)
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils.events import (
CommonMetricPrinter,
EventStorage,
JSONWriter,
TensorboardXWriter,
)
logger = logging.getLogger("detectron2")
def get_evaluator(cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
evaluator_list.append(
SemSegEvaluator(
dataset_name,
distributed=True,
num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
output_dir=output_folder,
)
)
if evaluator_type in ["coco", "coco_panoptic_seg"]:
evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
if evaluator_type == "coco_panoptic_seg":
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
if evaluator_type == "cityscapes":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesEvaluator(dataset_name)
if evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
if evaluator_type == "lvis":
return LVISEvaluator(dataset_name, cfg, True, output_folder)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
)
if len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
def do_test(cfg, model):
results = OrderedDict()
for dataset_name in cfg.DATASETS.TEST:
data_loader = build_detection_test_loader(cfg, dataset_name)
evaluator = get_evaluator(
cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
)
results_i = inference_on_dataset(model, data_loader, evaluator)
results[dataset_name] = results_i
if comm.is_main_process():
logger.info("Evaluation results for {} in csv format:".format(dataset_name))
print_csv_format(results_i)
if len(results) == 1:
results = list(results.values())[0]
return results
def do_train(cfg, model, resume=False):
model.train()
optimizer = build_optimizer(cfg, model)
scheduler = build_lr_scheduler(cfg, optimizer)
checkpointer = DetectionCheckpointer(
model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
)
start_iter = (
checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
)
max_iter = cfg.SOLVER.MAX_ITER
periodic_checkpointer = PeriodicCheckpointer(
checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
)
writers = (
[
CommonMetricPrinter(max_iter),
JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
TensorboardXWriter(cfg.OUTPUT_DIR),
]
if comm.is_main_process()
else []
)
# compared to "train_net.py", we do not support accurate timing and
# precise BN here, because they are not trivial to implement
data_loader = build_detection_train_loader(cfg)
logger.info("Starting training from iteration {}".format(start_iter))
with EventStorage(start_iter) as storage:
for data, iteration in zip(data_loader, range(start_iter, max_iter)):
iteration = iteration + 1
storage.step()
loss_dict = model(data)
losses = sum(loss for loss in loss_dict.values())
assert torch.isfinite(losses).all(), loss_dict
loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
if comm.is_main_process():
storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)
optimizer.zero_grad()
losses.backward()
optimizer.step()
storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
scheduler.step()
if (
cfg.TEST.EVAL_PERIOD > 0
and iteration % cfg.TEST.EVAL_PERIOD == 0
and iteration != max_iter
):
do_test(cfg, model)
# Compared to "train_net.py", the test results are not dumped to EventStorage
comm.synchronize()
if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):
for writer in writers:
writer.write()
periodic_checkpointer.step(iteration)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(
cfg, args
) # if you don't like any of the default setup, write your own setup code
return cfg
def main(args):
cfg = setup(args)
model = build_model(cfg)
logger.info("Model:\n{}".format(model))
if args.eval_only:
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
return do_test(cfg, model)
distributed = comm.get_world_size() > 1
if distributed:
model = DistributedDataParallel(
model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
)
do_train(cfg, model)
return do_test(cfg, model)
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
| 36.238095 | 100 | 0.660853 |
75d3039c211943120cff97d08ed4e838a6501fd0 | 2,806 | py | Python | test/graph_definitions/star_ring_4_unbalanced.py | seth586/lndmanage | 64ac11fa5c24ce5793e894ea181b5d0a8e9ef78d | [
"MIT"
] | 166 | 2019-04-10T17:20:44.000Z | 2022-03-22T18:21:04.000Z | test/graph_definitions/star_ring_4_unbalanced.py | seth586/lndmanage | 64ac11fa5c24ce5793e894ea181b5d0a8e9ef78d | [
"MIT"
] | 67 | 2019-04-12T04:44:58.000Z | 2022-03-16T06:40:34.000Z | test/graph_definitions/star_ring_4_unbalanced.py | seth586/lndmanage | 64ac11fa5c24ce5793e894ea181b5d0a8e9ef78d | [
"MIT"
] | 19 | 2019-04-13T11:01:37.000Z | 2022-03-01T08:02:14.000Z | """
Implements a complete graph, where the master node A can be thought of as
being surrounded by four nodes that share a liquid network of channels.
The master node has an unbalanced total inbound-to-outbound ratio.
"""
nodes = {
'A': {
'grpc_port': 11009,
'rest_port': 8080,
'port': 9735,
'base_fee_msat': 1,
'fee_rate': 0.000001,
'channels': {
1: {
'to': 'B',
'capacity': 1000000,
'ratio_local': 10,
'ratio_remote': 0,
},
2: {
'to': 'C',
'capacity': 1000000,
'ratio_local': 3,
'ratio_remote': 7,
},
3: {
'to': 'D',
'capacity': 1000000,
'ratio_local': 3,
'ratio_remote': 7,
},
4: {
'to': 'E',
'capacity': 1000000,
'ratio_local': 4,
'ratio_remote': 6,
},
}
},
'B': {
'grpc_port': 11010,
'rest_port': 8081,
'port': 9736,
'base_fee_msat': 2,
'fee_rate': 0.000001,
'channels': {
5: {
'to': 'C',
'capacity': 5000000,
'ratio_local': 5,
'ratio_remote': 5,
},
6: {
'to': 'D',
'capacity': 5000000,
'ratio_local': 5,
'ratio_remote': 5,
},
7: {
'to': 'E',
'capacity': 5000000,
'ratio_local': 5,
'ratio_remote': 5,
},
}
},
'C': {
'grpc_port': 11011,
'rest_port': 8082,
'port': 9737,
'base_fee_msat': 2,
'fee_rate': 0.000001,
'channels': {
8: {
'to': 'D',
'capacity': 5000000,
'ratio_local': 5,
'ratio_remote': 5,
},
9: {
'to': 'E',
'capacity': 5000000,
'ratio_local': 5,
'ratio_remote': 5,
},
}
},
'D': {
'grpc_port': 11012,
'rest_port': 8083,
'port': 9738,
'base_fee_msat': 2,
'fee_rate': 0.000001,
'channels': {
10: {
'to': 'E',
'capacity': 5000000,
'ratio_local': 5,
'ratio_remote': 5,
},
}
},
'E': {
'grpc_port': 11013,
'rest_port': 8084,
'port': 9739,
'base_fee_msat': 2,
'fee_rate': 0.000001,
'channels': {
}
},
}
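# Hedged reading of the fixture (illustrative, not part of the test data): the
# ratio fields of each channel appear to sum to 10, so a channel's local
# balance in satoshis would follow as
#   ch = nodes['A']['channels'][1]
#   local_sat = ch['capacity'] * ch['ratio_local'] / (ch['ratio_local'] + ch['ratio_remote'])
#   # -> 1000000 for channel 1, which sits entirely on A's side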
| 24.831858 | 73 | 0.349964 |
e8b466d6b16c7f4d6d8b2938cd6adcc616e07872 | 9,294 | py | Python | huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/list_backups_result.py | Adek06/huaweicloud-sdk-python-v3 | 3d13b27d089e04a1ae567cd649b3c5509e0391d2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/list_backups_result.py | Adek06/huaweicloud-sdk-python-v3 | 3d13b27d089e04a1ae567cd649b3c5509e0391d2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/list_backups_result.py | Adek06/huaweicloud-sdk-python-v3 | 3d13b27d089e04a1ae567cd649b3c5509e0391d2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
class ListBackupsResult:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'name': 'str',
'instance_id': 'str',
'instance_name': 'str',
'datastore': 'ListBackupsDatastoreResult',
'type': 'str',
'begin_time': 'str',
'end_time': 'str',
'status': 'str',
'size': 'int',
'description': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'instance_id': 'instance_id',
'instance_name': 'instance_name',
'datastore': 'datastore',
'type': 'type',
'begin_time': 'begin_time',
'end_time': 'end_time',
'status': 'status',
'size': 'size',
'description': 'description'
}
def __init__(self, id=None, name=None, instance_id=None, instance_name=None, datastore=None, type=None, begin_time=None, end_time=None, status=None, size=None, description=None):
"""ListBackupsResult - a model defined in huaweicloud sdk"""
self._id = None
self._name = None
self._instance_id = None
self._instance_name = None
self._datastore = None
self._type = None
self._begin_time = None
self._end_time = None
self._status = None
self._size = None
self._description = None
self.discriminator = None
self.id = id
self.name = name
self.instance_id = instance_id
self.instance_name = instance_name
self.datastore = datastore
self.type = type
self.begin_time = begin_time
self.end_time = end_time
self.status = status
self.size = size
self.description = description
@property
def id(self):
"""Gets the id of this ListBackupsResult.
Backup ID.
:return: The id of this ListBackupsResult.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ListBackupsResult.
Backup ID.
:param id: The id of this ListBackupsResult.
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this ListBackupsResult.
Backup name.
:return: The name of this ListBackupsResult.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ListBackupsResult.
Backup name.
:param name: The name of this ListBackupsResult.
:type: str
"""
self._name = name
@property
def instance_id(self):
"""Gets the instance_id of this ListBackupsResult.
ID of the instance that the backup belongs to.
:return: The instance_id of this ListBackupsResult.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ListBackupsResult.
ID of the instance that the backup belongs to.
:param instance_id: The instance_id of this ListBackupsResult.
:type: str
"""
self._instance_id = instance_id
@property
def instance_name(self):
"""Gets the instance_name of this ListBackupsResult.
Name of the instance that the backup belongs to.
:return: The instance_name of this ListBackupsResult.
:rtype: str
"""
return self._instance_name
@instance_name.setter
def instance_name(self, instance_name):
"""Sets the instance_name of this ListBackupsResult.
Name of the instance that the backup belongs to.
:param instance_name: The instance_name of this ListBackupsResult.
:type: str
"""
self._instance_name = instance_name
@property
def datastore(self):
"""Gets the datastore of this ListBackupsResult.
:return: The datastore of this ListBackupsResult.
:rtype: ListBackupsDatastoreResult
"""
return self._datastore
@datastore.setter
def datastore(self, datastore):
"""Sets the datastore of this ListBackupsResult.
:param datastore: The datastore of this ListBackupsResult.
:type: ListBackupsDatastoreResult
"""
self._datastore = datastore
@property
def type(self):
"""Gets the type of this ListBackupsResult.
Backup type. - "Auto": automated full backup. - "Manual": manual full backup.
:return: The type of this ListBackupsResult.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ListBackupsResult.
Backup type. - "Auto": automated full backup. - "Manual": manual full backup.
:param type: The type of this ListBackupsResult.
:type: str
"""
self._type = type
@property
def begin_time(self):
"""Gets the begin_time of this ListBackupsResult.
Backup start time, in the format "yyyy-mm-dd hh:mm:ss" (UTC).
:return: The begin_time of this ListBackupsResult.
:rtype: str
"""
return self._begin_time
@begin_time.setter
def begin_time(self, begin_time):
"""Sets the begin_time of this ListBackupsResult.
Backup start time, in the format "yyyy-mm-dd hh:mm:ss" (UTC).
:param begin_time: The begin_time of this ListBackupsResult.
:type: str
"""
self._begin_time = begin_time
@property
def end_time(self):
"""Gets the end_time of this ListBackupsResult.
Backup end time, in the format "yyyy-mm-dd hh:mm:ss" (UTC).
:return: The end_time of this ListBackupsResult.
:rtype: str
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this ListBackupsResult.
Backup end time, in the format "yyyy-mm-dd hh:mm:ss" (UTC).
:param end_time: The end_time of this ListBackupsResult.
:type: str
"""
self._end_time = end_time
@property
def status(self):
"""Gets the status of this ListBackupsResult.
Backup status. Values: - BUILDING: backup in progress. - COMPLETED: backup completed. - FAILED: backup failed. - DISABLED: backup being deleted.
:return: The status of this ListBackupsResult.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ListBackupsResult.
Backup status. Values: - BUILDING: backup in progress. - COMPLETED: backup completed. - FAILED: backup failed. - DISABLED: backup being deleted.
:param status: The status of this ListBackupsResult.
:type: str
"""
self._status = status
@property
def size(self):
"""Gets the size of this ListBackupsResult.
Backup size, in KB.
:return: The size of this ListBackupsResult.
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this ListBackupsResult.
Backup size, in KB.
:param size: The size of this ListBackupsResult.
:type: int
"""
self._size = size
@property
def description(self):
"""Gets the description of this ListBackupsResult.
Backup description.
:return: The description of this ListBackupsResult.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ListBackupsResult.
Backup description.
:param description: The description of this ListBackupsResult.
:type: str
"""
self._description = description
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListBackupsResult):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 25.255435 | 182 | 0.570476 |
2b140b24376c9f0fd9d11a6bdae35547a8578c37 | 10,797 | py | Python | spark_fhir_schemas/r4/complex_types/riskevidencesynthesis_samplesize.py | icanbwell/SparkFhirSchemas | 8c828313c39850b65f8676e67f526ee92b7d624e | [
"Apache-2.0"
] | null | null | null | spark_fhir_schemas/r4/complex_types/riskevidencesynthesis_samplesize.py | icanbwell/SparkFhirSchemas | 8c828313c39850b65f8676e67f526ee92b7d624e | [
"Apache-2.0"
] | null | null | null | spark_fhir_schemas/r4/complex_types/riskevidencesynthesis_samplesize.py | icanbwell/SparkFhirSchemas | 8c828313c39850b65f8676e67f526ee92b7d624e | [
"Apache-2.0"
] | null | null | null | from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class RiskEvidenceSynthesis_SampleSizeSchema:
"""
The RiskEvidenceSynthesis resource describes the likelihood of an outcome in a
population plus exposure state where the risk estimate is derived from a
combination of research studies.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = None,
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
include_modifierExtension: Optional[bool] = False,
use_date_for: Optional[List[str]] = None,
parent_path: Optional[str] = "",
) -> Union[StructType, DataType]:
"""
The RiskEvidenceSynthesis resource describes the likelihood of an outcome in a
population plus exposure state where the risk estimate is derived from a
combination of research studies.
id: Unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
description: Human-readable summary of sample size.
numberOfStudies: Number of studies included in this evidence synthesis.
numberOfParticipants: Number of participants included in this evidence synthesis.
"""
if extension_fields is None:
extension_fields = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueUrl",
"valueReference",
"valueCodeableConcept",
"valueAddress",
]
from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.r4.simple_types.integer import integerSchema
if (
max_recursion_limit
and nesting_list.count("RiskEvidenceSynthesis_SampleSize")
>= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["RiskEvidenceSynthesis_SampleSize"]
my_parent_path = (
parent_path + ".riskevidencesynthesis_samplesize"
if parent_path
else "riskevidencesynthesis_samplesize"
)
schema = StructType(
[
# Unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. To make the use of extensions safe and manageable,
# there is a strict set of governance applied to the definition and use of
# extensions. Though any implementer can define an extension, there is a set of
# requirements that SHALL be met as part of the definition of the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the element and that modifies the understanding of the element
# in which it is contained and/or the understanding of the containing element's
# descendants. Usually modifier elements provide negation or qualification. To
# make the use of extensions safe and manageable, there is a strict set of
# governance applied to the definition and use of extensions. Though any
# implementer can define an extension, there is a set of requirements that SHALL
# be met as part of the definition of the extension. Applications processing a
# resource are required to check for modifier extensions.
#
# Modifier extensions SHALL NOT change the meaning of any elements on Resource
# or DomainResource (including cannot change the meaning of modifierExtension
# itself).
StructField(
"modifierExtension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Human-readable summary of sample size.
StructField("description", StringType(), True),
# Number of studies included in this evidence synthesis.
StructField(
"numberOfStudies",
integerSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".numberofstudies",
),
True,
),
# Number of participants included in this evidence synthesis.
StructField(
"numberOfParticipants",
integerSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".numberofparticipants",
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
if not include_modifierExtension:
schema.fields = [
c
if c.name != "modifierExtension"
else StructField("modifierExtension", StringType(), True)
for c in schema.fields
]
return schema
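# --- Illustrative usage (editor's sketch, not part of the generated file) ---
# A minimal example of building the Spark schema defined above and listing its
# top-level field names. It only calls get_schema() as declared in this module
# and assumes pyspark plus the rest of the spark_fhir_schemas package are
# installed; the argument values are arbitrary choices that keep nesting shallow.
if __name__ == "__main__":
    sample_size_schema = RiskEvidenceSynthesis_SampleSizeSchema.get_schema(
        max_nesting_depth=2,
        max_recursion_limit=1,
        include_extension=False,
    )
    # Expected top-level fields: id, extension, modifierExtension,
    # description, numberOfStudies, numberOfParticipants.
    print([field.name for field in sample_size_schema.fields])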
| 49.30137 | 104 | 0.57942 |
26fd7a01a82527291b20196d3ffbf5f4f4b5d6d3 | 5,329 | py | Python | projections/deep_eq.py | ricardog/raster-project | 37d508ca329d31d4b1d21614371596f4c1bca526 | [
"Apache-2.0"
] | 1 | 2018-02-23T14:26:17.000Z | 2018-02-23T14:26:17.000Z | projections/deep_eq.py | NaturalHistoryMuseum/raster-project | 319a0f633de8cf2317eba5d82396036f01ce5262 | [
"Apache-2.0"
] | null | null | null | projections/deep_eq.py | NaturalHistoryMuseum/raster-project | 319a0f633de8cf2317eba5d82396036f01ce5262 | [
"Apache-2.0"
] | 1 | 2017-10-11T15:49:18.000Z | 2017-10-11T15:49:18.000Z | # https://gist.github.com/samuraisam/901117
#Copyright (c) 2010-2013 Samuel Sutch [samuel.sutch@gmail.com]
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import datetime, time, functools, operator, types
default_fudge = datetime.timedelta(seconds=0, microseconds=0, days=0)
def deep_eq(_v1, _v2, datetime_fudge=default_fudge, _assert=False):
"""
Tests for deep equality between two python data structures recursing
into sub-structures if necessary. Works with all python types including
  iterators and generators. This function was dreamt up to test API responses
but could be used for anything. Be careful. With deeply nested structures
you may blow the stack.
Options:
datetime_fudge => this is a datetime.timedelta object which, when
comparing dates, will accept values that differ
by the number of seconds specified
_assert => passing yes for this will raise an assertion error
when values do not match, instead of returning
false (very useful in combination with pdb)
Doctests included:
>>> x1, y1 = ({'a': 'b'}, {'a': 'b'})
>>> deep_eq(x1, y1)
True
>>> x2, y2 = ({'a': 'b'}, {'b': 'a'})
>>> deep_eq(x2, y2)
False
>>> x3, y3 = ({'a': {'b': 'c'}}, {'a': {'b': 'c'}})
>>> deep_eq(x3, y3)
True
>>> x4, y4 = ({'c': 't', 'a': {'b': 'c'}}, {'a': {'b': 'n'}, 'c': 't'})
>>> deep_eq(x4, y4)
False
>>> x5, y5 = ({'a': [1,2,3]}, {'a': [1,2,3]})
>>> deep_eq(x5, y5)
True
>>> x6, y6 = ({'a': [1,'b',8]}, {'a': [2,'b',8]})
>>> deep_eq(x6, y6)
False
>>> x7, y7 = ('a', 'a')
>>> deep_eq(x7, y7)
True
>>> x8, y8 = (['p','n',['asdf']], ['p','n',['asdf']])
>>> deep_eq(x8, y8)
True
>>> x9, y9 = (['p','n',['asdf',['omg']]], ['p', 'n', ['asdf',['nowai']]])
>>> deep_eq(x9, y9)
False
>>> x10, y10 = (1, 2)
>>> deep_eq(x10, y10)
False
>>> deep_eq((str(p) for p in xrange(10)), (str(p) for p in xrange(10)))
True
>>> str(deep_eq(range(4), range(4)))
'True'
>>> deep_eq(xrange(100), xrange(100))
True
>>> deep_eq(xrange(2), xrange(5))
False
>>> import datetime
>>> from datetime import datetime as dt
>>> d1, d2 = (dt.now(), dt.now() + datetime.timedelta(seconds=4))
>>> deep_eq(d1, d2)
False
>>> deep_eq(d1, d2, datetime_fudge=datetime.timedelta(seconds=5))
True
"""
_deep_eq = functools.partial(deep_eq, datetime_fudge=datetime_fudge,
_assert=_assert)
def _check_assert(R, a, b, reason=''):
if _assert and not R:
assert 0, "an assertion has failed in deep_eq (%s) %s != %s" % (
reason, str(a), str(b))
return R
def _deep_dict_eq(d1, d2):
try:
k1, k2 = (sorted(d1.keys()), sorted(d2.keys()))
except AttributeError:
return False
if k1 != k2: # keys should be exactly equal
return _check_assert(False, k1, k2, "keys")
return _check_assert(operator.eq(sum(_deep_eq(d1[k], d2[k])
for k in k1),
len(k1)), d1, d2, "dictionaries")
def _deep_iter_eq(l1, l2):
if len(l1) != len(l2):
return _check_assert(False, l1, l2, "lengths")
return _check_assert(operator.eq(sum(_deep_eq(v1, v2)
for v1, v2 in zip(l1, l2)),
len(l1)), l1, l2, "iterables")
def op(a, b):
_op = operator.eq
if type(a) == datetime.datetime and type(b) == datetime.datetime:
s = datetime_fudge.seconds
t1, t2 = (time.mktime(a.timetuple()), time.mktime(b.timetuple()))
l = t1 - t2
l = -l if l > 0 else l
return _check_assert((-s if s > 0 else s) <= l, a, b, "dates")
return _check_assert(_op(a, b), a, b, "values")
c1, c2 = (_v1, _v2)
# guard against strings because they are iterable and their
# elements yield iterables infinitely.
# I N C E P T I O N
for t in types.StringTypes:
if isinstance(_v1, t):
break
else:
if isinstance(_v1, types.DictType):
op = _deep_dict_eq
else:
try:
c1, c2 = (list(iter(_v1)), list(iter(_v2)))
except TypeError:
c1, c2 = _v1, _v2
else:
op = _deep_iter_eq
return op(c1, c2)
| 36.006757 | 80 | 0.59636 |
411a25bb7f8bd4ec03909a3a76e47775ff6729d4 | 801 | py | Python | minigame5/HighScore.py | Tdallau/HRO_project2_minigames | 7fdecaa942d1a05d2cd451fd7f5d47e51c571c42 | [
"MIT"
] | null | null | null | minigame5/HighScore.py | Tdallau/HRO_project2_minigames | 7fdecaa942d1a05d2cd451fd7f5d47e51c571c42 | [
"MIT"
] | null | null | null | minigame5/HighScore.py | Tdallau/HRO_project2_minigames | 7fdecaa942d1a05d2cd451fd7f5d47e51c571c42 | [
"MIT"
] | null | null | null | from os import path
class HighScore:
  def __init__(self, amound):
    # Load the previously saved high score from disk; the `amound` argument
    # is currently unused.
self.amound = self.getHighscore("highscore.txt")
self.dir = ''
  def checkForHighScore(self, amound):
    # Lower scores win: persist the new score when none has been recorded
    # yet (stored value 0) or when it is below the stored score.
if int(self.amound) == 0 or int(self.amound) > int(amound):
print(str(self.amound) + "||||"+str(amound))
self.amound = amound
self.writeHighscore(amound)
  def writeHighscore(self, amound):
    # Overwrite the stored high score with the new value.
with open(path.join(self.dir, "minigame5/highscore.txt"), 'r+') as f:
f.write(str(amound))
  def getHighscore(self, HS_file):
    # Read the stored score from the file next to this module, falling back
    # to 0 when the contents cannot be parsed as an integer.
self.dir = path.dirname(__file__)
with open(path.join(self.dir, HS_file), 'r+') as f:
try:
return int(f.read())
except:
return 0
| 28.607143 | 77 | 0.548065 |
a1fb6b96bd72e3e102fbb14ffc2d3485642db31f | 4,554 | py | Python | rollbar/lib/traverse.py | uploadcare/pyrollbar | 1c5e25ebc261dfbddb009465ac994a75b9cb01a9 | [
"MIT"
] | null | null | null | rollbar/lib/traverse.py | uploadcare/pyrollbar | 1c5e25ebc261dfbddb009465ac994a75b9cb01a9 | [
"MIT"
] | null | null | null | rollbar/lib/traverse.py | uploadcare/pyrollbar | 1c5e25ebc261dfbddb009465ac994a75b9cb01a9 | [
"MIT"
] | null | null | null | import logging
try:
# Python 3
from collections.abc import Mapping
from collections.abc import Sequence
except ImportError:
# Python 2.7
from collections import Mapping
from collections import Sequence
from rollbar.lib import binary_type, iteritems, string_types, circular_reference_label
from itertools import islice
CIRCULAR = -1
DEFAULT = 0
MAPPING = 1
TUPLE = 2
NAMEDTUPLE = 3
LIST = 4
SET = 5
STRING = 6
MAX_LIST = 100
log = logging.getLogger(__name__)
def _noop_circular(a, **kw):
return circular_reference_label(a, ref_key=kw.get('ref_key'))
def _noop(a, **_):
return a
def _noop_tuple(a, **_):
return tuple(a)
def _noop_namedtuple(a, **_):
return a._make(a)
def _noop_list(a, **_):
return list(a)
def _noop_set(a, **_):
return set(a)
def _noop_mapping(a, **_):
return dict(a)
_default_handlers = {
CIRCULAR: _noop_circular,
DEFAULT: _noop,
STRING: _noop,
TUPLE: _noop_tuple,
NAMEDTUPLE: _noop_namedtuple,
LIST: _noop_list,
SET: _noop_set,
MAPPING: _noop_mapping,
}
def get_type(obj):
if isinstance(obj, (string_types, binary_type)):
return STRING
if isinstance(obj, Mapping):
return MAPPING
if isinstance(obj, tuple):
if hasattr(obj, '_fields'):
return NAMEDTUPLE
return TUPLE
if isinstance(obj, set):
return SET
if isinstance(obj, Sequence):
return LIST
return DEFAULT
def limited_enumerate(i, max):
return enumerate(islice(i, 0, max))
def limited_iteritems(dict, max):
return islice(iteritems(dict), 0, max)
def traverse(obj,
key=(),
string_handler=_default_handlers[STRING],
tuple_handler=_default_handlers[TUPLE],
namedtuple_handler=_default_handlers[NAMEDTUPLE],
list_handler=_default_handlers[LIST],
set_handler=_default_handlers[SET],
mapping_handler=_default_handlers[MAPPING],
default_handler=_default_handlers[DEFAULT],
circular_reference_handler=_default_handlers[CIRCULAR],
allowed_circular_reference_types=None,
memo=None,
max_list=MAX_LIST,
**custom_handlers):
memo = memo or {}
obj_id = id(obj)
obj_type = get_type(obj)
ref_key = memo.get(obj_id)
if ref_key:
if not allowed_circular_reference_types or not isinstance(obj, allowed_circular_reference_types):
return circular_reference_handler(obj, key=key, ref_key=ref_key)
memo[obj_id] = key
kw = {
'string_handler': string_handler,
'tuple_handler': tuple_handler,
'namedtuple_handler': namedtuple_handler,
'list_handler': list_handler,
'set_handler': set_handler,
'mapping_handler': mapping_handler,
'default_handler': default_handler,
'circular_reference_handler': circular_reference_handler,
'allowed_circular_reference_types': allowed_circular_reference_types,
'memo': memo,
'max_list': max_list
}
kw.update(custom_handlers)
try:
if obj_type is STRING:
return string_handler(obj, key=key)
elif obj_type is TUPLE:
return tuple_handler(tuple(traverse(elem, key=key + (i,), **kw) for i, elem in limited_enumerate(obj, max_list)), key=key)
elif obj_type is NAMEDTUPLE:
return namedtuple_handler(obj._make(traverse(v, key=key + (k,), **kw) for k, v in limited_iteritems(obj._asdict(), max_list)), key=key)
elif obj_type is LIST:
return list_handler(list(traverse(elem, key=key + (i,), **kw) for i, elem in limited_enumerate(obj, max_list)), key=key)
elif obj_type is SET:
return set_handler(set(traverse(elem, key=key + (i,), **kw) for i, elem in limited_enumerate(obj, max_list)), key=key)
elif obj_type is MAPPING:
return mapping_handler(dict((k, traverse(v, key=key + (k,), **kw)) for k, v in limited_iteritems(obj, max_list)), key=key)
elif obj_type is DEFAULT:
for handler_type, handler in iteritems(custom_handlers):
if isinstance(obj, handler_type):
return handler(obj, key=key)
except:
# use the default handler for unknown object types
log.debug("Exception while traversing object using type-specific "
"handler. Switching to default handler.", exc_info=True)
return default_handler(obj, key=key)
__all__ = ['traverse']
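# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A small example of walking a nested structure with a custom string handler.
# traverse() invokes each handler with the value plus a `key` keyword argument,
# as defined above; every other type falls back to the no-op defaults, so the
# result mirrors the input with the string leaves upper-cased. This assumes the
# rollbar package (for the imports at the top of this module) is importable.
if __name__ == "__main__":
    data = {"name": "rollbar", "tags": ["python", "sdk"], "meta": {"retries": 3}}
    result = traverse(data, string_handler=lambda s, **kw: s.upper())
    # Prints something like:
    # {'name': 'ROLLBAR', 'tags': ['PYTHON', 'SDK'], 'meta': {'retries': 3}}
    print(result)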
| 27.433735 | 147 | 0.653491 |
cfbc7080b6b982235771b4c6a731d9edb192f206 | 13,914 | py | Python | pysigdig/pysigdig.py | m-yuhas/pysigdig | 57275e14ea520f6ea9728830b55a2e38e9b851b0 | [
"MIT"
] | null | null | null | pysigdig/pysigdig.py | m-yuhas/pysigdig | 57275e14ea520f6ea9728830b55a2e38e9b851b0 | [
"MIT"
] | null | null | null | pysigdig/pysigdig.py | m-yuhas/pysigdig | 57275e14ea520f6ea9728830b55a2e38e9b851b0 | [
"MIT"
] | null | null | null | """Module to do arithmetic operations with significant digits."""
from typing import Union
import math
import re
class Number:
"""Class representing a number with information about significant figures
and tolerance."""
def __init__(self, value: Union[int, float, str], **kwargs) -> None:
self._tolerance = None
self._value = None
self._lsd = None
self._sigdigs = None
if isinstance(value, float):
self._value = value
self._sigdigs = float('inf')
self._lsd = float('-inf')
elif isinstance(value, int):
self._value = value
self._sigdigs, self._lsd = Number.get_sigdigs_from_int(value)
elif isinstance(value, str):
self._value, self._sigdigs, self._lsd = Number.parse_string(value)
else:
raise TypeError(
'Invalid type {} provided for argument "value"'.format(
type(value)))
if 'sigdigs' in kwargs:
self._sigdigs = kwargs['sigdigs']
self.set_lsd_from_sigdigs()
if 'lsd' in kwargs:
self._lsd = kwargs['lsd']
self.set_sigdigs_from_lsd()
if 'tolerance' in kwargs:
if isinstance(kwargs['tolerance'], (float, int)):
self._tolerance = abs(kwargs['tolerance'])
else:
self._tolerance = None
def __int__(self) -> int:
return int(float(self))
def __float__(self) -> float:
if self.lsd == float('-inf'):
return self._value
return float(round(self._value, int(-math.log10(self.lsd))))
def __str__(self) -> str:
digits = int(-math.log10(self.lsd))
string = str(float(self))
if self._lsd >= 1:
string = string.split('.')[0]
else:
string += (digits - len(string.split('.')[1])) * '0'
if self.tolerance is not None:
string += ' ± {}'.format(self.tolerance)
return string
def __add__(self, other) -> 'Number':
if isinstance(other, (float, int)):
new_value = self._value + other
new_lsd = self.lsd
new_tolerance = self.tolerance
elif isinstance(other, Number):
new_value = self._value + other._value
new_lsd = max(self.lsd, other.lsd)
if self.tolerance is None and other.tolerance is None:
new_tolerance = None
else:
new_tolerance = (self.tolerance or 0) + (other.tolerance or 0)
else:
raise TypeError(
'Cannot add type {} to Number.'.format(type(other)))
return Number(new_value, lsd=new_lsd, tolerance=new_tolerance)
def __sub__(self, other) -> 'Number':
if isinstance(other, (float, int)):
new_value = self._value - other
new_lsd = self.lsd
new_tolerance = self.tolerance
elif isinstance(other, Number):
new_value = self._value - other._value
new_lsd = max(self.lsd, other.lsd)
if self.tolerance is None and other.tolerance is None:
new_tolerance = None
else:
new_tolerance = (self.tolerance or 0) + (other.tolerance or 0)
else:
raise TypeError(
'Cannot subtract type {} from Number.'.format(type(other)))
return Number(new_value, lsd=new_lsd, tolerance=new_tolerance)
def __mul__(self, other) -> 'Number':
if isinstance(other, (float, int)):
new_value = self._value * other
new_sigdigs = self.sigdigs
if self.tolerance is None:
new_tolerance = None
else:
new_tolerance = self.tolerance * other
elif isinstance(other, Number):
new_value = self._value * other._value
new_sigdigs = min(self.sigdigs, other.sigdigs)
if self.tolerance is None and other.tolerance is None:
new_tolerance = None
else:
new_tolerance = abs((self.tolerance or 0) * other._value) + \
abs((other.tolerance or 0) * self._value) + \
(self.tolerance or 0) * (other.tolerance or 0)
else:
raise TypeError(
'Cannot multiply Number by type {}.'.format(type(other)))
return Number(new_value, sigdigs=new_sigdigs, tolerance=new_tolerance)
def __truediv__(self, other) -> 'Number':
if isinstance(other, (float, int)):
new_value = self._value / other
new_sigdigs = self.sigdigs
if self.tolerance is None:
new_tolerance = None
else:
new_tolerance = self.tolerance / other
elif isinstance(other, Number):
new_value = self._value / other._value
new_sigdigs = min(self.sigdigs, other.sigdigs)
if self.tolerance is None and other.tolerance is None:
new_tolerance = None
else:
new_tolerance = max(
abs(
abs(new_value) -
abs(self.max_value / other.min_value)),
abs(
abs(new_value) -
abs(self.min_value / other.max_value)))
else:
raise TypeError(
'Cannot divide Number by type {}.'.format(type(other)))
return Number(new_value, sigdigs=new_sigdigs, tolerance=new_tolerance)
def __floordiv__(self, other) -> 'Number':
if isinstance(other, (float, int)):
new_value = self._value // other
new_sigdigs = min(
self.sigdigs,
Number.get_sigdigs_from_int(new_value)[0])
if self.tolerance is None:
new_tolerance = None
else:
new_tolerance = abs(new_value) - abs(self.max_value / other)
elif isinstance(other, Number):
new_value = self._value // other._value
new_sigdigs = min(
self.sigdigs,
other.sigdigs,
Number.get_sigdigs_from_int(new_value)[0])
if self.tolerance is None and other.tolerance is None:
new_tolerance = None
else:
new_tolerance = max(
abs(
abs(new_value) -
abs(self.max_value / other.min_value)),
abs(
abs(new_value) -
abs(self.min_value / other.max_value)))
else:
raise TypeError(
'Cannot perform floor division on Number by type {}.'.format(
type(other)))
return Number(new_value, sigdigs=new_sigdigs, tolerance=new_tolerance)
def __mod__(self, other) -> 'Number':
if isinstance(other, (float, int)):
new_value = self._value % other
new_sigdigs = self.sigdigs
if self.tolerance is None:
new_tolerance = None
else:
new_tolerance = max(
abs(abs(new_value) - abs(self.max_value % other)),
abs(abs(new_value) - abs(self.min_value % other)))
elif isinstance(other, Number):
new_value = self._value % other._value
new_sigdigs = min(self.sigdigs, other.sigdigs)
if self.tolerance is None and other.tolerance is None:
new_tolerance = None
else:
new_tolerance = max(
abs(
abs(new_value) -
abs(self.max_value % other.min_value)),
abs(
abs(new_value) -
abs(self.min_value % other.max_value)))
else:
raise TypeError(
                'Cannot perform modulo division on Number by type {}'.format(
type(other)))
return Number(new_value, sigdigs=new_sigdigs, tolerance=new_tolerance)
def __pow__(self, other) -> 'Number':
if isinstance(other, (float, int)):
new_value = self._value ** other
new_sigdigs = self.sigdigs
if self.tolerance is None:
new_tolerance = None
else:
new_tolerance = max(
abs(abs(new_value) - abs(self.max_value ** other)),
abs(abs(new_value) - abs(self.min_value ** other)))
else:
raise TypeError(
'Only exponentiating by a constant (float or int) is '
'supported.')
return Number(new_value, sigdigs=new_sigdigs, tolerance=new_tolerance)
def __lt__(self, other) -> bool:
return self.max_value < other.min_value
def __gt__(self, other) -> bool:
return self.min_value > other.max_value
def __le__(self, other) -> bool:
return self.max_value < other.max_value
def __ge__(self, other) -> bool:
return self.min_value > other.min_value
def __eq__(self, other) -> bool:
return (
self.value == other.value and self.sigdigs == other.sigdigs and
self.tolerance == other.tolerance and self.lsd == other.lsd)
def __ne__(self, other) -> bool:
return not self == other
def __iadd__(self, other) -> 'Number':
return self + other
def __isub__(self, other) -> 'Number':
return self - other
def __imul__(self, other) -> 'Number':
return self * other
def __idiv__(self, other) -> 'Number':
return self / other
def __ifloordiv__(self, other) -> 'Number':
return self // other
def __imod__(self, other) -> 'Number':
return self % other
def __ipow__(self, other) -> 'Number':
return self ** other
def __neg__(self) -> 'Number':
return Number(
-self.value,
sigdigs=self.sigdigs,
tolerance=self.tolerance)
def __pos__(self) -> 'Number':
return self
def set_lsd_from_sigdigs(self):
"""Determine the least significant digit based on the specified number
of significant digits and the current value."""
temp_value = self._value
if temp_value < 0:
temp_value = 0 - temp_value
place = 1
if temp_value >= 1:
while temp_value > 0:
place *= 10
temp_value -= temp_value % place
place /= 10
elif temp_value == 0:
self._lsd = 1
return
else:
place = float(place)
while temp_value % place == temp_value:
place /= 10
self._lsd = float(place) / 10 ** (self.sigdigs - 1)
def set_sigdigs_from_lsd(self):
"""Determine the number of significant digits based on the specified
least significant digit and current value."""
temp_value = self._value
if temp_value < 0:
temp_value = 0 - temp_value
place = float(self.lsd)
self._sigdigs = 1
while temp_value / place >= 1:
self._sigdigs += 1
place *= 10
self._sigdigs -= 1
@property
def value(self):
"""Foo"""
return int(self) if isinstance(self._value, int) else float(self)
@property
def max_value(self):
"""Foo"""
return max(
float(self) + (self.tolerance or 0),
float(self) - (self.tolerance or 0))
@property
def min_value(self):
"""Foo"""
return min(
float(self) + (self.tolerance or 0),
float(self) - (self.tolerance or 0))
@property
def sigdigs(self):
"""Get sigdigs"""
return self._sigdigs
@property
def lsd(self):
"""Get least significant digit."""
return self._lsd
@property
def tolerance(self):
"""Get tolerance."""
return self._tolerance
@staticmethod
def get_sigdigs_from_int(value: int):
"""Get the number of significant digits from an integer"""
if value < 0:
value = -value
if value == 0:
return 1, 1
place = 1
lsd = None
count = 0
while value > 0:
remainder = value % (place * 10)
if remainder != 0 and lsd is None:
lsd = place
if lsd is not None:
count += 1
value -= remainder
place *= 10
return count, lsd
@staticmethod
def parse_string(string: str):
"""Parse a string and return it's value, significant digits and least
significant digit."""
string = string.strip().lstrip('0')
if re.match(r'\-?\d*\.?\d*', string).group() == '':
raise ValueError('String could not be cast to number')
decimal_index = string.find('.')
value = 0
sigdigs = 0
lsd = None
if decimal_index == -1:
place = 1
for i in range(len(string) - 1, -1, -1):
value += int(string[i]) * place
if string[i] != '0' and lsd is None:
lsd = place
if lsd is not None:
sigdigs += 1
place *= 10
else:
place = 1
for i in range(decimal_index - 1, -1, -1):
value += int(string[i]) * place
place *= 10
sigdigs += 1
place = 1
for i in range(decimal_index + 1, len(string)):
place /= 10
value += int(string[i]) * place
sigdigs += 1
lsd = 1 if string[-1] == '.' else place
return -value if string[0] == '-' else value, sigdigs, lsd
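# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A short example of significant-digit arithmetic with the Number class defined
# above. The constructor keyword (tolerance) and the operators used here all
# exist in this module; the exact tolerance printed for the product depends on
# floating-point rounding, so no specific output is asserted.
if __name__ == "__main__":
    length = Number("12.30", tolerance=0.05)   # four significant digits
    width = Number("4.6")                      # two significant digits
    area = length * width                      # product keeps min(sigdigs) == 2
    print(length, width, area, sep=" | ")
    print(area.sigdigs, area.min_value, area.max_value)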
| 35.494898 | 78 | 0.528604 |
eeb72a7b66e6acb422edd60e89935fdc651d0f7d | 622 | py | Python | manage.py | thewolfcommander/yappapoo-teamTanay-project-1 | 38712d7e88c5fc49e9216b5d05ebfa47f9d6fbcd | [
"Apache-2.0"
] | null | null | null | manage.py | thewolfcommander/yappapoo-teamTanay-project-1 | 38712d7e88c5fc49e9216b5d05ebfa47f9d6fbcd | [
"Apache-2.0"
] | 6 | 2021-03-19T02:18:53.000Z | 2021-09-22T18:55:17.000Z | manage.py | thewolfcommander/yappapoo-teamTanay-project-1 | 38712d7e88c5fc49e9216b5d05ebfa47f9d6fbcd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yp.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.272727 | 73 | 0.680064 |
faa373dc4256341108bc7a7b2ae3d13389b787ab | 80,616 | py | Python | electrum/lnworker.py | multibit2016/electrum | 91fa62bdf2000d3909fd4a9fa7042c540bb52f88 | [
"MIT"
] | null | null | null | electrum/lnworker.py | multibit2016/electrum | 91fa62bdf2000d3909fd4a9fa7042c540bb52f88 | [
"MIT"
] | null | null | null | electrum/lnworker.py | multibit2016/electrum | 91fa62bdf2000d3909fd4a9fa7042c540bb52f88 | [
"MIT"
] | null | null | null | # Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import asyncio
import os
from decimal import Decimal
import random
import time
from typing import Optional, Sequence, Tuple, List, Set, Dict, TYPE_CHECKING, NamedTuple, Union, Mapping, Any
import threading
import socket
import aiohttp
import json
from datetime import datetime, timezone
from functools import partial
from collections import defaultdict
import concurrent
from concurrent import futures
import urllib.parse
import dns.resolver
import dns.exception
from aiorpcx import run_in_thread, TaskGroup, NetAddress
from . import constants, util
from . import keystore
from .util import profiler, chunks
from .invoices import PR_TYPE_LN, PR_UNPAID, PR_EXPIRED, PR_PAID, PR_INFLIGHT, PR_FAILED, PR_ROUTING, LNInvoice, LN_EXPIRY_NEVER
from .util import NetworkRetryManager, JsonRPCClient
from .lnutil import LN_MAX_FUNDING_SAT
from .keystore import BIP32_KeyStore
from .bitcoin import COIN
from .transaction import Transaction
from .crypto import sha256
from .bip32 import BIP32Node
from .util import bh2u, bfh, InvoiceError, resolve_dns_srv, is_ip_address, log_exceptions
from .util import ignore_exceptions, make_aiohttp_session, SilentTaskGroup
from .util import timestamp_to_datetime, random_shuffled_copy
from .util import MyEncoder, is_private_netaddress
from .logging import Logger
from .lntransport import LNTransport, LNResponderTransport
from .lnpeer import Peer, LN_P2P_NETWORK_TIMEOUT
from .lnaddr import lnencode, LnAddr, lndecode
from .ecc import der_sig_from_sig_string
from .lnchannel import Channel
from .lnchannel import ChannelState, PeerState
from .lnrater import LNRater
from . import lnutil
from .lnutil import funding_output_script
from .bitcoin import redeem_script_to_address
from .lnutil import (Outpoint, LNPeerAddr,
get_compressed_pubkey_from_bech32, extract_nodeid,
PaymentFailure, split_host_port, ConnStringFormatError,
generate_keypair, LnKeyFamily, LOCAL, REMOTE,
MIN_FINAL_CLTV_EXPIRY_FOR_INVOICE,
NUM_MAX_EDGES_IN_PAYMENT_PATH, SENT, RECEIVED, HTLCOwner,
UpdateAddHtlc, Direction, LnFeatures,
ShortChannelID, PaymentAttemptLog, PaymentAttemptFailureDetails,
BarePaymentAttemptLog, derive_payment_secret_from_payment_preimage)
from .lnutil import ln_dummy_address, ln_compare_features, IncompatibleLightningFeatures
from .transaction import PartialTxOutput, PartialTransaction, PartialTxInput
from .lnonion import OnionFailureCode, process_onion_packet, OnionPacket, OnionRoutingFailureMessage
from .lnmsg import decode_msg
from .i18n import _
from .lnrouter import (RouteEdge, LNPaymentRoute, LNPaymentPath, is_route_sane_to_use,
NoChannelPolicy, LNPathInconsistent)
from .address_synchronizer import TX_HEIGHT_LOCAL
from . import lnsweep
from .lnwatcher import LNWalletWatcher
from .crypto import pw_encode_with_version_and_mac, pw_decode_with_version_and_mac
from .lnutil import ChannelBackupStorage
from .lnchannel import ChannelBackup
from .channel_db import UpdateStatus
from .channel_db import get_mychannel_info, get_mychannel_policy
from .submarine_swaps import SwapManager
if TYPE_CHECKING:
from .network import Network
from .wallet import Abstract_Wallet
from .channel_db import ChannelDB
from .simple_config import SimpleConfig
SAVED_PR_STATUS = [PR_PAID, PR_UNPAID] # status that are persisted
NUM_PEERS_TARGET = 4
MPP_EXPIRY = 120
FALLBACK_NODE_LIST_TESTNET = (
LNPeerAddr(host='203.132.95.10', port=9735, pubkey=bfh('038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9')),
LNPeerAddr(host='2401:d002:4402:0:bf1d:986a:7598:6d49', port=9735, pubkey=bfh('038863cf8ab91046230f561cd5b386cbff8309fa02e3f0c3ed161a3aeb64a643b9')),
LNPeerAddr(host='50.116.3.223', port=9734, pubkey=bfh('03236a685d30096b26692dce0cf0fa7c8528bdf61dbf5363a3ef6d5c92733a3016')),
LNPeerAddr(host='3.16.119.191', port=9735, pubkey=bfh('03d5e17a3c213fe490e1b0c389f8cfcfcea08a29717d50a9f453735e0ab2a7c003')),
LNPeerAddr(host='34.250.234.192', port=9735, pubkey=bfh('03933884aaf1d6b108397e5efe5c86bcf2d8ca8d2f700eda99db9214fc2712b134')),
LNPeerAddr(host='88.99.209.230', port=9735, pubkey=bfh('0260d9119979caedc570ada883ff614c6efb93f7f7382e25d73ecbeba0b62df2d7')),
LNPeerAddr(host='160.16.233.215', port=9735, pubkey=bfh('023ea0a53af875580899da0ab0a21455d9c19160c4ea1b7774c9d4be6810b02d2c')),
LNPeerAddr(host='197.155.6.173', port=9735, pubkey=bfh('0269a94e8b32c005e4336bfb743c08a6e9beb13d940d57c479d95c8e687ccbdb9f')),
LNPeerAddr(host='2c0f:fb18:406::4', port=9735, pubkey=bfh('0269a94e8b32c005e4336bfb743c08a6e9beb13d940d57c479d95c8e687ccbdb9f')),
LNPeerAddr(host='163.172.94.64', port=9735, pubkey=bfh('030f0bf260acdbd3edcad84d7588ec7c5df4711e87e6a23016f989b8d3a4147230')),
LNPeerAddr(host='23.237.77.12', port=9735, pubkey=bfh('02312627fdf07fbdd7e5ddb136611bdde9b00d26821d14d94891395452f67af248')),
LNPeerAddr(host='197.155.6.172', port=9735, pubkey=bfh('02ae2f22b02375e3e9b4b4a2db4f12e1b50752b4062dbefd6e01332acdaf680379')),
LNPeerAddr(host='2c0f:fb18:406::3', port=9735, pubkey=bfh('02ae2f22b02375e3e9b4b4a2db4f12e1b50752b4062dbefd6e01332acdaf680379')),
LNPeerAddr(host='23.239.23.44', port=9740, pubkey=bfh('034fe52e98a0e9d3c21b767e1b371881265d8c7578c21f5afd6d6438da10348b36')),
LNPeerAddr(host='2600:3c01::f03c:91ff:fe05:349c', port=9740, pubkey=bfh('034fe52e98a0e9d3c21b767e1b371881265d8c7578c21f5afd6d6438da10348b36')),
)
FALLBACK_NODE_LIST_MAINNET = [
LNPeerAddr(host='172.81.181.3', port=9735, pubkey=bfh('0214382bdce7750dfcb8126df8e2b12de38536902dc36abcebdaeefdeca1df8284')),
LNPeerAddr(host='35.230.100.60', port=9735, pubkey=bfh('023f5e3582716bed96f6f26cfcd8037e07474d7b4743afdc8b07e692df63464d7e')),
LNPeerAddr(host='40.69.71.114', port=9735, pubkey=bfh('028303182c9885da93b3b25c9621d22cf34475e63c123942e402ab530c0556e675')),
LNPeerAddr(host='94.177.171.73', port=9735, pubkey=bfh('0276e09a267592e7451a939c932cf685f0754de382a3ca85d2fb3a864d4c365ad5')),
LNPeerAddr(host='34.236.113.58', port=9735, pubkey=bfh('02fa50c72ee1e2eb5f1b6d9c3032080c4c864373c4201dfa2966aa34eee1051f97')),
LNPeerAddr(host='52.50.244.44', port=9735, pubkey=bfh('030c3f19d742ca294a55c00376b3b355c3c90d61c6b6b39554dbc7ac19b141c14f')),
LNPeerAddr(host='157.245.68.47', port=9735, pubkey=bfh('03c2abfa93eacec04721c019644584424aab2ba4dff3ac9bdab4e9c97007491dda')),
LNPeerAddr(host='18.221.23.28', port=9735, pubkey=bfh('03abf6f44c355dec0d5aa155bdbdd6e0c8fefe318eff402de65c6eb2e1be55dc3e')),
LNPeerAddr(host='52.224.178.244', port=9735, pubkey=bfh('026b105ac13212c48714c6be9b11577a9ce10f10e1c88a45ce217e6331209faf8b')),
LNPeerAddr(host='34.239.230.56', port=9735, pubkey=bfh('03864ef025fde8fb587d989186ce6a4a186895ee44a926bfc370e2c366597a3f8f')),
LNPeerAddr(host='46.229.165.136', port=9735, pubkey=bfh('0390b5d4492dc2f5318e5233ab2cebf6d48914881a33ef6a9c6bcdbb433ad986d0')),
LNPeerAddr(host='157.230.28.160', port=9735, pubkey=bfh('0279c22ed7a068d10dc1a38ae66d2d6461e269226c60258c021b1ddcdfe4b00bc4')),
LNPeerAddr(host='74.108.13.152', port=9735, pubkey=bfh('0331f80652fb840239df8dc99205792bba2e559a05469915804c08420230e23c7c')),
LNPeerAddr(host='167.172.44.148', port=9735, pubkey=bfh('0395033b252c6f40e3756984162d68174e2bd8060a129c0d3462a9370471c6d28f')),
LNPeerAddr(host='138.68.14.104', port=9735, pubkey=bfh('03bb88ccc444534da7b5b64b4f7b15e1eccb18e102db0e400d4b9cfe93763aa26d')),
LNPeerAddr(host='3.124.63.44', port=9735, pubkey=bfh('0242a4ae0c5bef18048fbecf995094b74bfb0f7391418d71ed394784373f41e4f3')),
LNPeerAddr(host='2001:470:8:2e1::43', port=9735, pubkey=bfh('03baa70886d9200af0ffbd3f9e18d96008331c858456b16e3a9b41e735c6208fef')),
LNPeerAddr(host='2601:186:c100:6bcd:219:d1ff:fe75:dc2f', port=9735, pubkey=bfh('0298f6074a454a1f5345cb2a7c6f9fce206cd0bf675d177cdbf0ca7508dd28852f')),
LNPeerAddr(host='2001:41d0:e:734::1', port=9735, pubkey=bfh('03a503d8e30f2ff407096d235b5db63b4fcf3f89a653acb6f43d3fc492a7674019')),
LNPeerAddr(host='2a01:4f9:2b:2254::2', port=9735, pubkey=bfh('02f3069a342ae2883a6f29e275f06f28a56a6ea2e2d96f5888a3266444dcf542b6')),
LNPeerAddr(host='2a02:8070:24c1:100:528c:2997:6dbc:a054', port=9735, pubkey=bfh('02a45def9ae014fdd2603dd7033d157faa3a55a72b06a63ae22ef46d9fafdc6e8d')),
LNPeerAddr(host='2600:3c01::f03c:91ff:fe05:349c', port=9736, pubkey=bfh('02731b798b39a09f9f14e90ee601afb6ebb796d6e5797de14582a978770b33700f')),
LNPeerAddr(host='2a00:8a60:e012:a00::21', port=9735, pubkey=bfh('027ce055380348d7812d2ae7745701c9f93e70c1adeb2657f053f91df4f2843c71')),
LNPeerAddr(host='2604:a880:400:d1::8bd:1001', port=9735, pubkey=bfh('03649c72a4816f0cd546f84aafbd657e92a30ab474de7ab795e8b5650a427611f7')),
LNPeerAddr(host='2a01:4f8:c0c:7b31::1', port=9735, pubkey=bfh('02c16cca44562b590dd279c942200bdccfd4f990c3a69fad620c10ef2f8228eaff')),
LNPeerAddr(host='2001:41d0:1:b40d::1', port=9735, pubkey=bfh('026726a4b043d413b45b334876d17b8a98848129604429ec65532ba286a42efeac')),
]
class PaymentInfo(NamedTuple):
payment_hash: bytes
amount_msat: Optional[int]
direction: int
status: int
class NoPathFound(PaymentFailure):
def __str__(self):
return _('No path found')
class ErrorAddingPeer(Exception): pass
# set some feature flags as baseline for both LNWallet and LNGossip
# note that e.g. DATA_LOSS_PROTECT is needed for LNGossip as many peers require it
BASE_FEATURES = LnFeatures(0)\
| LnFeatures.OPTION_DATA_LOSS_PROTECT_OPT\
| LnFeatures.OPTION_STATIC_REMOTEKEY_OPT\
| LnFeatures.VAR_ONION_OPT\
| LnFeatures.PAYMENT_SECRET_OPT\
| LnFeatures.OPTION_UPFRONT_SHUTDOWN_SCRIPT_OPT
# we do not want to receive unrequested gossip (see lnpeer.maybe_save_remote_update)
LNWALLET_FEATURES = BASE_FEATURES\
| LnFeatures.OPTION_DATA_LOSS_PROTECT_REQ\
| LnFeatures.OPTION_STATIC_REMOTEKEY_REQ\
| LnFeatures.GOSSIP_QUERIES_REQ\
| LnFeatures.BASIC_MPP_OPT
LNGOSSIP_FEATURES = BASE_FEATURES\
| LnFeatures.GOSSIP_QUERIES_OPT\
| LnFeatures.GOSSIP_QUERIES_REQ
class LNWorker(Logger, NetworkRetryManager[LNPeerAddr]):
def __init__(self, xprv, features):
Logger.__init__(self)
NetworkRetryManager.__init__(
self,
max_retry_delay_normal=3600,
init_retry_delay_normal=600,
max_retry_delay_urgent=300,
init_retry_delay_urgent=4,
)
self.lock = threading.RLock()
self.node_keypair = generate_keypair(BIP32Node.from_xkey(xprv), LnKeyFamily.NODE_KEY)
self._peers = {} # type: Dict[bytes, Peer] # pubkey -> Peer # needs self.lock
self.taskgroup = SilentTaskGroup()
self.listen_server = None # type: Optional[asyncio.AbstractServer]
self.features = features
self.network = None # type: Optional[Network]
self.config = None # type: Optional[SimpleConfig]
self.channel_db = None # type: Optional[ChannelDB]
util.register_callback(self.on_proxy_changed, ['proxy_set'])
@property
def peers(self) -> Mapping[bytes, Peer]:
"""Returns a read-only copy of peers."""
with self.lock:
return self._peers.copy()
def channels_for_peer(self, node_id):
return {}
def get_node_alias(self, node_id):
if self.channel_db:
node_info = self.channel_db.get_node_info_for_node_id(node_id)
node_alias = (node_info.alias if node_info else '') or node_id.hex()
else:
node_alias = ''
return node_alias
async def maybe_listen(self):
# FIXME: only one LNWorker can listen at a time (single port)
listen_addr = self.config.get('lightning_listen')
if listen_addr:
self.logger.info(f'lightning_listen enabled. will try to bind: {listen_addr!r}')
try:
netaddr = NetAddress.from_string(listen_addr)
except Exception as e:
self.logger.error(f"failed to parse config key 'lightning_listen'. got: {e!r}")
return
addr = str(netaddr.host)
async def cb(reader, writer):
transport = LNResponderTransport(self.node_keypair.privkey, reader, writer)
try:
node_id = await transport.handshake()
except Exception as e:
self.logger.info(f'handshake failure from incoming connection: {e!r}')
return
peer = Peer(self, node_id, transport)
with self.lock:
self._peers[node_id] = peer
await self.taskgroup.spawn(peer.main_loop())
try:
self.listen_server = await asyncio.start_server(cb, addr, netaddr.port)
except OSError as e:
self.logger.error(f"cannot listen for lightning p2p. error: {e!r}")
@ignore_exceptions # don't kill outer taskgroup
async def main_loop(self):
self.logger.info("starting taskgroup.")
try:
async with self.taskgroup as group:
await group.spawn(self._maintain_connectivity())
except asyncio.CancelledError:
raise
except Exception as e:
self.logger.exception("taskgroup died.")
finally:
self.logger.info("taskgroup stopped.")
async def _maintain_connectivity(self):
while True:
await asyncio.sleep(1)
now = time.time()
if len(self._peers) >= NUM_PEERS_TARGET:
continue
peers = await self._get_next_peers_to_try()
for peer in peers:
if self._can_retry_addr(peer, now=now):
try:
await self._add_peer(peer.host, peer.port, peer.pubkey)
except ErrorAddingPeer as e:
self.logger.info(f"failed to add peer: {peer}. exc: {e!r}")
async def _add_peer(self, host: str, port: int, node_id: bytes) -> Peer:
if node_id in self._peers:
return self._peers[node_id]
port = int(port)
peer_addr = LNPeerAddr(host, port, node_id)
self._trying_addr_now(peer_addr)
self.logger.info(f"adding peer {peer_addr}")
if node_id == self.node_keypair.pubkey:
raise ErrorAddingPeer("cannot connect to self")
transport = LNTransport(self.node_keypair.privkey, peer_addr,
proxy=self.network.proxy)
peer = Peer(self, node_id, transport)
await self.taskgroup.spawn(peer.main_loop())
with self.lock:
self._peers[node_id] = peer
return peer
def peer_closed(self, peer: Peer) -> None:
with self.lock:
self._peers.pop(peer.pubkey, None)
def num_peers(self) -> int:
return sum([p.is_initialized() for p in self.peers.values()])
def start_network(self, network: 'Network'):
assert network
self.network = network
self.config = network.config
self.channel_db = self.network.channel_db
self._add_peers_from_config()
asyncio.run_coroutine_threadsafe(self.main_loop(), self.network.asyncio_loop)
def stop(self):
if self.listen_server:
self.network.asyncio_loop.call_soon_threadsafe(self.listen_server.close)
asyncio.run_coroutine_threadsafe(self.taskgroup.cancel_remaining(), self.network.asyncio_loop)
util.unregister_callback(self.on_proxy_changed)
def _add_peers_from_config(self):
peer_list = self.config.get('lightning_peers', [])
for host, port, pubkey in peer_list:
asyncio.run_coroutine_threadsafe(
self._add_peer(host, int(port), bfh(pubkey)),
self.network.asyncio_loop)
def is_good_peer(self, peer):
# the purpose of this method is to filter peers that advertise the desired feature bits
# it is disabled for now, because feature bits published in node announcements seem to be unreliable
return True
node_id = peer.pubkey
node = self.channel_db._nodes.get(node_id)
if not node:
return False
try:
ln_compare_features(self.features, node.features)
except IncompatibleLightningFeatures:
return False
#self.logger.info(f'is_good {peer.host}')
return True
def on_peer_successfully_established(self, peer: Peer) -> None:
if isinstance(peer.transport, LNTransport):
peer_addr = peer.transport.peer_addr
# reset connection attempt count
self._on_connection_successfully_established(peer_addr)
# add into channel db
if self.channel_db:
self.channel_db.add_recent_peer(peer_addr)
# save network address into channels we might have with peer
for chan in peer.channels.values():
chan.add_or_update_peer_addr(peer_addr)
async def _get_next_peers_to_try(self) -> Sequence[LNPeerAddr]:
now = time.time()
await self.channel_db.data_loaded.wait()
# first try from recent peers
recent_peers = self.channel_db.get_recent_peers()
for peer in recent_peers:
if not peer:
continue
if peer.pubkey in self._peers:
continue
if not self._can_retry_addr(peer, now=now):
continue
if not self.is_good_peer(peer):
continue
return [peer]
# try random peer from graph
unconnected_nodes = self.channel_db.get_200_randomly_sorted_nodes_not_in(self.peers.keys())
if unconnected_nodes:
for node_id in unconnected_nodes:
addrs = self.channel_db.get_node_addresses(node_id)
if not addrs:
continue
host, port, timestamp = self.choose_preferred_address(list(addrs))
try:
peer = LNPeerAddr(host, port, node_id)
except ValueError:
continue
if not self._can_retry_addr(peer, now=now):
continue
if not self.is_good_peer(peer):
continue
#self.logger.info('taking random ln peer from our channel db')
return [peer]
# getting desperate... let's try hardcoded fallback list of peers
if constants.net in (constants.BitcoinTestnet, ):
fallback_list = FALLBACK_NODE_LIST_TESTNET
elif constants.net in (constants.BitcoinMainnet, ):
fallback_list = FALLBACK_NODE_LIST_MAINNET
else:
return [] # regtest??
fallback_list = [peer for peer in fallback_list if self._can_retry_addr(peer, now=now)]
if fallback_list:
return [random.choice(fallback_list)]
# last resort: try dns seeds (BOLT-10)
return await run_in_thread(self._get_peers_from_dns_seeds)
def _get_peers_from_dns_seeds(self) -> Sequence[LNPeerAddr]:
# NOTE: potentially long blocking call, do not run directly on asyncio event loop.
# Return several peers to reduce the number of dns queries.
if not constants.net.LN_DNS_SEEDS:
return []
dns_seed = random.choice(constants.net.LN_DNS_SEEDS)
self.logger.info('asking dns seed "{}" for ln peers'.format(dns_seed))
try:
# note: this might block for several seconds
# this will include bech32-encoded-pubkeys and ports
srv_answers = resolve_dns_srv('r{}.{}'.format(
constants.net.LN_REALM_BYTE, dns_seed))
except dns.exception.DNSException as e:
self.logger.info(f'failed querying (1) dns seed "{dns_seed}" for ln peers: {repr(e)}')
return []
random.shuffle(srv_answers)
num_peers = 2 * NUM_PEERS_TARGET
srv_answers = srv_answers[:num_peers]
# we now have pubkeys and ports but host is still needed
peers = []
for srv_ans in srv_answers:
try:
# note: this might block for several seconds
answers = dns.resolver.resolve(srv_ans['host'])
except dns.exception.DNSException as e:
self.logger.info(f'failed querying (2) dns seed "{dns_seed}" for ln peers: {repr(e)}')
continue
try:
ln_host = str(answers[0])
port = int(srv_ans['port'])
bech32_pubkey = srv_ans['host'].split('.')[0]
pubkey = get_compressed_pubkey_from_bech32(bech32_pubkey)
peers.append(LNPeerAddr(ln_host, port, pubkey))
except Exception as e:
self.logger.info(f'error with parsing peer from dns seed: {repr(e)}')
continue
self.logger.info(f'got {len(peers)} ln peers from dns seed')
return peers
@staticmethod
def choose_preferred_address(addr_list: Sequence[Tuple[str, int, int]]) -> Tuple[str, int, int]:
assert len(addr_list) >= 1
# choose first one that is an IP
for host, port, timestamp in addr_list:
if is_ip_address(host):
return host, port, timestamp
# otherwise choose one at random
# TODO maybe filter out onion if not on tor?
choice = random.choice(addr_list)
return choice
def on_proxy_changed(self, event, *args):
for peer in self.peers.values():
peer.close_and_cleanup()
self._clear_addr_retry_times()
@log_exceptions
async def add_peer(self, connect_str: str) -> Peer:
node_id, rest = extract_nodeid(connect_str)
peer = self._peers.get(node_id)
if not peer:
if rest is not None:
host, port = split_host_port(rest)
else:
addrs = self.channel_db.get_node_addresses(node_id)
if not addrs:
raise ConnStringFormatError(_('Don\'t know any addresses for node:') + ' ' + bh2u(node_id))
host, port, timestamp = self.choose_preferred_address(list(addrs))
port = int(port)
# Try DNS-resolving the host (if needed). This is simply so that
# the caller gets a nice exception if it cannot be resolved.
try:
await asyncio.get_event_loop().getaddrinfo(host, port)
except socket.gaierror:
raise ConnStringFormatError(_('Hostname does not resolve (getaddrinfo failed)'))
# add peer
peer = await self._add_peer(host, port, node_id)
return peer
class LNGossip(LNWorker):
max_age = 14*24*3600
LOGGING_SHORTCUT = 'g'
def __init__(self):
seed = os.urandom(32)
node = BIP32Node.from_rootseed(seed, xtype='standard')
xprv = node.to_xprv()
super().__init__(xprv, LNGOSSIP_FEATURES)
self.unknown_ids = set()
def start_network(self, network: 'Network'):
assert network
super().start_network(network)
asyncio.run_coroutine_threadsafe(self.taskgroup.spawn(self.maintain_db()), self.network.asyncio_loop)
async def maintain_db(self):
await self.channel_db.data_loaded.wait()
while True:
if len(self.unknown_ids) == 0:
self.channel_db.prune_old_policies(self.max_age)
self.channel_db.prune_orphaned_channels()
await asyncio.sleep(120)
async def add_new_ids(self, ids):
known = self.channel_db.get_channel_ids()
new = set(ids) - set(known)
self.unknown_ids.update(new)
util.trigger_callback('unknown_channels', len(self.unknown_ids))
util.trigger_callback('gossip_peers', self.num_peers())
util.trigger_callback('ln_gossip_sync_progress')
def get_ids_to_query(self):
N = 500
l = list(self.unknown_ids)
self.unknown_ids = set(l[N:])
util.trigger_callback('unknown_channels', len(self.unknown_ids))
util.trigger_callback('ln_gossip_sync_progress')
return l[0:N]
def get_sync_progress_estimate(self) -> Tuple[Optional[int], Optional[int], Optional[int]]:
"""Estimates the gossip synchronization process and returns the number
of synchronized channels, the total channels in the network and a
rescaled percentage of the synchronization process."""
if self.num_peers() == 0:
return None, None, None
nchans_with_0p, nchans_with_1p, nchans_with_2p = self.channel_db.get_num_channels_partitioned_by_policy_count()
num_db_channels = nchans_with_0p + nchans_with_1p + nchans_with_2p
# some channels will never have two policies (only one is in gossip?...)
# so if we have at least 1 policy for a channel, we consider that channel "complete" here
current_est = num_db_channels - nchans_with_0p
total_est = len(self.unknown_ids) + num_db_channels
progress = current_est / total_est if total_est and current_est else 0
progress_percent = (1.0 / 0.95 * progress) * 100
progress_percent = min(progress_percent, 100)
progress_percent = round(progress_percent)
# take a minimal number of synchronized channels to get a more accurate
# percentage estimate
if current_est < 200:
progress_percent = 0
return current_est, total_est, progress_percent
async def process_gossip(self, chan_anns, node_anns, chan_upds):
await self.channel_db.data_loaded.wait()
self.logger.debug(f'process_gossip {len(chan_anns)} {len(node_anns)} {len(chan_upds)}')
# note: data processed in chunks to avoid taking sql lock for too long
# channel announcements
for chan_anns_chunk in chunks(chan_anns, 300):
self.channel_db.add_channel_announcement(chan_anns_chunk)
# node announcements
for node_anns_chunk in chunks(node_anns, 100):
self.channel_db.add_node_announcement(node_anns_chunk)
# channel updates
for chan_upds_chunk in chunks(chan_upds, 1000):
categorized_chan_upds = self.channel_db.add_channel_updates(
chan_upds_chunk, max_age=self.max_age)
orphaned = categorized_chan_upds.orphaned
if orphaned:
self.logger.info(f'adding {len(orphaned)} unknown channel ids')
orphaned_ids = [c['short_channel_id'] for c in orphaned]
await self.add_new_ids(orphaned_ids)
if categorized_chan_upds.good:
self.logger.debug(f'on_channel_update: {len(categorized_chan_upds.good)}/{len(chan_upds_chunk)}')
class LNWallet(LNWorker):
lnwatcher: Optional['LNWalletWatcher']
def __init__(self, wallet: 'Abstract_Wallet', xprv):
Logger.__init__(self)
self.wallet = wallet
self.db = wallet.db
LNWorker.__init__(self, xprv, LNWALLET_FEATURES)
self.config = wallet.config
self.lnwatcher = None
self.lnrater: LNRater = None
self.payments = self.db.get_dict('lightning_payments') # RHASH -> amount, direction, is_paid
self.preimages = self.db.get_dict('lightning_preimages') # RHASH -> preimage
# note: this sweep_address is only used as fallback; as it might result in address-reuse
self.sweep_address = wallet.get_new_sweep_address_for_channel()
self.logs = defaultdict(list) # type: Dict[str, List[PaymentAttemptLog]] # key is RHASH # (not persisted)
self.is_routing = set() # (not persisted) keys of invoices that are in PR_ROUTING state
# used in tests
self.enable_htlc_settle = asyncio.Event()
self.enable_htlc_settle.set()
# note: accessing channels (besides simple lookup) needs self.lock!
self._channels = {} # type: Dict[bytes, Channel]
channels = self.db.get_dict("channels")
for channel_id, c in random_shuffled_copy(channels.items()):
self._channels[bfh(channel_id)] = Channel(c, sweep_address=self.sweep_address, lnworker=self)
self.pending_payments = defaultdict(asyncio.Future) # type: Dict[bytes, asyncio.Future[BarePaymentAttemptLog]]
self.pending_htlcs = defaultdict(set) # type: Dict[bytes, set]
self.swap_manager = SwapManager(wallet=self.wallet, lnworker=self)
# detect inflight payments
for payment_hash in self.get_payments(status='inflight').keys():
self.set_invoice_status(payment_hash.hex(), PR_INFLIGHT)
@property
def channels(self) -> Mapping[bytes, Channel]:
"""Returns a read-only copy of channels."""
with self.lock:
return self._channels.copy()
def get_channel_by_id(self, channel_id: bytes) -> Optional[Channel]:
return self._channels.get(channel_id, None)
@ignore_exceptions
@log_exceptions
async def sync_with_local_watchtower(self):
watchtower = self.network.local_watchtower
if watchtower:
while True:
for chan in self.channels.values():
await self.sync_channel_with_watchtower(chan, watchtower.sweepstore)
await asyncio.sleep(5)
@ignore_exceptions
@log_exceptions
async def sync_with_remote_watchtower(self):
while True:
# periodically poll if the user updated 'watchtower_url'
await asyncio.sleep(5)
watchtower_url = self.config.get('watchtower_url')
if not watchtower_url:
continue
parsed_url = urllib.parse.urlparse(watchtower_url)
if not (parsed_url.scheme == 'https' or is_private_netaddress(parsed_url.hostname)):
self.logger.warning(f"got watchtower URL for remote tower but we won't use it! "
f"can only use HTTPS (except if private IP): not using {watchtower_url!r}")
continue
# try to sync with the remote watchtower
try:
async with make_aiohttp_session(proxy=self.network.proxy) as session:
watchtower = JsonRPCClient(session, watchtower_url)
watchtower.add_method('get_ctn')
watchtower.add_method('add_sweep_tx')
for chan in self.channels.values():
await self.sync_channel_with_watchtower(chan, watchtower)
except aiohttp.client_exceptions.ClientConnectorError:
self.logger.info(f'could not contact remote watchtower {watchtower_url}')
async def sync_channel_with_watchtower(self, chan: Channel, watchtower):
outpoint = chan.funding_outpoint.to_str()
addr = chan.get_funding_address()
current_ctn = chan.get_oldest_unrevoked_ctn(REMOTE)
watchtower_ctn = await watchtower.get_ctn(outpoint, addr)
for ctn in range(watchtower_ctn + 1, current_ctn):
sweeptxs = chan.create_sweeptxs(ctn)
for tx in sweeptxs:
await watchtower.add_sweep_tx(outpoint, ctn, tx.inputs()[0].prevout.to_str(), tx.serialize())
def start_network(self, network: 'Network'):
assert network
self.network = network
self.config = network.config
self.channel_db = self.network.channel_db
self.lnwatcher = LNWalletWatcher(self, network)
self.lnwatcher.start_network(network)
self.swap_manager.start_network(network=network, lnwatcher=self.lnwatcher)
self.lnrater = LNRater(self, network)
for chan in self.channels.values():
self.lnwatcher.add_channel(chan.funding_outpoint.to_str(), chan.get_funding_address())
for coro in [
self.maybe_listen(),
self.lnwatcher.on_network_update('network_updated'), # shortcut (don't block) if funding tx locked and verified
self.reestablish_peers_and_channels(),
self.sync_with_local_watchtower(),
self.sync_with_remote_watchtower(),
]:
tg_coro = self.taskgroup.spawn(coro)
asyncio.run_coroutine_threadsafe(tg_coro, self.network.asyncio_loop)
def stop(self):
super().stop()
self.lnwatcher.stop()
self.lnwatcher = None
def peer_closed(self, peer):
for chan in self.channels_for_peer(peer.pubkey).values():
chan.peer_state = PeerState.DISCONNECTED
util.trigger_callback('channel', self.wallet, chan)
super().peer_closed(peer)
def get_payments(self, *, status=None):
# return one item per payment_hash
# note: with AMP we will have several channels per payment
out = defaultdict(list)
for chan in self.channels.values():
d = chan.get_payments(status=status)
for k, v in d.items():
out[k] += v
return out
def get_payment_value(self, info: Optional['PaymentInfo'], plist):
amount_msat = 0
fee_msat = None
for chan_id, htlc, _direction, _status in plist:
amount_msat += int(_direction) * htlc.amount_msat
if _direction == SENT and info and info.amount_msat:
fee_msat = (fee_msat or 0) - info.amount_msat - amount_msat
timestamp = min([htlc.timestamp for chan_id, htlc, _direction, _status in plist])
return amount_msat, fee_msat, timestamp
def get_lightning_history(self):
out = {}
for payment_hash, plist in self.get_payments(status='settled').items():
if len(plist) == 0:
continue
key = payment_hash.hex()
info = self.get_payment_info(payment_hash)
amount_msat, fee_msat, timestamp = self.get_payment_value(info, plist)
if info is not None:
label = self.wallet.get_label(key)
direction = ('sent' if info.direction == SENT else 'received') if len(plist)==1 else 'self-payment'
else:
direction = 'forwarding'
label = _('Forwarding')
preimage = self.get_preimage(payment_hash).hex()
item = {
'type': 'payment',
'label': label,
'timestamp': timestamp or 0,
'date': timestamp_to_datetime(timestamp),
'direction': direction,
'amount_msat': amount_msat,
'fee_msat': fee_msat,
'payment_hash': key,
'preimage': preimage,
}
# add group_id to swap transactions
swap = self.swap_manager.get_swap(payment_hash)
if swap:
if swap.is_reverse:
item['group_id'] = swap.spending_txid
item['group_label'] = 'Reverse swap' + ' ' + self.config.format_amount_and_units(swap.lightning_amount)
else:
item['group_id'] = swap.funding_txid
item['group_label'] = 'Forward swap' + ' ' + self.config.format_amount_and_units(swap.onchain_amount)
# done
out[payment_hash] = item
return out
def get_onchain_history(self):
out = {}
# add funding events
for chan in self.channels.values():
item = chan.get_funding_height()
if item is None:
continue
funding_txid, funding_height, funding_timestamp = item
item = {
'channel_id': bh2u(chan.channel_id),
'type': 'channel_opening',
'label': self.wallet.get_label_for_txid(funding_txid) or (_('Open channel') + ' ' + chan.get_id_for_log()),
'txid': funding_txid,
'amount_msat': chan.balance(LOCAL, ctn=0),
'direction': 'received',
'timestamp': funding_timestamp,
'fee_msat': None,
}
out[funding_txid] = item
item = chan.get_closing_height()
if item is None:
continue
closing_txid, closing_height, closing_timestamp = item
item = {
'channel_id': bh2u(chan.channel_id),
'txid': closing_txid,
'label': self.wallet.get_label_for_txid(closing_txid) or (_('Close channel') + ' ' + chan.get_id_for_log()),
'type': 'channel_closure',
'amount_msat': -chan.balance_minus_outgoing_htlcs(LOCAL),
'direction': 'sent',
'timestamp': closing_timestamp,
'fee_msat': None,
}
out[closing_txid] = item
# add info about submarine swaps
settled_payments = self.get_payments(status='settled')
current_height = self.wallet.get_local_height()
for payment_hash_hex, swap in self.swap_manager.swaps.items():
txid = swap.spending_txid if swap.is_reverse else swap.funding_txid
if txid is None:
continue
payment_hash = bytes.fromhex(payment_hash_hex)
if payment_hash in settled_payments:
plist = settled_payments[payment_hash]
info = self.get_payment_info(payment_hash)
amount_msat, fee_msat, timestamp = self.get_payment_value(info, plist)
else:
amount_msat = 0
label = 'Reverse swap' if swap.is_reverse else 'Forward swap'
delta = current_height - swap.locktime
if not swap.is_redeemed and swap.spending_txid is None and delta < 0:
label += f' (refundable in {-delta} blocks)' # fixme: only if unspent
out[txid] = {
'txid': txid,
'group_id': txid,
'amount_msat': 0,
#'amount_msat': amount_msat, # must not be added
'type': 'swap',
'label': self.wallet.get_label_for_txid(txid) or label,
}
return out
def get_history(self):
out = list(self.get_lightning_history().values()) + list(self.get_onchain_history().values())
# sort by timestamp
out.sort(key=lambda x: (x.get('timestamp') or float("inf")))
balance_msat = 0
for item in out:
balance_msat += item['amount_msat']
item['balance_msat'] = balance_msat
return out
def channel_peers(self) -> List[bytes]:
node_ids = [chan.node_id for chan in self.channels.values() if not chan.is_closed()]
return node_ids
def channels_for_peer(self, node_id):
assert type(node_id) is bytes
return {chan_id: chan for (chan_id, chan) in self.channels.items()
if chan.node_id == node_id}
def channel_state_changed(self, chan: Channel):
self.save_channel(chan)
util.trigger_callback('channel', self.wallet, chan)
def save_channel(self, chan: Channel):
assert type(chan) is Channel
if chan.config[REMOTE].next_per_commitment_point == chan.config[REMOTE].current_per_commitment_point:
raise Exception("Tried to save channel with next_point == current_point, this should not happen")
self.wallet.save_db()
util.trigger_callback('channel', self.wallet, chan)
def channel_by_txo(self, txo: str) -> Optional[Channel]:
for chan in self.channels.values():
if chan.funding_outpoint.to_str() == txo:
return chan
async def on_channel_update(self, chan):
if chan.get_state() == ChannelState.OPEN and chan.should_be_closed_due_to_expiring_htlcs(self.network.get_local_height()):
self.logger.info(f"force-closing due to expiring htlcs")
await self.try_force_closing(chan.channel_id)
elif chan.get_state() == ChannelState.FUNDED:
peer = self._peers.get(chan.node_id)
if peer and peer.is_initialized():
peer.send_funding_locked(chan)
elif chan.get_state() == ChannelState.OPEN:
peer = self._peers.get(chan.node_id)
if peer:
await peer.maybe_update_fee(chan)
conf = self.lnwatcher.get_tx_height(chan.funding_outpoint.txid).conf
peer.on_network_update(chan, conf)
elif chan.get_state() == ChannelState.FORCE_CLOSING:
force_close_tx = chan.force_close_tx()
txid = force_close_tx.txid()
height = self.lnwatcher.get_tx_height(txid).height
if height == TX_HEIGHT_LOCAL:
self.logger.info('REBROADCASTING CLOSING TX')
await self.network.try_broadcasting(force_close_tx, 'force-close')
@log_exceptions
async def _open_channel_coroutine(self, *, connect_str: str, funding_tx: PartialTransaction,
funding_sat: int, push_sat: int,
password: Optional[str]) -> Tuple[Channel, PartialTransaction]:
peer = await self.add_peer(connect_str)
# will raise if init fails
await asyncio.wait_for(peer.initialized, LN_P2P_NETWORK_TIMEOUT)
chan, funding_tx = await peer.channel_establishment_flow(
password=password,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_msat=push_sat * 1000,
temp_channel_id=os.urandom(32))
util.trigger_callback('channels_updated', self.wallet)
self.wallet.add_transaction(funding_tx) # save tx as local into the wallet
self.wallet.set_label(funding_tx.txid(), _('Open channel'))
if funding_tx.is_complete():
await self.network.try_broadcasting(funding_tx, 'open_channel')
return chan, funding_tx
def add_channel(self, chan: Channel):
with self.lock:
self._channels[chan.channel_id] = chan
self.lnwatcher.add_channel(chan.funding_outpoint.to_str(), chan.get_funding_address())
def add_new_channel(self, chan: Channel):
self.add_channel(chan)
channels_db = self.db.get_dict('channels')
channels_db[chan.channel_id.hex()] = chan.storage
for addr in chan.get_wallet_addresses_channel_might_want_reserved():
self.wallet.set_reserved_state_of_address(addr, reserved=True)
try:
self.save_channel(chan)
self.wallet.save_backup()
except:
chan.set_state(ChannelState.REDEEMED)
self.remove_channel(chan.channel_id)
raise
def mktx_for_open_channel(self, *, coins: Sequence[PartialTxInput], funding_sat: int,
fee_est=None) -> PartialTransaction:
dummy_address = ln_dummy_address()
outputs = [PartialTxOutput.from_address_and_value(dummy_address, funding_sat)]
tx = self.wallet.make_unsigned_transaction(
coins=coins,
outputs=outputs,
fee=fee_est)
tx.set_rbf(False)
return tx
def open_channel(self, *, connect_str: str, funding_tx: PartialTransaction,
funding_sat: int, push_amt_sat: int, password: str = None,
timeout: Optional[int] = 20) -> Tuple[Channel, PartialTransaction]:
if funding_sat > LN_MAX_FUNDING_SAT:
raise Exception(_("Requested channel capacity is over protocol allowed maximum."))
coro = self._open_channel_coroutine(connect_str=connect_str, funding_tx=funding_tx, funding_sat=funding_sat,
push_sat=push_amt_sat, password=password)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
try:
chan, funding_tx = fut.result(timeout=timeout)
except concurrent.futures.TimeoutError:
raise Exception(_("open_channel timed out"))
# at this point the channel opening was successful
# if this is the first channel that got opened, we start gossiping
if self.channels:
self.network.start_gossip()
return chan, funding_tx
def pay(self, invoice: str, *, amount_msat: int = None, attempts: int = 1) -> Tuple[bool, List[PaymentAttemptLog]]:
"""
Can be called from other threads
"""
coro = self._pay(invoice, amount_msat=amount_msat, attempts=attempts)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
return fut.result()
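    # Usage sketch (illustrative; assumes a started Network and an initialized
    # LNWallet instance called `lnworker`):
    #
    #   success, log = lnworker.pay(invoice_str, attempts=3)
    #   if not success:
    #       for attempt in log:
    #           print(attempt.failure_details)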
def get_channel_by_short_id(self, short_channel_id: bytes) -> Optional[Channel]:
for chan in self.channels.values():
if chan.short_channel_id == short_channel_id:
return chan
@log_exceptions
async def _pay(
self,
invoice: str,
*,
amount_msat: int = None,
attempts: int = 1,
full_path: LNPaymentPath = None,
) -> Tuple[bool, List[PaymentAttemptLog]]:
lnaddr = self._check_invoice(invoice, amount_msat=amount_msat)
payment_hash = lnaddr.paymenthash
key = payment_hash.hex()
amount_msat = lnaddr.get_amount_msat()
status = self.get_payment_status(payment_hash)
if status == PR_PAID:
raise PaymentFailure(_("This invoice has been paid already"))
if status == PR_INFLIGHT:
raise PaymentFailure(_("A payment was already initiated for this invoice"))
info = PaymentInfo(payment_hash, amount_msat, SENT, PR_UNPAID)
self.save_payment_info(info)
self.wallet.set_label(key, lnaddr.get_description())
self.logs[key] = log = []
success = False
reason = ''
for i in range(attempts):
try:
# note: path-finding runs in a separate thread so that we don't block the asyncio loop
# graph updates might occur during the computation
self.set_invoice_status(key, PR_ROUTING)
util.trigger_callback('invoice_status', self.wallet, key)
route = await run_in_thread(partial(self._create_route_from_invoice, lnaddr, full_path=full_path))
self.set_invoice_status(key, PR_INFLIGHT)
util.trigger_callback('invoice_status', self.wallet, key)
payment_attempt_log = await self._pay_to_route(route, lnaddr)
except Exception as e:
log.append(PaymentAttemptLog(success=False, exception=e))
self.set_invoice_status(key, PR_UNPAID)
reason = str(e)
break
log.append(payment_attempt_log)
success = payment_attempt_log.success
if success:
break
else:
reason = _('Failed after {} attempts').format(attempts)
util.trigger_callback('invoice_status', self.wallet, key)
if success:
util.trigger_callback('payment_succeeded', self.wallet, key)
else:
util.trigger_callback('payment_failed', self.wallet, key, reason)
return success, log
async def _pay_to_route(self, route: LNPaymentRoute, lnaddr: LnAddr) -> PaymentAttemptLog:
short_channel_id = route[0].short_channel_id
chan = self.get_channel_by_short_id(short_channel_id)
peer = self._peers.get(route[0].node_id)
if not peer:
raise Exception('Dropped peer')
await peer.initialized
htlc = peer.pay(
route=route,
chan=chan,
amount_msat=lnaddr.get_amount_msat(),
payment_hash=lnaddr.paymenthash,
min_final_cltv_expiry=lnaddr.get_min_final_cltv_expiry(),
payment_secret=lnaddr.payment_secret)
util.trigger_callback('htlc_added', chan, htlc, SENT)
payment_attempt = await self.await_payment(lnaddr.paymenthash)
if payment_attempt.success:
failure_log = None
else:
if payment_attempt.error_bytes:
# TODO "decode_onion_error" might raise, catch and maybe blacklist/penalise someone?
failure_msg, sender_idx = chan.decode_onion_error(payment_attempt.error_bytes, route, htlc.htlc_id)
is_blacklisted = self.handle_error_code_from_failed_htlc(failure_msg, sender_idx, route, peer)
if is_blacklisted:
# blacklist channel after reporter node
# TODO this should depend on the error (even more granularity)
# also, we need finer blacklisting (directed edges; nodes)
try:
short_chan_id = route[sender_idx + 1].short_channel_id
except IndexError:
self.logger.info("payment destination reported error")
else:
self.logger.info(f'blacklisting channel {short_chan_id}')
self.network.channel_blacklist.add(short_chan_id)
else:
# probably got "update_fail_malformed_htlc". well... who to penalise now?
assert payment_attempt.failure_message is not None
sender_idx = None
failure_msg = payment_attempt.failure_message
is_blacklisted = False
failure_log = PaymentAttemptFailureDetails(sender_idx=sender_idx,
failure_msg=failure_msg,
is_blacklisted=is_blacklisted)
return PaymentAttemptLog(route=route,
success=payment_attempt.success,
preimage=payment_attempt.preimage,
failure_details=failure_log)
def handle_error_code_from_failed_htlc(self, failure_msg, sender_idx, route, peer):
code, data = failure_msg.code, failure_msg.data
self.logger.info(f"UPDATE_FAIL_HTLC {repr(code)} {data}")
self.logger.info(f"error reported by {bh2u(route[sender_idx].node_id)}")
# handle some specific error codes
failure_codes = {
OnionFailureCode.TEMPORARY_CHANNEL_FAILURE: 0,
OnionFailureCode.AMOUNT_BELOW_MINIMUM: 8,
OnionFailureCode.FEE_INSUFFICIENT: 8,
OnionFailureCode.INCORRECT_CLTV_EXPIRY: 4,
OnionFailureCode.EXPIRY_TOO_SOON: 0,
OnionFailureCode.CHANNEL_DISABLED: 2,
}
if code in failure_codes:
offset = failure_codes[code]
channel_update_len = int.from_bytes(data[offset:offset+2], byteorder="big")
channel_update_as_received = data[offset+2: offset+2+channel_update_len]
payload = self._decode_channel_update_msg(channel_update_as_received)
if payload is None:
self.logger.info(f'could not decode channel_update for failed htlc: {channel_update_as_received.hex()}')
return True
r = self.channel_db.add_channel_update(payload)
blacklist = False
short_channel_id = ShortChannelID(payload['short_channel_id'])
if r == UpdateStatus.GOOD:
self.logger.info(f"applied channel update to {short_channel_id}")
peer.maybe_save_remote_update(payload)
elif r == UpdateStatus.ORPHANED:
# maybe it is a private channel (and data in invoice was outdated)
self.logger.info(f"Could not find {short_channel_id}. maybe update is for private channel?")
start_node_id = route[sender_idx].node_id
self.channel_db.add_channel_update_for_private_channel(payload, start_node_id)
elif r == UpdateStatus.EXPIRED:
blacklist = True
elif r == UpdateStatus.DEPRECATED:
self.logger.info(f'channel update is not more recent.')
blacklist = True
elif r == UpdateStatus.UNCHANGED:
blacklist = True
else:
blacklist = True
return blacklist
@classmethod
def _decode_channel_update_msg(cls, chan_upd_msg: bytes) -> Optional[Dict[str, Any]]:
channel_update_as_received = chan_upd_msg
channel_update_typed = (258).to_bytes(length=2, byteorder="big") + channel_update_as_received
# note: some nodes put channel updates in error msgs with the leading msg_type already there.
# we try decoding both ways here.
try:
message_type, payload = decode_msg(channel_update_typed)
if payload['chain_hash'] != constants.net.rev_genesis_bytes(): raise Exception()
payload['raw'] = channel_update_typed
return payload
except: # FIXME: too broad
try:
message_type, payload = decode_msg(channel_update_as_received)
if payload['chain_hash'] != constants.net.rev_genesis_bytes(): raise Exception()
payload['raw'] = channel_update_as_received
return payload
except:
return None
@staticmethod
def _check_invoice(invoice: str, *, amount_msat: int = None) -> LnAddr:
addr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
if addr.is_expired():
raise InvoiceError(_("This invoice has expired"))
if amount_msat: # replace amt in invoice. main usecase is paying zero amt invoices
existing_amt_msat = addr.get_amount_msat()
if existing_amt_msat and amount_msat < existing_amt_msat:
raise Exception("cannot pay lower amt than what is originally in LN invoice")
addr.amount = Decimal(amount_msat) / COIN / 1000
if addr.amount is None:
raise InvoiceError(_("Missing amount"))
if addr.get_min_final_cltv_expiry() > lnutil.NBLOCK_CLTV_EXPIRY_TOO_FAR_INTO_FUTURE:
raise InvoiceError("{}\n{}".format(
_("Invoice wants us to risk locking funds for unreasonably long."),
f"min_final_cltv_expiry: {addr.get_min_final_cltv_expiry()}"))
return addr
@profiler
def _create_route_from_invoice(self, decoded_invoice: 'LnAddr',
*, full_path: LNPaymentPath = None) -> LNPaymentRoute:
amount_msat = decoded_invoice.get_amount_msat()
invoice_pubkey = decoded_invoice.pubkey.serialize()
# use 'r' field from invoice
route = None # type: Optional[LNPaymentRoute]
# only want 'r' tags
r_tags = list(filter(lambda x: x[0] == 'r', decoded_invoice.tags))
# strip the tag type, it's implicitly 'r' now
r_tags = list(map(lambda x: x[1], r_tags))
# if there are multiple hints, we will use the first one that works,
# from a random permutation
random.shuffle(r_tags)
channels = list(self.channels.values())
scid_to_my_channels = {chan.short_channel_id: chan for chan in channels
if chan.short_channel_id is not None}
blacklist = self.network.channel_blacklist.get_current_list()
for private_route in r_tags:
if len(private_route) == 0:
continue
if len(private_route) > NUM_MAX_EDGES_IN_PAYMENT_PATH:
continue
border_node_pubkey = private_route[0][0]
if full_path:
# user pre-selected path. check that end of given path coincides with private_route:
if [edge.short_channel_id for edge in full_path[-len(private_route):]] != [edge[1] for edge in private_route]:
continue
path = full_path[:-len(private_route)]
else:
# find path now on public graph, to border node
path = None
try:
route = self.network.path_finder.find_route(
self.node_keypair.pubkey, border_node_pubkey, amount_msat,
path=path, my_channels=scid_to_my_channels, blacklist=blacklist)
except NoChannelPolicy:
continue
if not route:
continue
# we need to shift the node pubkey by one towards the destination:
private_route_nodes = [edge[0] for edge in private_route][1:] + [invoice_pubkey]
private_route_rest = [edge[1:] for edge in private_route]
prev_node_id = border_node_pubkey
for node_pubkey, edge_rest in zip(private_route_nodes, private_route_rest):
short_channel_id, fee_base_msat, fee_proportional_millionths, cltv_expiry_delta = edge_rest
short_channel_id = ShortChannelID(short_channel_id)
# if we have a routing policy for this edge in the db, that takes precedence,
# as it is likely from a previous failure
channel_policy = self.channel_db.get_policy_for_node(
short_channel_id=short_channel_id,
node_id=prev_node_id,
my_channels=scid_to_my_channels)
if channel_policy:
fee_base_msat = channel_policy.fee_base_msat
fee_proportional_millionths = channel_policy.fee_proportional_millionths
cltv_expiry_delta = channel_policy.cltv_expiry_delta
node_info = self.channel_db.get_node_info_for_node_id(node_id=node_pubkey)
route.append(
RouteEdge(
node_id=node_pubkey,
short_channel_id=short_channel_id,
fee_base_msat=fee_base_msat,
fee_proportional_millionths=fee_proportional_millionths,
cltv_expiry_delta=cltv_expiry_delta,
node_features=node_info.features if node_info else 0))
prev_node_id = node_pubkey
# test sanity
if not is_route_sane_to_use(route, amount_msat, decoded_invoice.get_min_final_cltv_expiry()):
self.logger.info(f"rejecting insane route {route}")
route = None
continue
break
# if could not find route using any hint; try without hint now
if route is None:
route = self.network.path_finder.find_route(
self.node_keypair.pubkey, invoice_pubkey, amount_msat,
path=full_path, my_channels=scid_to_my_channels, blacklist=blacklist)
if not route:
raise NoPathFound()
if not is_route_sane_to_use(route, amount_msat, decoded_invoice.get_min_final_cltv_expiry()):
self.logger.info(f"rejecting insane route {route}")
raise NoPathFound()
assert len(route) > 0
if route[-1].node_id != invoice_pubkey:
raise LNPathInconsistent("last node_id != invoice pubkey")
# add features from invoice
invoice_features = decoded_invoice.get_tag('9') or 0
route[-1].node_features |= invoice_features
return route
def add_request(self, amount_sat, message, expiry) -> str:
coro = self._add_request_coro(amount_sat, message, expiry)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
try:
return fut.result(timeout=5)
except concurrent.futures.TimeoutError:
raise Exception(_("add invoice timed out"))
@log_exceptions
async def create_invoice(self, *, amount_msat: Optional[int], message, expiry: int):
timestamp = int(time.time())
routing_hints = await self._calc_routing_hints_for_invoice(amount_msat)
if not routing_hints:
self.logger.info("Warning. No routing hints added to invoice. "
"Other clients will likely not be able to send to us.")
payment_preimage = os.urandom(32)
payment_hash = sha256(payment_preimage)
info = PaymentInfo(payment_hash, amount_msat, RECEIVED, PR_UNPAID)
amount_btc = amount_msat/Decimal(COIN*1000) if amount_msat else None
if expiry == 0:
expiry = LN_EXPIRY_NEVER
lnaddr = LnAddr(paymenthash=payment_hash,
amount=amount_btc,
tags=[('d', message),
('c', MIN_FINAL_CLTV_EXPIRY_FOR_INVOICE),
('x', expiry),
('9', self.features.for_invoice())]
+ routing_hints,
date=timestamp,
payment_secret=derive_payment_secret_from_payment_preimage(payment_preimage))
invoice = lnencode(lnaddr, self.node_keypair.privkey)
self.save_preimage(payment_hash, payment_preimage)
self.save_payment_info(info)
return lnaddr, invoice
async def _add_request_coro(self, amount_sat: Optional[int], message, expiry: int) -> str:
amount_msat = amount_sat * 1000 if amount_sat is not None else None
lnaddr, invoice = await self.create_invoice(
amount_msat=amount_msat,
message=message,
expiry=expiry,
)
key = bh2u(lnaddr.paymenthash)
req = LNInvoice.from_bech32(invoice)
self.wallet.add_payment_request(req)
self.wallet.set_label(key, message)
return key
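    # Usage sketch (illustrative): `add_request` is the blocking entry point for
    # creating a receive request from another thread; the returned key is the
    # payment hash (hex) under which the request and its label are stored.
    #
    #   rhash = lnworker.add_request(amount_sat=1000, message='coffee', expiry=3600)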
def save_preimage(self, payment_hash: bytes, preimage: bytes):
assert sha256(preimage) == payment_hash
self.preimages[bh2u(payment_hash)] = bh2u(preimage)
self.wallet.save_db()
def get_preimage(self, payment_hash: bytes) -> Optional[bytes]:
r = self.preimages.get(bh2u(payment_hash))
return bfh(r) if r else None
def get_payment_info(self, payment_hash: bytes) -> Optional[PaymentInfo]:
"""returns None if payment_hash is a payment we are forwarding"""
key = payment_hash.hex()
with self.lock:
if key in self.payments:
amount_msat, direction, status = self.payments[key]
return PaymentInfo(payment_hash, amount_msat, direction, status)
def save_payment_info(self, info: PaymentInfo) -> None:
key = info.payment_hash.hex()
assert info.status in SAVED_PR_STATUS
with self.lock:
self.payments[key] = info.amount_msat, info.direction, info.status
self.wallet.save_db()
def htlc_received(self, short_channel_id, htlc, expected_msat):
status = self.get_payment_status(htlc.payment_hash)
if status == PR_PAID:
return True, None
s = self.pending_htlcs[htlc.payment_hash]
if (short_channel_id, htlc) not in s:
s.add((short_channel_id, htlc))
total = sum([htlc.amount_msat for scid, htlc in s])
first_timestamp = min([htlc.timestamp for scid, htlc in s])
expired = time.time() - first_timestamp > MPP_EXPIRY
if total >= expected_msat and not expired:
# status must be persisted
self.payment_received(htlc.payment_hash)
return True, None
if expired:
return None, True
return None, None
def get_payment_status(self, payment_hash):
info = self.get_payment_info(payment_hash)
return info.status if info else PR_UNPAID
def get_invoice_status(self, invoice):
key = invoice.rhash
log = self.logs[key]
if key in self.is_routing:
return PR_ROUTING
# status may be PR_FAILED
status = self.get_payment_status(bfh(key))
if status == PR_UNPAID and log:
status = PR_FAILED
return status
def set_invoice_status(self, key, status):
if status == PR_ROUTING:
self.is_routing.add(key)
elif key in self.is_routing:
self.is_routing.remove(key)
if status in SAVED_PR_STATUS:
self.set_payment_status(bfh(key), status)
async def await_payment(self, payment_hash: bytes) -> BarePaymentAttemptLog:
# note side-effect: Future is created and added here (defaultdict):
payment_attempt = await self.pending_payments[payment_hash]
self.pending_payments.pop(payment_hash)
return payment_attempt
def set_payment_status(self, payment_hash: bytes, status):
info = self.get_payment_info(payment_hash)
if info is None:
# if we are forwarding
return
info = info._replace(status=status)
self.save_payment_info(info)
def payment_failed(
self,
chan: Channel,
payment_hash: bytes,
error_bytes: Optional[bytes],
failure_message: Optional['OnionRoutingFailureMessage'],
):
self.set_payment_status(payment_hash, PR_UNPAID)
f = self.pending_payments.get(payment_hash)
if f and not f.cancelled():
payment_attempt = BarePaymentAttemptLog(
success=False,
error_bytes=error_bytes,
failure_message=failure_message)
f.set_result(payment_attempt)
else:
chan.logger.info('received unexpected payment_failed, probably from previous session')
key = payment_hash.hex()
util.trigger_callback('invoice_status', self.wallet, key)
util.trigger_callback('payment_failed', self.wallet, key, '')
util.trigger_callback('ln_payment_failed', payment_hash, chan.channel_id)
def payment_sent(self, chan, payment_hash: bytes):
self.set_payment_status(payment_hash, PR_PAID)
preimage = self.get_preimage(payment_hash)
f = self.pending_payments.get(payment_hash)
if f and not f.cancelled():
payment_attempt = BarePaymentAttemptLog(
success=True,
preimage=preimage)
f.set_result(payment_attempt)
else:
chan.logger.info('received unexpected payment_sent, probably from previous session')
key = payment_hash.hex()
util.trigger_callback('invoice_status', self.wallet, key)
util.trigger_callback('payment_succeeded', self.wallet, key)
util.trigger_callback('ln_payment_completed', payment_hash, chan.channel_id)
def payment_received(self, payment_hash: bytes):
self.set_payment_status(payment_hash, PR_PAID)
util.trigger_callback('request_status', self.wallet, payment_hash.hex(), PR_PAID)
#util.trigger_callback('ln_payment_completed', payment_hash, chan.channel_id)
async def _calc_routing_hints_for_invoice(self, amount_msat: Optional[int]):
"""calculate routing hints (BOLT-11 'r' field)"""
routing_hints = []
channels = list(self.channels.values())
random.shuffle(channels) # not sure this has any benefit but let's not leak channel order
scid_to_my_channels = {chan.short_channel_id: chan for chan in channels
if chan.short_channel_id is not None}
if not amount_msat:
# for no amt invoices, check if channel can receive at least 1 msat
amount_msat = 1
# note: currently we add *all* our channels; but this might be a privacy leak?
for chan in channels:
# do minimal filtering of channels.
# we include channels that cannot *right now* receive (e.g. peer disconnected or balance insufficient)
if not (chan.is_open() and not chan.is_frozen_for_receiving()):
continue
if amount_msat > 1000 * chan.constraints.capacity:
continue
chan_id = chan.short_channel_id
assert isinstance(chan_id, bytes), chan_id
channel_info = get_mychannel_info(chan_id, scid_to_my_channels)
# note: as a fallback, if we don't have a channel update for the
# incoming direction of our private channel, we fill the invoice with garbage.
# the sender should still be able to pay us, but will incur an extra round trip
# (they will get the channel update from the onion error)
# at least, that's the theory. https://github.com/lightningnetwork/lnd/issues/2066
fee_base_msat = fee_proportional_millionths = 0
cltv_expiry_delta = 1 # lnd won't even try with zero
missing_info = True
if channel_info:
policy = get_mychannel_policy(channel_info.short_channel_id, chan.node_id, scid_to_my_channels)
if policy:
fee_base_msat = policy.fee_base_msat
fee_proportional_millionths = policy.fee_proportional_millionths
cltv_expiry_delta = policy.cltv_expiry_delta
missing_info = False
if missing_info:
self.logger.info(f"Warning. Missing channel update for our channel {chan_id}; "
f"filling invoice with incorrect data.")
routing_hints.append(('r', [(chan.node_id,
chan_id,
fee_base_msat,
fee_proportional_millionths,
cltv_expiry_delta)]))
return routing_hints
def delete_payment(self, payment_hash_hex: str):
try:
with self.lock:
del self.payments[payment_hash_hex]
except KeyError:
return
self.wallet.save_db()
def get_balance(self):
with self.lock:
return Decimal(sum(chan.balance(LOCAL) if not chan.is_closed() else 0
for chan in self.channels.values())) / 1000
def num_sats_can_send(self) -> Decimal:
send_values = [Decimal(0)]
with self.lock:
if self.channels:
for c in self.channels.values():
send_values.append(Decimal(c.available_to_spend(LOCAL)) / 1000)
return max(send_values)
def num_sats_can_receive(self) -> Decimal:
receive_values = [Decimal(0)]
with self.lock:
if self.channels:
for c in self.channels.values():
receive_values.append(Decimal(c.available_to_spend(REMOTE)) / 1000)
return max(receive_values)
def can_pay_invoice(self, invoice: LNInvoice) -> bool:
return invoice.get_amount_sat() <= self.num_sats_can_send()
def can_receive_invoice(self, invoice: LNInvoice) -> bool:
return invoice.get_amount_sat() <= self.num_sats_can_receive()
async def close_channel(self, chan_id):
chan = self._channels[chan_id]
peer = self._peers[chan.node_id]
return await peer.close_channel(chan_id)
async def force_close_channel(self, chan_id):
# returns txid or raises
chan = self._channels[chan_id]
tx = chan.force_close_tx()
await self.network.broadcast_transaction(tx)
chan.set_state(ChannelState.FORCE_CLOSING)
return tx.txid()
async def try_force_closing(self, chan_id):
# fails silently but sets the state, so that we will retry later
chan = self._channels[chan_id]
tx = chan.force_close_tx()
chan.set_state(ChannelState.FORCE_CLOSING)
await self.network.try_broadcasting(tx, 'force-close')
def remove_channel(self, chan_id):
chan = self._channels[chan_id]
assert chan.get_state() == ChannelState.REDEEMED
with self.lock:
self._channels.pop(chan_id)
self.db.get('channels').pop(chan_id.hex())
for addr in chan.get_wallet_addresses_channel_might_want_reserved():
self.wallet.set_reserved_state_of_address(addr, reserved=False)
util.trigger_callback('channels_updated', self.wallet)
util.trigger_callback('wallet_updated', self.wallet)
@ignore_exceptions
@log_exceptions
async def reestablish_peer_for_given_channel(self, chan: Channel) -> None:
now = time.time()
peer_addresses = []
# will try last good address first, from gossip
last_good_addr = self.channel_db.get_last_good_address(chan.node_id)
if last_good_addr:
peer_addresses.append(last_good_addr)
# will try addresses for node_id from gossip
addrs_from_gossip = self.channel_db.get_node_addresses(chan.node_id) or []
for host, port, ts in addrs_from_gossip:
peer_addresses.append(LNPeerAddr(host, port, chan.node_id))
# will try addresses stored in channel storage
peer_addresses += list(chan.get_peer_addresses())
# Done gathering addresses.
# Now select first one that has not failed recently.
for peer in peer_addresses:
if self._can_retry_addr(peer, urgent=True, now=now):
await self._add_peer(peer.host, peer.port, peer.pubkey)
return
async def reestablish_peers_and_channels(self):
while True:
await asyncio.sleep(1)
for chan in self.channels.values():
if chan.is_closed():
continue
# reestablish
if not chan.should_try_to_reestablish_peer():
continue
peer = self._peers.get(chan.node_id, None)
if peer:
await peer.taskgroup.spawn(peer.reestablish_channel(chan))
else:
await self.taskgroup.spawn(self.reestablish_peer_for_given_channel(chan))
def current_feerate_per_kw(self):
from .simple_config import FEE_LN_ETA_TARGET, FEERATE_FALLBACK_STATIC_FEE, FEERATE_REGTEST_HARDCODED
if constants.net is constants.BitcoinRegtest:
return FEERATE_REGTEST_HARDCODED // 4
feerate_per_kvbyte = self.network.config.eta_target_to_fee(FEE_LN_ETA_TARGET)
if feerate_per_kvbyte is None:
feerate_per_kvbyte = FEERATE_FALLBACK_STATIC_FEE
return max(253, feerate_per_kvbyte // 4)
def create_channel_backup(self, channel_id):
chan = self._channels[channel_id]
# do not backup old-style channels
assert chan.is_static_remotekey_enabled()
peer_addresses = list(chan.get_peer_addresses())
peer_addr = peer_addresses[0]
return ChannelBackupStorage(
node_id = chan.node_id,
privkey = self.node_keypair.privkey,
funding_txid = chan.funding_outpoint.txid,
funding_index = chan.funding_outpoint.output_index,
funding_address = chan.get_funding_address(),
host = peer_addr.host,
port = peer_addr.port,
is_initiator = chan.constraints.is_initiator,
channel_seed = chan.config[LOCAL].channel_seed,
local_delay = chan.config[LOCAL].to_self_delay,
remote_delay = chan.config[REMOTE].to_self_delay,
remote_revocation_pubkey = chan.config[REMOTE].revocation_basepoint.pubkey,
remote_payment_pubkey = chan.config[REMOTE].payment_basepoint.pubkey)
def export_channel_backup(self, channel_id):
xpub = self.wallet.get_fingerprint()
backup_bytes = self.create_channel_backup(channel_id).to_bytes()
assert backup_bytes == ChannelBackupStorage.from_bytes(backup_bytes).to_bytes(), "roundtrip failed"
encrypted = pw_encode_with_version_and_mac(backup_bytes, xpub)
assert backup_bytes == pw_decode_with_version_and_mac(encrypted, xpub), "encrypt failed"
return 'channel_backup:' + encrypted
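    # Round-trip sketch (illustrative; assumes the wallet exposes an LNBackups
    # instance, e.g. `wallet.lnbackups`): the exported string is encrypted to the
    # wallet fingerprint and can later be re-imported to watch the channel.
    #
    #   data = lnworker.export_channel_backup(channel_id)
    #   wallet.lnbackups.import_channel_backup(data)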
async def request_remote_force_close(
self, *, funding_txid: str, funding_index: int, connect_str: str,
):
"""
Requests the remote to force close a channel. Can be used without
having state or any backup for the channel.
Assumes that channel was originally opened with the same local peer (node_keypair).
Kept for console use.
Example:
network.run_from_another_thread(wallet.lnworker.request_remote_force_close(funding_txid="11a3b391bc99dbca0b2be4fdd8f18ca641896c81ae4d9596b30cbf1eef17af71", funding_index=1, connect_str="023a8dfe081c6bbd0504e599f33d39d17687de63023a8b20afcb59147d9d77c19d"))
"""
channel_id = lnutil.channel_id_from_funding_tx(funding_txid, funding_index)[0]
peer = await self.add_peer(connect_str)
await peer.trigger_force_close(channel_id)
class LNBackups(Logger):
lnwatcher: Optional['LNWalletWatcher']
def __init__(self, wallet: 'Abstract_Wallet'):
Logger.__init__(self)
self.features = LNWALLET_FEATURES
self.lock = threading.RLock()
self.wallet = wallet
self.db = wallet.db
self.lnwatcher = None
self.channel_backups = {}
for channel_id, cb in random_shuffled_copy(self.db.get_dict("channel_backups").items()):
self.channel_backups[bfh(channel_id)] = ChannelBackup(cb, sweep_address=self.sweep_address, lnworker=self)
@property
def sweep_address(self) -> str:
# TODO possible address-reuse
return self.wallet.get_new_sweep_address_for_channel()
def channel_state_changed(self, chan):
util.trigger_callback('channel', self.wallet, chan)
def peer_closed(self, chan):
pass
async def on_channel_update(self, chan):
util.trigger_callback('channel', self.wallet, chan)
def channel_by_txo(self, txo):
with self.lock:
channel_backups = list(self.channel_backups.values())
for chan in channel_backups:
if chan.funding_outpoint.to_str() == txo:
return chan
def on_peer_successfully_established(self, peer: Peer) -> None:
pass
def channels_for_peer(self, node_id):
return {}
def start_network(self, network: 'Network'):
assert network
self.lnwatcher = LNWalletWatcher(self, network)
self.lnwatcher.start_network(network)
self.network = network
for cb in self.channel_backups.values():
self.lnwatcher.add_channel(cb.funding_outpoint.to_str(), cb.get_funding_address())
def stop(self):
self.lnwatcher.stop()
self.lnwatcher = None
def import_channel_backup(self, data):
assert data.startswith('channel_backup:')
encrypted = data[15:]
xpub = self.wallet.get_fingerprint()
decrypted = pw_decode_with_version_and_mac(encrypted, xpub)
cb_storage = ChannelBackupStorage.from_bytes(decrypted)
channel_id = cb_storage.channel_id().hex()
if channel_id in self.db.get_dict("channels"):
raise Exception('Channel already in wallet')
d = self.db.get_dict("channel_backups")
d[channel_id] = cb_storage
self.channel_backups[bfh(channel_id)] = cb = ChannelBackup(cb_storage, sweep_address=self.sweep_address, lnworker=self)
self.wallet.save_db()
util.trigger_callback('channels_updated', self.wallet)
self.lnwatcher.add_channel(cb.funding_outpoint.to_str(), cb.get_funding_address())
def remove_channel_backup(self, channel_id):
d = self.db.get_dict("channel_backups")
if channel_id.hex() not in d:
raise Exception('Channel not found')
d.pop(channel_id.hex())
self.channel_backups.pop(channel_id)
self.wallet.save_db()
util.trigger_callback('channels_updated', self.wallet)
@log_exceptions
async def request_force_close(self, channel_id: bytes):
cb = self.channel_backups[channel_id].cb
# TODO also try network addresses from gossip db (as it might have changed)
peer_addr = LNPeerAddr(cb.host, cb.port, cb.node_id)
transport = LNTransport(cb.privkey, peer_addr,
proxy=self.network.proxy)
peer = Peer(self, cb.node_id, transport)
async with TaskGroup() as group:
await group.spawn(peer._message_loop())
await group.spawn(peer.trigger_force_close(channel_id))
# TODO force-exit taskgroup, to clean-up
| 47.871734 | 263 | 0.647899 |
45c5562de980ab3003127899366b50b823c848bf | 20,103 | py | Python | tf2/objective.py | ilia10000/simclr | f0312afbedc7b9890236d70640e4e0a880f417ec | ["Apache-2.0"] | null | null | null | tf2/objective.py | ilia10000/simclr | f0312afbedc7b9890236d70640e4e0a880f417ec | ["Apache-2.0"] | null | null | null | tf2/objective.py | ilia10000/simclr | f0312afbedc7b9890236d70640e4e0a880f417ec | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# Copyright 2020 The SimCLR Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contrastive loss functions."""
from absl import flags
import tensorflow.compat.v2 as tf
# from keras.applications.imagenet_utils import decode_predictions
import numpy as np
import tensorflow
from tensorflow.compat.v2.keras.losses import KLDivergence
FLAGS = flags.FLAGS
LARGE_NUM = 1e9
def add_supervised_loss(labels, logits):
"""Compute mean supervised loss over local batch."""
losses = tf.keras.losses.CategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(labels,
logits)
return tf.reduce_mean(losses)
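# Example (illustrative): with one-hot `labels` and raw `logits`, both of shape
# (bsz, num_classes), e.g.
#   labels = tf.one_hot([0, 2], 3)
#   logits = tf.constant([[2.0, 0.1, 0.3], [0.2, 0.1, 1.5]])
#   loss = add_supervised_loss(labels, logits)
# the result is the mean softmax cross-entropy over the local batch.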
def add_contrastive_loss(hidden,
hidden_norm=True,
temperature=1.0,
strategy=None):
"""Compute loss for model.
Args:
hidden: hidden vector (`Tensor`) of shape (bsz, dim).
hidden_norm: whether or not to use normalization on the hidden vector.
temperature: a `floating` number for temperature scaling.
strategy: context information for tpu.
Returns:
A loss scalar.
The logits for contrastive prediction task.
The labels for contrastive prediction task.
"""
# Get (normalized) hidden1 and hidden2.
if hidden_norm:
hidden = tf.math.l2_normalize(hidden, -1)
hidden1, hidden2 = tf.split(hidden, 2, 0)
batch_size = tf.shape(hidden1)[0]
# Gather hidden1/hidden2 across replicas and create local labels.
if strategy is not None:
hidden1_large = tpu_cross_replica_concat(hidden1, strategy)
hidden2_large = tpu_cross_replica_concat(hidden2, strategy)
enlarged_batch_size = tf.shape(hidden1_large)[0]
# TODO(iamtingchen): more elegant way to convert u32 to s32 for replica_id.
replica_context = tf.distribute.get_replica_context()
replica_id = tf.cast(
tf.cast(replica_context.replica_id_in_sync_group, tf.uint32), tf.int32)
labels_idx = tf.range(batch_size) + replica_id * batch_size
labels = tf.one_hot(labels_idx, enlarged_batch_size * 2)
masks = tf.one_hot(labels_idx, enlarged_batch_size)
else:
hidden1_large = hidden1
hidden2_large = hidden2
labels = tf.one_hot(tf.range(batch_size), batch_size * 2)
masks = tf.one_hot(tf.range(batch_size), batch_size)
# labels = tf.one_hot(tf.range(batch_size), batch_size)
labels=labels*0.9 +(1-0.9)*(1-labels)/labels.shape[-1] #Label smoothing
# labels=tf.concat([labels, labels-tf.linalg.diag(tf.linalg.diag_part(labels))],1)
logits_aa = tf.matmul(hidden1, hidden1_large, transpose_b=True) / temperature
#tf.print(logits_aa)
logits_aa = logits_aa - masks * LARGE_NUM
logits_bb = tf.matmul(hidden2, hidden2_large, transpose_b=True) / temperature
logits_bb = logits_bb - masks * LARGE_NUM
logits_ab = tf.matmul(hidden1, hidden2_large, transpose_b=True) / temperature
logits_ba = tf.matmul(hidden2, hidden1_large, transpose_b=True) / temperature
if True:
loss_fn = tf.nn.softmax_cross_entropy_with_logits
loss_a = loss_fn(
labels, tf.concat([logits_ab, logits_aa], 1))
loss_b = loss_fn(
labels, tf.concat([logits_ba, logits_bb], 1))
else:
loss_fn = KLDivergence(tf.keras.losses.Reduction.NONE)
loss_a = loss_fn(
labels, tf.concat([tf.nn.softmax(logits_ab), tf.nn.softmax(logits_aa)], 1))
loss_b = loss_fn(
labels, tf.concat([tf.nn.softmax(logits_ba), tf.nn.softmax(logits_bb)], 1))
loss = tf.reduce_mean(loss_a + loss_b)
return loss, logits_ab, labels
#TODO: precompute sims at start of run; also use tensor operations instead of scalar ones
def names2sims(names, embed_model, bsz, dataset='imagenet2012'):
embeds = embed_model.lookup(names)
norm_embeds = tf.nn.l2_normalize(embeds,1)
sim_mat=tf.matmul(norm_embeds, norm_embeds, transpose_b=True)
#sim_mat.set_shape([bsz,bsz])
# def get_sims_outer(x):
# def get_sims_inner(y):
# return tf.reduce_sum(tf.multiply(tf.nn.l2_normalize(ex,0),tf.nn.l2_normalize(ey,0)))
# return tf.map_fn(get_sims_inner,names, fn_output_signature=tf.float32)
# sim_mat=tf.map_fn(get_sims_outer, names,fn_output_signature=tf.float32)
return tf.math.square(sim_mat)
def ids2sims(ids, embed_model, bsz, method='sim'):
if method=='sim':
embeds = embed_model.lookup(ids)
norm_embeds = tf.nn.l2_normalize(embeds,1)
elif method=='onehot':
norm_embeds = tf.one_hot(ids,tf.cast(tf.reduce_max(ids)+1,tf.int32))
sim_mat=tf.matmul(norm_embeds, norm_embeds, transpose_b=True)
sim_mat=sim_mat/tf.reduce_sum(sim_mat,1)
sim_mat.set_shape([bsz,bsz])
# def get_sims_outer(x):
# def get_sims_inner(y):
# return tf.reduce_sum(tf.multiply(tf.nn.l2_normalize(ex,0),tf.nn.l2_normalize(ey,0)))
# return tf.map_fn(get_sims_inner,names, fn_output_signature=tf.float32)
# sim_mat=tf.map_fn(get_sims_outer, names,fn_output_signature=tf.float32)
return sim_mat
def get_names(pred):
label_dict = {0:'airplane', 1:'automobile', 2:'bird', 3:'cat', 4:'deer', 5:'dog', 6:'frog', 7:'horse', 8:'ship', 9:'truck'}
table=tf.lookup.StaticHashTable(
initializer=tf.lookup.KeyValueTensorInitializer(
tf.constant(list(label_dict.keys()), dtype=tf.int64),
tf.convert_to_tensor(list(label_dict.values()))),
default_value=tf.constant(''))
return table.lookup(tf.argmax(pred))
def get_batch_sims(labels, embed_model, bsz, dataset='imagenet2012', method="sim"):
'''
Args:
labels: vector of one-hot labels with shape (bsz, num_classes).
Returns:
Similarity matrix of shape (bsz,bsz).
'''
ids = tf.argmax(labels,1)
sims = ids2sims(ids, embed_model, bsz, method)
#Get label names
# if dataset=='imagenet2012':
# label_names = [i[0][1] for i in decode_predictions(labels, top=1)]
# elif dataset=='cifar10':
# label_names= tf.map_fn(get_names, labels, fn_output_signature=tf.string)
# sims=names2sims(label_names, embed_model, bsz, dataset)
#Load CNNB similarity dict
#sims = tf.matmul(labels,labels, transpose_b=True)#
#sims = tf.convert_to_tensor(sims)
return sims
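# Example (illustrative): for labels one_hot([1, 2, 1], 3) and method='onehot',
# ids2sims builds a matrix with 1s where two samples share a class and then
# normalizes it, so the returned (bsz, bsz) matrix acts as a soft target
# distribution over the batch (embed_model is unused for this method):
#   labels = tf.one_hot([1, 2, 1], 3)
#   sims = get_batch_sims(labels, embed_model=None, bsz=3, method='onehot')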
def add_CNNB_loss(true_labels,
hidden,
embed_model,
bsz=512,
dataset='imagenet2012',
hidden_norm=True,
temperature=1.0,
strategy=None,
loss_type='softmax-ce'):
"""Compute loss for model.
Args:
true_labels: vector of labels.
hidden: hidden vector (`Tensor`) of shape (bsz, dim).
hidden_norm: whether or not to use normalization on the hidden vector.
temperature: a `floating` number for temperature scaling.
strategy: context information for tpu.
Returns:
A loss scalar.
The logits for contrastive prediction task.
The labels for contrastive prediction task.
"""
# Get (normalized) hidden1 and hidden2.
if hidden_norm:
hidden = tf.math.l2_normalize(hidden, -1)
hidden1, hidden2 = tf.split(hidden, 2, 0)
batch_size = tf.shape(hidden1)[0]
# Gather hidden1/hidden2 across replicas and create local labels.
if strategy is not None:
hidden1_large = tpu_cross_replica_concat(hidden1, strategy)
hidden2_large = tpu_cross_replica_concat(hidden2, strategy)
enlarged_batch_size = tf.shape(hidden1_large)[0]
# TODO(iamtingchen): more elegant way to convert u32 to s32 for replica_id.
replica_context = tf.distribute.get_replica_context()
reps = strategy.num_replicas_in_sync
sims=get_batch_sims(true_labels, embed_model, bsz//reps, dataset)
#sims.set_shape([512//reps, 512//reps])
replica_id = tf.cast(
tf.cast(replica_context.replica_id_in_sync_group, tf.uint32), tf.int32)
labels_idx = tf.range(batch_size) + replica_id * batch_size
labels1=tf.concat([sims if i==replica_id else tf.zeros(sims.shape) for i in range(reps)],1)
labels2=tf.concat([sims-tf.linalg.diag(tf.linalg.diag_part(sims)) if i==replica_id else tf.zeros(sims.shape) for i in range(reps)],1)
labels=tf.concat([labels1,labels2],1)
masks = tf.one_hot(labels_idx, enlarged_batch_size)
else:
#sims.set_shape([batch_size, batch_size])
sims=get_batch_sims(true_labels, embed_model, bsz, dataset)
hidden1_large = hidden1
hidden2_large = hidden2
labels=tf.concat([sims,sims-tf.linalg.diag(tf.linalg.diag_part(sims))],1)
masks = tf.one_hot(tf.range(batch_size), batch_size)
#Calculate similarity between hidden representations from aug1 and from aug1
logits_aa = tf.matmul(hidden1, hidden1_large, transpose_b=True) / temperature
# tf.print(true_labels)
# tf.print(logits_aa)
#Calculate similarity between hidden representations from aug2 and from aug2
logits_bb = tf.matmul(hidden2, hidden2_large, transpose_b=True) / temperature
if loss_type not in ['fro']:
#Mask out entries corresponding to diagonal (self-similarity) so they are 0 once softmaxed
logits_aa = logits_aa - masks * LARGE_NUM
#Mask out entries corresponding to diagonal (self-similarity) so they are 0 once softmaxed
logits_bb = logits_bb - masks * LARGE_NUM
else:
logits_aa = logits_aa - masks * logits_aa
logits_bb = logits_bb - masks * logits_bb
#Calculate similarity between hidden representations from aug1 and from aug2
logits_ab = tf.matmul(hidden1, hidden2_large, transpose_b=True) / temperature
#Calculate similarity between hidden representations from aug2 and from aug1
#-> identical to above case if using single GPU
logits_ba = tf.matmul(hidden2, hidden1_large, transpose_b=True) / temperature
#Calculate loss for aug1 samples by taking softmax over logits and then applying cross_entropy
if loss_type=='ce':
loss_fn = tf.nn.softmax_cross_entropy_with_logits
loss_a = loss_fn(
#The identity part of labels (left-side) compares against sim(aug1,aug2);
#Zeros (right-side) compare against masked sim(aug1,aug1)
labels,
#Horizontally concatenate sim(aug1, aug2) with sim(aug1,aug1)
tf.concat([logits_ab, logits_aa], 1))
#Take symmetrical loss for aug2 samples
loss_b = loss_fn(
labels, tf.concat([logits_ba, logits_bb], 1))
elif loss_type=='softmax-ce':
loss_fn = tf.nn.softmax_cross_entropy_with_logits
loss_a = loss_fn(
#The identity part of labels (left-side) compares against sim(aug1,aug2);
#Zeros (right-side) compare against masked sim(aug1,aug1)
tf.nn.softmax(labels),
#Horizontally concatenate sim(aug1, aug2) with sim(aug1,aug1)
tf.concat([logits_ab, logits_aa], 1))
#Take symmetrical loss for aug2 samples
loss_b = loss_fn(
labels, tf.concat([logits_ba, logits_bb], 1))
elif loss_type=='kl': # Consider softmaxing labels here
loss_fn = KLDivergence(tf.keras.losses.Reduction.NONE)
loss_a = loss_fn(
labels, tf.concat([tf.nn.softmax(logits_ab), tf.nn.softmax(logits_aa)], 1))
loss_b = loss_fn(
labels, tf.concat([tf.nn.softmax(logits_ba), tf.nn.softmax(logits_bb)], 1))
elif loss_type=='klsoft':
loss_fn = KLDivergence(tf.keras.losses.Reduction.NONE)
loss_a = loss_fn(
tf.nn.softmax(labels), tf.nn.softmax(tf.concat([logits_ab, logits_aa], 1)))
loss_b = loss_fn(
tf.nn.softmax(labels), tf.nn.softmax(tf.concat([logits_ba, logits_bb], 1)))
elif loss_type=='fro': #Consider softmaxing labels here
loss_fn=tf.norm
loss_a = loss_fn(
labels-tf.concat([logits_ab, logits_aa], 1), ord='fro', axis=(0,1))
loss_b = loss_fn(
labels - tf.concat([logits_ba, logits_bb], 1), ord='fro', axis=(0,1))
loss = tf.reduce_mean(loss_a + loss_b)
return loss, logits_ab, labels
def add_CNNB_loss_v2(true_labels,
hidden,
embed_model,
bsz=512,
dataset='imagenet2012',
hidden_norm=True,
temperature=1.0,
strategy=None,
loss_type='ce',
clip_min=0,
method='onehot'):
"""Compute loss for model.
Args:
true_labels: vector of labels.
hidden: hidden vector (`Tensor`) of shape (bsz, dim).
hidden_norm: whether or not to use normalization on the hidden vector.
temperature: a `floating` number for temperature scaling.
strategy: context information for tpu.
Returns:
A loss scalar.
The logits for contrastive prediction task.
The labels for contrastive prediction task.
"""
# Get (normalized) hidden1 and hidden2.
if hidden_norm:
hidden = tf.math.l2_normalize(hidden, -1)
hidden1, hidden2 = tf.split(hidden, 2, 0)
batch_size = tf.shape(hidden1)[0]
# Gather hidden1/hidden2 across replicas and create local labels.
if strategy is not None:
hidden1_large = tpu_cross_replica_concat(hidden1, strategy)
hidden2_large = tpu_cross_replica_concat(hidden2, strategy)
enlarged_batch_size = tf.shape(hidden1_large)[0]
# TODO(iamtingchen): more elegant way to convert u32 to s32 for replica_id.
replica_context = tf.distribute.get_replica_context()
reps = strategy.num_replicas_in_sync
sims=get_batch_sims(true_labels, embed_model, bsz//reps, dataset, method)
sims=tf.cast(sims > clip_min, sims.dtype) * sims
#sims.set_shape([512//reps, 512//reps])
replica_id = tf.cast(
tf.cast(replica_context.replica_id_in_sync_group, tf.uint32), tf.int32)
labels_idx = tf.range(batch_size) + replica_id * batch_size
labels1=tf.concat([sims if i==replica_id else tf.zeros(sims.shape) for i in range(reps)],1)
labels2=tf.concat([sims-tf.linalg.diag(tf.linalg.diag_part(sims)) if i==replica_id else tf.zeros(sims.shape) for i in range(reps)],1)
labels=tf.concat([labels1,labels2],1)
masks = tf.one_hot(labels_idx, enlarged_batch_size)
else:
#sims.set_shape([batch_size, batch_size])
sims=get_batch_sims(true_labels, embed_model, bsz, dataset, method)
sims=tf.cast(sims > clip_min, sims.dtype) * sims
hidden1_large = hidden1
hidden2_large = hidden2
labels=tf.concat([sims,sims-tf.linalg.diag(tf.linalg.diag_part(sims))],1)
masks = tf.one_hot(tf.range(batch_size), batch_size)
slabels=tf.split(labels, 2, axis=1)
#Calculate similarity between hidden representations from aug1 and from aug1
logits_aa = tf.matmul(hidden1, hidden1_large, transpose_b=True) / temperature
# tf.print(true_labels)
# tf.print(logits_aa)
#Calculate similarity between hidden representations from aug2 and from aug2
logits_bb = tf.matmul(hidden2, hidden2_large, transpose_b=True) / temperature
if loss_type not in ['fro']:
#Mask out entries corresponding to diagonal (self-similarity) so they are 0 once softmaxed
logits_aa = logits_aa - masks * LARGE_NUM
#Mask out entries corresponding to diagonal (self-similarity) so they are 0 once softmaxed
logits_bb = logits_bb - masks * LARGE_NUM
else:
logits_aa = logits_aa - masks * logits_aa
logits_bb = logits_bb - masks * logits_bb
#Calculate similarity between hidden representations from aug1 and from aug2
logits_ab = tf.matmul(hidden1, hidden2_large, transpose_b=True) / temperature
#Calculate similarity between hidden representations from aug2 and from aug1
#-> identical to above case if using single GPU
logits_ba = tf.matmul(hidden2, hidden1_large, transpose_b=True) / temperature
#Calculate loss for aug1 samples by taking softmax over logits and then applying cross_entropy
# tf.print(slabels[0].shape)
# tf.print(slabels[1].shape)
# tf.print(logits_ab.shape)
# tf.print(logits_aa.shape)
if loss_type=='ce':
loss_fn = tf.nn.softmax_cross_entropy_with_logits
loss_a = tf.reduce_mean(loss_fn(slabels[0],logits_ab)+loss_fn(slabels[1]-masks*slabels[1],logits_aa))
loss_b = tf.reduce_mean(loss_fn(slabels[0],logits_ba)+loss_fn(slabels[1]-masks*slabels[1],logits_bb))
elif loss_type=='softmax-ce':
loss_fn = tf.nn.softmax_cross_entropy_with_logits
slabels[0]=tf.nn.softmax(slabels[0]/temperature)
slabels[1]=tf.nn.softmax((slabels[1]/temperature)-masks*LARGE_NUM)
loss_a = tf.reduce_mean(loss_fn(slabels[0],logits_ab)+loss_fn(slabels[1],logits_aa))
loss_b = tf.reduce_mean(loss_fn(slabels[0],logits_ba)+loss_fn(slabels[1],logits_bb))
elif loss_type=='kl': # Consider softmaxing labels here
loss_fn = KLDivergence(tf.keras.losses.Reduction.NONE)
loss_a = tf.reduce_mean(loss_fn(slabels[0],tf.nn.softmax(logits_ab))+loss_fn(slabels[1]-masks*slabels[1],tf.nn.softmax(logits_aa)))
loss_b = tf.reduce_mean(loss_fn(slabels[0],tf.nn.softmax(logits_ba))+loss_fn(slabels[1]-masks*slabels[1],tf.nn.softmax(logits_bb)))
elif loss_type=='klsoft':
loss_fn = KLDivergence(tf.keras.losses.Reduction.NONE)
slabels[0]=tf.nn.softmax(slabels[0]/temperature)
slabels[1]=tf.nn.softmax((slabels[1]/temperature)-masks*LARGE_NUM)
loss_a = tf.reduce_mean(loss_fn(slabels[0],tf.nn.softmax(logits_ab))+loss_fn(slabels[1],tf.nn.softmax(logits_aa)))
loss_b = tf.reduce_mean(loss_fn(slabels[0],tf.nn.softmax(logits_ba))+loss_fn(slabels[1],tf.nn.softmax(logits_bb)))
elif loss_type=='fro': #Consider softmaxing labels here
loss_fn=tf.norm
loss_a = tf.reduce_mean(loss_fn(slabels[0]-logits_ab, ord='fro', axis=(0,1))+loss_fn(slabels[1]-logits_aa, ord='fro', axis=(0,1)))
loss_b = tf.reduce_mean(loss_fn(slabels[0]-logits_ba, ord='fro', axis=(0,1))+loss_fn(slabels[1]-logits_bb, ord='fro', axis=(0,1)))
loss = tf.reduce_mean(loss_a + loss_b)
return loss, logits_ab, labels
def tpu_cross_replica_concat(tensor, strategy=None):
"""Reduce a concatenation of the `tensor` across TPU cores.
Args:
tensor: tensor to concatenate.
strategy: A `tf.distribute.Strategy`. If not set, CPU execution is assumed.
Returns:
Tensor of the same rank as `tensor` with first dimension `num_replicas`
times larger.
"""
if strategy is None or strategy.num_replicas_in_sync <= 1:
return tensor
num_replicas = strategy.num_replicas_in_sync
replica_context = tf.distribute.get_replica_context()
with tf.name_scope('tpu_cross_replica_concat'):
# This creates a tensor that is like the input tensor but has an added
# replica dimension as the outermost dimension. On each replica it will
# contain the local values and zeros for all other values that need to be
# fetched from other replicas.
ext_tensor = tf.scatter_nd(
indices=[[replica_context.replica_id_in_sync_group]],
updates=[tensor],
shape=tf.concat([[num_replicas], tf.shape(tensor)], axis=0))
# As every value is only present on one replica and 0 in all others, adding
# them all together will result in the full tensor on all replicas.
ext_tensor = replica_context.all_reduce(tf.distribute.ReduceOp.SUM,
ext_tensor)
# Flatten the replica dimension.
# The first dimension size will be: tensor.shape[0] * num_replicas
# Using [-1] trick to support also scalar input.
return tf.reshape(ext_tensor, [-1] + ext_tensor.shape.as_list()[2:])
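# --- Editor's addition: hedged usage sketch, not part of the original file ---
# How this helper is typically combined with the loss above: each replica
# gathers every replica's projections so similarities are computed against the
# full global batch. The variable names mirror hidden1_large / hidden2_large in
# the loss code; with strategy=None the tensors are returned unchanged.
def _sketch_cross_replica_gather(hidden1, hidden2, strategy=None):
    hidden1_large = tpu_cross_replica_concat(hidden1, strategy)
    hidden2_large = tpu_cross_replica_concat(hidden2, strategy)
    return hidden1_large, hidden2_large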
| 45.792711 | 137 | 0.692136 |
cdd6dd31946db9a1968bfdcbf89369182cba8c09 | 358 | py | Python | plenum/test/ledger/test_ledger_reset_uncommitted.py | andkononykhin/plenum | 28dc1719f4b7e80d31dafbadb38cfec4da949886 | [
"Apache-2.0"
] | 148 | 2017-07-11T19:05:25.000Z | 2022-03-16T21:31:20.000Z | plenum/test/ledger/test_ledger_reset_uncommitted.py | andkononykhin/plenum | 28dc1719f4b7e80d31dafbadb38cfec4da949886 | [
"Apache-2.0"
] | 561 | 2017-06-29T17:59:56.000Z | 2022-03-09T15:47:14.000Z | plenum/test/ledger/test_ledger_reset_uncommitted.py | andkononykhin/plenum | 28dc1719f4b7e80d31dafbadb38cfec4da949886 | [
"Apache-2.0"
] | 378 | 2017-06-29T17:45:27.000Z | 2022-03-26T07:27:59.000Z | def test_reset_uncommitted(ledger_with_batches_appended, inital_size, inital_root_hash):
ledger = ledger_with_batches_appended
ledger.reset_uncommitted()
assert ledger.uncommitted_size == inital_size
assert ledger.uncommittedRootHash is None
assert ledger.uncommitted_root_hash == inital_root_hash
assert ledger.uncommittedTxns == []
| 44.75 | 88 | 0.807263 |
0149a53c2435331334ccf9cac5e7cf1248089003 | 76 | py | Python | plugins/uniq/komand_uniq/actions/uniq_string_array/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/uniq/komand_uniq/actions/uniq_string_array/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/uniq/komand_uniq/actions/uniq_string_array/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import UniqStringArray
| 25.333333 | 39 | 0.789474 |
f2f929df2b215d6276b6772359961014d5534a8e | 189 | py | Python | project/warehouse/filters.py | hlystovea/resource_planner | 5ddbef31004a7e50201e1d414152a8090d1b0caf | [
"MIT"
] | null | null | null | project/warehouse/filters.py | hlystovea/resource_planner | 5ddbef31004a7e50201e1d414152a8090d1b0caf | [
"MIT"
] | null | null | null | project/warehouse/filters.py | hlystovea/resource_planner | 5ddbef31004a7e50201e1d414152a8090d1b0caf | [
"MIT"
] | null | null | null | import django_filters
from warehouse.models import Material
class MaterialFilter(django_filters.FilterSet):
class Meta:
model = Material
fields = ['amount__storage']
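# --- Editor's addition: hedged usage sketch, not part of the original file ---
# Typical django-filter usage in a view: bind the query string to the FilterSet
# and read the filtered queryset from `.qs`. The `request` object is assumed to
# come from a normal Django view, so this is illustrative only.
def filtered_materials(request):
    material_filter = MaterialFilter(request.GET, queryset=Material.objects.all())
    return material_filter.qs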
| 18.9 | 47 | 0.730159 |
3071a4c39c4a5919e181090a16bfbabb7166c556 | 4,962 | py | Python | services/ASG.py | asurion/Hibernate | b95c68ba8dba6a43baea288ade231944d1719988 | [
"Apache-2.0"
] | 9 | 2017-06-06T17:47:57.000Z | 2021-08-06T18:30:11.000Z | services/ASG.py | asurion/Hibernate | b95c68ba8dba6a43baea288ade231944d1719988 | [
"Apache-2.0"
] | null | null | null | services/ASG.py | asurion/Hibernate | b95c68ba8dba6a43baea288ade231944d1719988 | [
"Apache-2.0"
] | null | null | null | from asyncProducerUtil.utils.connect import Connect
class ASG(Connect):
api_name = 'autoscaling'
def __init__(self, account, region, asg_name):
self.asg_name = asg_name
Connect.__init__(self, account, region)
self.client = Connect.client_connect(self, self.api_name)
self.ec2_client = Connect.client_connect(self, 'ec2')
self.ec2_resource = Connect.resource_connect(self, 'ec2')
print "[ASG-LOG] {}".format(self.asg_name)
self.describeOnce = self.client.describe_auto_scaling_groups(AutoScalingGroupNames=[self.asg_name])['AutoScalingGroups'][0]
lc = self.describeOnce['LaunchConfigurationName']
self.describe_lc = self.client.describe_launch_configurations(LaunchConfigurationNames=[lc])['LaunchConfigurations'][0]
def get_instances(self):
return self.describeOnce['Instances']
def get_config_tag(self):
response = self.client.describe_tags(
Filters=[
{
'Name': 'auto-scaling-group',
'Values': [
self.asg_name,
]
},
]
)
config_tags = next(
(item for item in response['Tags'] if item["Key"] == "scheduler:asg-previous:min,max,desired"), None)
if config_tags:
cf = config_tags['Value']
tmp = cf.split(';')
cf = int(tmp[0]), int(tmp[1]), int(tmp[2])
else:
cf = None
return cf
def put_previous_config_tag(self, prev):
min = prev[0]
max = prev[1]
desired = prev[2]
self.client.create_or_update_tags(
Tags=[
{
'ResourceId': self.asg_name,
'ResourceType': 'auto-scaling-group',
'Key': 'scheduler:asg-previous:min,max,desired',
'Value': '{};{};{}'.format(min, max, desired),
'PropagateAtLaunch': True
},
]
)
def wake_auto_scaling_group(self, asg_previous_tag):
minsize = asg_previous_tag[0]
maxsize = asg_previous_tag[1]
desired = asg_previous_tag[2]
self.client.update_auto_scaling_group(
AutoScalingGroupName=self.asg_name,
MaxSize=int(maxsize),
MinSize=int(minsize),
DesiredCapacity=int(desired)
)
def sleep_auto_scaling_group(self):
self.client.update_auto_scaling_group(
AutoScalingGroupName=self.asg_name,
MinSize=0,
DesiredCapacity=0
)
@staticmethod
def discover_sleep_tags(client):
paginator = client.get_paginator('describe_tags')
response_iterator = paginator.paginate(
Filters=[
{
'Name': 'key',
'Values': [
'scheduler:sleep', 'SCHEDULER:SLEEP'
]
},
],
)
z = []
for r in response_iterator:
z.extend(r['Tags'])
good = [g for g in z if not g['Value'].lower() in ['inactive', 'alternative']]
# list of Auto Scaling Groups that have the scheduler:sleep tag
return good
@property
def arn(self):
return self.describeOnce['AutoScalingGroupARN']
@property
def id(self):
return self.describeOnce['AutoScalingGroupName']
@property
def name(self):
return self.describeOnce['AutoScalingGroupName']
@property
def tags(self):
return self.describeOnce['Tags']
@property
def minSize(self):
return self.describeOnce['MinSize']
@property
def maxSize(self):
return self.describeOnce['MaxSize']
@property
def desiredCapacity(self):
return self.describeOnce['DesiredCapacity']
@property
def state(self):
state = self.describeOnce['DesiredCapacity']
if state == 0:
state = 'stopped'
elif state >= 1:
state = 'running'
return {'Name': state}
@property
def VPCZoneIdentifier(self):
return self.describeOnce['VPCZoneIdentifier']
@property
def LaunchConfigurationName(self):
return self.describeOnce['LaunchConfigurationName']
@property
def lc(self):
return self.describe_lc
@property
def instance_type(self):
return self.describe_lc['InstanceType']
@property
def num_instances(self):
return len(self.get_instances())
@property
def tenancy(self):
t = self.describe_lc.get('PlacementTenancy')
if t is None:
t = 'default'
return t
@property
def operating_system(self):
instance_id = self.describeOnce['Instances'][0]['InstanceId']
i = self.ec2_resource.Instance(instance_id)
return i.platform
| 26.677419 | 131 | 0.568722 |
c84f1fee5976de1ba99eb152ac74fe09484116a3 | 5,317 | py | Python | application/flicket/forms/forms_main.py | juanvmarquezl/flicket | 0ed68d38b1f12e105436d8c51af163476b60a982 | [
"MIT"
] | 95 | 2017-10-31T21:25:11.000Z | 2022-03-07T05:46:12.000Z | application/flicket/forms/forms_main.py | juanvmarquezl/flicket | 0ed68d38b1f12e105436d8c51af163476b60a982 | [
"MIT"
] | 47 | 2017-05-15T10:52:23.000Z | 2021-12-13T09:30:52.000Z | application/flicket/forms/forms_main.py | juanvmarquezl/flicket | 0ed68d38b1f12e105436d8c51af163476b60a982 | [
"MIT"
] | 54 | 2017-07-13T03:38:47.000Z | 2022-02-12T20:10:02.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Flicket - copyright Paul Bourne: evereux@gmail.com
import bcrypt
from flask import g
from flask_wtf import FlaskForm
from flask_babel import lazy_gettext
from wtforms import (PasswordField,
StringField,
FileField,
SelectField)
from wtforms.validators import (DataRequired,
Length,
EqualTo)
from application import app
from application.flicket.models.flicket_user import (FlicketUser,
user_field_size)
from application.flicket.scripts.functions_login import check_email_format
def does_username_exist(form, field):
"""
    Username must be unique, so we check against the database to ensure it doesn't already exist.
:param form:
:param field:
:return True / False:
"""
result = FlicketUser.query.filter_by(username=form.username.data).count()
if result > 0:
field.errors.append('A user with this username has already registered.')
return False
return True
def check_password_formatting(form, field):
"""
Check formatting of password.
:param form:
:param field:
:return True / False:
"""
ok = True
    min_length = 6
    if len(field.data) < min_length:
        field.errors.append('Password must be at least {} characters long.'.format(min_length))
ok = False
    if not any(s.isupper() for s in field.data) or not any(s.islower() for s in field.data):
field.errors.append('Password must contain upper and lower characters.')
ok = False
return ok
def check_password(form, field):
"""
    Check that the entered password matches the current user's stored password hash.
:param form:
:param field:
:return True / False:
"""
ok = True
result = FlicketUser.query.filter_by(username=g.user.username).first()
if bcrypt.hashpw(form.password.data.encode('utf-8'), result.password) != result.password:
field.errors.append('Entered password is incorrect.')
return False
return ok
def check_email(form, field):
ok = True
if not check_email_format(field.data):
field.errors.append('Please enter a valid email address.')
ok = False
result = FlicketUser.query.filter_by(email=form.email.data).count()
if result > 0:
field.errors.append('A user with this email address has already registered.')
ok = False
return ok
def change_email(form, field):
"""
Ensure the form email matches the users email.
:param form:
:param field:
:return:
"""
if form.email.data == g.user.email:
return True
else:
return False
class CheckPasswordCorrect:
"""
Check that the entered password matches that in the database.
"""
def __call__(self, form, field):
self.username = form.username.data
self.password = form.password.data
self.password = self.password.encode('utf-8')
ok = True
user = FlicketUser.query.filter_by(username=form.username.data).first()
# hashed = user.password
if user and not bcrypt.hashpw(self.password, user.password) == user.password:
field.errors.append('Your username and password do not match those in the database.')
ok = False
return ok
class EditUserForm(FlaskForm):
def __init__(self, *args, **kwargs):
        super(EditUserForm, self).__init__(*args, **kwargs)
self.locale.choices = [(_id, lang) for _id, lang in app.config['SUPPORTED_LANGUAGES'].items()]
username = StringField(lazy_gettext('username'))
name = StringField(lazy_gettext('name'),
validators=[Length(min=user_field_size['name_min'], max=user_field_size['name_max'])])
email = StringField(lazy_gettext('email'),
validators=[Length(min=user_field_size['email_min'], max=user_field_size['email_max']),
change_email])
avatar = FileField(lazy_gettext('avatar'))
password = PasswordField(lazy_gettext('password'),
validators=[DataRequired(),
CheckPasswordCorrect(),
Length(min=user_field_size['password_min'],
max=user_field_size['password_max'])])
new_password = PasswordField(lazy_gettext('new_password'),
validators=[EqualTo('confirm',
message='Passwords must match'),
])
confirm = PasswordField(lazy_gettext('Repeat Password'))
job_title = StringField(lazy_gettext('job_title'), validators=[Length(max=user_field_size['job_title'])])
locale = SelectField(lazy_gettext('Locale'), validators=[DataRequired()], )
class ConfirmPassword(FlaskForm):
password = PasswordField(lazy_gettext('password'),
validators=[DataRequired(),
check_password
])
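# --- Editor's addition: hedged self-check sketch, not part of the original file ---
# A minimal illustration of the password-format rule above, using a stand-in
# field object so no WTForms/Flask context is needed. `_FakeField` is an
# editor-invented helper, not part of the application.
class _FakeField:
    def __init__(self, data):
        self.data = data
        self.errors = []


def _sketch_password_format_check():
    too_short = _FakeField('Ab1')
    assert check_password_formatting(None, too_short) is False   # shorter than the minimum
    mixed_case = _FakeField('Secret9')
    assert check_password_formatting(None, mixed_case) is True   # long enough, mixed case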
| 34.980263 | 112 | 0.583788 |
27cb54cc900560cadcba77e17ee593b5f0749a37 | 1,157 | py | Python | result/migrations/tab/0056_auto_20191019_0451.py | Uqhs-1/uqhs | 1c7199d8c23a9d9eb3f75b1e36633a145fd2cd40 | [
"MIT"
] | 3 | 2020-06-16T20:03:31.000Z | 2021-01-17T20:45:51.000Z | result/migrations/tab/0056_auto_20191019_0451.py | Uqhs-1/uqhs | 1c7199d8c23a9d9eb3f75b1e36633a145fd2cd40 | [
"MIT"
] | 8 | 2020-02-08T09:04:08.000Z | 2021-06-09T18:31:03.000Z | result/migrations/tab/0056_auto_20191019_0451.py | Uqhs-1/uqhs | 1c7199d8c23a9d9eb3f75b1e36633a145fd2cd40 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.3 on 2019-10-18 15:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('result', '0055_auto_20191019_0449'),
]
operations = [
migrations.AlterField(
model_name='tutor_home',
name='first_term',
field=models.ForeignKey(blank=True, default='0', help_text='Not editable', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='first', to='result.BTUTOR'),
),
migrations.AlterField(
model_name='tutor_home',
name='second_term',
field=models.ForeignKey(blank=True, default='0', help_text='Not editable', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='second', to='result.BTUTOR'),
),
migrations.AlterField(
model_name='tutor_home',
name='third_term',
field=models.ForeignKey(blank=True, default='0', help_text='Not editable', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='third', to='result.BTUTOR'),
),
]
| 38.566667 | 187 | 0.649092 |
0a0ea27d4bd43980b67cc30f51ff71c53153e091 | 3,310 | py | Python | ETL/ETL_mapper.py | nerokapa/RBDA_Project | aa99be3df326d2eff5dd9f495470cd24accf67f8 | [
"Apache-2.0"
] | 1 | 2018-12-11T22:31:15.000Z | 2018-12-11T22:31:15.000Z | ETL/ETL_mapper.py | nerokapa/RBDA_Project | aa99be3df326d2eff5dd9f495470cd24accf67f8 | [
"Apache-2.0"
] | null | null | null | ETL/ETL_mapper.py | nerokapa/RBDA_Project | aa99be3df326d2eff5dd9f495470cd24accf67f8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import csv,json,sys
MOVIE_DATASET = "/Users/huodahaha/Documents/BigData/hw9/tmdb_5000_movies.csv"
TEST_DATASET = "/Users/huodahaha/Documents/BigData/hw9/head.csv"
COL_CNT = 20
PROCESS_OK = 0
BAD_COL_CNT = 100
PARSE_FAILURE = 200
DAMAGED_RECORD = 300
recorded_genres = {'Mystery': 14, 'Romance': 8, 'History': 15, 'Family': 6, 'Fantasy': 10, 'Horror': 16, 'Crime': 0, 'Drama': 7, 'Science Fiction': 4, 'Animation': 5, 'Music': 9, 'Adventure': 2, 'Foreign': 18, 'Action': 3, 'Comedy': 1, 'Documentary': 17, 'War': 12, 'Thriller': 11, 'Western': 13}
recorded_langs = {'en': 0, 'zh': 3, 'cn': 17, 'af': 9, 'vi': 20, 'is': 25, 'it': 6, 'xx': 22, 'id': 23, 'es': 2, 'ru': 12, 'nl': 16, 'pt': 7, 'no': 18, 'nb': 21, 'th': 15, 'ro': 11, 'pl': 24, 'fr': 5, 'de': 1, 'da': 10, 'fa': 19, 'hi': 13, 'ja': 4, 'he': 14, 'te': 26, 'ko': 8}
def line_data(filename, skip_first = True):
f = open(filename)
if skip_first:
s = f.readline();
while True:
s = f.readline()
if len(s) == 0:
break;
else:
yield s
def data_check(dic):
# check budget
check_errors = []
ages = 0
if dic["budget"] == 0:
check_errors.append("INVALID BUDGET")
# check genre
if len(dic["genre"]) == 0:
check_errors.append("INVALID GENRES")
# check revenue
if dic["revenue"] == 0:
check_errors.append("INVALID REVENUE")
return check_errors
def ETL_process(line):
ret = PROCESS_OK
errors = []
check_errors = []
dic = {}
output = ""
# parse a single line
reader = csv.reader([line])
    parsed_line = next(reader)  # the next() built-in works on both Python 2 and 3
if len(parsed_line) != COL_CNT:
ret = BAD_COL_CNT
errors.append("DATA_COL_FAIL")
else:
# store the parsed result
try:
budget = int(parsed_line[0])
title = parsed_line[17]
genre_json = json.loads(parsed_line[1])
genres = [0]* len(recorded_genres)
for genre in genre_json:
genre_id = recorded_genres[genre['name']]
genres[genre_id] = 1
lang = parsed_line[5]
lang_vec = [0] * len(recorded_langs)
lang_id = recorded_langs[lang]
lang_vec[lang_id] = 1
movie_id = int(parsed_line[3])
revenue = int(parsed_line[12])
released_time = parsed_line[11]
year = int(released_time.split("-")[0])
dic = {"title": title,\
"genre": genres,\
"budget": budget,\
"revenue": revenue,\
"lang": lang_vec,\
"year": year}
check_errors = data_check(dic)
except Exception as e:
ret = PARSE_FAILURE
errors.append("DATA_FORMAT_FAIL")
else:
errors += check_errors
if len(errors) != 0:
ret = DAMAGED_RECORD
# generate return data
if ret == PROCESS_OK:
output = "%08d\t%s"%(movie_id, json.dumps(dic))
return output.strip()
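# --- Editor's addition: hedged self-check sketch, not part of the original file ---
# ETL_process expects a 20-column TMDB csv row; feeding it a deliberately short
# row exercises the BAD_COL_CNT / DAMAGED_RECORD path without fabricating real
# movie data. Damaged rows produce an empty output string.
def _sketch_damaged_row():
    assert ETL_process("only,three,columns") == ""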
for line in sys.stdin:
try:
output = ETL_process(line)
except Exception as e:
pass
else:
sys.stdout.write(output)
if len(output):
sys.stdout.write("\n")
| 31.52381 | 296 | 0.536556 |
c1a2eea4cc70fccc349dbc178fdf9ab1420f715f | 733 | py | Python | Vetores/11.py | HugoLeda/EstruturaDeDados | ed3b2489b7b1421ca8f8adf580aabaa967cf4a25 | [
"MIT"
] | null | null | null | Vetores/11.py | HugoLeda/EstruturaDeDados | ed3b2489b7b1421ca8f8adf580aabaa967cf4a25 | [
"MIT"
] | null | null | null | Vetores/11.py | HugoLeda/EstruturaDeDados | ed3b2489b7b1421ca8f8adf580aabaa967cf4a25 | [
"MIT"
] | null | null | null | # Write an algorithm that reads a vector X(20) and prints it. Then print each distinct value that appears in X, saying how many times each value appears in X.
def ocorrencias(v: list):
vldistintos = []
for i in v:
if (not i in vldistintos):
vldistintos.append(i)
qtd = [0] * len(vldistintos)
for i in range(len(vldistintos)):
for j in v:
if (vldistintos[i] == j):
qtd[i] = qtd[i] + 1
return {'Elementos distintos que apareceram': vldistintos, 'Quantidade de vezes que apareceram': qtd}
def lerVetor(p: int):
res = []
for i in range(p):
n = int(input('Digite um número: '))
res.append(n)
return res
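# Editor's addition: a deterministic, illustrative example of ocorrencias();
# the interactive lerVetor() call below still drives the actual program.
def exemplo_ocorrencias():
    # For [1, 2, 2, 3, 1] the distinct values are [1, 2, 3], appearing [2, 2, 1] times.
    return ocorrencias([1, 2, 2, 3, 1])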
vetor = lerVetor(5)
res = ocorrencias(vetor)
print(res) | 25.275862 | 172 | 0.649386 |