hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
58ee81b6216af85f7d5be706eaed9b6829571a8b | 10,495 | py | Python | ble2mqtt/devices/kettle_redmond.py | e-minguez/ble2mqtt | ff729dc6b259b00fc2295bebd73265dcbb27c60d | [
"MIT"
] | null | null | null | ble2mqtt/devices/kettle_redmond.py | e-minguez/ble2mqtt | ff729dc6b259b00fc2295bebd73265dcbb27c60d | [
"MIT"
] | null | null | null | ble2mqtt/devices/kettle_redmond.py | e-minguez/ble2mqtt | ff729dc6b259b00fc2295bebd73265dcbb27c60d | [
"MIT"
] | null | null | null | import asyncio as aio
import json
import logging
import uuid
from ..protocols.redmond import (ColorTarget, Kettle200State, Mode,
RedmondKettle200Protocol, RunState)
from .base import Device
from .uuids import DEVICE_NAME
logger = logging.getLogger(__name__)
# Nordic UART service characteristics used for the kettle's BLE link:
# commands are written to TX, responses/notifications arrive on RX.
UUID_NORDIC_TX = uuid.UUID("6e400002-b5a3-f393-e0a9-e50e24dcca9e")
UUID_NORDIC_RX = uuid.UUID("6e400003-b5a3-f393-e0a9-e50e24dcca9e")
# Entity names; also used as MQTT topic suffixes for this device.
BOIL_ENTITY = 'boil'
HEAT_ENTITY = 'heat' # not implemented yet
TEMPERATURE_ENTITY = 'temperature'
LIGHT_ENTITY = 'backlight'
class RedmondKettle(RedmondKettle200Protocol, Device):
    """Redmond G200-series BLE kettle bridged to MQTT.

    Exposes a boil switch, a temperature sensor, usage statistics and the
    kettle backlight as entities. The kettle is polled once per second;
    full state is published every UPDATE_PERIOD seconds while boiling and
    STANDBY_UPDATE_PERIOD_MULTIPLIER times slower in standby.
    """
    MAC_TYPE = 'random'
    NAME = 'redmond200'
    TX_CHAR = UUID_NORDIC_TX
    RX_CHAR = UUID_NORDIC_RX
    REQUIRE_CONNECTION = True
    CONNECTION_TIMEOUT = 30
    MANUFACTURER = 'Redmond'
    UPDATE_PERIOD = 5 # seconds when boiling
    STANDBY_UPDATE_PERIOD_MULTIPLIER = 12 # 12 * 5 seconds in standby mode
    def __init__(self, mac, key=b'\xff\xff\xff\xff\xff\xff\xff\xff',
                 *args, loop, **kwargs):
        """:param key: 8-byte pairing key the kettle was bonded with."""
        super().__init__(mac, *args, loop=loop, **kwargs)
        assert isinstance(key, bytes) and len(key) == 8
        self._key = key
        self._state = None  # last Kettle200State fetched from the device
        self._color = (255, 255, 255)  # backlight RGB
        self._brightness = 255
        self._statistics = {}
        # multiplier applied to UPDATE_PERIOD; 1 while boiling/heating
        self._update_period_multiplier = self.STANDBY_UPDATE_PERIOD_MULTIPLIER
        self.initial_status_sent = False
    @property
    def entities(self):
        """Entity descriptors announced via MQTT discovery."""
        return {
            'switch': [
                {
                    'name': BOIL_ENTITY,
                    'icon': 'kettle',
                },
            ],
            'sensor': [
                {
                    'name': TEMPERATURE_ENTITY,
                    'device_class': 'temperature',
                    'unit_of_measurement': '\u00b0C',
                },
                {
                    'name': 'statistics',
                    'topic': 'statistics',
                    'icon': 'chart-bar',
                    'json': True,
                    'main_value': 'number_of_starts',
                    'unit_of_measurement': ' ',
                },
            ],
            'light': [
                {
                    'name': LIGHT_ENTITY,
                },
            ],
        }
    async def get_device_data(self):
        """Initial handshake: login, read model/version, fetch state/stats."""
        await super().protocol_start()
        await self.login(self._key)
        model = await self._read_with_timeout(DEVICE_NAME)
        if isinstance(model, (bytes, bytearray)):
            self._model = model.decode()
        else:
            # macos can't access characteristic
            self._model = 'G200S'
        version = await self.get_version()
        if version:
            self._version = f'{version[0]}.{version[1]}'
        state = await self.get_mode()
        if state:
            self._state = state
            self.update_multiplier()
        self.initial_status_sent = False
        await self.set_time()
        await self._update_statistics()
    def update_multiplier(self, state: Kettle200State = None):
        """Poll fast while boiling/heating, slow otherwise."""
        if state is None:
            state = self._state
        self._update_period_multiplier = (
            1
            if state.state == RunState.ON and
            state.mode in [Mode.BOIL, Mode.HEAT]
            else self.STANDBY_UPDATE_PERIOD_MULTIPLIER
        )
    async def _notify_state(self, publish_topic):
        """Publish temperature, statistics and backlight state to MQTT."""
        logger.info(f'[{self._mac}] send state={self._state}')
        coros = []
        state = {}
        for sensor_name, value in (
            ('temperature', self._state.temperature),
        ):
            # only publish sensors that are declared in self.entities
            if any(
                x['name'] == sensor_name
                for x in self.entities.get('sensor', [])
            ):
                state[sensor_name] = self.transform_value(value)
        if state:
            coros.append(publish_topic(
                topic='/'.join((self.unique_id, 'state')),
                value=json.dumps(state),
            ))
        # keep statistics in a separate topic
        logger.info(f'[{self._mac}] send statistics={self._statistics}')
        for sensor_name, value in (
            ('statistics', self._statistics),
        ):
            if any(
                x['name'] == sensor_name
                for x in self.entities.get('sensor', [])
            ):
                coros.append(publish_topic(
                    topic='/'.join((self.unique_id, sensor_name)),
                    value=json.dumps(value),
                ))
        lights = self.entities.get('light', [])
        for light in lights:
            if light['name'] == LIGHT_ENTITY:
                # the backlight entity is ON only when the kettle runs in
                # LIGHT mode
                light_state = {
                    'state': (
                        RunState.ON.name
                        if self._state.state == RunState.ON and
                        self._state.mode == Mode.LIGHT
                        else RunState.OFF.name
                    ),
                    'brightness': 255,
                    'color': {
                        'r': self._color[0],
                        'g': self._color[1],
                        'b': self._color[2],
                    },
                }
                coros.append(publish_topic(
                    topic='/'.join((self.unique_id, light['name'])),
                    value=json.dumps(light_state),
                ))
        if coros:
            await aio.gather(*coros)
    async def notify_run_state(self, new_state: Kettle200State, publish_topic):
        """Publish switch topics when the run state or mode changed
        (or on the very first update after (re)connect)."""
        if not self.initial_status_sent or \
                new_state.state != self._state.state or \
                new_state.mode != self._state.mode:
            state_to_str = {
                True: RunState.ON.name,
                False: RunState.OFF.name,
            }
            boil_mode = state_to_str[
                new_state.mode == Mode.BOIL and
                new_state.state == RunState.ON
            ]
            heat_mode = state_to_str[
                new_state.mode == Mode.HEAT and
                new_state.state == RunState.ON
            ]
            topics = {
                BOIL_ENTITY: boil_mode,
                HEAT_ENTITY: heat_mode,
            }
            await aio.gather(
                *[
                    publish_topic(
                        topic='/'.join((self.unique_id, topic)),
                        value=value,
                    ) for topic, value in topics.items()
                ],
                self._notify_state(publish_topic),
            )
            self.initial_status_sent = True
            self._state = new_state
            # publish again so subscribers see the just-updated state
            await self._notify_state(publish_topic)
        else:
            self._state = new_state
        self.update_multiplier()
    async def _update_statistics(self):
        # keys below become JSON fields of the 'statistics' MQTT topic
        statistics = await self.get_statistics()
        self._statistics = {
            'number_of_starts': statistics['starts'],
            'Energy spent (kWh)': round(statistics['watts_hours']/1000, 2),
            'Working time (minutes)': round(statistics['seconds_run']/60, 1),
        }
    async def handle(self, publish_topic, *args, **kwargs):
        """Main poll loop: check the mode every second and publish the
        full state/statistics every UPDATE_PERIOD * multiplier seconds."""
        counter = 0
        while True:
            # if boiling notify every 5 seconds, 60 sec otherwise
            new_state = await self.get_mode()
            await self.notify_run_state(new_state, publish_topic)
            counter += 1
            if counter > self.UPDATE_PERIOD * self._update_period_multiplier:
                await self._update_statistics()
                await self._notify_state(publish_topic)
                counter = 0
            await aio.sleep(1)
    async def _switch_mode(self, mode, value):
        """Turn the given kettle mode on or off; value is 'ON'/'OFF'."""
        if value == RunState.ON.name:
            try:
                if self._state.mode != mode:
                    await self.stop()
                await self.set_mode(Kettle200State(mode=mode))
            except ValueError:
                # if the MODE is the same then it returns
                # en error. Treat it as normal
                pass
            await self.run()
            next_state = RunState.ON
        else:
            await self.stop()
            next_state = RunState.OFF
        self.update_multiplier(Kettle200State(state=next_state))
    async def _switch_boil(self, value):
        await self._switch_mode(Mode.BOIL, value)
    async def _switch_backlight(self, value):
        await self._switch_mode(Mode.LIGHT, value)
    async def handle_messages(self, publish_topic, *args, **kwargs):
        """Consume MQTT commands (boil switch, backlight) from the queue."""
        while True:
            message = await self.message_queue.get()
            value = message['value']
            entity_name = self.get_entity_from_topic(message['topic'])
            if entity_name == BOIL_ENTITY:
                value = self.transform_value(value)
                logger.info(
                    f'[{self._mac}] switch kettle {entity_name} value={value}',
                )
                # retry until the BLE command goes through
                while True:
                    try:
                        await self._switch_boil(value)
                        # update state to real values
                        await self.get_mode()
                        await aio.gather(
                            publish_topic(
                                topic='/'.join((self.unique_id, entity_name)),
                                value=self.transform_value(value),
                            ),
                            self._notify_state(publish_topic),
                            # NOTE(review): the loop= kwarg was removed from
                            # asyncio.gather in Python 3.10 — confirm the
                            # supported Python versions.
                            loop=self._loop,
                        )
                        break
                    except ConnectionError as e:
                        logger.exception(str(e))
                        await aio.sleep(5)
            if entity_name == LIGHT_ENTITY:
                # value is a dict with optional state/color/brightness keys
                logger.info(f'set backlight {value}')
                if value.get('state'):
                    await self._switch_backlight(value['state'])
                if value.get('color') or value.get('brightness'):
                    if value.get('color'):
                        color = value['color']
                        try:
                            self._color = color['r'], color['g'], color['b']
                        except ValueError:
                            return
                    if value.get('brightness'):
                        self._brightness = value['brightness']
                    await self.set_color(
                        ColorTarget.LIGHT,
                        *self._color,
                        self._brightness,
                    )
| 35.941781 | 79 | 0.495569 |
e21d73d4f841d3d2fb41a6ebe2ae668e863c21cd | 2,272 | py | Python | sky/tools/webkitpy/layout_tests/reftests/extract_reference_link.py | domenic/mojo | 53dda76fed90a47c35ed6e06baf833a0d44495b8 | [
"BSD-3-Clause"
] | 5,964 | 2016-09-27T03:46:29.000Z | 2022-03-31T16:25:27.000Z | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link.py | w4454962/miniblink49 | b294b6eacb3333659bf7b94d670d96edeeba14c0 | [
"Apache-2.0"
] | 459 | 2016-09-29T00:51:38.000Z | 2022-03-07T14:37:46.000Z | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link.py | w4454962/miniblink49 | b294b6eacb3333659bf7b94d670d96edeeba14c0 | [
"Apache-2.0"
] | 1,006 | 2016-09-27T05:17:27.000Z | 2022-03-30T02:46:51.000Z | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utility module for reftests."""
from HTMLParser import HTMLParser
class ExtractReferenceLinkParser(HTMLParser):
    """Collects reftest reference links from an HTML document.

    After feeding HTML to the parser, ``matches`` holds the href values of
    ``<link rel="match">`` tags and ``mismatches`` those of
    ``<link rel="mismatch">`` tags.
    """

    def __init__(self):
        HTMLParser.__init__(self)
        self.matches = []
        self.mismatches = []

    def handle_starttag(self, tag, attrs):
        # Only <link> tags carrying both rel and href can be references.
        if tag != "link":
            return
        attrs = dict(attrs)
        if "rel" not in attrs:
            return
        if "href" not in attrs:
            return
        # rel is a single value, so the two cases are mutually exclusive.
        if attrs["rel"] == "match":
            self.matches.append(attrs["href"])
        elif attrs["rel"] == "mismatch":
            self.mismatches.append(attrs["href"])
def get_reference_link(html_string):
    """Extract reftest reference links from *html_string*.

    Returns:
        a tuple of two URL lists, (matches, mismatches).
    """
    extractor = ExtractReferenceLinkParser()
    extractor.feed(html_string)
    extractor.close()
    return extractor.matches, extractor.mismatches
| 35.5 | 72 | 0.709507 |
b7e830ac23fcac3a40813eef2cc5607852c23d4a | 4,623 | py | Python | tests/regressiontests/middleware/tests.py | Yuanoung/djg-master | 40413f268dd7dc3ca2d48f51c9327928b0b922de | [
"BSD-3-Clause"
] | 23 | 2015-01-26T12:16:59.000Z | 2022-02-10T10:58:40.000Z | tests/regressiontests/middleware/tests.py | joetyson/django | c3699190186561d5c216b2a77ecbfc487d42a734 | [
"BSD-3-Clause"
] | 1 | 2021-08-20T19:02:08.000Z | 2021-08-20T19:02:08.000Z | tests/regressiontests/middleware/tests.py | joetyson/django | c3699190186561d5c216b2a77ecbfc487d42a734 | [
"BSD-3-Clause"
] | 11 | 2016-04-15T06:22:58.000Z | 2022-02-10T10:58:41.000Z | # -*- coding: utf-8 -*-
from django.test import TestCase
from django.http import HttpRequest
from django.middleware.common import CommonMiddleware
from django.conf import settings
class CommonMiddlewareTest(TestCase):
    """Regression tests for CommonMiddleware's APPEND_SLASH / PREPEND_WWW
    redirect behaviour.

    The tests mutate the global ``settings`` object directly (pre-dating
    ``override_settings``), so setUp/tearDown save and restore the
    originals. Written for Python 2 era Django (note the old ``except``
    syntax below).
    """
    def setUp(self):
        # remember global settings so the tests can mutate them freely
        self.slash = settings.APPEND_SLASH
        self.www = settings.PREPEND_WWW
    def tearDown(self):
        settings.APPEND_SLASH = self.slash
        settings.PREPEND_WWW = self.www
    def _get_request(self, path):
        """Build a bare HttpRequest for /middleware/<path>."""
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.path = request.path_info = "/middleware/%s" % path
        return request
    def test_append_slash_have_slash(self):
        """
        Tests that URLs with slashes go unmolested.
        """
        settings.APPEND_SLASH = True
        request = self._get_request('slash/')
        # None means "no redirect response" - request passes through
        self.assertEquals(CommonMiddleware().process_request(request), None)
    def test_append_slash_slashless_resource(self):
        """
        Tests that matches to explicit slashless URLs go unmolested.
        """
        settings.APPEND_SLASH = True
        request = self._get_request('noslash')
        self.assertEquals(CommonMiddleware().process_request(request), None)
    def test_append_slash_slashless_unknown(self):
        """
        Tests that APPEND_SLASH doesn't redirect to unknown resources.
        """
        settings.APPEND_SLASH = True
        request = self._get_request('unknown')
        self.assertEquals(CommonMiddleware().process_request(request), None)
    def test_append_slash_redirect(self):
        """
        Tests that APPEND_SLASH redirects slashless URLs to a valid pattern.
        """
        settings.APPEND_SLASH = True
        request = self._get_request('slash')
        r = CommonMiddleware().process_request(request)
        self.assertEquals(r.status_code, 301)
        self.assertEquals(r['Location'], 'http://testserver/middleware/slash/')
    def test_append_slash_no_redirect_on_POST_in_DEBUG(self):
        """
        Tests that while in debug mode, an exception is raised with a warning
        when a failed attempt is made to POST to an URL which would normally be
        redirected to a slashed version.
        """
        settings.APPEND_SLASH = True
        settings.DEBUG = True
        request = self._get_request('slash')
        request.method = 'POST'
        self.assertRaises(
            RuntimeError,
            CommonMiddleware().process_request,
            request)
        try:
            CommonMiddleware().process_request(request)
        except RuntimeError, e:  # Python 2 except syntax
            self.failUnless('end in a slash' in str(e))
        # NOTE(review): DEBUG is not restored in tearDown, so a failure above
        # would leak DEBUG=True into subsequent tests.
        settings.DEBUG = False
    def test_append_slash_disabled(self):
        """
        Tests disabling append slash functionality.
        """
        settings.APPEND_SLASH = False
        request = self._get_request('slash')
        self.assertEquals(CommonMiddleware().process_request(request), None)
    def test_append_slash_quoted(self):
        """
        Tests that URLs which require quoting are redirected to their slash
        version ok.
        """
        settings.APPEND_SLASH = True
        request = self._get_request('needsquoting#')
        r = CommonMiddleware().process_request(request)
        self.assertEquals(r.status_code, 301)
        # '#' must come back percent-encoded in the Location header
        self.assertEquals(
            r['Location'],
            'http://testserver/middleware/needsquoting%23/')
    def test_prepend_www(self):
        # PREPEND_WWW alone: host gains "www.", path untouched
        settings.PREPEND_WWW = True
        settings.APPEND_SLASH = False
        request = self._get_request('path/')
        r = CommonMiddleware().process_request(request)
        self.assertEquals(r.status_code, 301)
        self.assertEquals(
            r['Location'],
            'http://www.testserver/middleware/path/')
    def test_prepend_www_append_slash_have_slash(self):
        # both options on, URL already has a slash: only www is added
        settings.PREPEND_WWW = True
        settings.APPEND_SLASH = True
        request = self._get_request('slash/')
        r = CommonMiddleware().process_request(request)
        self.assertEquals(r.status_code, 301)
        self.assertEquals(r['Location'],
                          'http://www.testserver/middleware/slash/')
    def test_prepend_www_append_slash_slashless(self):
        # both options on, slashless URL: single redirect applies both
        settings.PREPEND_WWW = True
        settings.APPEND_SLASH = True
        request = self._get_request('slash')
        r = CommonMiddleware().process_request(request)
        self.assertEquals(r.status_code, 301)
        self.assertEquals(r['Location'],
                          'http://www.testserver/middleware/slash/')
| 36.117188 | 79 | 0.640926 |
9db98688b23356fca3ce2d595167d9afb4ff0b7e | 25,758 | py | Python | src/cryptoadvance/specter/specter.py | abitmore/specter-desktop | c4d992c8ea31f20cfa80e210ff967128f9623799 | [
"MIT"
] | 1 | 2021-09-06T16:58:30.000Z | 2021-09-06T16:58:30.000Z | src/cryptoadvance/specter/specter.py | abitmore/specter-desktop | c4d992c8ea31f20cfa80e210ff967128f9623799 | [
"MIT"
] | null | null | null | src/cryptoadvance/specter/specter.py | abitmore/specter-desktop | c4d992c8ea31f20cfa80e210ff967128f9623799 | [
"MIT"
] | null | null | null | import copy
import json
import logging
import os
import platform
import random
import secrets
import signal
import threading
import time
import traceback
import zipfile
from io import BytesIO
from sys import exit
from urllib.parse import urlparse
import requests
from requests.exceptions import ConnectionError
from stem.control import Controller
from urllib3.exceptions import NewConnectionError
from cryptoadvance.specter.devices.device_types import DeviceTypes
from .helpers import clean_psbt, deep_update, is_liquid, is_testnet, get_asset_label
from .internal_node import InternalNode
from .liquid.rpc import LiquidRPC
from .managers.config_manager import ConfigManager
from .managers.node_manager import NodeManager
from .managers.otp_manager import OtpManager
from .managers.user_manager import UserManager
from .managers.wallet_manager import WalletManager
from .node import Node
from .persistence import read_json_file, write_json_file, write_node
from .process_controller.bitcoind_controller import BitcoindPlainController
from .rpc import (
BitcoinRPC,
RpcError,
get_default_datadir,
)
from .services.service_manager import ServiceManager
from .services.service import devstatus_alpha, devstatus_beta, devstatus_prod
from .specter_error import ExtProcTimeoutException, SpecterError
from .tor_daemon import TorDaemonController
from .user import User
from .util.checker import Checker
from .util.version import VersionChecker
from .util.price_providers import update_price
from .util.setup_states import SETUP_STATES
from .util.tor import get_tor_daemon_suffix
logger = logging.getLogger(__name__)
class Specter:
"""A central Object mostly holding app-settings"""
# use this lock for all fs operations
lock = threading.Lock()
_default_asset = None
def __init__(
self,
data_folder="./data",
config={},
internal_bitcoind_version="",
service_devstatus_threshold=devstatus_prod,
):
if data_folder.startswith("~"):
data_folder = os.path.expanduser(data_folder)
data_folder = os.path.abspath(data_folder)
# creating folders if they don't exist
if not os.path.isdir(data_folder):
os.makedirs(data_folder)
self.data_folder = data_folder
self.user_manager = UserManager(
self
) # has to come before calling VersionChecker()
self.service_manager = ServiceManager(
specter=self, devstatus_threshold=service_devstatus_threshold
)
# version checker
# checks for new versions once per hour
self.version = VersionChecker(specter=self)
self.version.start()
self._config_manager = ConfigManager(self.data_folder, config)
self.internal_bitcoind_version = internal_bitcoind_version
# Migrating from Specter 1.3.1 and lower (prior to the node manager)
self.migrate_old_node_format()
self.node_manager = NodeManager(
proxy_url=self.proxy_url,
only_tor=self.only_tor,
active_node=self.active_node_alias,
bitcoind_path=self.bitcoind_path,
internal_bitcoind_version=internal_bitcoind_version,
data_folder=os.path.join(self.data_folder, "nodes"),
)
self.torbrowser_path = os.path.join(
self.data_folder, f"tor-binaries/tor{get_tor_daemon_suffix()}"
)
self._tor_daemon = None
self.setup_status = {
"stage": "start",
"bitcoind": {
"stage_progress": -1,
"stage": "",
"error": "",
},
"torbrowser": {
"stage_progress": -1,
"stage": "",
"error": "",
},
}
# health check: loads config, tests rpc
# also loads and checks wallets for all users
try:
self.check(check_all=True)
if os.path.isfile(self.torbrowser_path):
self.tor_daemon.start_tor_daemon()
except Exception as e:
logger.error(e)
self.update_tor_controller()
self.checker = Checker(lambda: self.check(check_all=True), desc="health")
self.checker.start()
self.price_checker = Checker(
lambda: update_price(self, self.user), desc="price"
)
if self.price_check and self.price_provider:
self.price_checker.start()
# This is for CTRL-C --> SIGINT
signal.signal(signal.SIGINT, self.cleanup_on_exit)
# This is for kill $pid --> SIGTERM
signal.signal(signal.SIGTERM, self.cleanup_on_exit)
    def cleanup_on_exit(self, signum=0, frame=0):
        """Signal handler: stop Tor and internal nodes, then exit.

        Registered for SIGINT/SIGTERM in __init__; the (signum, frame)
        signature matches the signal-handler convention.
        """
        if self._tor_daemon:
            logger.info("Specter exit cleanup: Stopping Tor daemon")
            self._tor_daemon.stop_tor_daemon()
        # only nodes Specter itself spawned are stopped
        for node in self.node_manager.nodes.values():
            if not node.external_node:
                node.stop()
        logger.info("Closing Specter after cleanup")
        # For some reason we need to explicitely exit here. Otherwise it will hang
        exit(0)
    def check(self, user=None, check_all=False):
        """
        Checks and updates everything for a particular user:
        - config if changed
        - rpc including check if it's connected
        - node info
        - wallet manager
        - device manager
        """
        # check if config file have changed
        self.check_config()
        self.node.update_rpc()
        # if rpc is not available
        # do checks more often, once in 20 seconds
        if self.rpc is None or self.node.info.get("initialblockdownload", True):
            period = 20
        else:
            period = 600
        # hasattr guard: check() runs from __init__ before self.checker exists
        if hasattr(self, "checker") and self.checker.period != period:
            self.checker.period = period
        if not check_all:
            # find proper user
            user = self.user_manager.get_user(user)
            user.check()
        else:
            for u in self.user_manager.users:
                u.check()
    @property
    def node(self):
        """Currently active node; falls back to the first known node if the
        configured active-node alias cannot be resolved."""
        try:
            return self.node_manager.active_node
        except SpecterError as e:
            logger.error("SpecterError while accessing active_node")
            logger.exception(e)
            self.update_active_node(list(self.node_manager.nodes.values())[0].alias)
            return self.node_manager.active_node
    @property
    def default_node(self):
        return self.node_manager.default_node()
    @property
    def rpc(self):
        # RPC connection of the active node (may be None when unreachable)
        return self.node.rpc
    @property
    def utxorescanwallet(self):
        # wallet currently being utxo-rescanned, delegated to the node
        return self.node.utxorescanwallet
    @utxorescanwallet.setter
    def utxorescanwallet(self, value):
        self.node.utxorescanwallet = value
    @property
    def config(self):
        """A convenience property simply redirecting to the config_manager"""
        return self.config_manager.data
    def check_blockheight(self):
        # re-run the full health check whenever the node reports new blocks
        if self.node.check_blockheight():
            self.check(check_all=True)
    def get_user_folder_id(self, user=None):
        """
        Returns the suffix for the user wallets and devices.
        User can be either a flask_login user or a string.
        """
        # admin data lives without a suffix; other users get "_<id>"
        user = self.user_manager.get_user(user)
        if not user.is_admin:
            return "_" + user.id
        return ""
    def check_config(self):
        """
        Updates config if file config have changed.
        Priority (low to high):
        - existing / default config
        - file config from config.json
        - arg_config passed in constructor
        """
        self.config_manager.check_config()
    def delete_user(self, user):
        """Remove a user together with their wallets and devices."""
        if user not in self.user_manager.users:
            return
        user = self.user_manager.get_user(user)
        user.wallet_manager.delete(self)
        user.device_manager.delete(self)
        self.user_manager.delete_user(user)
    # mark
    @property
    def bitcoin_datadir(self):
        # datadir of the active node
        return self.node.datadir
    # mark
    def _save(self):
        # persist the current config to config.json (file lock held)
        write_json_file(self.config, self.config_fname, lock=self.lock)
    @property
    def config_fname(self):
        return os.path.join(self.data_folder, "config.json")
    # mark
    def update_active_node(self, node_alias):
        """update the current active node to use"""
        self.config_manager.update_active_node(node_alias)
        self.node_manager.switch_node(node_alias)
        self.check()
    def update_setup_status(self, software_name, stage):
        """Record a new setup stage for bitcoind/torbrowser installation,
        mapping generic stage names to software-specific display strings."""
        self.setup_status[software_name]["error"] = ""
        if stage in SETUP_STATES:
            self.setup_status[software_name]["stage"] = SETUP_STATES[stage].get(
                software_name, stage
            )
        else:
            self.setup_status[software_name]["stage"] = stage
        self.setup_status[software_name]["stage_progress"] = 0
    def update_setup_download_progress(self, software_name, progress):
        # progress of the current stage (reported by the downloader)
        self.setup_status[software_name]["error"] = ""
        self.setup_status[software_name]["stage_progress"] = progress
    def update_setup_error(self, software_name, error):
        # stage_progress == -1 marks the failed / idle state for the UI
        self.setup_status[software_name]["error"] = error
        self.setup_status[software_name]["stage_progress"] = -1
    def reset_setup(self, software_name):
        self.setup_status[software_name]["error"] = ""
        self.setup_status[software_name]["stage"] = ""
        self.setup_status[software_name]["stage_progress"] = -1
    def get_setup_status(self, software_name):
        """Return the setup status dict plus whether the binary is already
        installed on disk."""
        if software_name == "bitcoind":
            installed = os.path.isfile(self.bitcoind_path)
        elif software_name == "torbrowser":
            installed = os.path.isfile(self.torbrowser_path)
        else:
            installed = False
        return {"installed": installed, **self.setup_status[software_name]}
    # mark
    def update_auth(self, method, rate_limit, registration_link_timeout):
        """simply persisting the current auth-choice"""
        self.config_manager.update_auth(method, rate_limit, registration_link_timeout)
    # mark
    def update_explorer(self, explorer_id, explorer_data, user):
        """update the block explorers urls"""
        # explorers are stored per chain
        self.config_manager.update_explorer(
            explorer_id, explorer_data, user, self.chain
        )
    # mark
    def update_fee_estimator(self, fee_estimator, custom_url, user):
        """update the fee estimator option and its url if custom"""
        self.config_manager.update_fee_estimator(fee_estimator, custom_url, user)
    # mark
    def update_tor_type(self, tor_type, user):
        """update the Tor proxy url"""
        # "builtin" pins the proxy to the bundled Tor daemon's defaults
        if tor_type == "builtin":
            self.update_proxy_url("socks5h://localhost:9050", user)
            self.update_tor_control_port("", user)
        self.config_manager.update_tor_type(tor_type, user)
    # mark
    def update_proxy_url(self, proxy_url, user):
        """update the Tor proxy url"""
        self.config_manager.update_proxy_url(proxy_url, user)
    # mark
    def toggle_tor_status(self):
        """toggle the Tor status"""
        self.config_manager.toggle_tor_status()
    # mark
    def update_only_tor(self, only_tor, user):
        """switch whatever to use Tor for all calls"""
        self.config_manager.update_only_tor(only_tor, user)
# mark
def update_tor_control_port(self, tor_control_port, user):
"""set the control port of the tor daemon"""
if self.config_manager.update_tor_control_port:
self.update_tor_controller()
    # mark
    def generate_torrc_password(self, overwrite=False):
        # create (or overwrite) the control-port password stored in config
        self.config_manager.generate_torrc_password(overwrite)
    def update_tor_controller(self):
        """(Re)connect the stem Controller to the Tor control port.

        On failure the controller is set to None and a warning is logged.
        """
        if "torrc_password" not in self.config:
            # Will be missing if the user did not go through the built-in Tor setup
            self.generate_torrc_password()
        try:
            # control connection targets the same host as the SOCKS proxy
            tor_control_address = urlparse(self.proxy_url).netloc.split(":")[0]
            if tor_control_address == "localhost":
                tor_control_address = "127.0.0.1"
            self._tor_controller = Controller.from_port(
                address=tor_control_address,
                port=int(self.tor_control_port) if self.tor_control_port else "default",
            )
            self._tor_controller.authenticate(
                password=self.config.get("torrc_password", "")
            )
        except Exception as e:
            logger.warning(f"Failed to connect to Tor control port. Error: {e}")
            self._tor_controller = None
    @property
    def tor_daemon(self):
        """Lazily constructed controller for the bundled Tor daemon.

        Raises:
            SpecterError: if the Tor binary is not installed.
        """
        # NOTE(review): the second condition is os.path.join(...), a non-empty
        # string that is always truthy — os.path.isfile(...) was probably
        # intended to also verify the torrc file exists. Confirm before fixing.
        if os.path.isfile(self.torbrowser_path) and os.path.join(
            self.data_folder, "torrc"
        ):
            if not self._tor_daemon:
                self._tor_daemon = TorDaemonController(
                    tor_daemon_path=self.torbrowser_path,
                    tor_config_path=os.path.join(self.data_folder, "torrc"),
                )
            return self._tor_daemon
        raise SpecterError(
            "Tor daemon files missing. Make sure Tor is installed within Specter"
        )
    def is_tor_dameon_running(self):
        # (sic) misspelled name kept for API compatibility
        return self._tor_daemon and self._tor_daemon.is_running()
    @property
    def tor_controller(self):
        """Connected stem Controller, reconnecting once if necessary.

        Raises:
            SpecterError: if a control connection cannot be established.
        """
        if self._tor_controller:
            return self._tor_controller
        self.update_tor_controller()
        if self._tor_controller:
            return self._tor_controller
        raise SpecterError(
            "Failed to connect to the Tor daemon. Make sure ControlPort is properly configured."
        )
    # mark
    def update_hwi_bridge_url(self, url, user):
        """update the hwi bridge url to use"""
        self.config_manager.update_hwi_bridge_url(url, user)
    # mark
    def update_unit(self, unit, user):
        # display unit: "btc" or "sat"
        self.config_manager.update_unit(unit, user)
    # mark
    def update_hide_sensitive_info(self, hide_sensitive_info_bool, user):
        self.config_manager.update_hide_sensitive_info(hide_sensitive_info_bool, user)
    # mark
    def update_price_check_setting(self, price_check_bool, user):
        self.config_manager.update_price_check_setting(price_check_bool, user)
    # mark
    def update_price_provider(self, price_provider, user):
        self.config_manager.update_price_provider(price_provider, user)
    # mark needs User-Type injection
    def update_weight_unit(self, weight_unit, user):
        self.config_manager.update_weight_unit(weight_unit, user)
    # mark needs User-Type injection
    def update_alt_rate(self, alt_rate, user):
        self.config_manager.update_alt_rate(alt_rate, user)
    # mark
    def update_alt_symbol(self, alt_symbol, user):
        self.config_manager.update_alt_symbol(alt_symbol, user)
    def update_services(self, services):
        """takes a list of service_names which should be activated"""
        self.config["services"] = services
        self._save()
    def update_merkleproof_settings(self, validate_bool):
        # merkle-proof validation needs block data unavailable on pruned nodes
        if validate_bool is True and self.info.get("pruned") is True:
            validate_bool = False
            logger.warning("Cannot enable merkleproof setting on pruned node.")
        self.config_manager.update_merkleproof_settings(validate_bool)
    def combine(self, psbt_arr):
        """Combine several partially signed PSBTs into one via Core RPC."""
        # backward compatibility with current Core psbt parser
        psbt_arr = [clean_psbt(psbt) for psbt in psbt_arr]
        final_psbt = self.rpc.combinepsbt(psbt_arr)
        return final_psbt
    def finalize(self, psbt):
        """Finalize a PSBT via Core RPC."""
        final_psbt = self.rpc.finalizepsbt(psbt)
        return final_psbt
    def broadcast(self, raw):
        """Broadcast a raw transaction; returns its txid."""
        res = self.rpc.sendrawtransaction(raw)
        return res
    def estimatesmartfee(self, blocks):
        res = self.rpc.estimatesmartfee(blocks)
        # Liquid has near-fixed fees; fall back to a flat rate when Core
        # does not return one
        if "feerate" not in res and self.is_liquid:
            return 0.000001
        return res
@property
def bitcoind_path(self):
bitcoind_path = os.path.join(self.data_folder, "bitcoin-binaries/bin/bitcoind")
if platform.system() == "Windows":
bitcoind_path += ".exe"
return bitcoind_path
@property
def info(self):
return self.node.info
@property
def network_info(self):
return self.node.network_info
@property
def bitcoin_core_version(self):
return self.node.bitcoin_core_version
@property
def bitcoin_core_version_raw(self):
return self.node.bitcoin_core_version_raw
@property
def taproot_support(self):
return self.node.taproot_support
@property
def chain(self):
return self.node.chain
@property
def network_parameters(self):
return self.node.network_parameters
@property
def is_testnet(self):
return self.node.is_testnet
@property
def is_liquid(self):
return is_liquid(self.chain)
@property
def user_config(self):
return self.config if self.user.is_admin else self.user.config
@property
def active_node_alias(self):
return self.user_config.get("active_node_alias", "default")
@property
def explorer(self):
return self.user_config.get("explorers", {}).get(self.chain, "")
@property
def explorer_id(self):
return self.user_config.get("explorer_id", {}).get(self.chain, "CUSTOM")
@property
def asset_labels(self):
user_assets = self.user_config.get("asset_labels", {}).get(self.chain, {})
node_assets = self.node.asset_labels
asset_labels = {}
deep_update(asset_labels, node_assets)
deep_update(asset_labels, user_assets)
return asset_labels
@property
def default_asset(self):
"""returns hash of LBTC"""
if self._default_asset is None:
for asset, lbl in self.asset_labels.items():
if lbl in ["LBTC", "tLBTC"]:
self._default_asset = asset
return asset
return self._default_asset
def asset_label(self, asset):
if asset == "":
return ""
return get_asset_label(asset, known_assets=self.asset_labels)
def update_asset_label(self, asset, label):
if asset == self.default_asset:
raise SpecterError("LBTC should stay LBTC")
self.config_manager.update_asset_label(asset, label, self.chain, self.user)
# --- user-configurable settings (defaults shown in the get() calls) --------

@property
def fee_estimator(self):
    return self.user_config.get("fee_estimator", "bitcoin_core")

@property
def tor_type(self):
    return self.user_config.get("tor_type", "builtin")

@property
def proxy_url(self):
    # SOCKS proxy URL used for Tor traffic.
    return self.user_config.get("proxy_url", "socks5h://localhost:9050")

@property
def only_tor(self):
    return self.user_config.get("only_tor", False)

@property
def tor_control_port(self):
    return self.user_config.get("tor_control_port", "")

@property
def hwi_bridge_url(self):
    return self.user_config.get("hwi_bridge_url", "")

@property
def unit(self):
    # Display unit for amounts.
    return self.user_config.get("unit", "btc")

@property
def price_check(self):
    return self.user_config.get("price_check", False)

@property
def price_provider(self):
    return self.user_config.get("price_provider", False)

@property
def weight_unit(self):
    return self.user_config.get("weight_unit", "oz")

@property
def alt_rate(self):
    return self.user_config.get("alt_rate", 1)

@property
def alt_symbol(self):
    return self.user_config.get("alt_symbol", "BTC")

@property
def admin(self):
    # First admin among all registered users; implicitly None if there is none.
    for u in self.user_manager.users:
        if u.is_admin:
            return u
@property
def user(self):
    # User bound to the current session via the user manager.
    return self.user_manager.user

@property
def config_manager(self):
    # Lazily constructed on first access and cached on the instance.
    if not hasattr(self, "_config_manager"):
        self._config_manager = ConfigManager(self.data_folder)
    return self._config_manager

@property
def device_manager(self):
    # Per-user device manager.
    return self.user.device_manager

@property
def wallet_manager(self):
    # Per-user wallet manager.
    return self.user.wallet_manager

@property
def otp_manager(self):
    # Lazily constructed on first access and cached on the instance.
    if not hasattr(self, "_otp_manager"):
        self._otp_manager = OtpManager(self.data_folder)
    return self._otp_manager

@property
def hide_sensitive_info(self):
    return self.user_config.get("hide_sensitive_info", False)

@property
def autohide_sensitive_info_timeout(self):
    # Minutes before sensitive info is automatically hidden (default 20).
    return self.user_config.get("autohide_sensitive_info_timeout_minutes", 20)

@property
def autologout_timeout(self):
    # Hours before automatic logout (default 4).
    return self.user_config.get("autologout_timeout_hours", 4)
def requests_session(self, force_tor=False):
    """Return a fresh ``requests.Session``, proxied when Tor is required.

    When ``only_tor`` is configured (or *force_tor* is passed) the session's
    http/https proxies point at ``proxy_url`` with a random ``user:password``
    pair injected into the netloc — Tor treats distinct SOCKS credentials as
    separate circuits, isolating this session's traffic.

    :param force_tor: route through the proxy even if ``only_tor`` is off.
    """
    # Renamed the local (it used to shadow this method's own name) and
    # dropped a dead initial `proxy_url = self.proxy_url` assignment that
    # was overwritten before any use.
    session = requests.Session()
    if self.only_tor or force_tor:
        proxy_parsed_url = urlparse(self.proxy_url)
        proxy_url = proxy_parsed_url._replace(
            netloc="{}:{}@{}".format(
                str(random.randint(10000, 0x7FFFFFFF)),
                "random",
                proxy_parsed_url.netloc,
            )
        ).geturl()
        session.proxies["http"] = proxy_url
        session.proxies["https"] = proxy_url
    return session
def specter_backup_file(self):
    """Assemble an in-memory ZIP backup of all wallets and devices.

    Wallet exports are stored as ``wallets/<alias>.json`` and device exports
    as ``devices/<alias>.json``. Bitcoin Core hot-wallet devices are exported
    as watch-only.

    :return: BytesIO containing the zip archive, seeked back to position 0.
    """

    def _zinfo(arcname):
        # Build a ZipInfo so the timestamp and DEFLATE compression actually
        # apply. BUGFIX: previously a ZipInfo was constructed and configured
        # but never passed to writestr(), so entries were written with the
        # archive default (stored, 1980 timestamp) and the setup was dead code.
        info = zipfile.ZipInfo(arcname)
        info.date_time = time.localtime(time.time())[:6]
        info.compress_type = zipfile.ZIP_DEFLATED
        return info

    memory_file = BytesIO()
    with zipfile.ZipFile(memory_file, "w") as zf:
        if self.wallet_manager:
            for wallet in self.wallet_manager.wallets.values():
                zf.writestr(
                    _zinfo("wallets/{}.json".format(wallet.alias)),
                    json.dumps(wallet.to_json(for_export=True)),
                )
        if self.device_manager:
            for device in self.device_manager.devices.values():
                device = device.json
                # Exporting the bitcoincore hot wallet as watchonly
                if device["type"] == DeviceTypes.BITCOINCORE:
                    device["type"] = DeviceTypes.BITCOINCORE_WATCHONLY
                zf.writestr(
                    _zinfo("devices/{}.json".format(device["alias"])),
                    json.dumps(device),
                )
    memory_file.seek(0)
    return memory_file
# Migrating RPC nodes from Specter 1.3.1 and lower (prior to the node manager)
def migrate_old_node_format(self):
    """One-shot migration: move the legacy ``rpc`` / ``internal_node`` config
    entries into per-node json files under ``<data_folder>/nodes`` and save
    the trimmed config."""
    if not os.path.isdir(os.path.join(self.data_folder, "nodes")):
        os.mkdir(os.path.join(self.data_folder, "nodes"))
    old_rpc = self.config.get("rpc", None)
    old_internal_rpc = self.config.get("internal_node", None)
    # Legacy Specter-managed bitcoind -> nodes/specter_bitcoin.json
    if old_internal_rpc and os.path.isfile(self.bitcoind_path):
        internal_node = InternalNode(
            "Specter Bitcoin",
            "specter_bitcoin",
            old_internal_rpc.get("autodetect", False),
            old_internal_rpc.get("datadir", get_default_datadir()),
            old_internal_rpc.get("user", ""),
            old_internal_rpc.get("password", ""),
            old_internal_rpc.get("port", 8332),
            old_internal_rpc.get("host", "localhost"),
            old_internal_rpc.get("protocol", "http"),
            os.path.join(
                os.path.join(self.data_folder, "nodes"), "specter_bitcoin.json"
            ),
            self,
            self.bitcoind_path,
            "mainnet",
            "0.20.1",
        )
        logger.info(f"persisting {internal_node} in migrate_old_node_format")
        write_node(
            internal_node,
            os.path.join(
                os.path.join(self.data_folder, "nodes"), "specter_bitcoin.json"
            ),
        )
        del self.config["internal_node"]
        # If no external node was in use, make the internal one active.
        if not old_rpc or not old_rpc.get("external_node", True):
            self.config_manager.update_active_node("specter_bitcoin")
    # Legacy external node -> nodes/default.json
    if old_rpc:
        node = Node(
            "Bitcoin Core",
            "default",
            old_rpc.get("autodetect", True),
            old_rpc.get("datadir", get_default_datadir()),
            old_rpc.get("user", ""),
            old_rpc.get("password", ""),
            old_rpc.get("port", None),
            old_rpc.get("host", "localhost"),
            old_rpc.get("protocol", "http"),
            True,
            os.path.join(os.path.join(self.data_folder, "nodes"), "default.json"),
            "BTC",
            self,
        )
        logger.info(f"persisting {node} in migrate_old_node_format")
        write_node(
            node,
            os.path.join(os.path.join(self.data_folder, "nodes"), "default.json"),
        )
        del self.config["rpc"]
    self._save()
class SpecterConfiguration:
    """Abstract placeholder for config.json-related functionality, mostly
    deferring to ConfigManager.

    Do not put logic here that is not directly relevant for config.json, and
    do not touch config.json directly — implement that in ConfigManager.
    """

    pass


# ToDo: move all the methods above here.
| 33.71466 | 98 | 0.630445 |
a074f944ce154268484d0f6e3f10e2ea9a0da5a2 | 4,227 | py | Python | spark_auto_mapper_fhir/backbone_elements/claim_related.py | imranq2/SparkAutoMapper.FHIR | dd23b218fb0097d1edc2f3e688e8d6d4d7278bd2 | [
"Apache-2.0"
] | 1 | 2020-10-31T23:25:07.000Z | 2020-10-31T23:25:07.000Z | spark_auto_mapper_fhir/backbone_elements/claim_related.py | icanbwell/SparkAutoMapper.FHIR | 98f368e781b46523142c7cb513c670d659a93c9b | [
"Apache-2.0"
] | null | null | null | spark_auto_mapper_fhir/backbone_elements/claim_related.py | icanbwell/SparkAutoMapper.FHIR | 98f368e781b46523142c7cb513c670d659a93c9b | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from spark_auto_mapper_fhir.fhir_types.list import FhirList
from spark_auto_mapper_fhir.fhir_types.string import FhirString
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.base_types.fhir_backbone_element_base import (
FhirBackboneElementBase,
)
if TYPE_CHECKING:
pass
# id_ (string)
# extension (Extension)
# modifierExtension (Extension)
# claim (Reference)
from spark_auto_mapper_fhir.complex_types.reference import Reference
# Imports for References for claim
from spark_auto_mapper_fhir.resources.claim import Claim
# relationship (CodeableConcept)
from spark_auto_mapper_fhir.complex_types.codeable_concept import CodeableConcept
# End Import for References for relationship
# Import for CodeableConcept for relationship
from spark_auto_mapper_fhir.value_sets.example_related_claim_relationship_codes import (
ExampleRelatedClaimRelationshipCodesCode,
)
# End Import for CodeableConcept for relationship
# reference (Identifier)
from spark_auto_mapper_fhir.complex_types.identifier import Identifier
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class ClaimRelated(FhirBackboneElementBase):
    """
    Claim.Related
        FHIR backbone element of Claim: identifies another claim this claim is
        related to, how the two are related, and an alternate organizational
        file reference for the case.
    """

    # noinspection PyPep8Naming
    def __init__(
        self,
        *,
        id_: Optional[FhirString] = None,
        extension: Optional[FhirList[ExtensionBase]] = None,
        modifierExtension: Optional[FhirList[ExtensionBase]] = None,
        claim: Optional[Reference[Claim]] = None,
        relationship: Optional[
            CodeableConcept[ExampleRelatedClaimRelationshipCodesCode]
        ] = None,
        reference: Optional[Identifier] = None,
    ) -> None:
        """
        Build a Claim.Related backbone element.

        :param id_: element id.
        :param extension: additional, non-modifying extensions; governed by the
            usual FHIR extension rules.
        :param modifierExtension: extensions that change the meaning of the
            containing element; processors must check for them and they may not
            alter the meaning of other elements.
        :param claim: reference to the related claim.
        :param relationship: code conveying how the claims are related.
        :param reference: alternate organizational reference to the case or
            file this claim pertains to.
        """
        # Collect the field values and forward them unchanged to the
        # generated backbone-element base class.
        field_values = {
            "id_": id_,
            "extension": extension,
            "modifierExtension": modifierExtension,
            "claim": claim,
            "relationship": relationship,
            "reference": reference,
        }
        super().__init__(**field_values)
141d1e4fa3b41530c5e1438ff7ff18d230370354 | 11,840 | py | Python | selfdrive/car/volvo/carcontroller.py | xirtam531us/openpilot | 5b0a8da36e45c743bbbbba3bdb8057950f64dfaf | [
"MIT"
] | 1 | 2020-12-15T08:53:56.000Z | 2020-12-15T08:53:56.000Z | selfdrive/car/volvo/carcontroller.py | xirtam531us/openpilot | 5b0a8da36e45c743bbbbba3bdb8057950f64dfaf | [
"MIT"
] | null | null | null | selfdrive/car/volvo/carcontroller.py | xirtam531us/openpilot | 5b0a8da36e45c743bbbbba3bdb8057950f64dfaf | [
"MIT"
] | 2 | 2020-12-07T18:28:13.000Z | 2021-02-08T11:57:56.000Z | from common.numpy_fast import clip, interp
from selfdrive.car.volvo.values import CAR, PLATFORM, DBC, CarControllerParams as CCP
from selfdrive.car.volvo import volvocan
from opendbc.can.packer import CANPacker
from collections import deque
class SteerCommand:
    """Mutable container for the current steering request.

    NOTE: CarController stores the class object itself and mutates these
    class-level attributes, so this state is effectively a shared singleton.
    """
    angle_request = 0  # requested steering-wheel angle
    steer_direction = 0  # direction bits placed in the steering CAN message
    trqlim = 0  # torque-limit field of the steering CAN message
class CarController():
    """Builds the CAN messages for Volvo LKA steering plus diagnostics."""

    def __init__(self, dbc_name, CP, VW):
        # NOTE(review): dbc_name and VW are accepted but not used here.
        # state
        self.acc_enabled_prev = 0
        # steering related
        self.angle_request_prev = 0
        # Direction change statemachine
        self.UNBLOCKED = 0
        self.BLOCKED = 1
        self.BLOCK_LEN = CCP.BLOCK_LEN  # Block steer direction change for x samples
        self.dir_state = 0
        self.block_steering = 0
        self.steer_direction_bf_block = 0
        self.des_steer_direction_prev = 0
        # SteerCommand
        self.SteerCommand = SteerCommand
        self.trq_fifo = deque([])  # recent LKA torque samples (zero-torque fault detection)
        self.fault_frame = -200  # sentinel: -200 means no zero-torque fault pending
        # Diag
        self.doDTCRequests = True  # Turn on and off DTC requests
        self.checkPN = False  # Check partnumbers
        self.clearDtcs = False  # Set false to stop sending diagnostic requests
        self.timeout = 0  # Set to 0 as init
        # Request payload for reading diagnostic trouble codes
        # (service 0x19 per the bytes below — used by update()'s DTC polling).
        self.diagRequest = {
            "byte0": 0x03,
            "byte1": 0x19,
            "byte2": 0x02,
            "byte3": 0x02,
        }
        # ISO-TP style flow-control frame (0x30 = continue to send).
        self.flowControl = {
            "byte0": 0x30,
            "byte1": 0x00,
            "byte2": 0x00,
            "byte3": 0x00,
        }
        # Payload used to clear stored DTCs (service 0x14, group FF FF FF).
        self.clearDTC = {
            "byte0": 0x04,
            "byte1": 0x14,
            "byte2": 0xFF,
            "byte3": 0xFF,
            "byte4": 0xFF,
        }
        # Part number
        self.cnt = 0  # Init at 0 always
        self.sndNxtFrame = 0  # Init at low value
        self.dictKeys = ["byte"+str(x) for x in range(8)]
        startdid = 0xf1a1  # Start with this DID (Data IDentifier, read UDS Spec for more info)
        self.dids = [x for x in range(startdid, startdid+9)]
        # Setup detection helper. Routes commands to
        # an appropriate CAN bus number.
        self.CP = CP
        self.packer = CANPacker(DBC[CP.carFingerprint]['pt'])
def max_angle_req(self, current_steer_angle, angle_request_prev, CCP):
    """Return the allowed bounds for the next steering-angle request.

    Two independent constraints are produced:
    * a rate limit — the request may move at most
      ``CCP.STEER_ANGLE_DELTA_REQ_DIFF`` per sample from the previous request;
    * an offset limit — the request must stay within
      ``CCP.MAX_ACT_ANGLE_REQUEST_DIFF`` of the measured steering angle.

    :return: (max_right, max_left, max_delta_right, max_delta_left)
    """
    rate_lim = CCP.STEER_ANGLE_DELTA_REQ_DIFF
    offset_lim = CCP.MAX_ACT_ANGLE_REQUEST_DIFF
    # Rate constraint relative to the previous request.
    max_delta_right = angle_request_prev - rate_lim
    max_delta_left = angle_request_prev + rate_lim
    # Offset constraint relative to the actual steering angle.
    max_right = current_steer_angle - offset_lim
    max_left = current_steer_angle + offset_lim
    return max_right, max_left, max_delta_right, max_delta_left
def dir_change(self, steer_direction, error):
    """Filter out rapid steer-direction changes.

    A two-state machine (UNBLOCKED/BLOCKED): when the requested direction
    flips while a significant angle error remains, steering is blocked
    (``CCP.STEER_NO``) for up to ``BLOCK_LEN`` samples before the new
    direction is let through.

    :param steer_direction: desired direction bits for this sample.
    :param error: current_steer_angle - angle_request.
    :return: possibly overridden steer_direction.
    """
    # Removed the unused local `dessd` and a commented-out debug print.
    # Errors inside the deadzone count as zero.
    dzError = 0 if abs(error) < CCP.DEADZONE else error
    tState = -1  # -1 = no state transition requested
    # Update prev with desired if just enabled.
    self.des_steer_direction_prev = steer_direction if not self.acc_enabled_prev else self.des_steer_direction_prev
    # Check conditions for state change
    if self.dir_state == self.UNBLOCKED:
        tState = self.BLOCKED if (steer_direction != self.des_steer_direction_prev and dzError != 0) else tState
    elif self.dir_state == self.BLOCKED:
        if (steer_direction == self.steer_direction_bf_block) or (self.block_steering <= 0) or (dzError == 0):
            tState = self.UNBLOCKED
    # State transition
    if tState == self.UNBLOCKED:
        self.dir_state = self.UNBLOCKED
    elif tState == self.BLOCKED:
        self.steer_direction_bf_block = self.des_steer_direction_prev
        self.block_steering = self.BLOCK_LEN
        self.dir_state = self.BLOCKED
    # Run actions in state
    if self.dir_state == self.UNBLOCKED:
        if dzError == 0:
            steer_direction = self.des_steer_direction_prev  # Set old request when inside deadzone
    if self.dir_state == self.BLOCKED:
        self.block_steering -= 1
        steer_direction = CCP.STEER_NO
    return steer_direction
def update(self, enabled, CS, frame,
           actuators,
           visualAlert, leftLaneVisible,
           rightLaneVisible, leadVisible,
           leftLaneDepart, rightLaneDepart):
    """Controls thread: build this frame's outgoing CAN messages.

    Steering messages are produced every other frame (50 Hz); diagnostic
    traffic (DTC polling, flow control, part-number reads, DTC clearing) is
    handled on its own frame schedule.

    :return: list of CAN messages to send this frame.
    """
    # Send CAN commands.
    can_sends = []

    # run at 50hz
    if (frame % 2 == 0):
        fingerprint = self.CP.carFingerprint

        if enabled and CS.out.vEgo > self.CP.minSteerSpeed:
            current_steer_angle = CS.out.steeringAngle
            self.SteerCommand.angle_request = actuators.steerAngle  # Desired value from pathplanner

            # # windup slower
            # Rate-limit: slower toward larger angles, faster unwinding.
            if self.angle_request_prev * self.SteerCommand.angle_request > 0. and abs(self.SteerCommand.angle_request) > abs(self.angle_request_prev):
                angle_rate_lim = interp(CS.out.vEgo, CCP.ANGLE_DELTA_BP, CCP.ANGLE_DELTA_V)
            else:
                angle_rate_lim = interp(CS.out.vEgo, CCP.ANGLE_DELTA_BP, CCP.ANGLE_DELTA_VU)
            self.SteerCommand.angle_request = clip(self.SteerCommand.angle_request, self.angle_request_prev - angle_rate_lim, self.angle_request_prev + angle_rate_lim)

            # Create trqlim from angle request (before constraints)
            if fingerprint in PLATFORM.C1:
                self.SteerCommand.trqlim = -127 if current_steer_angle > self.SteerCommand.angle_request else 127
                self.SteerCommand.steer_direction = CCP.STEER
            elif fingerprint in PLATFORM.EUCD:
                self.SteerCommand.trqlim = 0
                # MIGHT be needed for EUCD
                self.SteerCommand.steer_direction = CCP.STEER_RIGHT if current_steer_angle > self.SteerCommand.angle_request else CCP.STEER_LEFT
                self.SteerCommand.steer_direction = self.dir_change(self.SteerCommand.steer_direction, current_steer_angle-self.SteerCommand.angle_request)  # Filter the direction change
        else:
            # Not steering: command no direction and a safe angle.
            self.SteerCommand.steer_direction = CCP.STEER_NO
            self.SteerCommand.trqlim = 0
            if fingerprint in PLATFORM.C1:
                self.SteerCommand.angle_request = clip(CS.out.steeringAngle, -359.95, 359.90)  # Cap values at max min values (Cap 2 steps from max min). Max=359.99445, Min=-360.0384
            else:
                self.SteerCommand.angle_request = 0

        # Count no of consequtive samples of zero torque by lka.
        # Try to recover, blocking steering request for 2 seconds.
        if fingerprint in PLATFORM.C1:
            if enabled and CS.out.vEgo > self.CP.minSteerSpeed:
                self.trq_fifo.append(CS.PSCMInfo.LKATorque)
                if len(self.trq_fifo) > CCP.N_ZERO_TRQ:
                    self.trq_fifo.popleft()
            else:
                self.trq_fifo.clear()
                self.fault_frame = -200
            if (self.trq_fifo.count(0) >= CCP.N_ZERO_TRQ) and (self.fault_frame == -200):
                self.fault_frame = frame+100
            if enabled and (frame < self.fault_frame):
                self.SteerCommand.steer_direction = CCP.STEER_NO
            if frame > self.fault_frame+8:  # Ignore steerWarning for another 8 samples.
                self.fault_frame = -200

        # update stored values
        self.acc_enabled_prev = enabled
        self.angle_request_prev = self.SteerCommand.angle_request
        if self.SteerCommand.steer_direction == CCP.STEER_RIGHT or self.SteerCommand.steer_direction == CCP.STEER_LEFT:  # TODO: Move this inside dir_change, think it should work?
            self.des_steer_direction_prev = self.SteerCommand.steer_direction  # Used for dir_change function

        # Manipulate data from servo to FSM
        # Avoid fault codes, that will stop LKA
        can_sends.append(volvocan.manipulateServo(self.packer, self.CP.carFingerprint, CS))

        # send can, add to list.
        can_sends.append(volvocan.create_steering_control(self.packer, frame, self.CP.carFingerprint, self.SteerCommand, CS.FSMInfo))

        # Cancel ACC if engaged when OP is not.
        if not enabled and CS.out.cruiseState.enabled:
            can_sends.append(volvocan.cancelACC(self.packer, self.CP.carFingerprint, CS))

    # Send diagnostic requests
    if(self.doDTCRequests):
        if(frame % 100 == 0) and (not self.clearDtcs):
            # Request diagnostic codes, 2 Hz
            can_sends.append(self.packer.make_can_msg("diagFSMReq", 2, self.diagRequest))
            #can_sends.append(self.packer.make_can_msg("diagGlobalReq", 2, self.diagRequest))
            can_sends.append(self.packer.make_can_msg("diagGlobalReq", 0, self.diagRequest))
            #can_sends.append(self.packer.make_can_msg("diagPSCMReq", 0, self.diagRequest))
            #can_sends.append(self.packer.make_can_msg("diagCEMReq", 0, self.diagRequest))
            #can_sends.append(self.packer.make_can_msg("diagCVMReq", 0, self.diagRequest))
            self.timeout = frame + 5  # Set wait time

        # Handle flow control in case of many DTC
        if frame > self.timeout and self.timeout > 0:  # Wait fix time before sending flow control, otherwise just spamming...
            self.timeout = 0
            # Top bit of each *Resp signal flags a pending multi-frame reply.
            if (CS.diag.diagFSMResp & 0x10000000):
                can_sends.append(self.packer.make_can_msg("diagFSMReq", 2, self.flowControl))
            if (CS.diag.diagCEMResp & 0x10000000):
                can_sends.append(self.packer.make_can_msg("diagCEMReq", 0, self.flowControl))
            if (CS.diag.diagPSCMResp & 0x10000000):
                can_sends.append(self.packer.make_can_msg("diagPSCMReq", 0, self.flowControl))
            if (CS.diag.diagCVMResp & 0x10000000):
                can_sends.append(self.packer.make_can_msg("diagCVMReq", 0, self.flowControl))

    # Check part numbers
    if self.checkPN and frame > 100 and frame > self.sndNxtFrame:
        if self.cnt < len(self.dids):
            did = [0x03, 0x22, (self.dids[self.cnt] & 0xff00)>>8, self.dids[self.cnt] & 0x00ff]  # Create diagnostic command
            did.extend([0]*(8-len(did)))
            diagReq = dict(zip(self.dictKeys,did))
            #can_sends.append(self.packer.make_can_msg("diagGlobalReq", 2, diagReq))
            #can_sends.append(self.packer.make_can_msg("diagGlobalReq", 0, diagReq))
            can_sends.append(self.packer.make_can_msg("diagFSMReq", 2, diagReq))
            can_sends.append(self.packer.make_can_msg("diagCEMReq", 0, diagReq))
            can_sends.append(self.packer.make_can_msg("diagPSCMReq", 0, diagReq))
            can_sends.append(self.packer.make_can_msg("diagCVMReq", 0, diagReq))
            self.cnt += 1
            self.timeout = frame+5  # When to send flowControl
            self.sndNxtFrame = self.timeout+5  # When to send next part number request
        elif True:  # Stop when list has been looped thru.
            self.checkPN = False

    # Clear DTCs in FSM on start
    # TODO check for engine running before clearing dtc.
    if(self.clearDtcs and (frame > 0) and (frame % 500 == 0)):
        can_sends.append(self.packer.make_can_msg("diagGlobalReq", 0, self.clearDTC))
        can_sends.append(self.packer.make_can_msg("diagFSMReq", 2, self.clearDTC))
        #can_sends.append(self.packer.make_can_msg("diagPSCMReq", 0, self.clearDTC))
        #can_sends.append(self.packer.make_can_msg("diagCEMReq", 0, self.clearDTC))
        self.clearDtcs = False

    return can_sends
| 42.589928 | 180 | 0.680321 |
9966374aafb18aed2871e8b0ad2d97aab8b80a12 | 3,643 | py | Python | pymit/mutualinformation.py | tud-zih-energy/pymit | 773bab8c66254738eee3fb90bc02b4515d551150 | [
"BSD-3-Clause"
] | 4 | 2019-05-29T13:07:54.000Z | 2022-01-18T16:52:48.000Z | pymit/mutualinformation.py | tud-zih-energy/pymit | 773bab8c66254738eee3fb90bc02b4515d551150 | [
"BSD-3-Clause"
] | 2 | 2020-10-30T11:48:33.000Z | 2020-12-14T20:20:51.000Z | pymit/mutualinformation.py | tud-zih-energy/pymit | 773bab8c66254738eee3fb90bc02b4515d551150 | [
"BSD-3-Clause"
] | 1 | 2020-10-27T05:54:50.000Z | 2020-10-27T05:54:50.000Z | import math
import numpy as np
import pymit
LOG_BASE = 2  # log base (bits) used when bins are given per-axis as a sequence
def I(X, Y, bins):
    """
    Calculates the mutual information of X and Y.
    If X and Y are already discretised, set bins to the amount of bins, aka states of X and Y.
    If X and Y are not discretised, bins will be used, to discretise X and Y into bins states
    @param X np vector
    @param Y np vector
    @param bins If X and Y are already discretised specify the amount of bins of X and Y. If X and Y are not discretised specifies the amount of bins to discretise X and Y into
    bins can be specified as a list or tuple, e.g. bins = (bins_x, bins_y), to discretise X and Y independently (result in log base LOG_BASE), or as a single value to discretise X and Y similarly (that value then also serves as the log base)
    @return Returns the mutual information
    """
    # BUGFIX: the docstring always promised tuple support but only `list`
    # was accepted (type(bins) == list); tuples now take the per-axis branch.
    if isinstance(bins, (list, tuple)):
        ybins = bins[1]
        xbins = bins[0]
        base = LOG_BASE
    else:
        ybins = bins
        xbins = bins
        base = bins
    # Joint and marginal histograms, normalised to probabilities.
    p_xy, _, _ = pymit._lib.histogram2d(X, Y, bins=[xbins, ybins])
    p_xy = p_xy / len(X)
    p_y, _ = pymit._lib.histogram(Y, bins=ybins)
    p_y = p_y / len(Y)
    p_x, _ = pymit._lib.histogram(X, bins=xbins)
    p_x = p_x / len(X)
    return pymit._lib._I_impl(p_xy, p_x, p_y, xbins, ybins, base)
def _I_impl(p_xy, p_x, p_y, xbins, ybins, base):
I_ = 0
for i in range(xbins):
for j in range(ybins):
if p_x[i] > 0 and p_y[j] > 0 and p_xy[i, j] > 0:
I_ += p_xy[i, j] * math.log(p_xy[i, j] / (p_x[i] * p_y[j]))
I_ = I_ / math.log(base)
return I_
def I_cond(X, Y, Z, bins):
    """
    Calculates the conditional mutual information of X and Y depending on Z.
    If X, Y and Z are already discretised, set bins to the amount of bins, aka states of X, Y and Z.
    If X, Y and Z are not discretised, bins will be used, to discretise X, Y and Z into bins states
    @param X np vector
    @param Y np vector
    @param Z np vector
    @param bins If X, Y and Z are already discretised specify the amount of bins of X, Y and Z. If X, Y and Z are not discretised specifies the amount of bins to discretise X, Y and Z into
    bins can be specified as a list or tuple, e.g. bins = (bins_x, bins_y, bins_z), to discretise X, Y and Z independently (result in log base LOG_BASE), or as a single value to discretise X, Y and Z similarly (that value then also serves as the log base)
    @return Returns the mutual information
    """
    # BUGFIX: the docstring always promised tuple support but only `list`
    # was accepted (type(bins) == list); tuples now take the per-axis branch.
    if isinstance(bins, (list, tuple)):
        xbins = bins[0]
        ybins = bins[1]
        zbins = bins[2]
        base = LOG_BASE
    else:
        ybins = bins
        xbins = bins
        zbins = bins
        base = bins
    # Joint (x,y,z) and pairwise/conditioning histograms as probabilities.
    XYZ = pymit._lib._transform3D(X, Y, Z)
    p_xyz, _ = pymit._lib.histogramdd(XYZ, bins=[xbins, ybins, zbins])
    p_xyz = p_xyz / len(Z)
    p_xz, _, _ = pymit._lib.histogram2d(X, Z, bins=[xbins, zbins])
    p_xz = p_xz / len(X)
    p_yz, _, _ = pymit._lib.histogram2d(Y, Z, bins=[ybins, zbins])
    p_yz = p_yz / len(Y)
    p_z, _ = pymit._lib.histogram(Z, bins=zbins)
    p_z = p_z / len(Z)
    return pymit._lib._I_cond_impl(p_xyz, p_xz, p_yz, p_z, xbins, ybins, zbins, base)
def _transform3D(X, Y, Z):
return np.ascontiguousarray(np.array([X, Y, Z]).transpose())
def _I_cond_impl(p_xyz, p_xz, p_yz, p_z, xbins, ybins, zbins, base):
I_ = 0
for i in range(xbins):
for j in range(ybins):
for k in range(zbins):
if p_xyz[i, j, k] > 0 and p_xz[i, k] > 0 and p_yz[j, k] > 0 and p_z[k] > 0:
I_ += p_xyz[i, j, k] * math.log((p_z[k] * p_xyz[i, j, k]) / (p_xz[i, k] * p_yz[j, k]))
I_ = I_ / math.log(base)
return I_
| 36.79798 | 206 | 0.607741 |
1f5507c9bd12dc367738d818bde71e13fca7ddaa | 4,952 | py | Python | zihu_clone/qa/views.py | Kelvinson/zhihu_clone | 8347ffbc3578f88f27c37dc6c04884ef20225d63 | [
"MIT"
] | null | null | null | zihu_clone/qa/views.py | Kelvinson/zhihu_clone | 8347ffbc3578f88f27c37dc6c04884ef20225d63 | [
"MIT"
] | null | null | null | zihu_clone/qa/views.py | Kelvinson/zhihu_clone | 8347ffbc3578f88f27c37dc6c04884ef20225d63 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from django.urls import reverse_lazy
from django.contrib import messages
from django.http import JsonResponse
from django.core.exceptions import PermissionDenied
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.decorators.http import require_http_methods
from django.views.generic import CreateView, ListView, DetailView
from django.views.decorators.cache import cache_page
from django.utils.decorators import method_decorator
from zihu_clone.helpers import ajax_required
from zihu_clone.qa.models import Question, Answer
from zihu_clone.qa.forms import QuestionForm
from zihu_clone.notifications.views import notification_handler
class QuestionListView(LoginRequiredMixin, ListView):
    """Paginated list of all questions (login required).

    Template context: 'questions' (current page), 'popular_tags', and
    'active' marking which list tab is highlighted.
    """
    queryset = Question.objects.select_related('user')  # prefetch author, avoids N+1
    paginate_by = 10
    context_object_name = "questions"
    template_name = "qa/question_list.html"

    def get_context_data(self, *, object_list=None, **kwargs):
        # Extend the default list context with tag-cloud data and the
        # active-tab marker.
        context = super(QuestionListView, self).get_context_data()
        context["popular_tags"] = Question.objects.get_counted_tags()
        context["active"] = "all"
        return context
class AnsweredQuestionListView(QuestionListView):
    """Variant of QuestionListView restricted to questions that have answers."""

    def get_queryset(self):
        """Only questions the custom manager reports as answered."""
        return Question.objects.get_answered()

    def get_context_data(self, *, object_list=None, **kwargs):
        """Same context as the base list, with the 'answered' tab active."""
        ctx = super().get_context_data()
        ctx["active"] = "answered"
        return ctx
class UnansweredQuestionListView(QuestionListView):
    """Variant of QuestionListView restricted to questions without answers."""

    def get_queryset(self):
        """Only questions the custom manager reports as unanswered."""
        return Question.objects.get_unanswered()

    def get_context_data(self, *, object_list=None, **kwargs):
        """Same context as the base list, with the 'unanswered' tab active."""
        ctx = super().get_context_data()
        ctx["active"] = "unanswered"
        return ctx
@method_decorator(cache_page(60 * 60), name='get')
class CreateQuestionView(LoginRequiredMixin, CreateView):
    """Form view for posting a new question; the GET (form page) is cached 1h."""
    form_class = QuestionForm
    template_name = 'qa/question_form.html'
    message = "Your question was submitted!!"  # flashed after a successful post

    def form_valid(self, form):
        # Stamp the logged-in user as the question's author before saving.
        form.instance.user = self.request.user
        return super(CreateQuestionView, self).form_valid(form)

    def get_success_url(self):
        messages.success(self.request, self.message)
        return reverse_lazy("qa:unanswered_q")
class QuestionDetailView(LoginRequiredMixin, DetailView):
    """Detail page for a single question (login required)."""
    model = Question
    context_object_name = 'question'
    template_name = 'qa/question_detail.html'

    def get_queryset(self):
        # Restrict to the requested pk and prefetch the author in one query.
        return Question.objects.select_related('user').filter(pk=self.kwargs['pk'])
@method_decorator(cache_page(60 * 60), name='get')
class CreateAnswerView(LoginRequiredMixin, CreateView):
    """Form view for answering a question; the GET (form page) is cached 1h."""
    model = Answer
    fields = ['content', ]
    message = 'Your answer was submitted!'  # flashed after a successful post
    template_name = 'qa/answer_form.html'

    def form_valid(self, form):
        # Attach both the author and the parent question (pk from the URL)
        # to the new answer before saving.
        form.instance.user = self.request.user
        form.instance.question_id = self.kwargs['question_id']
        return super(CreateAnswerView, self).form_valid(form)

    def get_success_url(self):
        messages.success(self.request, self.message)
        return reverse_lazy('qa:question_detail', kwargs={"pk": self.kwargs['question_id']})
@login_required
@ajax_required
@require_http_methods(["POST"])
def question_vote(request):
    """Record or toggle the requesting user's vote on a question (AJAX POST).

    POST 'value' == 'U' means up-vote, anything else down-vote. Sending the
    same vote twice retracts it.
    """
    question = Question.objects.get(pk=request.POST["question"])
    is_upvote = request.POST["value"] == 'U'
    voter_ids = question.votes.values_list('user', flat=True)
    if request.user.pk in voter_ids and question.votes.get(user=request.user).value == is_upvote:
        # Same vote sent again: treat it as a retraction.
        question.votes.get(user=request.user).delete()
    else:
        question.votes.update_or_create(user=request.user, defaults={"value": is_upvote})
    return JsonResponse({"votes": question.total_votes()})
@login_required
@ajax_required
@require_http_methods(["POST"])
def answer_vote(request):
    """Record or toggle the requesting user's vote on an answer (AJAX POST).

    POST 'value' == 'U' means up-vote, anything else down-vote. Sending the
    same vote twice retracts it.
    """
    answer = Answer.objects.get(uuid_id=request.POST["answer"])
    is_upvote = request.POST["value"] == 'U'
    voter_ids = answer.votes.values_list('user', flat=True)
    if request.user.pk in voter_ids and answer.votes.get(user=request.user).value == is_upvote:
        # Same vote sent again: treat it as a retraction.
        answer.votes.get(user=request.user).delete()
    else:
        answer.votes.update_or_create(user=request.user, defaults={"value": is_upvote})
    return JsonResponse({"votes": answer.total_votes()})
@login_required
@ajax_required
@require_http_methods(["POST"])
def accept_answer(request):
    """Accept an answer (AJAX POST); only the question's author may do so."""
    answer_id = request.POST["answer"]
    answer = Answer.objects.get(pk=answer_id)
    # Only the question owner may accept one of its answers.
    if answer.question.user.username != request.user.username:
        raise PermissionDenied
    answer.accept_answer()
    # Notify the answer's author ('W' is the notification verb code —
    # NOTE(review): confirm its meaning against notification_handler).
    notification_handler(request.user, answer.user, 'W', answer)
    return JsonResponse({'status': 'true'}, status=200)
| 35.884058 | 92 | 0.732633 |
01037f136df8446ca4405e048539230209845e93 | 2,432 | py | Python | bnw/handlers/base.py | l29ah/bnw | d03db025e12c96de42c9a7a4ede329d7c9d216c5 | [
"BSD-2-Clause"
] | 23 | 2015-01-14T13:22:37.000Z | 2022-01-11T11:38:43.000Z | bnw/handlers/base.py | l29ah/bnw | d03db025e12c96de42c9a7a4ede329d7c9d216c5 | [
"BSD-2-Clause"
] | 31 | 2015-01-27T19:57:45.000Z | 2018-10-04T22:35:22.000Z | bnw/handlers/base.py | l29ah/bnw | d03db025e12c96de42c9a7a4ede329d7c9d216c5 | [
"BSD-2-Clause"
] | 11 | 2015-01-02T10:29:14.000Z | 2018-06-28T13:09:53.000Z | # -*- coding: utf-8 -*-
import re
from twisted.internet import defer, reactor
import bnw.core.base
# please don't remove the group from the regexp - it is used in
# bnw/xmpp/parser_simplified.py
USER_RE = ur'@?([0-9A-Za-z_-]+)'  # user name, optional leading '@'
MESSAGE_RE = r'#?([0-9A-Za-z]+)'  # message id, optional leading '#'
COMMENT_RE = r'#?([0-9A-Za-z]+(?:#|/)[0-9A-Za-z]+)'  # message id, '#' or '/', comment id
MESSAGE_COMMENT_RE = r'#?([0-9A-Za-z]+(?:(?:#|/)[0-9A-Za-z]+)?)'  # message id with optional comment part
# Precompiled forms of the patterns above.
USER_REC = re.compile(USER_RE)
MESSAGE_REC = re.compile(MESSAGE_RE)
COMMENT_REC = re.compile(COMMENT_RE)
MESSAGE_COMMENT_REC = re.compile(MESSAGE_COMMENT_RE)
def canonic_message(s):
    """Strip the optional leading '#' from a message id; '' when no match."""
    match = MESSAGE_REC.match(s)
    if match is None:
        return ""
    return match.group(1)


def canonic_comment(s):
    """Normalise a comment id ('id#sub' or 'id/sub') to 'id/sub'; '' when no match."""
    match = COMMENT_REC.match(s)
    if match is None:
        return ""
    return match.group(1).replace('#', '/')


def canonic_message_comment(s):
    """Normalise a message id with optional comment part; '' when no match."""
    match = MESSAGE_COMMENT_REC.match(s)
    if match is None:
        return ""
    return match.group(1).replace('#', '/')


def canonic_user(s):
    """Strip the optional leading '@' from a user name; '' when no match."""
    match = USER_REC.match(s)
    if match is None:
        return ""
    return match.group(1)
class CommandParserException(Exception):
    """Error type for command parsing.

    NOTE(review): no raisers are visible in this chunk — confirm usage.
    """
    pass


class BaseCommand(object):
    """Marker base class for commands (carries no behaviour here)."""
    pass
def _(s, user):
    # Translation stub: returns *s* unchanged. The *user* argument is unused
    # here (presumably reserved for per-user localisation — TODO confirm).
    return s
def require_auth(fun):
    """Decorator for handlers: reject the request unless it carries a
    registered user (``request.user`` present with a non-empty 'name').

    Wraps *fun* in a Twisted ``inlineCallbacks`` coroutine; on rejection a
    ``{'ok': False, ...}`` dict is returned instead of calling *fun*.
    """
    @defer.inlineCallbacks
    def newfun(request, *args, **kwargs):
        if request.user is None or not request.user.get('name'):
            defer.returnValue(
                dict(ok=False, desc='Only for registered users')
            )
        else:
            defer.returnValue((yield fun(request, *args, **kwargs)))
    newfun.__doc__ = fun.__doc__  # preserve the wrapped handler's help text
    return newfun
def check_arg(**kwargs):  # fun,name,rex):
    """Decorator factory validating keyword arguments of a handler.

    Each keyword maps an option name to a regex source string; at call time
    every supplied option with a registered pattern must fully match
    (anchored with \\A..\\Z) or an error dict is returned instead of
    invoking the handler. None values are treated as ''.
    """
    rexs = {}
    for name, value in kwargs.iteritems():
        # Anchor the pattern so the whole option value must match.
        rexc = re.compile(
            r'\A' + value + r'\Z', re.DOTALL | re.UNICODE | re.MULTILINE)
        rexs[name] = (value, rexc)

    def damndeco(fun):
        @defer.inlineCallbacks
        def new_fun(request, *args, **kwargs):
            for name, value in kwargs.iteritems():
                if value is None:
                    value = ''
                if (name in rexs) and not rexs[name][1].match(value):
                    defer.returnValue(
                        dict(ok=False,
                             # BUGFIX: the adjacent literals 'doesn''t'
                             # concatenated to "doesnt" in the user-facing
                             # message; the apostrophe is now escaped.
                             desc='Option "%s" doesn\'t meet the constraint "%s"' % (
                                 name, rexs[name][0]),
                             constraint=True)
                    )
            defer.returnValue((yield fun(request, *args, **kwargs)))
        new_fun.__doc__ = fun.__doc__  # preserve the wrapped handler's help text
        return new_fun
    return damndeco
| 27.325843 | 85 | 0.571135 |
173be1aa247246b7fa01898a58e6fd1c44156ffd | 841 | py | Python | task_manager/users/models.py | Sidarth-V/task_manager | 01b7d2ea1d43c74ecf7d4afd8b3e4a1b9e799deb | [
"MIT"
] | null | null | null | task_manager/users/models.py | Sidarth-V/task_manager | 01b7d2ea1d43c74ecf7d4afd8b3e4a1b9e799deb | [
"MIT"
] | 4 | 2022-03-14T23:35:40.000Z | 2022-03-24T23:30:25.000Z | task_manager/users/models.py | Sidarth-V/task_manager | 01b7d2ea1d43c74ecf7d4afd8b3e4a1b9e799deb | [
"MIT"
] | null | null | null | from django.contrib.auth.models import AbstractUser
from django.db.models import CharField
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
class User(AbstractUser):
    """Custom user model for sidarth-task-manager.

    Any field that must be collected at signup should also be wired into
    forms.SignupForm and forms.SocialSignupForms.
    """
    # A single free-form name replaces first/last name, which do not fit
    # many naming conventions around the world.
    name = CharField(_("Name of User"), max_length=255, blank=True)
    first_name = None  # type: ignore
    last_name = None  # type: ignore
    def get_absolute_url(self):
        """Return the URL of this user's detail page as a str."""
        route_kwargs = {"username": self.username}
        return reverse("users:detail", kwargs=route_kwargs)
| 31.148148 | 74 | 0.694411 |
ca77c0e8b9232f7abbcdc675d61ad1e88f347424 | 4,208 | py | Python | .history/run_update_20220328124717.py | miguel-fresh/geoip-translation | ccf9dbc0330e597704e57d8b2967fc9be16017ed | [
"Info-ZIP"
] | null | null | null | .history/run_update_20220328124717.py | miguel-fresh/geoip-translation | ccf9dbc0330e597704e57d8b2967fc9be16017ed | [
"Info-ZIP"
] | null | null | null | .history/run_update_20220328124717.py | miguel-fresh/geoip-translation | ccf9dbc0330e597704e57d8b2967fc9be16017ed | [
"Info-ZIP"
] | null | null | null | from asyncio.subprocess import STDOUT
from fileinput import filename
from genericpath import exists
import subprocess
from pathlib import Path
from os import remove, rename, path
import yaml
def removeFileIfExists(file_path):
    """Delete *file_path*; silently do nothing when it does not exist.

    Uses try/except instead of the original exists()-then-remove() pair,
    which had a race window (file deleted between the check and the remove).
    """
    try:
        remove(file_path)
    except FileNotFoundError:
        # Missing file is the expected "nothing to do" case.
        pass
def checkExistence(path_to_check):
    """Return True when *path_to_check* exists on disk; raise otherwise."""
    if path.exists(path_to_check):
        return True
    raise Exception(f"No existe la ruta \'{path_to_check}\'")
def good_msg(msg):
    """Format *msg* as a success ('+') console message."""
    return "+ {}".format(msg)
def bad_msg(msg):
    """Format *msg* as a failure ('-') console message."""
    return "- {}".format(msg)
def neutral_msg(msg):
    """Format *msg* as a neutral ('~') console message."""
    return "~ {}".format(msg)
# LEGACY
# Name the MaxMind downloader always produces, regardless of ZIP_NAME.
ZIP_LEGACY_NAME = 'GeoLite2-City-CSV.zip'
# Default values (overridden by config.yml when present)
ONSTART_DOWNLOAD = False
ONSTART_CONVERT = True
CURRENT_DIR = Path(__file__).parent.resolve()
CONFIG_FILENAME = 'config.yml'
CONFIG_ABSPATH = CURRENT_DIR.joinpath(CONFIG_FILENAME)
ZIP_NAME = 'GeoLite2-City-CSV.zip'
DAT_NAME = 'GeoLiteCity.dat'
DOWNLOAD_DIRNAME = './data'
OUTPUT_DIRNAME = './output'
LICENSE_KEY = ''
DB_EDITION = 'GeoLite2-City-CSV'
# Get config from config.yml file
try:
    with open(CONFIG_ABSPATH) as cfg_file:
        documents = yaml.full_load(cfg_file)
        paths = documents['paths']
        names = documents['names']
        on_start = documents['on_start']
        OUTPUT_DIRNAME = paths['output']
        DOWNLOAD_DIRNAME = paths['data']
        ZIP_NAME = names['zip']
        DAT_NAME = names['dat']
        ONSTART_DOWNLOAD = on_start['download_zip']
        ONSTART_CONVERT = on_start['convert_to_dat']
        # Bug fix: 'max_mind' was referenced below without ever being
        # assigned, so any valid config raised NameError, was swallowed by
        # the bare except, and was misreported as "no valid config.yml".
        max_mind = documents.get('max-mind') or {}
        if 'max-mind' not in documents:
            print(bad_msg('No se ha especificado una license-key para MaxMind :('))
        # NOTE(review): reading max_mind['license-key'] into LICENSE_KEY was
        # commented out upstream; left disabled to preserve behavior.
        DB_EDITION = max_mind['edition'] if 'edition' in max_mind else DB_EDITION
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt escape.
    print(neutral_msg('No se encontró un archivo config.yml válido, usando valores por defecto...'))
if (not ONSTART_CONVERT and not ONSTART_DOWNLOAD):
    print(good_msg("No se especificó ninguna acción (download_zip, convert_to_dat). Saliendo..."))
    exit(0)
# Setting paths
# Absolute working directories derived from the configured relative names.
DOWNLOAD_ABSPATH = CURRENT_DIR.joinpath(DOWNLOAD_DIRNAME)
OUTPUT_ABSPATH = CURRENT_DIR.joinpath(OUTPUT_DIRNAME)
# The downloader always writes the legacy archive name; it may be renamed
# to ZIP_NAME after a successful download.
ZIP_ABSPATH = DOWNLOAD_ABSPATH.joinpath(ZIP_LEGACY_NAME)
DAT_ABSPATH = OUTPUT_ABSPATH.joinpath(DAT_NAME)
# Download .zip
if ONSTART_DOWNLOAD:
    # Check if download folder exists
    checkExistence(DOWNLOAD_ABSPATH)
    # Remove previous .zip file if exists
    removeFileIfExists(ZIP_ABSPATH)
    print(good_msg(f'Descargando {ZIP_LEGACY_NAME}...'))
    # Download .zip via the bundled PHP helper.
    download_output = subprocess.run(['php', 'download.php',
                                      '--license-key', LICENSE_KEY,
                                      '--output-path', DOWNLOAD_ABSPATH,
                                      '--edition', DB_EDITION],
                                     cwd=CURRENT_DIR.joinpath('./geoip2-update'), stderr=STDOUT)
    # Bug fix: check the return code BEFORE renaming, so a failed download
    # is not renamed as if it succeeded.
    if (download_output.returncode != 0):
        raise(Exception(bad_msg('Error en la descarga :(')))
    # Rename .zip if necessary and keep ZIP_ABSPATH pointing at the file
    # that actually exists.  (Previously ZIP_ABSPATH kept the legacy name
    # after the rename, so the existence check below — and the later
    # conversion step — looked at a path that had just been renamed away.)
    if (ZIP_LEGACY_NAME != ZIP_NAME):
        rename(ZIP_ABSPATH, DOWNLOAD_ABSPATH.joinpath(ZIP_NAME))
        ZIP_ABSPATH = DOWNLOAD_ABSPATH.joinpath(ZIP_NAME)
    checkExistence(ZIP_ABSPATH)
    print(good_msg(f'Descarga exitosa :) -> {ZIP_ABSPATH}'))
# Convert format: turn the GeoLite2 CSV archive into a legacy .dat database.
if ONSTART_CONVERT:
    # Check if .zip exists
    checkExistence(ZIP_ABSPATH)
    # Check if output folder exists
    checkExistence(OUTPUT_ABSPATH)
    # Equivalent shell command:
    # python geolite2legacy.py -i GeoLite2-City-CSV.zip -o GeoLiteCity.dat -f geoname2fips.csv
    update_output = subprocess.run(['python', 'geolite2legacy.py',
                                    '-i', ZIP_ABSPATH,
                                    '-o', DAT_ABSPATH,
                                    '-f', 'geoname2fips.csv'],
                                   cwd='./geolite2legacy')
    # Check convertion was successful
    if update_output.returncode != 0:
        raise(Exception(bad_msg('Error en la conversión de formato :(')))
    print(good_msg(f'Conversión existosa :) -> {DAT_ABSPATH}'))
| 30.057143 | 100 | 0.650903 |
b6a9f15b151d2b54ccb901e6b81ca2ea4b754f21 | 60 | py | Python | lights/__main__.py | fwcd/lights | 8a7d338d5d5aaa2206d3adbbddf2e132cc1c9b76 | [
"MIT"
] | null | null | null | lights/__main__.py | fwcd/lights | 8a7d338d5d5aaa2206d3adbbddf2e132cc1c9b76 | [
"MIT"
] | 3 | 2022-02-24T04:21:48.000Z | 2022-03-23T17:43:43.000Z | lights/__main__.py | fwcd/lights | 8a7d338d5d5aaa2206d3adbbddf2e132cc1c9b76 | [
"MIT"
] | null | null | null | import lights
if __name__ == "__main__":
lights.main()
| 12 | 26 | 0.666667 |
55a2d059b4fa7f4f0fc0f277a5616f3311a281a0 | 1,881 | py | Python | objectModel/Python/cdm/persistence/cdmfolder/e2e_relationship_persistence.py | rt112000/CDM | 34bd34f9260140a8f8aa02bd87c23033f3daad4c | [
"CC-BY-4.0",
"MIT"
] | 884 | 2019-05-10T02:09:10.000Z | 2022-03-31T14:02:00.000Z | objectModel/Python/cdm/persistence/cdmfolder/e2e_relationship_persistence.py | rt112000/CDM | 34bd34f9260140a8f8aa02bd87c23033f3daad4c | [
"CC-BY-4.0",
"MIT"
] | 171 | 2019-06-10T11:34:37.000Z | 2022-03-31T22:50:12.000Z | objectModel/Python/cdm/persistence/cdmfolder/e2e_relationship_persistence.py | rt112000/CDM | 34bd34f9260140a8f8aa02bd87c23033f3daad4c | [
"CC-BY-4.0",
"MIT"
] | 340 | 2019-05-07T18:00:16.000Z | 2022-03-31T12:00:15.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
from cdm.enums import CdmObjectType
from cdm.objectmodel import CdmCorpusContext, CdmE2ERelationship
from cdm.utilities import CopyOptions, ResolveOptions, copy_data_utils
from cdm.utilities.string_utils import StringUtils
from . import utils
from .types import E2ERelationship
class E2ERelationshipPersistence:
    """Converts CdmE2ERelationship objects to and from their persistence form."""
    @staticmethod
    def from_data(ctx: CdmCorpusContext, data: E2ERelationship) -> CdmE2ERelationship:
        """Build a CdmE2ERelationship from its persistence representation."""
        rel = ctx.corpus.make_object(CdmObjectType.E2E_RELATIONSHIP_DEF)
        # Only copy the name when it is meaningful (non-blank).
        if not StringUtils.is_null_or_white_space(data.name):
            rel.name = data.name
        rel.from_entity = data.fromEntity
        rel.from_entity_attribute = data.fromEntityAttribute
        rel.to_entity = data.toEntity
        rel.to_entity_attribute = data.toEntityAttribute
        trait_refs = utils.create_trait_reference_array(ctx, data.exhibitsTraits)
        utils.add_list_to_cdm_collection(rel.exhibits_traits, trait_refs)
        return rel
    @staticmethod
    def to_data(instance: 'CdmE2ERelationship', res_opt: ResolveOptions, options: CopyOptions) -> E2ERelationship:
        """Project a CdmE2ERelationship into its persistence representation."""
        data = E2ERelationship()
        # Only copy the name when it is meaningful (non-blank).
        if not StringUtils.is_null_or_white_space(instance.name):
            data.name = instance.name
        data.fromEntity = instance.from_entity
        data.fromEntityAttribute = instance.from_entity_attribute
        data.toEntity = instance.to_entity
        data.toEntityAttribute = instance.to_entity_attribute
        data.exhibitsTraits = copy_data_utils._array_copy_data(res_opt, instance.exhibits_traits, options)
        return data
| 47.025 | 114 | 0.760766 |
9776691f1fc6b9ad2172d3cdd1152dac1c124950 | 8,879 | py | Python | port_range/tests/test_port_range.py | online-labs/port-range | 43b80312f841cc932a4e0ddd7063fa47b17bd4aa | [
"BSD-2-Clause"
] | 3 | 2016-05-08T20:15:24.000Z | 2020-01-07T20:21:17.000Z | port_range/tests/test_port_range.py | online-labs/port-range | 43b80312f841cc932a4e0ddd7063fa47b17bd4aa | [
"BSD-2-Clause"
] | 7 | 2017-08-02T04:59:09.000Z | 2020-04-02T09:16:11.000Z | port_range/tests/test_port_range.py | online-labs/port-range | 43b80312f841cc932a4e0ddd7063fa47b17bd4aa | [
"BSD-2-Clause"
] | 6 | 2016-05-08T20:15:26.000Z | 2022-03-08T20:54:36.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014-2016 Scaleway and Contributors. All Rights Reserved.
# Kevin Deldycke <kdeldycke@scaleway.com>
#
# Licensed under the BSD 2-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the
# License at https://opensource.org/licenses/BSD-2-Clause
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
import unittest
from port_range import PortRange
class TestPortRange(unittest.TestCase):
    """Unit tests for the PortRange class: parsing, properties, rendering
    and validation, in both lenient (default) and strict modes."""
    # CIDR-style "base/prefix" input, with and without stray whitespace.
    def test_cidr_parsing(self):
        m = MESSAGE_REC.match(s)
        return m.group(1) if m else ""
    def test_range_parsing(self):
        # Normal range.
        self.assertEqual(PortRange('42-4242').bounds, (42, 4242))
        self.assertEqual(PortRange([42, 4242]).bounds, (42, 4242))
        self.assertEqual(PortRange(['42', '4242']).bounds, (42, 4242))
        # Single port.
        self.assertEqual(PortRange('42').bounds, (42, 42))
        self.assertEqual(PortRange(42).bounds, (42, 42))
        self.assertEqual(PortRange([42]).bounds, (42, 42))
        self.assertEqual(PortRange(['42']).bounds, (42, 42))
        # Reversed order.
        self.assertEqual(PortRange('4242-42').bounds, (42, 4242))
        self.assertEqual(PortRange([4242, 42]).bounds, (42, 4242))
        self.assertEqual(PortRange((4242, 42)).bounds, (42, 4242))
        self.assertEqual(PortRange(('4242', '42')).bounds, (42, 4242))
        self.assertEqual(PortRange(set([4242, 42])).bounds, (42, 4242))
        self.assertEqual(PortRange(set(['4242', '42'])).bounds, (42, 4242))
        # Invalid types and length.
        self.assertRaises(ValueError, PortRange, [None, 42])
        self.assertRaises(ValueError, PortRange, [42, None])
        self.assertRaises(ValueError, PortRange, [42, 32, 3])
        self.assertRaises(ValueError, PortRange, [42, None, 32, 3, -4])
    # Strict mode accepts well-ordered input but rejects reversed ranges.
    def test_strict_range_parsing(self):
        # Normal range.
        self.assertEqual(
            PortRange('42-4242', strict=True).bounds, (42, 4242))
        self.assertEqual(
            PortRange([42, 4242], strict=True).bounds, (42, 4242))
        self.assertEqual(
            PortRange(['42', '4242'], strict=True).bounds, (42, 4242))
        # Single port.
        self.assertEqual(PortRange('42', strict=True).bounds, (42, 42))
        self.assertEqual(PortRange(42, strict=True).bounds, (42, 42))
        self.assertEqual(PortRange([42], strict=True).bounds, (42, 42))
        self.assertEqual(PortRange(['42'], strict=True).bounds, (42, 42))
        # Reversed order.
        self.assertRaises(ValueError, PortRange, [4242, 42], True)
        self.assertRaises(ValueError, PortRange, '4242-42', True)
        self.assertRaises(ValueError, PortRange, (4242, 42), True)
        self.assertRaises(ValueError, PortRange, ('4242', '42'), True)
        # Casting a set to a list might ends up with a naturally sorted list.
        # self.assertRaises(ValueError, PortRange, set([4242, 42]), True)
        # self.assertRaises(ValueError, PortRange, set(['4242', '42']), True)
        # Invalid types and length.
        self.assertRaises(ValueError, PortRange, [None, 42], True)
        self.assertRaises(ValueError, PortRange, [42, None], True)
        self.assertRaises(ValueError, PortRange, [42, 32, 3], True)
        self.assertRaises(ValueError, PortRange, [42, None, 32, 3, -4], True)
    # Equal ranges compare and hash equal; unequal ranges do not.
    def test_equality_and_hash(self):
        self.assertEqual(PortRange('80'), PortRange('80'))
        self.assertEqual(PortRange('80-81'), PortRange('80-81'))
        self.assertNotEqual(PortRange('80'), PortRange('80-81'))
        self.assertEqual(PortRange('1027/15'), PortRange('1027/15'))
        self.assertEqual(hash(PortRange('80')), hash(PortRange('80')))
        self.assertEqual(hash(PortRange('80-81')), hash(PortRange('80-81')))
        self.assertNotEqual(hash(PortRange('80')), hash(PortRange('80-81')))
        self.assertEqual(
            hash(PortRange('1027/15')),
            hash(PortRange('1027/15'))
        )
    # Derived attributes of a CIDR-notation range.
    def test_cidr_properties(self):
        port = PortRange('1027/15')
        self.assertEqual(port.base, 1027)
        self.assertEqual(port.prefix, 15)
        self.assertEqual(port.cidr, (1027, 15))
        self.assertEqual(port.mask, 1)
        self.assertEqual(port.offset, 3)
        self.assertEqual(port.port_from, 1027)
        self.assertEqual(port.port_to, 1028)
        self.assertEqual(port.bounds, (1027, 1028))
    # Derived attributes of a plain range that has no CIDR equivalent.
    def test_range_properties(self):
        port = PortRange([4242, 42])
        self.assertEqual(str(port), '42-4242')
        self.assertEqual(port.base, 42)
        self.assertEqual(port.prefix, None)
        self.assertEqual(port.cidr, (42, None))
        self.assertEqual(port.mask, None)
        self.assertEqual(port.offset, 10)
        self.assertEqual(port.port_from, 42)
        self.assertEqual(port.port_to, 4242)
        self.assertEqual(port.bounds, (42, 4242))
    # Whitespace/zero-padding normalization and upper-bound capping.
    def test_normalization(self):
        port = PortRange(' 0001234 ')
        self.assertEqual(str(port), '1234')
        self.assertEqual(port.base, 1234)
        self.assertEqual(port.prefix, 16)
        self.assertEqual(port.cidr, (1234, 16))
        self.assertEqual(port.mask, 0)
        self.assertEqual(port.offset, 210)
        self.assertEqual(port.port_from, 1234)
        self.assertEqual(port.port_to, 1234)
        self.assertEqual(port.bounds, (1234, 1234))
        # Upper-bound cap.
        self.assertEqual(PortRange('64666/3').bounds, (64666, 65535))
    # str() prefers CIDR notation when one exists; repr() is exhaustive.
    def test_output_string(self):
        self.assertEqual(str(PortRange('1027/15')), '1027/15')
        self.assertEqual(str(PortRange([42, 4242])), '42-4242')
        self.assertEqual(str(PortRange(42)), '42')
        self.assertEqual(str(PortRange([1027, 1028])), '1027/15')
        self.assertEqual(
            repr(PortRange([1027, 1028])),
            "PortRange(port_from=1027, port_to=1028, base=1027, offset=3, "
            "prefix=15, mask=1, is_single_port=False, is_cidr=True)")
    # cidr_string only renders for ranges that map to an exact CIDR block.
    def test_cidr_string_rendering(self):
        self.assertEqual(PortRange([32768, 65535]).cidr_string, '32768/1')
        self.assertEqual(PortRange([32767, 65534]).cidr_string, '32767/1')
        with self.assertRaises(ValueError):
            PortRange([32767, 65535]).cidr_string
    # Rejection of malformed, negative, out-of-bounds and garbled input.
    def test_validation(self):
        # Test empty params
        self.assertRaises(ValueError, PortRange, None)
        self.assertRaises(ValueError, PortRange, [None])
        self.assertRaises(ValueError, PortRange, [None, None])
        # Invalid int
        self.assertRaises(ValueError, PortRange, ' A233 ')
        self.assertRaises(ValueError, PortRange, '1 2 3 4')
        self.assertRaises(ValueError, PortRange, 'abcd')
        # Test negative values
        self.assertRaises(ValueError, PortRange, '-24')
        self.assertRaises(ValueError, PortRange, '-24/3')
        self.assertRaises(ValueError, PortRange, '1024/-3')
        self.assertRaises(ValueError, PortRange, '-1000/-3')
        self.assertRaises(ValueError, PortRange, '-3/-1000')
        self.assertRaises(ValueError, PortRange, ' - 24')
        self.assertRaises(ValueError, PortRange, ' - 24 / 3')
        self.assertRaises(ValueError, PortRange, ' 1024 / - 3')
        self.assertRaises(ValueError, PortRange, ' - 1000 / - 3')
        self.assertRaises(ValueError, PortRange, ' - 3/ - 1000 ')
        # Test maximums and minimums
        self.assertRaises(ValueError, PortRange, '1024/0')
        self.assertRaises(ValueError, PortRange, '1024/17')
        self.assertRaises(ValueError, PortRange, '10-66666', strict=True)
        # Test capping.
        self.assertEqual(PortRange('0').bounds, (1, 1))
        self.assertRaises(ValueError, PortRange, '0', True)
        self.assertEqual(PortRange('66666').bounds, (65535, 65535))
        self.assertRaises(ValueError, PortRange, '66666', True)
        # Test notation
        self.assertRaises(ValueError, PortRange, '23/')
        self.assertRaises(ValueError, PortRange, '/4')
        self.assertRaises(ValueError, PortRange, '/4')
        self.assertRaises(ValueError, PortRange, '400-')
        self.assertRaises(ValueError, PortRange, '123 / 123/ 123')
        self.assertRaises(ValueError, PortRange, ' 123 - 31123- 1323')
    # Strict mode enforces CIDR-consistent bases and non-overflowing ranges.
    def test_strict_mode(self):
        # Test power of two port base
        PortRange('257', strict=True)
        PortRange('257/16', strict=True)
        self.assertRaises(ValueError, PortRange, '257/4', strict=True)
        # Test overflowing upper bound
        self.assertRaises(ValueError, PortRange, '65535/8', strict=True)
    # Spot-check of base/prefix -> bounds arithmetic.
    def test_computation(self):
        self.assertEqual(PortRange('2/3').bounds, (2, 8193))
        self.assertEqual(PortRange('7/3').bounds, (7, 8198))
| 43.101942 | 79 | 0.63622 |
fb46e6061d8e9f4c79332225ecbc920c127cffec | 1,708 | py | Python | app/oauth_office365/urls.py | ricardojba/PwnAuth | 875c4689bb0e35abdfa5ddda7fe89d15d1e52c1f | [
"Apache-2.0"
] | null | null | null | app/oauth_office365/urls.py | ricardojba/PwnAuth | 875c4689bb0e35abdfa5ddda7fe89d15d1e52c1f | [
"Apache-2.0"
] | null | null | null | app/oauth_office365/urls.py | ricardojba/PwnAuth | 875c4689bb0e35abdfa5ddda7fe89d15d1e52c1f | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url
from . import views
from django.views.generic.base import TemplateView
from django.contrib.auth.decorators import login_required
app_name = 'office365'
ID_REGEX = '[a-zA-Z0-9\-\=\_]+'
#TODO fix that URL param needs to be hardcoded in
urlpatterns = [
url(r'^schema$', login_required(TemplateView.as_view(template_name= 'oauth_office365/oas3.json', content_type='application/json')), name='schema'),
url(r'^app_detail$', views.AppDetailViewGeneric.as_view(), name='app_detail'),
url(r'^refresh/(?P<victim_id>[0-9]+)$', views.ForceTokenRefresh.as_view(), name='force_refresh'),
url(r'^victim/(?P<victim_id>[0-9]+)$', views.VictimDetailView.as_view(), name='victim_detail'),
url(r'^victim/(?P<victim_id>[0-9]+)/messages$', views.MailMessageView.as_view(), name='victim_messages'),
url(r'^victim/(?P<victim_id>[0-9]+)/message/(?P<id>[a-zA-Z0-9-_=]+)$', views.MailMessageDetail.as_view(), name='victim_message_detail'),
url(r'^victim/(?P<victim_id>[0-9]+)/message/(?P<message_id>[a-zA-Z0-9-_=]+)/attachment/(?P<attachment_id>[a-zA-Z0-9-_=]+)$', views.MailAttachmentView.as_view(), name='victim_message_attachments'),
url(r'^victim/(?P<victim_id>[0-9]+)/gal$', views.DumpUsersView.as_view(), name='victim_dump_users'),
url(r'^victim/(?P<victim_id>[0-9]+)/drive/(?P<id>[a-zA-Z0-9\-\=\_]*)$', views.OneDriveView.as_view(), name='victim_onedrive'),
url(r'^victim/(?P<victim_id>[0-9]+)/attachment_test', views.AttachmentViewGeneric.as_view(), name='attach-test'),
url(r'^victim/(?P<victim_id>[0-9]+)/message_send', views.MessageViewGeneric.as_view(), name='message-send'),
url(r'^callback$', views.token_callback, name='callback'),
] | 65.692308 | 200 | 0.699649 |
2d57492086a15983f7d3ae9c2ff6938af9209524 | 10,187 | py | Python | test/functional/feature_llmq_is_retroactive.py | luckycoinblu/luckycoinoro | 40524d4143ab67def698ccbd87ad6a3885d5720e | [
"MIT"
] | null | null | null | test/functional/feature_llmq_is_retroactive.py | luckycoinblu/luckycoinoro | 40524d4143ab67def698ccbd87ad6a3885d5720e | [
"MIT"
] | 1 | 2022-01-27T01:34:48.000Z | 2022-01-27T01:59:47.000Z | test/functional/feature_llmq_is_retroactive.py | luckycoinblu/luckycoinoro | 40524d4143ab67def698ccbd87ad6a3885d5720e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2021 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import LuckyCoinOROTestFramework
from test_framework.util import set_node_times, isolate_node, reconnect_isolated_node
'''
feature_llmq_is_retroactive.py
Tests retroactive signing
We have 6 nodes where node 0 is the control node, nodes 1-5 are masternodes.
Mempool inconsistencies are simulated via disconnecting/reconnecting node 3
and by having a higher relay fee on nodes 4 and 5.
'''
class LLMQ_IS_RetroactiveSigning(LuckyCoinOROTestFramework):
    """Functional test for retroactive InstantSend signing.

    Node 0 is the control node; nodes 1-5 are masternodes.  Mempool
    inconsistencies are simulated by isolating/reconnecting node 3 and by
    giving nodes 4 and 5 a higher relay fee.
    """
    def set_test_params(self):
        """Configure 6 nodes (5 masternodes) and LLMQ size/threshold 5/3."""
        # -whitelist is needed to avoid the trickling logic on node0
        self.set_luckycoinoro_test_params(6, 5, [["-whitelist=127.0.0.1"], [], [], [], ["-minrelaytxfee=0.001"], ["-minrelaytxfee=0.001"]], fast_dip3_enforcement=True)
        self.set_luckycoinoro_llmq_test_params(5, 3)
    def run_test(self):
        """Exercise IS locking in normal, partially-known and unknown-TX scenarios."""
        self.activate_dip8()
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        # Turn mempool IS signing off
        self.nodes[0].spork("SPORK_2_INSTANTSEND_ENABLED", 1)
        self.wait_for_sporks_same()
        self.mine_quorum()
        self.mine_quorum()
        # Make sure that all nodes are chainlocked at the same height before starting actual tests
        self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash(), timeout=30)
        self.log.info("trying normal IS lock w/ signing spork off. Shouldn't be islocked before block is created.")
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # 3 nodes should be enough to create an IS lock even if nodes 4 and 5 (which have no tx itself)
        # are the only "neighbours" in intra-quorum connections for one of them.
        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # Have to disable ChainLocks to avoid signing a block with a "safe" tx too early
        self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 4000000000)
        self.wait_for_sporks_same()
        # We have to wait in order to include tx in block
        self.bump_mocktime(10 * 60 + 1)
        block = self.nodes[0].generate(1)[0]
        self.wait_for_instantlock(txid, self.nodes[0])
        self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
        self.wait_for_sporks_same()
        self.wait_for_chainlocked_block_all_nodes(block)
        self.log.info("Enable mempool IS signing")
        self.nodes[0].spork("SPORK_2_INSTANTSEND_ENABLED", 0)
        self.wait_for_sporks_same()
        self.log.info("trying normal IS lock")
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # 3 nodes should be enough to create an IS lock even if nodes 4 and 5 (which have no tx itself)
        # are the only "neighbours" in intra-quorum connections for one of them.
        self.wait_for_instantlock(txid, self.nodes[0])
        self.bump_mocktime(1)
        block = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(block)
        self.log.info("testing normal signing with partially known TX")
        isolate_node(self.nodes[3])
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # Make sure nodes 1 and 2 received the TX before we continue,
        # otherwise it might announce the TX to node 3 when reconnecting
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        reconnect_isolated_node(self.nodes[3], 0)
        # Make sure nodes actually try re-connecting quorum connections
        self.bump_mocktime(30)
        self.wait_for_mnauth(self.nodes[3], 2)
        # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # push the tx directly via rpc
        self.nodes[3].sendrawtransaction(self.nodes[0].getrawtransaction(txid))
        # node 3 should vote on a tx now since it became aware of it via sendrawtransaction
        # and this should be enough to complete an IS lock
        self.wait_for_instantlock(txid, self.nodes[0])
        self.log.info("testing retroactive signing with unknown TX")
        isolate_node(self.nodes[3])
        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
        rawtx = self.nodes[0].signrawtransactionwithwallet(rawtx)['hex']
        txid = self.nodes[3].sendrawtransaction(rawtx)
        # Make node 3 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        block = self.nodes[3].generatetoaddress(1, self.nodes[0].getnewaddress())[0]
        reconnect_isolated_node(self.nodes[3], 0)
        self.wait_for_chainlocked_block_all_nodes(block)
        self.nodes[0].setmocktime(self.mocktime)
        self.log.info("testing retroactive signing with partially known TX")
        isolate_node(self.nodes[3])
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # Make sure nodes 1 and 2 received the TX before we continue,
        # otherwise it might announce the TX to node 3 when reconnecting
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        reconnect_isolated_node(self.nodes[3], 0)
        # Make sure nodes actually try re-connecting quorum connections
        self.bump_mocktime(30)
        self.wait_for_mnauth(self.nodes[3], 2)
        # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # Make node0 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        block = self.nodes[0].generate(1)[0]
        assert(txid in self.nodes[0].getblock(block, 1)['tx'])
        self.wait_for_chainlocked_block_all_nodes(block)
        self.log.info("testing retroactive signing with partially known TX and all nodes session timeout")
        self.test_all_nodes_session_timeout(False)
        self.log.info("repeating test, but with cycled LLMQs")
        self.test_all_nodes_session_timeout(True)
        self.log.info("testing retroactive signing with partially known TX and single node session timeout")
        self.test_single_node_session_timeout(False)
        self.log.info("repeating test, but with cycled LLMQs")
        self.test_single_node_session_timeout(True)
    def cycle_llmqs(self):
        """Mine two fresh quorums and wait for all nodes to chainlock the tip."""
        self.mine_quorum()
        self.mine_quorum()
        self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash(), timeout=30)
    def test_all_nodes_session_timeout(self, do_cycle_llmqs):
        """Let the IS signing session time out on ALL nodes, then verify the
        lock only happens once the TX is mined into a (chainlocked) block."""
        set_node_times(self.nodes, self.mocktime)
        isolate_node(self.nodes[3])
        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
        rawtx = self.nodes[0].signrawtransactionwithwallet(rawtx)['hex']
        txid = self.nodes[0].sendrawtransaction(rawtx)
        txid = self.nodes[3].sendrawtransaction(rawtx)
        # Make sure nodes 1 and 2 received the TX before we continue
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        # Make sure signing is done on nodes 1 and 2 (it's async)
        time.sleep(5)
        # Make the signing session for the IS lock timeout on nodes 1-3
        self.bump_mocktime(61)
        time.sleep(2)  # make sure Cleanup() is called
        reconnect_isolated_node(self.nodes[3], 0)
        # Make sure nodes actually try re-connecting quorum connections
        self.bump_mocktime(30)
        self.wait_for_mnauth(self.nodes[3], 2)
        # node 3 fully reconnected but the signing session is already timed out on all nodes, so no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        if do_cycle_llmqs:
            self.cycle_llmqs()
            self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # Make node 0 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        block = self.nodes[0].generate(1)[0]
        assert(txid in self.nodes[0].getblock(block, 1)['tx'])
        self.wait_for_chainlocked_block_all_nodes(block)
    def test_single_node_session_timeout(self, do_cycle_llmqs):
        """Let the IS signing session time out on node 3 only, then verify the
        remaining nodes cannot complete the lock until the TX is mined."""
        set_node_times(self.nodes, self.mocktime)
        isolate_node(self.nodes[3])
        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
        rawtx = self.nodes[0].signrawtransactionwithwallet(rawtx)['hex']
        txid = self.nodes[3].sendrawtransaction(rawtx)
        time.sleep(2)  # make sure signing is done on node 2 (it's async)
        # Make the signing session for the IS lock timeout on node 3
        self.bump_mocktime(61)
        time.sleep(2)  # make sure Cleanup() is called
        reconnect_isolated_node(self.nodes[3], 0)
        # Make sure nodes actually try re-connecting quorum connections
        self.bump_mocktime(30)
        self.wait_for_mnauth(self.nodes[3], 2)
        self.nodes[0].sendrawtransaction(rawtx)
        # Make sure nodes 1 and 2 received the TX
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        # Make sure signing is done on nodes 1 and 2 (it's async)
        time.sleep(5)
        # node 3 fully reconnected but the signing session is already timed out on it, so no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 1)
        if do_cycle_llmqs:
            self.cycle_llmqs()
            self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # Make node 0 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        block = self.nodes[0].generate(1)[0]
        assert(txid in self.nodes[0].getblock(block, 1)['tx'])
        self.wait_for_chainlocked_block_all_nodes(block)
# Standard functional-test entry point: run the scenario when invoked directly.
if __name__ == '__main__':
    LLMQ_IS_RetroactiveSigning().main()
| 50.935 | 167 | 0.68342 |
de1a52cad96fdb35b7f0ac6ada0267276b31a686 | 325 | py | Python | qs_techtest/config.py | greener2/qs_techtest | 7974c4d9c6a5e2d0668826cd783f4f66d076d605 | [
"MIT"
] | null | null | null | qs_techtest/config.py | greener2/qs_techtest | 7974c4d9c6a5e2d0668826cd783f4f66d076d605 | [
"MIT"
] | null | null | null | qs_techtest/config.py | greener2/qs_techtest | 7974c4d9c6a5e2d0668826cd783f4f66d076d605 | [
"MIT"
] | null | null | null | """QS Tech Test config objects"""
import os
class BaseConfig:
SECRET_KEY = os.getenv("SECRET_KEY") or "ohsosecret"
SQLALCHEMY_TRACK_MODIFICATIONS = False
class Development(BaseConfig):
SQLALCHEMY_DATABASE_URI = os.getenv("DATABASE_URI")
class Testing(BaseConfig):
SQLALCHEMY_DATABASE_URI = "sqlite://"
| 19.117647 | 56 | 0.741538 |
9808ff4ea22152c600631d16d31e9ed7c53e0c7a | 1,889 | bzl | Python | javascript/tink_javascript_deps.bzl | yfung/tink | e6792c3b57986623c536af1161ec6dad4f6159e4 | [
"Apache-2.0"
] | 1 | 2022-03-15T03:21:44.000Z | 2022-03-15T03:21:44.000Z | javascript/tink_javascript_deps.bzl | yfung/tink | e6792c3b57986623c536af1161ec6dad4f6159e4 | [
"Apache-2.0"
] | 1 | 2022-03-02T13:25:38.000Z | 2022-03-02T13:25:38.000Z | javascript/tink_javascript_deps.bzl | yfung/tink | e6792c3b57986623c536af1161ec6dad4f6159e4 | [
"Apache-2.0"
] | null | null | null | """Dependencies of TypeScript/JavaScript Tink."""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def tink_javascript_deps():
    """Load dependencies of TypeScript/JavaScript Tink."""
    # Each repository is declared only when the enclosing workspace has not
    # already provided its own version, so callers can override the pins.
    if not native.existing_rule("build_bazel_rules_nodejs"):
        # Release from 2021-10-11
        http_archive(
            name = "build_bazel_rules_nodejs",
            urls = ["https://github.com/bazelbuild/rules_nodejs/releases/download/4.4.0/rules_nodejs-4.4.0.tar.gz"],
            sha256 = "c9c5d60d6234d65b06f86abd5edc60cadd1699f739ee49d33a099d2d67eb1ae8",
        )
    if not native.existing_rule("io_bazel_rules_closure"):
        # Tag from 2021-06-11
        http_archive(
            name = "io_bazel_rules_closure",
            strip_prefix = "rules_closure-0.12.0",
            urls = ["https://github.com/bazelbuild/rules_closure/archive/0.12.0.tar.gz"],
            sha256 = "9498e57368efb82b985db1ed426a767cbf1ba0398fd7aed632fc3908654e1b1e",
        )
    if not native.existing_rule("io_bazel_rules_webtesting"):
        # Release from 2021-09-15
        http_archive(
            name = "io_bazel_rules_webtesting",
            urls = ["https://github.com/bazelbuild/rules_webtesting/releases/download/0.3.5/rules_webtesting.tar.gz"],
            sha256 = "e9abb7658b6a129740c0b3ef6f5a2370864e102a5ba5ffca2cea565829ed825a",
        )
    if not native.existing_rule("bazel_skylib"):
        # Release from 2021-09-27
        http_archive(
            name = "bazel_skylib",
            urls = [
                "https://github.com/bazelbuild/bazel-skylib/releases/download/1.1.1/bazel-skylib-1.1.1.tar.gz",
                "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.1.1/bazel-skylib-1.1.1.tar.gz",
            ],
            sha256 = "c6966ec828da198c5d9adbaa94c05e3a1c7f21bd012a0b29ba8ddbccb2c93b0d",
        )
| 44.97619 | 130 | 0.661196 |
a4a9ce08c7b8ae8b0afa64d7d52dff184522e4b9 | 799 | py | Python | apps/article/search_indexes.py | PyCN/BlogBackendProject | 144ea98d54c624cf93a69816779e4f3483ab80a2 | [
"Apache-2.0"
] | 335 | 2018-02-06T11:40:44.000Z | 2022-02-01T07:55:24.000Z | apps/article/search_indexes.py | PyCN/BlogBackendProject | 144ea98d54c624cf93a69816779e4f3483ab80a2 | [
"Apache-2.0"
] | 18 | 2018-03-11T05:02:27.000Z | 2022-03-11T23:18:34.000Z | apps/article/search_indexes.py | PyCN/BlogBackendProject | 144ea98d54c624cf93a69816779e4f3483ab80a2 | [
"Apache-2.0"
] | 83 | 2018-03-02T03:24:06.000Z | 2021-12-15T05:04:09.000Z | from haystack import indexes
from .models import ArticleDetail
class ArticleDetailIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over ArticleDetail records."""
    # Main document field; its content is rendered from the index template.
    text = indexes.CharField(document=True, use_template=True)
    # Metadata pulled from the related article_info record.
    title = indexes.CharField(model_attr='article_info__title')
    author = indexes.CharField(model_attr='article_info__author')
    add_time = indexes.CharField(model_attr='add_time')
    update_time = indexes.CharField(model_attr='update_time')
    link = indexes.CharField(model_attr='article_info__get_absolute_url')
    type = indexes.CharField(model_attr='article_info__post_type')
    @staticmethod
    def prepare_autocomplete(obj):
        # NOTE(review): returns a constant blank value, effectively disabling
        # autocomplete content — confirm whether this is intentional.
        return " "
    def get_model(self):
        """Model class this index covers."""
        return ArticleDetail
    def index_queryset(self, using=None):
        """Queryset used when (re)building the index: all ArticleDetail rows."""
        # Repaired: the original line was corrupted by fused metadata residue
        # ("... | 36.318182 | 73 | ..."), which made it a syntax error.
        return self.get_model().objects.all()
9ad3ab05c77905b1d7a63e40a3292ec7c8204a06 | 9,649 | py | Python | examples/train_pattern_imagenet.py | rtygbwwwerr/innvestigate | de6d9a191d969a542ab1f7ab3fb615ede10c7362 | [
"MIT"
] | 2 | 2019-12-19T08:07:28.000Z | 2020-03-12T02:57:55.000Z | examples/train_pattern_imagenet.py | rtygbwwwerr/innvestigate | de6d9a191d969a542ab1f7ab3fb615ede10c7362 | [
"MIT"
] | null | null | null | examples/train_pattern_imagenet.py | rtygbwwwerr/innvestigate | de6d9a191d969a542ab1f7ab3fb615ede10c7362 | [
"MIT"
] | 1 | 2020-11-06T01:52:28.000Z | 2020-11-06T01:52:28.000Z | # Begin: Python 2/3 compatibility header small
# Get Python 3 functionality:
from __future__ import\
absolute_import, print_function, division, unicode_literals
from future.utils import raise_with_traceback, raise_from
# catch exception with: except Exception as e
from builtins import range, map, zip, filter
from io import open
import six
# End: Python 2/3 compatability header small
###############################################################################
###############################################################################
###############################################################################
import matplotlib
import imp
import keras.backend
import keras.models
import keras.preprocessing.image
import keras.utils
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
import innvestigate
import innvestigate.tools
import innvestigate.utils as iutils
import innvestigate.utils.tests.networks.imagenet
import innvestigate.utils.visualizations as ivis
###############################################################################
###############################################################################
###############################################################################
# Directory containing this script; used to locate the sibling utils module.
base_dir = os.path.dirname(__file__)
# NOTE(review): `imp` is deprecated (removed in Python 3.12); kept here for the
# Python 2/3 compatibility this script targets.
eutils = imp.load_source("utils", os.path.join(base_dir, "utils.py"))
# Path to train and validation images of Imagenet.
# Each directory should contain one directory for each class which contains
# the according images,
# see https://keras.io/preprocessing/image/#imagedatagenerator-class
# function flow_from_directory().
# NOTE(review): train and val point at the same directory — confirm intended.
imagenet_train_dir = "/temp/datasets/imagenet/2012/train_set_small"
imagenet_val_dir = "/temp/datasets/imagenet/2012/train_set_small"
###############################################################################
###############################################################################
###############################################################################
if __name__ == "__main__":
    # Network name comes from argv[1]; defaults to VGG16.
    netname = sys.argv[1] if len(sys.argv) > 1 else "vgg16"
    pattern_type = "relu"
    # steps=None means "use the full generator length" in Keras generator APIs.
    steps = None
    # Infer GPU count from CUDA_VISIBLE_DEVICES (empty string still yields 1).
    gpu_count = len(os.environ.get("CUDA_VISIBLE_DEVICES", "").split(","))
    max_queue_size = 100
    workers = 4 * gpu_count
    use_multiprocessing = True
    print("GPU_COUNT", gpu_count)
    ###########################################################################
    # Build model.
    ###########################################################################
    # Look up the model factory by name and build it with pretrained weights.
    tmp = getattr(innvestigate.applications.imagenet, netname)
    net = tmp(load_weights=True)
    # `model` ends at the pre-softmax output (used for pattern computation);
    # `modelp` includes the softmax (used for evaluation/prediction).
    model = keras.models.Model(inputs=net["in"], outputs=net["out"])
    model.compile(optimizer="adam", loss="categorical_crossentropy")
    modelp = keras.models.Model(inputs=net["in"], outputs=net["sm_out"])
    modelp.compile(optimizer="adam", loss="categorical_crossentropy",
                   metrics=["accuracy"])
    if gpu_count > 1:
        # Replicate the prediction model across GPUs and recompile.
        modelp = keras.utils.multi_gpu_model(modelp, gpus=gpu_count)
        modelp.compile(optimizer="adam",
                       loss="categorical_crossentropy",
                       metrics=["accuracy"])
    ###########################################################################
    # Create data loaders.
    ###########################################################################
    # input_shape includes the batch axis; slice out (H, W) per data format.
    if keras.backend.image_data_format() == "channels_first":
        target_size = net["input_shape"][2:4]
    else:
        target_size = net["input_shape"][1:3]
def preprocess(X):
X = X.copy()
X = net["preprocess_f"](X)
return X
train_data_generator = keras.preprocessing.image.ImageDataGenerator(
preprocessing_function=preprocess,
horizontal_flip=True)
test_data_generator = keras.preprocessing.image.ImageDataGenerator(
preprocessing_function=preprocess)
train_generator = train_data_generator.flow_from_directory(
imagenet_train_dir,
target_size=target_size,
batch_size=32*gpu_count,
class_mode=None)
val_generator = test_data_generator.flow_from_directory(
imagenet_val_dir,
target_size=target_size,
batch_size=32*gpu_count,
class_mode='categorical')
###########################################################################
# Evaluate and compute patterns.
###########################################################################
# Check if all works correctly.
print("Evaluate:")
val_evaluation = modelp.evaluate_generator(
val_generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
print(val_evaluation)
print("Compute patterns:")
pattern_computer = innvestigate.tools.PatternComputer(
model,
pattern_type=pattern_type,
compute_layers_in_parallel=True,
gpus=gpu_count)
patterns = pattern_computer.compute_generator(
train_generator,
steps_per_epoch=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=1)
np.savez("%s_pattern_type_%s_tf_dim_ordering_tf_kernels.npz" %
(netname, pattern_type),
*patterns)
###########################################################################
# Utility functions.
###########################################################################
color_conversion = "BGRtoRGB" if net["color_coding"] == "BGR" else None
channels_first = keras.backend.image_data_format == "channels_first"
    # NOTE(review): this redefines the identical `preprocess` declared above
    # (the data generators already captured the first definition, so behavior
    # is unchanged) — likely redundant.
    def preprocess(X):
        """Apply the network-specific preprocessing to a copy of X."""
        X = X.copy()
        X = net["preprocess_f"](X)
        return X
def postprocess(X):
X = X.copy()
X = iutils.postprocess_images(X,
color_coding=color_conversion,
channels_first=channels_first)
return X
    def image(X):
        """Project a raw image copy into displayable [0, 1] range."""
        X = X.copy()
        return ivis.project(X, absmax=255.0, input_is_postive_only=True)
    def bk_proj(X):
        """Project a signal/backprojection result for display."""
        return ivis.project(X)
    def heatmap(X):
        """Render an attribution map as a heatmap."""
        return ivis.heatmap(X)
    def graymap(X):
        """Render absolute values as a grayscale map."""
        return ivis.graymap(np.abs(X), input_is_postive_only=True)
    ###########################################################################
    # Analysis.
    ###########################################################################
    # Get some example test set images.
    images, label_to_class_name = eutils.get_imagenet_data()
    # Methods we use and some properties.
    # Each entry: (analyzer name, analyzer kwargs, display postprocessor, title)
    methods = [
        # NAME POSTPROCESSING TITLE
        # Show input.
        ("input", {}, image, "Input"),
        # Function
        ("gradient", {}, graymap, "Gradient"),
        ("smoothgrad", {"noise_scale": 50}, graymap, "SmoothGrad"),
        ("integrated_gradients", {}, graymap, ("Integrated", "Gradients")),
        # Signal
        ("deconvnet", {}, bk_proj, "Deconvnet"),
        ("guided_backprop", {}, bk_proj, ("Guided", "Backprop"),),
        ("pattern.net", {"patterns": patterns}, bk_proj, "PatterNet"),
        # Interaction
        ("pattern.attribution", {"patterns": patterns}, heatmap, "PatternAttribution"),
        ("lrp.z", {}, heatmap, "LRP-Z"),
        ("lrp.composite_a_flat", {}, heatmap, "LRP-CompositeAFlat"),
    ]
    # Create analyzers.
    analyzers = []
    for method in methods:
        analyzers.append(innvestigate.create_analyzer(method[0],
                                                      model,
                                                      **method[1]))
    # Create analysis.
    # Output tensor: one 224x224 RGB visualization per (image, analyzer).
    analysis = np.zeros([len(images), len(analyzers), 224, 224, 3])
    text = []
    # NOTE(review): the loop variable `image` shadows the `image()` display
    # helper; harmless here because `methods` already holds the function
    # reference, but worth renaming.
    for i, (image, y) in enumerate(images):
        image = image[None, :, :, :]
        # Predict label.
        x = preprocess(image)
        prob = modelp.predict_on_batch(x)[0]
        y_hat = prob.argmax()
        # LaTeX-formatted row labels: true label, confidence, predicted label.
        text.append((r"\textbf{%s}" % label_to_class_name[y],
                     r"\textit{(%.2f)}" % prob.max(),
                     r"\textit{%s}" % label_to_class_name[y_hat]))
        for aidx, analyzer in enumerate(analyzers):
            is_input_analyzer = methods[aidx][0] == "input"
            # Analyze. The "input" pseudo-analyzer works on the raw image.
            a = analyzer.analyze(image if is_input_analyzer else x)
            # Postprocess.
            if not np.all(np.isfinite(a)):
                print("Image %i, analysis of %s not finite: nan %s inf %s" %
                      (i, methods[aidx][3],
                       np.any(np.isnan(a)), np.any(np.isinf(a))))
            if not is_input_analyzer:
                a = postprocess(a)
            a = methods[aidx][2](a)
            analysis[i, aidx] = a[0]
    ###########################################################################
    # Plot the analysis.
    ###########################################################################
    # Rows = images, columns = analysis methods.
    grid = [[analysis[i, j] for j in range(analysis.shape[1])]
            for i in range(analysis.shape[0])]
    row_labels = text
    col_labels = [method[3] for method in methods]
    file_name = "all_methods_%s_%s.pdf" % (netname, pattern_type)
    eutils.plot_image_grid(grid, row_labels, col_labels,
                           row_label_offset=50,
                           col_label_offset=-50,
                           usetex=True, file_name=file_name)
| 37.254826 | 98 | 0.500466 |
c806742e80254b963146b42d1133c8046e69a668 | 1,133 | py | Python | stubs.min/System/Windows/Forms/__init___parts/SecurityIDType.py | ricardyn/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2021-02-02T13:39:16.000Z | 2021-02-02T13:39:16.000Z | stubs.min/System/Windows/Forms/__init___parts/SecurityIDType.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/System/Windows/Forms/__init___parts/SecurityIDType.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class SecurityIDType(Enum,IComparable,IFormattable,IConvertible):
"""
This enumeration is not used.
enum SecurityIDType,values: Alias (4),Computer (9),DeletedAccount (6),Domain (3),Group (2),Invalid (7),Unknown (8),User (1),WellKnownGroup (5)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Alias=None
Computer=None
DeletedAccount=None
Domain=None
Group=None
Invalid=None
Unknown=None
User=None
value__=None
WellKnownGroup=None
| 27.634146 | 215 | 0.670786 |
f2caf84ab6ea9c1b04852b35415e15563df46988 | 1,372 | py | Python | pyclesperanto_prototype/_tier1/_not_equal_constant.py | elsandal/pyclesperanto_prototype | 7bda828813b86b44b63d73d5e8f466d9769cded1 | [
"BSD-3-Clause"
] | 64 | 2020-03-18T12:11:22.000Z | 2022-03-31T08:19:18.000Z | pyclesperanto_prototype/_tier1/_not_equal_constant.py | elsandal/pyclesperanto_prototype | 7bda828813b86b44b63d73d5e8f466d9769cded1 | [
"BSD-3-Clause"
] | 148 | 2020-05-14T06:14:11.000Z | 2022-03-26T15:02:31.000Z | pyclesperanto_prototype/_tier1/_not_equal_constant.py | elsandal/pyclesperanto_prototype | 7bda828813b86b44b63d73d5e8f466d9769cded1 | [
"BSD-3-Clause"
] | 16 | 2020-05-31T00:53:44.000Z | 2022-03-23T13:20:57.000Z | from .._tier0 import execute, create_binary_like
from .._tier0 import plugin_function
from .._tier0 import Image
@plugin_function(categories=['binarize', 'in assistant'], output_creator=create_binary_like)
def not_equal_constant(source : Image, destination : Image = None, constant : float = 0):
    """Determines if two images A and B equal pixel wise.

    f(a, b) = 1 if a != b; 0 otherwise.

    Parameters
    ----------
    source : Image
        The image where every pixel is compared to the constant.
    destination : Image
        The resulting binary image where pixels will be 1 only if source1
        and source2 equal in the given pixel.
    constant : float
        The constant where every pixel is compared to.

    Returns
    -------
    destination

    Examples
    --------
    >>> import pyclesperanto_prototype as cle
    >>> cle.not_equal_constant(source, destination, constant)

    References
    ----------
    .. [1] https://clij.github.io/clij2-docs/reference_notEqualConstant
    """
    # Kernel file/name are selected by output dimensionality (2d or 3d).
    ndim = len(destination.shape)
    kernel_arguments = {
        "src1": source,
        "scalar": float(constant),
        "dst": destination,
    }
    execute(__file__,
            '../clij-opencl-kernels/kernels/not_equal_constant_' + str(ndim) + 'd_x.cl',
            'not_equal_constant_' + str(ndim) + 'd',
            destination.shape,
            kernel_arguments)
    return destination
| 31.906977 | 206 | 0.658163 |
06b65fdeb52139885ab0dd249b180fa95ba923aa | 8,224 | py | Python | tests/test_formal_sram.py | StanfordAHA/lake | 9a17e86a3c80fbc2d5adfb8c0734a9c93d74458d | [
"BSD-3-Clause"
] | 11 | 2019-10-14T02:05:38.000Z | 2022-03-10T14:10:22.000Z | tests/test_formal_sram.py | StanfordAHA/lake | 9a17e86a3c80fbc2d5adfb8c0734a9c93d74458d | [
"BSD-3-Clause"
] | 29 | 2019-09-02T05:49:40.000Z | 2022-02-26T00:57:54.000Z | tests/test_formal_sram.py | StanfordAHA/lake | 9a17e86a3c80fbc2d5adfb8c0734a9c93d74458d | [
"BSD-3-Clause"
] | 1 | 2021-04-16T20:26:13.000Z | 2021-04-16T20:26:13.000Z | from lake.top.lake_top import *
import fault
import pytest
import tempfile
@pytest.mark.skip
def test_formal_sram():
    """Formal-verification harness for the lake SRAM controller.

    Builds the SRAM formal module, converts it to magma, drives a fixed
    register configuration plus an agg-data input pattern through a fault
    tester, and compiles/runs the trace under verilator.
    """
    # Build the formal wrapper for the "sram" controller and lower to magma.
    lt_dut, n, u, t = get_formal_module("sram")
    magma_dut = kts.util.to_magma(lt_dut,
                                  flatten_array=True,
                                  check_multiple_driver=False,
                                  optimize_if=False,
                                  check_flip_flop_always_ff=False)
    tester = fault.Tester(magma_dut, magma_dut.clk)
    # Reset sequence: assert rst_n low for two half-steps, then release.
    tester.circuit.tile_en = 1
    tester.circuit.clk = 0
    tester.circuit.rst_n = 0
    tester.step(2)
    tester.circuit.rst_n = 1
    tester.step(2)
    tester.circuit.clk_en = 1
    tester.eval()
    # Static register configuration (schedule/address generator programming).
    config = {}
    # Aggregator -> SRAM read schedule 0.
    config["strg_ub_agg_sram_shared_agg_read_sched_gen_0_sched_addr_gen_strides_0"] = 4
    config["strg_ub_agg_sram_shared_agg_read_sched_gen_0_sched_addr_gen_strides_1"] = 0
    config["strg_ub_agg_sram_shared_agg_read_sched_gen_0_sched_addr_gen_strides_2"] = 0
    config["strg_ub_agg_sram_shared_agg_read_sched_gen_0_sched_addr_gen_strides_3"] = 0
    config["strg_ub_agg_sram_shared_agg_read_sched_gen_0_sched_addr_gen_strides_4"] = 0
    config["strg_ub_agg_sram_shared_agg_read_sched_gen_0_sched_addr_gen_strides_5"] = 0
    config["strg_ub_agg_sram_shared_agg_read_sched_gen_0_enable"] = 1
    config["strg_ub_agg_sram_shared_loops_in2buf_autovec_write_0_ranges_0"] = 769
    config["strg_ub_agg_sram_shared_loops_in2buf_autovec_write_0_ranges_1"] = 0
    config["strg_ub_agg_sram_shared_loops_in2buf_autovec_write_0_ranges_2"] = 0
    config["strg_ub_agg_sram_shared_loops_in2buf_autovec_write_0_ranges_3"] = 0
    config["strg_ub_agg_sram_shared_loops_in2buf_autovec_write_0_ranges_4"] = 0
    config["strg_ub_agg_sram_shared_loops_in2buf_autovec_write_0_ranges_5"] = 0
    config["strg_ub_sram_only_input_addr_gen_0_strides_0"] = 117
    config["strg_ub_sram_only_input_addr_gen_0_strides_1"] = 511
    config["strg_ub_sram_only_input_addr_gen_0_strides_2"] = 511
    config["strg_ub_sram_only_input_addr_gen_0_strides_3"] = 511
    config["strg_ub_sram_only_input_addr_gen_0_strides_4"] = 511
    config["strg_ub_sram_only_input_addr_gen_0_strides_5"] = 511
    config["strg_ub_agg_sram_shared_loops_in2buf_autovec_write_1_dimensionality"] = 0
    config["strg_ub_sram_only_output_addr_gen_1_starting_addr"] = 374
    config["strg_ub_sram_tb_shared_output_sched_gen_1_enable"] = 1
    config["strg_ub_sram_tb_shared_loops_buf2out_autovec_read_0_dimensionality"] = 1
    config["strg_ub_agg_sram_shared_loops_in2buf_autovec_write_1_ranges_0"] = 0
    config["strg_ub_agg_sram_shared_loops_in2buf_autovec_write_1_ranges_1"] = 0
    config["strg_ub_agg_sram_shared_loops_in2buf_autovec_write_1_ranges_2"] = 0
    config["strg_ub_agg_sram_shared_loops_in2buf_autovec_write_1_ranges_3"] = 0
    config["strg_ub_agg_sram_shared_loops_in2buf_autovec_write_1_ranges_4"] = 0
    config["strg_ub_agg_sram_shared_loops_in2buf_autovec_write_1_ranges_5"] = 0
    config["strg_ub_sram_only_output_addr_gen_1_strides_0"] = 117
    config["strg_ub_sram_only_output_addr_gen_1_strides_1"] = 511
    config["strg_ub_sram_only_output_addr_gen_1_strides_2"] = 511
    config["strg_ub_sram_only_output_addr_gen_1_strides_3"] = 511
    config["strg_ub_sram_only_output_addr_gen_1_strides_4"] = 511
    config["strg_ub_sram_only_output_addr_gen_1_strides_5"] = 511
    config["strg_ub_sram_tb_shared_output_sched_gen_1_sched_addr_gen_starting_addr"] = 53
    config["strg_ub_sram_tb_shared_output_sched_gen_0_enable"] = 1
    config["strg_ub_sram_tb_shared_output_sched_gen_1_sched_addr_gen_strides_0"] = 4
    config["strg_ub_sram_tb_shared_output_sched_gen_1_sched_addr_gen_strides_1"] = 0
    config["strg_ub_sram_tb_shared_output_sched_gen_1_sched_addr_gen_strides_2"] = 0
    config["strg_ub_sram_tb_shared_output_sched_gen_1_sched_addr_gen_strides_3"] = 0
    config["strg_ub_sram_tb_shared_output_sched_gen_1_sched_addr_gen_strides_4"] = 0
    config["strg_ub_sram_tb_shared_output_sched_gen_1_sched_addr_gen_strides_5"] = 0
    config["strg_ub_sram_only_output_addr_gen_0_starting_addr"] = 67
    config["strg_ub_sram_tb_shared_loops_buf2out_autovec_read_0_ranges_0"] = 180
    config["strg_ub_sram_tb_shared_loops_buf2out_autovec_read_0_ranges_1"] = 0
    config["strg_ub_sram_tb_shared_loops_buf2out_autovec_read_0_ranges_2"] = 0
    config["strg_ub_sram_tb_shared_loops_buf2out_autovec_read_0_ranges_3"] = 0
    config["strg_ub_sram_tb_shared_loops_buf2out_autovec_read_0_ranges_4"] = 0
    config["strg_ub_sram_tb_shared_loops_buf2out_autovec_read_0_ranges_5"] = 0
    config["strg_ub_sram_tb_shared_output_sched_gen_0_sched_addr_gen_strides_0"] = 4
    config["strg_ub_sram_tb_shared_output_sched_gen_0_sched_addr_gen_strides_1"] = 0
    config["strg_ub_sram_tb_shared_output_sched_gen_0_sched_addr_gen_strides_2"] = 0
    config["strg_ub_sram_tb_shared_output_sched_gen_0_sched_addr_gen_strides_3"] = 0
    config["strg_ub_sram_tb_shared_output_sched_gen_0_sched_addr_gen_strides_4"] = 0
    config["strg_ub_sram_tb_shared_output_sched_gen_0_sched_addr_gen_strides_5"] = 0
    config["strg_ub_agg_sram_shared_agg_read_sched_gen_1_sched_addr_gen_starting_addr"] = 0
    config["strg_ub_sram_only_input_addr_gen_0_starting_addr"] = 67
    config["strg_ub_agg_sram_shared_agg_read_sched_gen_1_enable"] = 0
    config["strg_ub_sram_tb_shared_output_sched_gen_0_sched_addr_gen_starting_addr"] = 51
    config["strg_ub_agg_sram_shared_agg_read_sched_gen_1_sched_addr_gen_strides_0"] = 0
    config["strg_ub_agg_sram_shared_agg_read_sched_gen_1_sched_addr_gen_strides_1"] = 0
    config["strg_ub_agg_sram_shared_agg_read_sched_gen_1_sched_addr_gen_strides_2"] = 0
    config["strg_ub_agg_sram_shared_agg_read_sched_gen_1_sched_addr_gen_strides_3"] = 0
    config["strg_ub_agg_sram_shared_agg_read_sched_gen_1_sched_addr_gen_strides_4"] = 0
    config["strg_ub_agg_sram_shared_agg_read_sched_gen_1_sched_addr_gen_strides_5"] = 0
    config["strg_ub_agg_sram_shared_agg_read_sched_gen_0_sched_addr_gen_starting_addr"] = 4
    config["strg_ub_sram_tb_shared_loops_buf2out_autovec_read_1_ranges_0"] = 180
    config["strg_ub_sram_tb_shared_loops_buf2out_autovec_read_1_ranges_1"] = 0
    config["strg_ub_sram_tb_shared_loops_buf2out_autovec_read_1_ranges_2"] = 0
    config["strg_ub_sram_tb_shared_loops_buf2out_autovec_read_1_ranges_3"] = 0
    config["strg_ub_sram_tb_shared_loops_buf2out_autovec_read_1_ranges_4"] = 0
    config["strg_ub_sram_tb_shared_loops_buf2out_autovec_read_1_ranges_5"] = 0
    config["strg_ub_agg_sram_shared_loops_in2buf_autovec_write_0_dimensionality"] = 1
    config["strg_ub_sram_only_input_addr_gen_1_starting_addr"] = 0
    config["strg_ub_sram_tb_shared_loops_buf2out_autovec_read_1_dimensionality"] = 1
    config["strg_ub_sram_only_output_addr_gen_0_strides_0"] = 117
    config["strg_ub_sram_only_output_addr_gen_0_strides_1"] = 511
    config["strg_ub_sram_only_output_addr_gen_0_strides_2"] = 511
    config["strg_ub_sram_only_output_addr_gen_0_strides_3"] = 511
    config["strg_ub_sram_only_output_addr_gen_0_strides_4"] = 511
    config["strg_ub_sram_only_output_addr_gen_0_strides_5"] = 511
    config["strg_ub_sram_only_input_addr_gen_1_strides_0"] = 0
    config["strg_ub_sram_only_input_addr_gen_1_strides_1"] = 0
    config["strg_ub_sram_only_input_addr_gen_1_strides_2"] = 0
    config["strg_ub_sram_only_input_addr_gen_1_strides_3"] = 0
    config["strg_ub_sram_only_input_addr_gen_1_strides_4"] = 0
    config["strg_ub_sram_only_input_addr_gen_1_strides_5"] = 0
    # Apply the configuration registers to the DUT.
    for f1 in config:
        setattr(tester.circuit, f1, config[f1])
    # Drive an incrementing data pattern on the 4 agg_data inputs.
    # NOTE(review): the inner `for i in range(4)` shadows the outer loop
    # variable `i`, and the pattern is re-driven every outer iteration without
    # a clock step in between — confirm this matches the intended stimulus.
    counter = 0
    curr = 4
    for i in range(785):
        if i == curr:
            curr += 4
        for i in range(4):
            setattr(tester.circuit, f"agg_data_out_top_0_{i}", counter)
            counter += 1
    # check formal_mem_data at top level
    tester.eval()
    tester.step(2)
    with tempfile.TemporaryDirectory() as tempdir:
        # NOTE(review): tempdir is immediately overridden with a fixed path,
        # so the temporary directory is created but unused.
        tempdir = "sram_formal"
        tester.compile_and_run(target="verilator",
                               directory=tempdir,
                               flags=["-Wno-fatal", "--trace"])
if __name__ == "__main__":
test_formal_sram()
| 56.717241 | 91 | 0.793531 |
c29dc1b8b95238e1cb4039ce6ab9c1a9f78b295b | 1,306 | py | Python | bin/pathed.py | pgreze/dotfiles | eff9226421ff3bdfbba131f6f10527bb206a25ca | [
"Apache-2.0"
] | 1 | 2021-01-07T02:46:17.000Z | 2021-01-07T02:46:17.000Z | bin/pathed.py | pgreze/dotfiles | eff9226421ff3bdfbba131f6f10527bb206a25ca | [
"Apache-2.0"
] | 15 | 2019-09-22T04:31:01.000Z | 2021-03-13T03:47:42.000Z | bin/pathed.py | pgreze/dotfiles | eff9226421ff3bdfbba131f6f10527bb206a25ca | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import os.path
import sys
import typer
from typing import List
def paths():
    """Yield the entries of $PATH with any trailing slash stripped.

    Returns a fresh generator each call so commands always see the current
    environment. (Converted from a lambda assignment — PEP 8 E731.)
    """
    return (v.rstrip("/") for v in os.environ["PATH"].split(":"))

app = typer.Typer(help="""PATH editor utility.
Expected usages:
if {cli} exists /usr/local/bin; then TODO; fi
export PATH="$({cli} add /usr/local/bin --unique)"
""".format(cli=os.path.basename(sys.argv[0])))
@app.command(name="list", help="Display all entries in PATH")
def ls():
    """Print each PATH entry on its own line."""
    for entry in paths():
        print(entry)
@app.command(help="Fail any provided value is not in PATH")
def exists(values: List[str]):
    """Exit with the count of given values missing from PATH (0 = all found)."""
    wanted = {v.rstrip("/") for v in values}
    missing = wanted - set(paths())
    raise typer.Exit(len(missing))
@app.command(name="add", help="Add to the top of PATH provided values, and remove duplicates if asked")
def add(
    values: List[str],
    unique: bool = typer.Option(False, help = "Delete duplicates of provided values if found")
):
    """Print a new PATH with the given values prepended.

    With --unique, existing occurrences of the added values are dropped so
    each appears only once (at the front).
    """
    values = [v.rstrip("/") for v in values]
    # Bug fix: the comprehension referenced the undefined name `value`
    # (NameError at runtime); it must test membership in `values`.
    print(":".join(values + [v for v in paths() if v not in values or not unique]))
@app.command(name="del", help="Deletes all occurences of the provided values in PATH")
def delete(values: List[str]):
    """Print PATH with every occurrence of the given values removed."""
    targets = {v.rstrip("/") for v in values}
    kept = [entry for entry in paths() if entry not in targets]
    print(":".join(kept))
if __name__ == '__main__':
    # Dispatch to the typer CLI when run as a script.
    app()
| 27.208333 | 103 | 0.661562 |
505d914a04adc35160331ede7f00cdcdd3df0299 | 37,334 | py | Python | netbox/ipam/tests/test_api.py | 0xAalaoui/netbox | 07364abf9e9ff193bad49b790e657382cf186f0c | [
"Apache-2.0"
] | 1 | 2018-07-31T06:54:02.000Z | 2018-07-31T06:54:02.000Z | netbox/ipam/tests/test_api.py | 0xAalaoui/netbox | 07364abf9e9ff193bad49b790e657382cf186f0c | [
"Apache-2.0"
] | null | null | null | netbox/ipam/tests/test_api.py | 0xAalaoui/netbox | 07364abf9e9ff193bad49b790e657382cf186f0c | [
"Apache-2.0"
] | 1 | 2018-07-31T06:54:11.000Z | 2018-07-31T06:54:11.000Z | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.urls import reverse
from netaddr import IPNetwork
from rest_framework import status
from rest_framework.test import APITestCase
from dcim.models import Device, DeviceRole, DeviceType, Manufacturer, Site
from ipam.constants import IP_PROTOCOL_TCP, IP_PROTOCOL_UDP
from ipam.models import Aggregate, IPAddress, Prefix, RIR, Role, Service, VLAN, VLANGroup, VRF
from users.models import Token
from utilities.tests import HttpStatusMixin
class VRFTest(HttpStatusMixin, APITestCase):
    """CRUD API tests for the VRF endpoint: get, list, create (single and
    bulk), update, and delete."""
    def setUp(self):
        # Authenticate via token as a superuser and seed three VRFs.
        user = User.objects.create(username='testuser', is_superuser=True)
        token = Token.objects.create(user=user)
        self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
        self.vrf1 = VRF.objects.create(name='Test VRF 1', rd='65000:1')
        self.vrf2 = VRF.objects.create(name='Test VRF 2', rd='65000:2')
        self.vrf3 = VRF.objects.create(name='Test VRF 3', rd='65000:3')
    def test_get_vrf(self):
        url = reverse('ipam-api:vrf-detail', kwargs={'pk': self.vrf1.pk})
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['name'], self.vrf1.name)
    def test_list_vrfs(self):
        url = reverse('ipam-api:vrf-list')
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['count'], 3)
    def test_create_vrf(self):
        data = {
            'name': 'Test VRF 4',
            'rd': '65000:4',
        }
        url = reverse('ipam-api:vrf-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(VRF.objects.count(), 4)
        vrf4 = VRF.objects.get(pk=response.data['id'])
        self.assertEqual(vrf4.name, data['name'])
        self.assertEqual(vrf4.rd, data['rd'])
    def test_create_vrf_bulk(self):
        data = [
            {
                'name': 'Test VRF 4',
                'rd': '65000:4',
            },
            {
                'name': 'Test VRF 5',
                'rd': '65000:5',
            },
            {
                'name': 'Test VRF 6',
                'rd': '65000:6',
            },
        ]
        url = reverse('ipam-api:vrf-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(VRF.objects.count(), 6)
        self.assertEqual(response.data[0]['name'], data[0]['name'])
        self.assertEqual(response.data[1]['name'], data[1]['name'])
        self.assertEqual(response.data[2]['name'], data[2]['name'])
    def test_update_vrf(self):
        data = {
            'name': 'Test VRF X',
            'rd': '65000:99',
        }
        url = reverse('ipam-api:vrf-detail', kwargs={'pk': self.vrf1.pk})
        response = self.client.put(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        self.assertEqual(VRF.objects.count(), 3)
        vrf1 = VRF.objects.get(pk=response.data['id'])
        self.assertEqual(vrf1.name, data['name'])
        self.assertEqual(vrf1.rd, data['rd'])
    def test_delete_vrf(self):
        url = reverse('ipam-api:vrf-detail', kwargs={'pk': self.vrf1.pk})
        response = self.client.delete(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
        self.assertEqual(VRF.objects.count(), 2)
class RIRTest(HttpStatusMixin, APITestCase):
    """CRUD API tests for the RIR endpoint: get, list, create (single and
    bulk), update, and delete."""
    def setUp(self):
        # Authenticate via token as a superuser and seed three RIRs.
        user = User.objects.create(username='testuser', is_superuser=True)
        token = Token.objects.create(user=user)
        self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
        self.rir1 = RIR.objects.create(name='Test RIR 1', slug='test-rir-1')
        self.rir2 = RIR.objects.create(name='Test RIR 2', slug='test-rir-2')
        self.rir3 = RIR.objects.create(name='Test RIR 3', slug='test-rir-3')
    def test_get_rir(self):
        url = reverse('ipam-api:rir-detail', kwargs={'pk': self.rir1.pk})
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['name'], self.rir1.name)
    def test_list_rirs(self):
        url = reverse('ipam-api:rir-list')
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['count'], 3)
    def test_create_rir(self):
        data = {
            'name': 'Test RIR 4',
            'slug': 'test-rir-4',
        }
        url = reverse('ipam-api:rir-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(RIR.objects.count(), 4)
        rir4 = RIR.objects.get(pk=response.data['id'])
        self.assertEqual(rir4.name, data['name'])
        self.assertEqual(rir4.slug, data['slug'])
    def test_create_rir_bulk(self):
        data = [
            {
                'name': 'Test RIR 4',
                'slug': 'test-rir-4',
            },
            {
                'name': 'Test RIR 5',
                'slug': 'test-rir-5',
            },
            {
                'name': 'Test RIR 6',
                'slug': 'test-rir-6',
            },
        ]
        url = reverse('ipam-api:rir-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(RIR.objects.count(), 6)
        self.assertEqual(response.data[0]['name'], data[0]['name'])
        self.assertEqual(response.data[1]['name'], data[1]['name'])
        self.assertEqual(response.data[2]['name'], data[2]['name'])
    def test_update_rir(self):
        data = {
            'name': 'Test RIR X',
            'slug': 'test-rir-x',
        }
        url = reverse('ipam-api:rir-detail', kwargs={'pk': self.rir1.pk})
        response = self.client.put(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        self.assertEqual(RIR.objects.count(), 3)
        rir1 = RIR.objects.get(pk=response.data['id'])
        self.assertEqual(rir1.name, data['name'])
        self.assertEqual(rir1.slug, data['slug'])
    def test_delete_rir(self):
        url = reverse('ipam-api:rir-detail', kwargs={'pk': self.rir1.pk})
        response = self.client.delete(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
        self.assertEqual(RIR.objects.count(), 2)
class AggregateTest(HttpStatusMixin, APITestCase):
    """CRUD API tests for the Aggregate endpoint: get, list, create (single
    and bulk), update, and delete."""
    def setUp(self):
        # Authenticate via token as a superuser; seed two RIRs and three
        # aggregates assigned to the first RIR.
        user = User.objects.create(username='testuser', is_superuser=True)
        token = Token.objects.create(user=user)
        self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
        self.rir1 = RIR.objects.create(name='Test RIR 1', slug='test-rir-1')
        self.rir2 = RIR.objects.create(name='Test RIR 2', slug='test-rir-2')
        self.aggregate1 = Aggregate.objects.create(prefix=IPNetwork('10.0.0.0/8'), rir=self.rir1)
        self.aggregate2 = Aggregate.objects.create(prefix=IPNetwork('172.16.0.0/12'), rir=self.rir1)
        self.aggregate3 = Aggregate.objects.create(prefix=IPNetwork('192.168.0.0/16'), rir=self.rir1)
    def test_get_aggregate(self):
        url = reverse('ipam-api:aggregate-detail', kwargs={'pk': self.aggregate1.pk})
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['prefix'], str(self.aggregate1.prefix))
    def test_list_aggregates(self):
        url = reverse('ipam-api:aggregate-list')
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['count'], 3)
    def test_create_aggregate(self):
        data = {
            'prefix': '192.0.2.0/24',
            'rir': self.rir1.pk,
        }
        url = reverse('ipam-api:aggregate-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(Aggregate.objects.count(), 4)
        aggregate4 = Aggregate.objects.get(pk=response.data['id'])
        self.assertEqual(str(aggregate4.prefix), data['prefix'])
        self.assertEqual(aggregate4.rir_id, data['rir'])
    def test_create_aggregate_bulk(self):
        data = [
            {
                'prefix': '100.0.0.0/8',
                'rir': self.rir1.pk,
            },
            {
                'prefix': '101.0.0.0/8',
                'rir': self.rir1.pk,
            },
            {
                'prefix': '102.0.0.0/8',
                'rir': self.rir1.pk,
            },
        ]
        url = reverse('ipam-api:aggregate-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(Aggregate.objects.count(), 6)
        self.assertEqual(response.data[0]['prefix'], data[0]['prefix'])
        self.assertEqual(response.data[1]['prefix'], data[1]['prefix'])
        self.assertEqual(response.data[2]['prefix'], data[2]['prefix'])
    def test_update_aggregate(self):
        data = {
            'prefix': '11.0.0.0/8',
            'rir': self.rir2.pk,
        }
        url = reverse('ipam-api:aggregate-detail', kwargs={'pk': self.aggregate1.pk})
        response = self.client.put(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        self.assertEqual(Aggregate.objects.count(), 3)
        aggregate1 = Aggregate.objects.get(pk=response.data['id'])
        self.assertEqual(str(aggregate1.prefix), data['prefix'])
        self.assertEqual(aggregate1.rir_id, data['rir'])
    def test_delete_aggregate(self):
        url = reverse('ipam-api:aggregate-detail', kwargs={'pk': self.aggregate1.pk})
        response = self.client.delete(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
        self.assertEqual(Aggregate.objects.count(), 2)
class RoleTest(HttpStatusMixin, APITestCase):
    """CRUD API tests for the Role endpoint: get, list, create (single and
    bulk), update, and delete."""
    def setUp(self):
        # Authenticate via token as a superuser and seed three Roles.
        user = User.objects.create(username='testuser', is_superuser=True)
        token = Token.objects.create(user=user)
        self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
        self.role1 = Role.objects.create(name='Test Role 1', slug='test-role-1')
        self.role2 = Role.objects.create(name='Test Role 2', slug='test-role-2')
        self.role3 = Role.objects.create(name='Test Role 3', slug='test-role-3')
    def test_get_role(self):
        url = reverse('ipam-api:role-detail', kwargs={'pk': self.role1.pk})
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['name'], self.role1.name)
    def test_list_roles(self):
        url = reverse('ipam-api:role-list')
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['count'], 3)
    def test_create_role(self):
        data = {
            'name': 'Test Role 4',
            'slug': 'test-role-4',
        }
        url = reverse('ipam-api:role-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(Role.objects.count(), 4)
        role4 = Role.objects.get(pk=response.data['id'])
        self.assertEqual(role4.name, data['name'])
        self.assertEqual(role4.slug, data['slug'])
    def test_create_role_bulk(self):
        data = [
            {
                'name': 'Test Role 4',
                'slug': 'test-role-4',
            },
            {
                'name': 'Test Role 5',
                'slug': 'test-role-5',
            },
            {
                'name': 'Test Role 6',
                'slug': 'test-role-6',
            },
        ]
        url = reverse('ipam-api:role-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(Role.objects.count(), 6)
        self.assertEqual(response.data[0]['name'], data[0]['name'])
        self.assertEqual(response.data[1]['name'], data[1]['name'])
        self.assertEqual(response.data[2]['name'], data[2]['name'])
    def test_update_role(self):
        data = {
            'name': 'Test Role X',
            'slug': 'test-role-x',
        }
        url = reverse('ipam-api:role-detail', kwargs={'pk': self.role1.pk})
        response = self.client.put(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        self.assertEqual(Role.objects.count(), 3)
        role1 = Role.objects.get(pk=response.data['id'])
        self.assertEqual(role1.name, data['name'])
        self.assertEqual(role1.slug, data['slug'])
    def test_delete_role(self):
        url = reverse('ipam-api:role-detail', kwargs={'pk': self.role1.pk})
        response = self.client.delete(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
        self.assertEqual(Role.objects.count(), 2)
class PrefixTest(HttpStatusMixin, APITestCase):
    """CRUD plus available-prefix / available-IP allocation tests for Prefix."""
    def setUp(self):
        """Authenticate as a superuser and create related objects + 3 prefixes."""
        user = User.objects.create(username='testuser', is_superuser=True)
        token = Token.objects.create(user=user)
        self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
        self.site1 = Site.objects.create(name='Test Site 1', slug='test-site-1')
        self.vrf1 = VRF.objects.create(name='Test VRF 1', rd='65000:1')
        self.vlan1 = VLAN.objects.create(vid=1, name='Test VLAN 1')
        self.role1 = Role.objects.create(name='Test Role 1', slug='test-role-1')
        self.prefix1 = Prefix.objects.create(prefix=IPNetwork('192.168.1.0/24'))
        self.prefix2 = Prefix.objects.create(prefix=IPNetwork('192.168.2.0/24'))
        self.prefix3 = Prefix.objects.create(prefix=IPNetwork('192.168.3.0/24'))
    def test_get_prefix(self):
        """Detail view serializes the prefix as a CIDR string."""
        url = reverse('ipam-api:prefix-detail', kwargs={'pk': self.prefix1.pk})
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['prefix'], str(self.prefix1.prefix))
    def test_list_prefixs(self):
        """List view reports all three fixture prefixes."""
        url = reverse('ipam-api:prefix-list')
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['count'], 3)
    def test_create_prefix(self):
        """POST creates a prefix with all related-object FKs set."""
        data = {
            'prefix': '192.168.4.0/24',
            'site': self.site1.pk,
            'vrf': self.vrf1.pk,
            'vlan': self.vlan1.pk,
            'role': self.role1.pk,
        }
        url = reverse('ipam-api:prefix-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(Prefix.objects.count(), 4)
        prefix4 = Prefix.objects.get(pk=response.data['id'])
        self.assertEqual(str(prefix4.prefix), data['prefix'])
        self.assertEqual(prefix4.site_id, data['site'])
        self.assertEqual(prefix4.vrf_id, data['vrf'])
        self.assertEqual(prefix4.vlan_id, data['vlan'])
        self.assertEqual(prefix4.role_id, data['role'])
    def test_create_prefix_bulk(self):
        """POSTing a list creates all prefixes in a single request."""
        data = [
            {
                'prefix': '10.0.1.0/24',
            },
            {
                'prefix': '10.0.2.0/24',
            },
            {
                'prefix': '10.0.3.0/24',
            },
        ]
        url = reverse('ipam-api:prefix-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(Prefix.objects.count(), 6)
        self.assertEqual(response.data[0]['prefix'], data[0]['prefix'])
        self.assertEqual(response.data[1]['prefix'], data[1]['prefix'])
        self.assertEqual(response.data[2]['prefix'], data[2]['prefix'])
    def test_update_prefix(self):
        """PUT replaces the prefix and all FK assignments in place."""
        data = {
            'prefix': '192.168.99.0/24',
            'site': self.site1.pk,
            'vrf': self.vrf1.pk,
            'vlan': self.vlan1.pk,
            'role': self.role1.pk,
        }
        url = reverse('ipam-api:prefix-detail', kwargs={'pk': self.prefix1.pk})
        response = self.client.put(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        self.assertEqual(Prefix.objects.count(), 3)
        prefix1 = Prefix.objects.get(pk=response.data['id'])
        self.assertEqual(str(prefix1.prefix), data['prefix'])
        self.assertEqual(prefix1.site_id, data['site'])
        self.assertEqual(prefix1.vrf_id, data['vrf'])
        self.assertEqual(prefix1.vlan_id, data['vlan'])
        self.assertEqual(prefix1.role_id, data['role'])
    def test_delete_prefix(self):
        """DELETE removes exactly one prefix."""
        url = reverse('ipam-api:prefix-detail', kwargs={'pk': self.prefix1.pk})
        response = self.client.delete(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
        self.assertEqual(Prefix.objects.count(), 2)
    def test_list_available_prefixes(self):
        """Available-prefix view returns the gaps left by the two child prefixes."""
        prefix = Prefix.objects.create(prefix=IPNetwork('192.0.2.0/24'))
        Prefix.objects.create(prefix=IPNetwork('192.0.2.64/26'))
        Prefix.objects.create(prefix=IPNetwork('192.0.2.192/27'))
        url = reverse('ipam-api:prefix-available-prefixes', kwargs={'pk': prefix.pk})
        # Retrieve all available IPs
        response = self.client.get(url, **self.header)
        available_prefixes = ['192.0.2.0/26', '192.0.2.128/26', '192.0.2.224/27']
        for i, p in enumerate(response.data):
            self.assertEqual(p['prefix'], available_prefixes[i])
    def test_create_single_available_prefix(self):
        """Sequential single allocations fill the pool, then further POSTs 400."""
        prefix = Prefix.objects.create(prefix=IPNetwork('192.0.2.0/28'), is_pool=True)
        url = reverse('ipam-api:prefix-available-prefixes', kwargs={'pk': prefix.pk})
        # Create four available prefixes with individual requests
        prefixes_to_be_created = [
            '192.0.2.0/30',
            '192.0.2.4/30',
            '192.0.2.8/30',
            '192.0.2.12/30',
        ]
        for i in range(4):
            data = {
                'prefix_length': 30,
                'description': 'Test Prefix {}'.format(i + 1)
            }
            response = self.client.post(url, data, format='json', **self.header)
            self.assertHttpStatus(response, status.HTTP_201_CREATED)
            self.assertEqual(response.data['prefix'], prefixes_to_be_created[i])
            self.assertEqual(response.data['description'], data['description'])
        # Try to create one more prefix
        response = self.client.post(url, {'prefix_length': 30}, **self.header)
        self.assertHttpStatus(response, status.HTTP_400_BAD_REQUEST)
        self.assertIn('detail', response.data)
    def test_create_multiple_available_prefixes(self):
        """Bulk allocation is atomic: over-asking creates nothing."""
        prefix = Prefix.objects.create(prefix=IPNetwork('192.0.2.0/28'), is_pool=True)
        url = reverse('ipam-api:prefix-available-prefixes', kwargs={'pk': prefix.pk})
        # Try to create five /30s (only four are available)
        data = [
            {'prefix_length': 30, 'description': 'Test Prefix 1'},
            {'prefix_length': 30, 'description': 'Test Prefix 2'},
            {'prefix_length': 30, 'description': 'Test Prefix 3'},
            {'prefix_length': 30, 'description': 'Test Prefix 4'},
            {'prefix_length': 30, 'description': 'Test Prefix 5'},
        ]
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_400_BAD_REQUEST)
        self.assertIn('detail', response.data)
        # Verify that no prefixes were created (the entire /28 is still available)
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data[0]['prefix'], '192.0.2.0/28')
        # Create four /30s in a single request
        response = self.client.post(url, data[:4], format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(len(response.data), 4)
    def test_list_available_ips(self):
        """is_pool toggles whether network/broadcast addresses count as usable."""
        prefix = Prefix.objects.create(prefix=IPNetwork('192.0.2.0/29'), is_pool=True)
        url = reverse('ipam-api:prefix-available-ips', kwargs={'pk': prefix.pk})
        # Retrieve all available IPs
        response = self.client.get(url, **self.header)
        self.assertEqual(len(response.data), 8)  # 8 because prefix.is_pool = True
        # Change the prefix to not be a pool and try again
        prefix.is_pool = False
        prefix.save()
        response = self.client.get(url, **self.header)
        self.assertEqual(len(response.data), 6)  # 8 - 2 because prefix.is_pool = False
    def test_create_single_available_ip(self):
        """Sequential single IP allocations exhaust the pool, then POSTs 400."""
        prefix = Prefix.objects.create(prefix=IPNetwork('192.0.2.0/30'), is_pool=True)
        url = reverse('ipam-api:prefix-available-ips', kwargs={'pk': prefix.pk})
        # Create all four available IPs with individual requests
        for i in range(1, 5):
            data = {
                'description': 'Test IP {}'.format(i)
            }
            response = self.client.post(url, data, format='json', **self.header)
            self.assertHttpStatus(response, status.HTTP_201_CREATED)
            self.assertEqual(response.data['description'], data['description'])
        # Try to create one more IP
        response = self.client.post(url, {}, **self.header)
        self.assertHttpStatus(response, status.HTTP_400_BAD_REQUEST)
        self.assertIn('detail', response.data)
    def test_create_multiple_available_ips(self):
        """Bulk IP allocation is atomic: over-asking creates nothing."""
        prefix = Prefix.objects.create(prefix=IPNetwork('192.0.2.0/29'), is_pool=True)
        url = reverse('ipam-api:prefix-available-ips', kwargs={'pk': prefix.pk})
        # Try to create nine IPs (only eight are available)
        data = [{'description': 'Test IP {}'.format(i)} for i in range(1, 10)]  # 9 IPs
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_400_BAD_REQUEST)
        self.assertIn('detail', response.data)
        # Verify that no IPs were created (eight are still available)
        response = self.client.get(url, **self.header)
        self.assertEqual(len(response.data), 8)
        # Create all eight available IPs in a single request
        data = [{'description': 'Test IP {}'.format(i)} for i in range(1, 9)]  # 8 IPs
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(len(response.data), 8)
class IPAddressTest(HttpStatusMixin, APITestCase):
    """CRUD tests for the ipam IPAddress REST API endpoints."""
    def setUp(self):
        """Authenticate as a superuser and create a VRF plus three addresses."""
        user = User.objects.create(username='testuser', is_superuser=True)
        token = Token.objects.create(user=user)
        self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
        self.vrf1 = VRF.objects.create(name='Test VRF 1', rd='65000:1')
        self.ipaddress1 = IPAddress.objects.create(address=IPNetwork('192.168.0.1/24'))
        self.ipaddress2 = IPAddress.objects.create(address=IPNetwork('192.168.0.2/24'))
        self.ipaddress3 = IPAddress.objects.create(address=IPNetwork('192.168.0.3/24'))
    def test_get_ipaddress(self):
        """Detail view serializes the address as a CIDR string."""
        url = reverse('ipam-api:ipaddress-detail', kwargs={'pk': self.ipaddress1.pk})
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['address'], str(self.ipaddress1.address))
    def test_list_ipaddresss(self):
        """List view reports all three fixture addresses."""
        url = reverse('ipam-api:ipaddress-list')
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['count'], 3)
    def test_create_ipaddress(self):
        """POST creates a single address assigned to a VRF."""
        data = {
            'address': '192.168.0.4/24',
            'vrf': self.vrf1.pk,
        }
        url = reverse('ipam-api:ipaddress-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(IPAddress.objects.count(), 4)
        ipaddress4 = IPAddress.objects.get(pk=response.data['id'])
        self.assertEqual(str(ipaddress4.address), data['address'])
        self.assertEqual(ipaddress4.vrf_id, data['vrf'])
    def test_create_ipaddress_bulk(self):
        """POSTing a list creates all addresses in a single request."""
        data = [
            {
                'address': '192.168.0.4/24',
            },
            {
                'address': '192.168.0.5/24',
            },
            {
                'address': '192.168.0.6/24',
            },
        ]
        url = reverse('ipam-api:ipaddress-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(IPAddress.objects.count(), 6)
        self.assertEqual(response.data[0]['address'], data[0]['address'])
        self.assertEqual(response.data[1]['address'], data[1]['address'])
        self.assertEqual(response.data[2]['address'], data[2]['address'])
    def test_update_ipaddress(self):
        """PUT replaces the address and VRF assignment in place."""
        data = {
            'address': '192.168.0.99/24',
            'vrf': self.vrf1.pk,
        }
        url = reverse('ipam-api:ipaddress-detail', kwargs={'pk': self.ipaddress1.pk})
        response = self.client.put(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        self.assertEqual(IPAddress.objects.count(), 3)
        ipaddress1 = IPAddress.objects.get(pk=response.data['id'])
        self.assertEqual(str(ipaddress1.address), data['address'])
        self.assertEqual(ipaddress1.vrf_id, data['vrf'])
    def test_delete_ipaddress(self):
        """DELETE removes exactly one address."""
        url = reverse('ipam-api:ipaddress-detail', kwargs={'pk': self.ipaddress1.pk})
        response = self.client.delete(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
        self.assertEqual(IPAddress.objects.count(), 2)
class VLANGroupTest(HttpStatusMixin, APITestCase):
    """CRUD tests for the ipam VLANGroup REST API endpoints."""
    def setUp(self):
        """Authenticate as a superuser and create three VLAN groups."""
        user = User.objects.create(username='testuser', is_superuser=True)
        token = Token.objects.create(user=user)
        self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
        self.vlangroup1 = VLANGroup.objects.create(name='Test VLAN Group 1', slug='test-vlan-group-1')
        self.vlangroup2 = VLANGroup.objects.create(name='Test VLAN Group 2', slug='test-vlan-group-2')
        self.vlangroup3 = VLANGroup.objects.create(name='Test VLAN Group 3', slug='test-vlan-group-3')
    def test_get_vlangroup(self):
        """Detail view returns the group's name."""
        url = reverse('ipam-api:vlangroup-detail', kwargs={'pk': self.vlangroup1.pk})
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['name'], self.vlangroup1.name)
    def test_list_vlangroups(self):
        """List view reports all three fixture groups."""
        url = reverse('ipam-api:vlangroup-list')
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['count'], 3)
    def test_create_vlangroup(self):
        """POST creates a single group with the given name/slug."""
        data = {
            'name': 'Test VLAN Group 4',
            'slug': 'test-vlan-group-4',
        }
        url = reverse('ipam-api:vlangroup-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(VLANGroup.objects.count(), 4)
        vlangroup4 = VLANGroup.objects.get(pk=response.data['id'])
        self.assertEqual(vlangroup4.name, data['name'])
        self.assertEqual(vlangroup4.slug, data['slug'])
    def test_create_vlangroup_bulk(self):
        """POSTing a list creates all groups in a single request."""
        data = [
            {
                'name': 'Test VLAN Group 4',
                'slug': 'test-vlan-group-4',
            },
            {
                'name': 'Test VLAN Group 5',
                'slug': 'test-vlan-group-5',
            },
            {
                'name': 'Test VLAN Group 6',
                'slug': 'test-vlan-group-6',
            },
        ]
        url = reverse('ipam-api:vlangroup-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(VLANGroup.objects.count(), 6)
        self.assertEqual(response.data[0]['name'], data[0]['name'])
        self.assertEqual(response.data[1]['name'], data[1]['name'])
        self.assertEqual(response.data[2]['name'], data[2]['name'])
    def test_update_vlangroup(self):
        """PUT updates an existing group without changing the total count."""
        data = {
            'name': 'Test VLAN Group X',
            'slug': 'test-vlan-group-x',
        }
        url = reverse('ipam-api:vlangroup-detail', kwargs={'pk': self.vlangroup1.pk})
        response = self.client.put(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        self.assertEqual(VLANGroup.objects.count(), 3)
        vlangroup1 = VLANGroup.objects.get(pk=response.data['id'])
        self.assertEqual(vlangroup1.name, data['name'])
        self.assertEqual(vlangroup1.slug, data['slug'])
    def test_delete_vlangroup(self):
        """DELETE removes exactly one group."""
        url = reverse('ipam-api:vlangroup-detail', kwargs={'pk': self.vlangroup1.pk})
        response = self.client.delete(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
        self.assertEqual(VLANGroup.objects.count(), 2)
class VLANTest(HttpStatusMixin, APITestCase):
    """CRUD tests for the ipam VLAN REST API endpoints."""
    def setUp(self):
        """Authenticate as a superuser and create three VLANs."""
        user = User.objects.create(username='testuser', is_superuser=True)
        token = Token.objects.create(user=user)
        self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
        self.vlan1 = VLAN.objects.create(vid=1, name='Test VLAN 1')
        self.vlan2 = VLAN.objects.create(vid=2, name='Test VLAN 2')
        self.vlan3 = VLAN.objects.create(vid=3, name='Test VLAN 3')
    def test_get_vlan(self):
        """Detail view returns the VLAN's name."""
        url = reverse('ipam-api:vlan-detail', kwargs={'pk': self.vlan1.pk})
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['name'], self.vlan1.name)
    def test_list_vlans(self):
        """List view reports all three fixture VLANs."""
        url = reverse('ipam-api:vlan-list')
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['count'], 3)
    def test_create_vlan(self):
        """POST creates a single VLAN with the given vid/name."""
        data = {
            'vid': 4,
            'name': 'Test VLAN 4',
        }
        url = reverse('ipam-api:vlan-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(VLAN.objects.count(), 4)
        vlan4 = VLAN.objects.get(pk=response.data['id'])
        self.assertEqual(vlan4.vid, data['vid'])
        self.assertEqual(vlan4.name, data['name'])
    def test_create_vlan_bulk(self):
        """POSTing a list creates all VLANs in a single request."""
        data = [
            {
                'vid': 4,
                'name': 'Test VLAN 4',
            },
            {
                'vid': 5,
                'name': 'Test VLAN 5',
            },
            {
                'vid': 6,
                'name': 'Test VLAN 6',
            },
        ]
        url = reverse('ipam-api:vlan-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(VLAN.objects.count(), 6)
        self.assertEqual(response.data[0]['name'], data[0]['name'])
        self.assertEqual(response.data[1]['name'], data[1]['name'])
        self.assertEqual(response.data[2]['name'], data[2]['name'])
    def test_update_vlan(self):
        """PUT updates an existing VLAN without changing the total count."""
        data = {
            'vid': 99,
            'name': 'Test VLAN X',
        }
        url = reverse('ipam-api:vlan-detail', kwargs={'pk': self.vlan1.pk})
        response = self.client.put(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        self.assertEqual(VLAN.objects.count(), 3)
        vlan1 = VLAN.objects.get(pk=response.data['id'])
        self.assertEqual(vlan1.vid, data['vid'])
        self.assertEqual(vlan1.name, data['name'])
    def test_delete_vlan(self):
        """DELETE removes exactly one VLAN."""
        url = reverse('ipam-api:vlan-detail', kwargs={'pk': self.vlan1.pk})
        response = self.client.delete(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
        self.assertEqual(VLAN.objects.count(), 2)
class ServiceTest(HttpStatusMixin, APITestCase):
    """CRUD tests for the ipam Service REST API endpoints."""
    def setUp(self):
        """Authenticate as a superuser and create two devices plus three services.

        BUG FIX: the original assigned all three created services to
        ``self.service1``, so ``self.service2``/``self.service3`` never
        existed and ``service1`` ended up pointing at 'Test Service 3'.
        Each service fixture now gets its own attribute.
        """
        user = User.objects.create(username='testuser', is_superuser=True)
        token = Token.objects.create(user=user)
        self.header = {'HTTP_AUTHORIZATION': 'Token {}'.format(token.key)}
        site = Site.objects.create(name='Test Site 1', slug='test-site-1')
        manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')
        devicetype = DeviceType.objects.create(manufacturer=manufacturer, model='Test Device Type 1')
        devicerole = DeviceRole.objects.create(name='Test Device Role 1', slug='test-device-role-1')
        self.device1 = Device.objects.create(
            name='Test Device 1', site=site, device_type=devicetype, device_role=devicerole
        )
        self.device2 = Device.objects.create(
            name='Test Device 2', site=site, device_type=devicetype, device_role=devicerole
        )
        self.service1 = Service.objects.create(
            device=self.device1, name='Test Service 1', protocol=IP_PROTOCOL_TCP, port=1
        )
        self.service2 = Service.objects.create(
            device=self.device1, name='Test Service 2', protocol=IP_PROTOCOL_TCP, port=2
        )
        self.service3 = Service.objects.create(
            device=self.device1, name='Test Service 3', protocol=IP_PROTOCOL_TCP, port=3
        )
    def test_get_service(self):
        """Detail view returns the service's name."""
        url = reverse('ipam-api:service-detail', kwargs={'pk': self.service1.pk})
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['name'], self.service1.name)
    def test_list_services(self):
        """List view reports all three fixture services."""
        url = reverse('ipam-api:service-list')
        response = self.client.get(url, **self.header)
        self.assertEqual(response.data['count'], 3)
    def test_create_service(self):
        """POST creates a single service bound to a device."""
        data = {
            'device': self.device1.pk,
            'name': 'Test Service 4',
            'protocol': IP_PROTOCOL_TCP,
            'port': 4,
        }
        url = reverse('ipam-api:service-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(Service.objects.count(), 4)
        service4 = Service.objects.get(pk=response.data['id'])
        self.assertEqual(service4.device_id, data['device'])
        self.assertEqual(service4.name, data['name'])
        self.assertEqual(service4.protocol, data['protocol'])
        self.assertEqual(service4.port, data['port'])
    def test_create_service_bulk(self):
        """POSTing a list creates all services in a single request."""
        data = [
            {
                'device': self.device1.pk,
                'name': 'Test Service 4',
                'protocol': IP_PROTOCOL_TCP,
                'port': 4,
            },
            {
                'device': self.device1.pk,
                'name': 'Test Service 5',
                'protocol': IP_PROTOCOL_TCP,
                'port': 5,
            },
            {
                'device': self.device1.pk,
                'name': 'Test Service 6',
                'protocol': IP_PROTOCOL_TCP,
                'port': 6,
            },
        ]
        url = reverse('ipam-api:service-list')
        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(Service.objects.count(), 6)
        self.assertEqual(response.data[0]['name'], data[0]['name'])
        self.assertEqual(response.data[1]['name'], data[1]['name'])
        self.assertEqual(response.data[2]['name'], data[2]['name'])
    def test_update_service(self):
        """PUT reassigns the service to another device and updates all fields."""
        data = {
            'device': self.device2.pk,
            'name': 'Test Service X',
            'protocol': IP_PROTOCOL_UDP,
            'port': 99,
        }
        url = reverse('ipam-api:service-detail', kwargs={'pk': self.service1.pk})
        response = self.client.put(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        self.assertEqual(Service.objects.count(), 3)
        service1 = Service.objects.get(pk=response.data['id'])
        self.assertEqual(service1.device_id, data['device'])
        self.assertEqual(service1.name, data['name'])
        self.assertEqual(service1.protocol, data['protocol'])
        self.assertEqual(service1.port, data['port'])
    def test_delete_service(self):
        """DELETE removes exactly one service."""
        url = reverse('ipam-api:service-detail', kwargs={'pk': self.service1.pk})
        response = self.client.delete(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
        self.assertEqual(Service.objects.count(), 2)
| 36.782266 | 106 | 0.601864 |
efdf607e2e75baa2446cfb851ee6565aa0db6bd3 | 6,356 | py | Python | tests/commands/mc-1.13/test_scoreboard.py | Le0Developer/mcast | bdabd897e399ff17c734b9e02d3e1e5099674a1c | [
"MIT"
] | 2 | 2021-12-28T14:10:13.000Z | 2022-01-12T16:59:20.000Z | tests/commands/mc-1.13/test_scoreboard.py | Le0Developer/mcast | bdabd897e399ff17c734b9e02d3e1e5099674a1c | [
"MIT"
] | 11 | 2021-01-18T09:00:23.000Z | 2021-01-29T09:29:04.000Z | tests/commands/mc-1.13/test_scoreboard.py | Le0Developer/mcast | bdabd897e399ff17c734b9e02d3e1e5099674a1c | [
"MIT"
] | null | null | null |
from mcfunction.versions.mc_1_13.scoreboard import (
scoreboard, ParsedScoreboardCommand
)
from mcfunction.nodes import EntityNode
def test_scoreboard_objectives_add():
    """`scoreboard objectives add` parses its parts and round-trips unchanged."""
    cmd: ParsedScoreboardCommand = scoreboard.parse(
        'scoreboard objectives add testobjective dummy')
    assert cmd.category.value == 'objectives'
    assert cmd.action.value == 'add'
    assert cmd.objective.value == 'testobjective'
    assert cmd.criterion.value == 'dummy'
    assert str(cmd) == 'scoreboard objectives add testobjective dummy'
def test_scoreboard_objectives_add_displayname():
    """The optional JSON display name is parsed into an object node."""
    cmd: ParsedScoreboardCommand = scoreboard.parse(
        'scoreboard objectives add testobjective dummy '
        '{"text":"test successful"}')
    assert cmd.name.object['text'] == 'test successful'
    assert str(cmd) == ('scoreboard objectives add testobjective dummy '
                        '{"text":"test successful"}')
def test_scoreboard_objectives_list():
    """Bare `scoreboard objectives list` takes no further arguments."""
    cmd: ParsedScoreboardCommand = scoreboard.parse('scoreboard objectives list')
    assert cmd.category.value == 'objectives'
    assert cmd.action.value == 'list'
    assert str(cmd) == 'scoreboard objectives list'
def test_scoreboard_objectives_modify_displayname():
    """`modify ... displayname` carries a JSON text component."""
    cmd: ParsedScoreboardCommand = scoreboard.parse(
        'scoreboard objectives modify testobjective '
        'displayname {"text":"test successful"}')
    assert cmd.category.value == 'objectives'
    assert cmd.action.value == 'modify'
    assert cmd.objective.value == 'testobjective'
    assert cmd.mode.value == 'displayname'
    assert cmd.name.object['text'] == 'test successful'
    assert str(cmd) == ('scoreboard objectives modify testobjective '
                        'displayname {"text":"test successful"}')
def test_scoreboard_objectives_modify_rendertype():
    """`modify ... rendertype` carries a plain render-type value."""
    cmd: ParsedScoreboardCommand = scoreboard.parse(
        'scoreboard objectives modify testobjective '
        'rendertype hearts')
    assert cmd.category.value == 'objectives'
    assert cmd.action.value == 'modify'
    assert cmd.objective.value == 'testobjective'
    assert cmd.mode.value == 'rendertype'
    assert cmd.value.value == 'hearts'
    assert str(cmd) == ('scoreboard objectives modify testobjective '
                        'rendertype hearts')
def test_scoreboard_objectives_remove():
    """`scoreboard objectives remove` names the objective to delete."""
    cmd: ParsedScoreboardCommand = scoreboard.parse(
        'scoreboard objectives remove testobjective')
    assert cmd.category.value == 'objectives'
    assert cmd.action.value == 'remove'
    assert cmd.objective.value == 'testobjective'
    assert str(cmd) == 'scoreboard objectives remove testobjective'
def test_scoreboard_objectives_setdisplay():
    """`setdisplay` with only a slot clears that display position."""
    cmd: ParsedScoreboardCommand = scoreboard.parse(
        'scoreboard objectives setdisplay sidebar')
    assert cmd.category.value == 'objectives'
    assert cmd.action.value == 'setdisplay'
    assert cmd.slot.value == 'sidebar'
    assert str(cmd) == 'scoreboard objectives setdisplay sidebar'
def test_scoreboard_objectives_setdisplay_value():
    """`setdisplay` with a trailing objective assigns it to the slot."""
    cmd: ParsedScoreboardCommand = scoreboard.parse(
        'scoreboard objectives setdisplay sidebar '
        'testobjective')
    assert cmd.objective.value == 'testobjective'
    assert str(cmd) == ('scoreboard objectives setdisplay sidebar '
                        'testobjective')
def test_scoreboard_players_add():
    """`players add` parses a target selector, objective and integer value."""
    cmd: ParsedScoreboardCommand = scoreboard.parse(
        'scoreboard players add @s testobjective 1')
    assert cmd.category.value == 'players'
    assert cmd.action.value == 'add'
    assert isinstance(cmd.target, EntityNode)
    assert cmd.objective.value == 'testobjective'
    assert cmd.value.value == 1
    assert str(cmd) == 'scoreboard players add @s testobjective 1'
def test_scoreboard_players_enable():
    """`players enable` parses a target selector and objective."""
    cmd: ParsedScoreboardCommand = scoreboard.parse(
        'scoreboard players enable @s testobjective')
    assert cmd.category.value == 'players'
    assert cmd.action.value == 'enable'
    assert isinstance(cmd.target, EntityNode)
    assert cmd.objective.value == 'testobjective'
    assert str(cmd) == 'scoreboard players enable @s testobjective'
def test_scoreboard_players_list():
    """Bare `players list` has no target."""
    cmd: ParsedScoreboardCommand = scoreboard.parse('scoreboard players list')
    assert cmd.category.value == 'players'
    assert cmd.action.value == 'list'
    assert str(cmd) == 'scoreboard players list'
def test_scoreboard_players_list_target():
    """`players list` with a selector parses that target."""
    cmd: ParsedScoreboardCommand = scoreboard.parse('scoreboard players list @s')
    assert cmd.category.value == 'players'
    assert cmd.action.value == 'list'
    assert isinstance(cmd.target, EntityNode)
    assert str(cmd) == 'scoreboard players list @s'
def test_scoreboard_players_operation():
    """`players operation` parses both sides plus the swap operator."""
    cmd: ParsedScoreboardCommand = scoreboard.parse(
        'scoreboard players operation @s testobjective1 '
        '>< @s testobjective2')
    assert cmd.category.value == 'players'
    assert cmd.action.value == 'operation'
    assert isinstance(cmd.target, EntityNode)
    assert cmd.objective.value == 'testobjective1'
    assert cmd.operation.value == '><'
    assert isinstance(cmd.source_target, EntityNode)
    assert cmd.source_objective.value == 'testobjective2'
    assert str(cmd) == ('scoreboard players operation @s testobjective1 '
                        '>< @s testobjective2')
def test_scoreboard_players_reset():
    """`players reset` with only a target resets every objective."""
    cmd: ParsedScoreboardCommand = scoreboard.parse('scoreboard players reset @s')
    assert cmd.category.value == 'players'
    assert cmd.action.value == 'reset'
    assert isinstance(cmd.target, EntityNode)
    assert str(cmd) == 'scoreboard players reset @s'
def test_scoreboard_players_reset_objective():
    """`players reset` with a trailing objective resets only that one."""
    cmd: ParsedScoreboardCommand = scoreboard.parse(
        'scoreboard players reset @s testobjective')
    assert cmd.objective.value == 'testobjective'
    assert str(cmd) == 'scoreboard players reset @s testobjective'
| 34.356757 | 79 | 0.711454 |
17349559f5717ff3f76c9b9bafeb8655285be306 | 3,509 | py | Python | packages/v8env/vendor/whatwg-streams/reference-implementation/web-platform-tests/tools/wptrunner/wptrunner/browsers/webkit.py | GagnDeep/v8-isolates | b9fa6b88fc029fadf5a8eef71b803589959c4da5 | [
"Apache-2.0"
] | null | null | null | packages/v8env/vendor/whatwg-streams/reference-implementation/web-platform-tests/tools/wptrunner/wptrunner/browsers/webkit.py | GagnDeep/v8-isolates | b9fa6b88fc029fadf5a8eef71b803589959c4da5 | [
"Apache-2.0"
] | null | null | null | packages/v8env/vendor/whatwg-streams/reference-implementation/web-platform-tests/tools/wptrunner/wptrunner/browsers/webkit.py | GagnDeep/v8-isolates | b9fa6b88fc029fadf5a8eef71b803589959c4da5 | [
"Apache-2.0"
] | null | null | null | from .base import Browser, ExecutorBrowser, require_arg
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorselenium import (SeleniumTestharnessExecutor, # noqa: F401
SeleniumRefTestExecutor) # noqa: F401
from ..executors.executorwebkit import WebKitDriverWdspecExecutor # noqa: F401
from ..webdriver_server import WebKitDriverServer
# Registration table read by wptrunner: maps hook names to the names of the
# callables/classes in this module that implement the "webkit" product.
__wptrunner__ = {"product": "webkit",
                 "check_args": "check_args",
                 "browser": "WebKitBrowser",
                 "browser_kwargs": "browser_kwargs",
                 "executor": {"testharness": "SeleniumTestharnessExecutor",
                              "reftest": "SeleniumRefTestExecutor",
                              "wdspec": "WebKitDriverWdspecExecutor"},
                 "executor_kwargs": "executor_kwargs",
                 "env_extras": "env_extras",
                 "env_options": "env_options"}
def check_args(**kwargs):
    """Fail fast unless all mandatory command-line arguments are present."""
    for required in ("binary", "webdriver_binary", "webkit_port"):
        require_arg(kwargs, required)
def browser_kwargs(test_type, run_info_data, **kwargs):
    """Select the subset of run arguments that WebKitBrowser.__init__ needs."""
    return {
        "binary": kwargs["binary"],
        "webdriver_binary": kwargs["webdriver_binary"],
        "webdriver_args": kwargs.get("webdriver_args"),
    }
def capabilities_for_port(server_config, **kwargs):
    """Build WebDriver capabilities for the selected WebKit port.

    Only the GTK port currently needs extra capabilities; any other
    port yields an empty dict.
    """
    from selenium.webdriver import DesiredCapabilities

    if kwargs["webkit_port"] != "gtk":
        return {}
    browser_options = {
        "binary": kwargs["binary"],
        "args": kwargs.get("binary_args", []),
        "certificates": [
            {"host": server_config["browser_host"],
             "certificateFile": kwargs["host_cert_path"]}
        ]
    }
    capabilities = dict(DesiredCapabilities.WEBKITGTK.copy())
    capabilities["webkitgtk:browserOptions"] = browser_options
    return capabilities
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                    **kwargs):
    """Extend the generic executor kwargs with WebKit-specific settings."""
    result = base_executor_kwargs(test_type, server_config,
                                  cache_manager, run_info_data, **kwargs)
    result.update({
        "close_after_done": True,
        "capabilities": capabilities_for_port(server_config, **kwargs),
    })
    return result
def env_extras(**kwargs):
    """No extra environment helpers are needed for WebKit."""
    return []
def env_options():
    """No overrides to the default test environment options."""
    return {}
class WebKitBrowser(Browser):
    """Generic WebKit browser is backed by WebKit's WebDriver implementation,
    which is supplied through ``wptrunner.webdriver.WebKitDriverServer``.
    """
    def __init__(self, logger, binary, webdriver_binary=None,
                 webdriver_args=None):
        """Wrap a WebKitDriverServer around the given driver binary/args."""
        Browser.__init__(self, logger)
        self.binary = binary
        self.server = WebKitDriverServer(self.logger, binary=webdriver_binary,
                                         args=webdriver_args)
    def start(self, **kwargs):
        """Start the WebDriver server without blocking on readiness."""
        self.server.start(block=False)
    def stop(self, force=False):
        """Stop the WebDriver server, optionally killing it."""
        self.server.stop(force=force)
    def pid(self):
        """Return the WebDriver server's process id."""
        return self.server.pid
    def is_alive(self):
        """Return whether the WebDriver server process is running."""
        # TODO(ato): This only indicates the driver is alive,
        # and doesn't say anything about whether a browser session
        # is active.
        return self.server.is_alive()
    def cleanup(self):
        """Alias for stop(); called by the runner on teardown."""
        self.stop()
    def executor_browser(self):
        """Return the executor-side proxy class and its constructor kwargs."""
        return ExecutorBrowser, {"webdriver_url": self.server.url}
| 34.742574 | 84 | 0.618695 |
99df1e94aefbfc9e2ca02726bd1aab50e9df9159 | 784 | py | Python | glhe/profiles/flow_factory.py | stianchris/GLHE | 80c3eecca81ffd50d5077f87027c9441292452f5 | [
"MIT"
] | 2 | 2018-11-06T08:04:04.000Z | 2020-10-09T14:52:36.000Z | glhe/profiles/flow_factory.py | stianchris/GLHE | 80c3eecca81ffd50d5077f87027c9441292452f5 | [
"MIT"
] | 68 | 2018-03-27T01:43:22.000Z | 2019-09-09T12:05:44.000Z | glhe/profiles/flow_factory.py | mitchute/GLHE | 80c3eecca81ffd50d5077f87027c9441292452f5 | [
"MIT"
] | 4 | 2018-05-24T03:02:44.000Z | 2021-08-16T13:54:09.000Z | from typing import Union
from glhe.input_processor.input_processor import InputProcessor
from glhe.output_processor.output_processor import OutputProcessor
from glhe.profiles.constant_flow import ConstantFlow
from glhe.profiles.external_flow import ExternalFlow
def make_flow_profile(inputs: dict, ip: InputProcessor, op: OutputProcessor) -> Union[ConstantFlow,
                                                                                      ExternalFlow]:
    """Instantiate the flow profile named by inputs['flow-profile-type'].

    Raises ValueError for an unrecognized profile type.
    """
    profile_type = inputs['flow-profile-type']
    if profile_type == 'constant':
        return ConstantFlow(inputs, ip, op)
    if profile_type == 'external':
        return ExternalFlow(inputs, ip, op)
    raise ValueError("Flow profile '{}' is not valid.".format(profile_type))
| 43.555556 | 100 | 0.6875 |
d4d337261f363a75e929fcb01560e55d57e338bd | 374 | py | Python | News-Notices-PressRelease-Tenders.py | vinaysomawat/NITW-event-updates | 5e58a023c8e326e0cfe25410e6c9781752dec433 | [
"Apache-2.0"
] | 1 | 2019-08-30T16:42:05.000Z | 2019-08-30T16:42:05.000Z | News-Notices-PressRelease-Tenders.py | vinaysomawat/NITW-event-updates | 5e58a023c8e326e0cfe25410e6c9781752dec433 | [
"Apache-2.0"
] | null | null | null | News-Notices-PressRelease-Tenders.py | vinaysomawat/NITW-event-updates | 5e58a023c8e326e0cfe25410e6c9781752dec433 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, render_template
from flask_ask import Ask, statement, question
import bs4 as bs
import urllib.request
source = urllib.request.urlopen('https://www.nitw.ac.in/main/update/News/Notices/Press%20Release/Tenders/').read()
soup = bs.BeautifulSoup(source,'lxml')
events = []
for event in soup.find_all('td'):
events.append(event.text)
print (events)
| 28.769231 | 114 | 0.762032 |
30c94181c7aa07712dca68250baa559ce68207ad | 3,479 | py | Python | src/olympia/translations/forms.py | shashwatsingh/addons-server | 8fce98901104349055a828b5a47865f5e8f4120b | [
"BSD-3-Clause"
] | 10 | 2018-08-16T04:55:06.000Z | 2022-01-08T16:09:39.000Z | src/olympia/translations/forms.py | shashwatsingh/addons-server | 8fce98901104349055a828b5a47865f5e8f4120b | [
"BSD-3-Clause"
] | 1,398 | 2020-10-08T06:32:26.000Z | 2022-03-31T12:06:24.000Z | src/olympia/translations/forms.py | shashwatsingh/addons-server | 8fce98901104349055a828b5a47865f5e8f4120b | [
"BSD-3-Clause"
] | 12 | 2018-08-01T16:46:09.000Z | 2022-01-08T16:09:46.000Z | from django.conf import settings
from django.db import models
from django.forms import ValidationError
from django.forms.utils import ErrorList
from django.utils.encoding import force_text
from django.utils.html import conditional_escape, format_html, format_html_join
from django.utils.safestring import mark_safe
from django.utils.translation.trans_real import to_language
from .fields import LocaleErrorMessage, _TransField
def default_locale(obj):
    """Get obj's default locale."""
    # Objects without a get_fallback() hook use the site-wide default.
    if not hasattr(obj, 'get_fallback'):
        return settings.LANGUAGE_CODE
    fallback = obj.get_fallback()
    # get_fallback() may return a model Field describing *where* the fallback
    # value lives rather than the value itself; dereference it in that case.
    if isinstance(fallback, models.Field):
        return getattr(obj, fallback.name)
    return fallback
class TranslationFormMixin(object):
    """
    A mixin for forms with translations that tells fields about the object's
    default locale.
    """
    # Hack to restore behavior from pre Django 1.10 times.
    # Django 1.10 enabled `required` rendering for required widgets. That
    # wasn't the case before, this should be fixed properly but simplifies
    # the actual Django 1.11 deployment for now.
    # See https://github.com/mozilla/addons-server/issues/8912 for proper fix.
    use_required_attribute = False
    def __init__(self, *args, **kwargs):
        # Force the locale-aware error list so per-locale errors keep their
        # locale information when rendered (see LocaleErrorList below).
        kwargs['error_class'] = LocaleErrorList
        super(TranslationFormMixin, self).__init__(*args, **kwargs)
        self.set_locale_field_defaults()
    def set_locale_field_defaults(self):
        """Propagate the bound instance's default locale to every translated field."""
        locale = to_language(default_locale(self.instance))
        for field_name, field in self.fields.items():
            # Only translation-aware fields understand set_default_values().
            if isinstance(field, _TransField):
                field.set_default_values(
                    field_name=field_name,
                    parent_form=self,
                    default_locale=locale)
    def add_error(self, field, error):
        """Route locale-tagged errors into our error list; defer everything else."""
        if isinstance(error, LocaleErrorMessage):
            self._errors.setdefault(field, self.error_class())
            self._errors[field].append(error)
            # A field with errors must not contribute cleaned data.
            if field in self.cleaned_data:
                del self.cleaned_data[field]
        else:
            # Didn't come from a translation field, forward
            # to original implementation.
            super(TranslationFormMixin, self).add_error(field, error)
    def full_clean(self):
        # Re-apply locale defaults before validation in case fields changed
        # after __init__ ran.
        self.set_locale_field_defaults()
        return super(TranslationFormMixin, self).full_clean()
class LocaleErrorList(ErrorList):
    """Error list that renders a ``data-lang`` attribute for locale-tagged errors."""

    def as_ul(self):
        if not self.data:
            return u''
        rendered = []
        for entry in self.data:
            if isinstance(entry, LocaleErrorMessage):
                # Tag the <li> with the locale the error belongs to.
                attrs = mark_safe(
                    u' data-lang="%s"' % conditional_escape(entry.locale))
                text = entry.message
            else:
                attrs = u''
                text = u''.join(list(entry))
            rendered.append((attrs, conditional_escape(force_text(text))))
        items_html = format_html_join(
            u'',
            u'<li{}>{}</li>',
            rendered
        )
        return mark_safe(format_html(
            u'<ul class="{}">{}</ul>',
            self.error_class,
            items_html
        ))

    def __getitem__(self, i):
        entry = self.data[i]
        if isinstance(entry, LocaleErrorMessage):
            return entry.message
        if isinstance(entry, ValidationError):
            return list(entry)[0]
        return force_text(entry)
4dfd460ff998df414d05fba43fe202b38a0fca0f | 4,615 | py | Python | release/nightly_tests/dataset/sort.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 22 | 2018-05-08T05:52:34.000Z | 2020-04-01T10:09:55.000Z | release/nightly_tests/dataset/sort.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 73 | 2021-09-25T07:11:39.000Z | 2022-03-26T07:10:59.000Z | release/nightly_tests/dataset/sort.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 10 | 2018-04-27T10:50:59.000Z | 2020-02-24T02:41:43.000Z | import ray
import numpy as np
import time
import os
import psutil
import resource
import json
from typing import List
from ray.data.impl.arrow_block import ArrowRow
from ray.data.impl.util import _check_pyarrow_version
from ray.data.block import Block, BlockMetadata
from ray.data.datasource import Datasource, ReadTask
from ray.internal.internal_api import memory_summary
class RandomIntRowDatasource(Datasource[ArrowRow]):
    """Datasource whose rows are uniformly random non-negative int64 columns.

    Examples:
        >>> source = RandomIntRowDatasource()
        >>> ray.data.read_datasource(source, n=10, num_columns=2).take()
        ... {'c_0': 1717767200176864416, 'c_1': 999657309586757214}
        ... {'c_0': 4983608804013926748, 'c_1': 1160140066899844087}
    """

    def prepare_read(
        self, parallelism: int, n: int, num_columns: int
    ) -> List[ReadTask]:
        _check_pyarrow_version()
        import pyarrow

        rows_per_block = max(1, n // parallelism)

        def build_block(row_count: int, col_count: int) -> Block:
            # Columns drawn uniformly over the full non-negative int64 range.
            return pyarrow.Table.from_arrays(
                np.random.randint(
                    np.iinfo(np.int64).max, size=(col_count, row_count), dtype=np.int64
                ),
                names=[f"c_{i}" for i in range(col_count)],
            )

        # Single-row dummy table just to obtain the column schema.
        schema = pyarrow.Table.from_pydict(
            {f"c_{i}": [0] for i in range(num_columns)}
        ).schema

        tasks: List[ReadTask] = []
        for start in range(0, n, rows_per_block):
            row_count = min(rows_per_block, n - start)
            meta = BlockMetadata(
                num_rows=row_count,
                size_bytes=8 * row_count * num_columns,
                schema=schema,
                input_files=None,
                exec_stats=None,
            )
            # Bind row_count through default args so each task keeps its own
            # value instead of the loop's final one (late-binding closures).
            tasks.append(
                ReadTask(
                    lambda row_count=row_count, col_count=num_columns: [
                        build_block(row_count, col_count)
                    ],
                    meta,
                )
            )
        return tasks
if __name__ == "__main__":
    # Benchmark driver: build a synthetic dataset of random int64 rows, then
    # time a distributed sort (or shuffle) and report memory/latency metrics.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--num-partitions", help="number of partitions", default="50", type=str
    )
    parser.add_argument(
        "--partition-size",
        help="partition size (bytes)",
        default="200e6",
        type=str,
    )
    parser.add_argument(
        "--shuffle", help="shuffle instead of sort", action="store_true"
    )
    args = parser.parse_args()
    num_partitions = int(args.num_partitions)
    # Accepts scientific notation like "200e6", hence float() then int().
    partition_size = int(float(args.partition_size))
    print(
        f"Dataset size: {num_partitions} partitions, "
        f"{partition_size / 1e9}GB partition size, "
        f"{num_partitions * partition_size / 1e9}GB total"
    )
    start_time = time.time()
    source = RandomIntRowDatasource()
    # Each row is one int64 column, i.e. 8 bytes per row.
    num_rows_per_partition = partition_size // 8
    ds = ray.data.read_datasource(
        source,
        parallelism=num_partitions,
        n=num_rows_per_partition * num_partitions,
        num_columns=1,
    )
    # Capture any failure so metrics/stats still print; re-raised at the end.
    exc = None
    try:
        if args.shuffle:
            ds = ds.random_shuffle()
        else:
            ds = ds.sort(key="c_0")
    except Exception as e:
        exc = e
        pass
    end_time = time.time()
    duration = end_time - start_time
    print("Finished in", duration)
    print("")
    print("==== Driver memory summary ====")
    # Code treats ru_maxrss as kilobytes and converts to bytes here.
    # NOTE(review): ru_maxrss is KB on Linux but bytes on macOS — confirm target OS.
    maxrss = int(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * 1e3)
    print(f"max: {maxrss / 1e9}/GB")
    process = psutil.Process(os.getpid())
    rss = int(process.memory_info().rss)
    print(f"rss: {rss / 1e9}/GB")
    print(memory_summary(stats_only=True))
    print("")
    print(ds.stats())
    # When run under CI, emit machine-readable results to the requested path.
    # NOTE(review): out_file is never explicitly closed/flushed.
    if "TEST_OUTPUT_JSON" in os.environ:
        out_file = open(os.environ["TEST_OUTPUT_JSON"], "w")
        results = {
            "time": duration,
            "success": "1" if exc is None else "0",
            "num_partitions": num_partitions,
            "partition_size": partition_size,
            "perf_metrics": [
                {
                    "perf_metric_name": "peak_driver_memory",
                    "perf_metric_value": maxrss,
                    "perf_metric_type": "MEMORY",
                },
                {
                    "perf_metric_name": "runtime",
                    "perf_metric_value": duration,
                    "perf_metric_type": "LATENCY",
                },
            ],
        }
        json.dump(results, out_file)
    if exc:
        raise exc
e7fcd26e55e0f04576a2db9b6ea17f1a9558f35c | 1,284 | py | Python | KnowledgeMapping/PIL_OpenCV_Exp/other/dtcExp.py | nickliqian/ralph_doc_to_chinese | be120ce2bb94a8e8395630218985f5e51ae087d9 | [
"MIT"
] | 8 | 2018-05-22T01:11:33.000Z | 2020-03-19T01:44:55.000Z | KnowledgeMapping/PIL_OpenCV_Exp/other/dtcExp.py | yangliangguang/keep_learning | 47ab39c726cb28713ad22bf4cf39d6b146715910 | [
"MIT"
] | null | null | null | KnowledgeMapping/PIL_OpenCV_Exp/other/dtcExp.py | yangliangguang/keep_learning | 47ab39c726cb28713ad22bf4cf39d6b146715910 | [
"MIT"
] | 3 | 2018-07-25T09:31:53.000Z | 2019-09-14T14:05:31.000Z | # 二维图像的离散变余弦换(DCT)
# Python3.5
# 库:cv2+numpy+matplotlib
# 作者:James_Ray_Murphy
import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread('timg.jpg', 0)
img1 = img.astype('float')
C_temp = np.zeros(img.shape)
dst = np.zeros(img.shape)
m, n = img.shape
N = n
C_temp[0, :] = 1 * np.sqrt(1 / N)
for i in range(1, m):
for j in range(n):
C_temp[i, j] = np.cos(np.pi * i * (2 * j + 1) / (2 * N)) * np.sqrt(2 / N)
dst = np.dot(C_temp, img1)
dst = np.dot(dst, np.transpose(C_temp))
dst1 = np.log(abs(dst)) # 进行log处理
img_recor = np.dot(np.transpose(C_temp), dst)
img_recor1 = np.dot(img_recor, C_temp)
# 自带方法
img_dct = cv2.dct(img1) # 进行离散余弦变换
img_dct_log = np.log(abs(img_dct)) # 进行log处理
img_recor2 = cv2.idct(img_dct) # 进行离散余弦反变换
plt.subplot(231)
plt.imshow(img1, 'gray')
plt.title('original image')
plt.xticks([]), plt.yticks([])
plt.subplot(232)
plt.imshow(dst1)
plt.title('DCT1')
plt.xticks([]), plt.yticks([])
plt.subplot(233)
plt.imshow(img_recor1, 'gray')
plt.title('IDCT1')
plt.xticks([]), plt.yticks([])
plt.subplot(234)
plt.imshow(img, 'gray')
plt.title('original image')
plt.subplot(235)
plt.imshow(img_dct_log)
plt.title('DCT2(cv2_dct)')
plt.subplot(236)
plt.imshow(img_recor2, 'gray')
plt.title('IDCT2(cv2_idct)')
plt.show() | 18.882353 | 81 | 0.666667 |
8d2838e7cd29a501a3abc4128bb77f7e453546e1 | 888 | py | Python | CAIL2020/znwd/prepare.py | ShenDezhou/CAIL | c4cfa98ab4ecedbce34a7a5a186830486047540c | [
"Apache-2.0"
] | 71 | 2020-07-16T01:49:27.000Z | 2022-03-27T16:55:00.000Z | CAIL2020/lbwjx/prepare.py | ShenDezhou/CAIL | c4cfa98ab4ecedbce34a7a5a186830486047540c | [
"Apache-2.0"
] | 11 | 2020-09-18T14:26:25.000Z | 2022-02-09T23:49:33.000Z | CAIL2020/znwd/prepare.py | ShenDezhou/CAIL | c4cfa98ab4ecedbce34a7a5a186830486047540c | [
"Apache-2.0"
] | 16 | 2020-07-15T07:24:30.000Z | 2022-03-19T05:41:11.000Z | """Separate train and valid set for train file.
Author: Tsinghuaboy tsinghua9boy@sina.com
Usage:
python prepare.py --train_in 'data/SMP-CAIL2020-train.csv' \
--train_out 'data/train.csv' \
--valid_out 'data/valid.csv'
"""
import fire
import pandas as pd
def main(train_in='data/SMP-CAIL2020-train.csv',
         train_out='data/train.csv',
         valid_out='data/valid.csv',
         train_fraction=1.0):
    """Main method to divide dataset.

    Args:
        train_in: origin train file
        train_out: train file
        valid_out: valid file
        train_fraction: fraction of rows written to ``train_out``; the
            remainder go to ``valid_out``. Defaults to 1.0, which matches the
            previous hard-coded behavior (empty validation set).
    """
    data = pd.read_csv(train_in, encoding='utf-8')
    total_num = data.shape[0]
    # First train_num rows form the train split, the rest the valid split.
    train_num = int(train_fraction * total_num)
    data[:train_num].to_csv(train_out, encoding='utf-8', index=False)
    data[train_num:].to_csv(valid_out, encoding='utf-8', index=False)


if __name__ == '__main__':
    # fire exposes main's parameters as CLI flags (e.g. --train_fraction 0.8).
    fire.Fire(main)
| 26.909091 | 69 | 0.635135 |
1e2d897d7093cfb1f4ae6eac194d57080fa89fbc | 4,382 | py | Python | datasets/cc_news/cc_news.py | WojciechKusa/datasets | 1406a04c3e911cec2680d8bc513653e0cafcaaa4 | [
"Apache-2.0"
] | 10,608 | 2020-09-10T15:47:50.000Z | 2022-03-31T22:51:47.000Z | datasets/cc_news/cc_news.py | realChainLife/datasets | 98261e8b0b7be4dbaaa71ae188b950f7fbe51bbd | [
"Apache-2.0"
] | 2,396 | 2020-09-10T14:55:31.000Z | 2022-03-31T19:41:04.000Z | datasets/cc_news/cc_news.py | realChainLife/datasets | 98261e8b0b7be4dbaaa71ae188b950f7fbe51bbd | [
"Apache-2.0"
] | 1,530 | 2020-09-10T21:43:10.000Z | 2022-03-31T01:59:12.000Z | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The CC-News dataset is based on Common Crawl News Dataset by Sebastian Nagel"""
import json
import os
from fnmatch import fnmatch
import datasets
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """\
CC-News containing news articles from news sites all over the world \
The data is available on AWS S3 in the Common Crawl bucket at /crawl-data/CC-NEWS/. \
This version of the dataset has 708241 articles. It represents a small portion of English \
language subset of the CC-News dataset created using news-please(Hamborg et al.,2017) to \
collect and extract English language portion of CC-News.
"""
_CITATION = """\
@InProceedings{Hamborg2017,
author = {Hamborg, Felix and Meuschke, Norman and Breitinger, Corinna and Gipp, Bela},
title = {news-please: A Generic News Crawler and Extractor},
year = {2017},
booktitle = {Proceedings of the 15th International Symposium of Information Science},
location = {Berlin},
doi = {10.5281/zenodo.4120316},
pages = {218--223},
month = {March}
}
"""
_PROJECT_URL = "https://commoncrawl.org/2016/10/news-dataset-available/"
_DOWNLOAD_URL = "https://storage.googleapis.com/huggingface-nlp/datasets/cc_news/cc_news.tar.gz"
class CCNewsConfig(datasets.BuilderConfig):
    """BuilderConfig for CCNews."""
    def __init__(self, **kwargs):
        """BuilderConfig for CCNews.
        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        # Version is pinned here; passing version in kwargs would raise a
        # TypeError (duplicate keyword argument).
        super(CCNewsConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
class CCNews(datasets.GeneratorBasedBuilder):
    """CC-News dataset."""
    # Single configuration: plain-text news articles.
    BUILDER_CONFIGS = [
        CCNewsConfig(
            name="plain_text",
            description="Plain text",
        )
    ]
    def _info(self):
        """Declare the dataset schema: all fields are plain strings."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "domain": datasets.Value("string"),
                    "date": datasets.Value("string"),
                    "description": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "image_url": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_PROJECT_URL,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Download the tarball and expose a single TRAIN split streamed from it."""
        archive = dl_manager.download(_DOWNLOAD_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": dl_manager.iter_archive(archive)}),
        ]
    def _generate_examples(self, files):
        """Yield (sequential id, example dict) for every *.json file in the archive."""
        id_ = 0
        for article_file_path, f in files:
            # Only JSON members of the archive are articles.
            if fnmatch(os.path.basename(article_file_path), "*.json"):
                article = json.load(f)
                # Missing (None) fields are normalized to empty strings;
                # present fields are whitespace-stripped.
                yield id_, {
                    "title": article["title"].strip() if article["title"] is not None else "",
                    "text": article["maintext"].strip() if article["maintext"] is not None else "",
                    "domain": article["source_domain"].strip() if article["source_domain"] is not None else "",
                    "date": article["date_publish"].strip() if article["date_publish"] is not None else "",
                    "description": article["description"].strip() if article["description"] is not None else "",
                    "url": article["url"].strip() if article["url"] is not None else "",
                    "image_url": article["image_url"].strip() if article["image_url"] is not None else "",
                }
                id_ += 1
a63c9116c9339ce51e48ca23a17801653ab1985c | 9,772 | py | Python | tests/build_feedstock_test.py | nkalband/open-ce | 1cda678df13ccdb2e0fc10a0877df5337e37fa1a | [
"Apache-2.0"
] | null | null | null | tests/build_feedstock_test.py | nkalband/open-ce | 1cda678df13ccdb2e0fc10a0877df5337e37fa1a | [
"Apache-2.0"
] | null | null | null | tests/build_feedstock_test.py | nkalband/open-ce | 1cda678df13ccdb2e0fc10a0877df5337e37fa1a | [
"Apache-2.0"
] | null | null | null | # *****************************************************************
# (C) Copyright IBM Corp. 2020, 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# *****************************************************************
import os
import pathlib
import pytest
from importlib.util import spec_from_loader, module_from_spec
from importlib.machinery import SourceFileLoader
test_dir = pathlib.Path(__file__).parent.absolute()
# The CLI entry point "open-ce" has no .py extension, so it cannot be imported
# normally; load it explicitly under the module name "opence".
spec = spec_from_loader("opence", SourceFileLoader("opence", os.path.join(test_dir, '..', 'open_ce', 'open-ce')))
opence = module_from_spec(spec)
spec.loader.exec_module(opence)
import helpers
import open_ce.utils as utils
from open_ce.errors import OpenCEError
import open_ce.build_feedstock as build_feedstock
def test_build_feedstock_default(mocker):
    """
    Tests that the default arguments for 'build_feedstock' generate the correct 'conda_build.api.build' input args.
    """
    mocker.patch(
        'os.getcwd',
        return_value="/test/test_recipe"
    )
    # No local config file exists, so built-in defaults must be used.
    mocker.patch(
        'os.path.exists',
        return_value=False
    )
    expect_recipe = os.path.join(os.getcwd(),'recipe')
    expect_config = {'variant_config_files' : [utils.DEFAULT_CONDA_BUILD_CONFIG],
                     'output_folder' : utils.DEFAULT_OUTPUT_FOLDER}
    # Instead of building, the mock validates the args conda-build receives.
    mocker.patch(
        'conda_build.api.build',
        side_effect=(lambda x, **kwargs: helpers.validate_conda_build_args(x, expect_recipe=expect_recipe, expect_config=expect_config, **kwargs))
    )
    opence._main(["build", build_feedstock.COMMAND])
def test_build_feedstock_failure(mocker):
    """
    Tests that a 'conda_build.api.build' failure is handled correctly.
    """
    mocker.patch(
        'os.getcwd',
        return_value="/test/test_recipe"
    )
    mocker.patch(
        'os.path.exists',
        return_value=False
    )
    mocker.patch(
        'conda_build.api.build',
        side_effect=ValueError("invalid literal for int() with base 10: 'xy'") #using ValueError to simulate a failure.
    )
    # The recipe name in the error comes from the basename of the mocked cwd.
    with pytest.raises(OpenCEError) as exc:
        opence._main(["build", build_feedstock.COMMAND])
    assert "Unable to build recipe: test_recipe" in str(exc.value)
def test_build_feedstock_working_dir(mocker):
    """
    Tests that the 'working_dir' argument is correctly handled and the original working directory is restored after execution.
    """
    dirTracker = helpers.DirTracker("/test/starting_dir")
    mocker.patch(
        'os.getcwd',
        side_effect=dirTracker.mocked_getcwd
    )
    mocker.patch(
        'os.path.exists',
        return_value=False
    )
    mocker.patch(
        'conda_build.api.build',
        return_value=[]
    )
    working_dir = "/test/my_work_dir"
    # DirTracker asserts os.chdir is called with exactly these dirs, in order.
    mocker.patch(
        'os.chdir',
        side_effect=(lambda x: dirTracker.validate_chdir(x, expected_dirs=[working_dir, # First the working directory should be changed to the arg.
                                                                           "/test/starting_dir"])) # And then changed back to the starting directory.
    )
    opence._main(["build", build_feedstock.COMMAND, "--working_directory", working_dir])
def test_build_feedstock_config_file(mocker):
    """
    Tests that the 'recipe_config_file' argument is correctly handled.
    """
    expect_recipe = os.path.join(os.getcwd(),'cuda_recipe_path') #Checks that the value from the input config file is used.
    mocker.patch(
        'conda_build.api.build',
        side_effect=(lambda x, **kwargs: helpers.validate_conda_build_args(x, expect_recipe=expect_recipe, **kwargs))
    )
    # my_config.yaml lives next to this test file (see test_dir above).
    opence._main(["build", build_feedstock.COMMAND, "--recipe-config-file", os.path.join(test_dir, "my_config.yaml"), "--build_type", "cuda"])
def test_build_feedstock_default_config_file(mocker):
    """
    Tests that the default config file is loaded when no argument is specified.
    """
    mocker.patch(
        'os.getcwd',
        return_value="/test/test_recipe"
    )
    mocker.patch(
        'os.path.exists',
        return_value=True #True for default config file.
    )
    expect_recipe = os.path.join(os.getcwd(),'variants_from_default_config')#Checks that the value from the default config file is used.
    mocker.patch(
        'conda_build.api.build',
        side_effect=(lambda x, **kwargs: helpers.validate_conda_build_args(x, expect_recipe=expect_recipe, **kwargs))
    )
    # render_yaml is mocked so the "default config file" contents are simulated.
    test_recipe_config = {'recipes' : [{'name' : 'my_variant', 'path' : 'variants_from_default_config'}]}
    mocker.patch('open_ce.conda_utils.render_yaml', return_value=test_recipe_config)
    opence._main(["build", build_feedstock.COMMAND])
def test_build_feedstock_nonexist_config_file(mocker):
    """
    Tests that execution fails and the correct error message is shown if the default config file doesn't exist.
    """
    mocker.patch(
        'os.getcwd',
        return_value="/test/test_recipe"
    )
    # os.path.exists is False, so the explicitly-passed config cannot be found.
    mocker.patch(
        'os.path.exists',
        return_value=False
    )
    with pytest.raises(OpenCEError) as exc:
        opence._main(["build", build_feedstock.COMMAND, "--recipe-config-file", "my_config.yml"])
    assert "Unable to open provided config file: my_config.yml" in str(exc.value)
def test_recipe_config_file_for_inapplicable_configuration(mocker, capsys):
    """
    Tests the case when build is triggered for a configuration for which no recipes are applicable.
    """
    expect_recipe = os.path.join(os.getcwd(),'cuda_recipe_path') #Checks that the value from the input config file is used.
    mocker.patch(
        'conda_build.api.build',
        side_effect=(lambda x, **kwargs: helpers.validate_conda_build_args(x, expect_recipe=expect_recipe, **kwargs))
    )
    # Python "4.1" matches no recipe in my_config.yaml, so nothing is built
    # and the CLI just reports that via stdout (captured by capsys).
    opence._main(["build", build_feedstock.COMMAND, "--recipe-config-file", os.path.join(test_dir, "my_config.yaml"), "--python_versions", "4.1"])
    captured = capsys.readouterr()
    assert "INFO: No recipe to build for given configuration." in captured.out
def test_build_feedstock_local_src_dir_args(mocker):
    """
    Tests that providing the local_src_dir argument sets the LOCAL_SRC_DIR environment variable correctly.
    """
    # Pretend the provided path exists so validation passes.
    mocker.patch(
        'os.path.exists',
        return_value=True
    )
    build_feedstock._set_local_src_dir("my_src_dir", None, None)
    assert os.environ["LOCAL_SRC_DIR"] == "my_src_dir"
def test_build_feedstock_local_src_dir_args_fail(mocker):
    """
    Tests that providing the local_src_dir argument to a non-existant file fails properly.
    """
    mocker.patch(
        'os.path.exists',
        return_value=False
    )
    # The explicit first argument takes precedence over the recipe's value,
    # so the error message names "my_src_dir".
    with pytest.raises(OpenCEError) as exc:
        build_feedstock._set_local_src_dir("my_src_dir", { 'local_src_dir' : "my_other_src_dir" }, None)
    assert "local_src_dir path \"my_src_dir\" specified doesn't exist" in str(exc.value)
def test_build_feedstock_local_src_dir_recipe(mocker):
    """
    Tests that providing the local_src_dir in a recipe sets the LOCAL_SRC_DIR environment variable correctly.
    """
    mocker.patch(
        'os.path.exists',
        return_value=True
    )
    # A relative local_src_dir from the recipe is resolved against the
    # directory containing the recipe file.
    build_feedstock._set_local_src_dir(None, { 'local_src_dir' : "my_other_src_dir" } , "/test/location/recipe.yaml")
    assert os.environ["LOCAL_SRC_DIR"] == "/test/location/my_other_src_dir"
def test_build_feedstock_extra_args(mocker):
    """
    Tests that additional arguments add the expected values to the 'conda_build.api.build' arguments.
    """
    mocker.patch(
        'os.getcwd',
        return_value="/test/test_recipe"
    )
    mocker.patch(
        'os.path.exists',
        return_value=True
    )
    # CLI channels and the config-file channel must all reach conda-build.
    expect_config = { 'channel_urls' : ['/test/test_recipe/condabuild', 'test_channel', 'test_channel_2', 'test_channel_from_config']}
    expect_variants = {'python': '3.6', 'build_type': 'cpu', 'mpi_type': 'openmpi'}
    # test_recipe_extra is NOT in --recipes, so building it must be rejected.
    reject_recipe = os.path.join(os.getcwd(),'test_recipe_extra')
    mocker.patch(
        'conda_build.api.build',
        side_effect=(lambda x, **kwargs: helpers.validate_conda_build_args(x, expect_config=expect_config, expect_variants=expect_variants, reject_recipe=reject_recipe, **kwargs))
    )
    test_recipe_config = { 'recipes' : [{ 'name' : 'my_project', 'path' : 'recipe'},
                                        { 'name' : 'my_variant', 'path': 'variants'},
                                        { 'name' : 'test_recipe_extra', 'path' : 'extra'}],
                           'channels' : ['test_channel_from_config']}
    mocker.patch('open_ce.conda_utils.render_yaml', return_value=test_recipe_config)
    arg_input = ["build", build_feedstock.COMMAND,
                 "--channels", "test_channel",
                 "--channels", "test_channel_2",
                 "--recipes", "my_project,my_variant",
                 "--python_versions", "3.6",
                 "--build_types", "cpu",
                 "--mpi_types", "openmpi",
                 "--cuda_versions", "10.2"]
    opence._main(arg_input)
def test_build_feedstock_if_no_conda_build(mocker):
    '''
    Test that build_feedstock should fail if conda_build isn't present
    '''
    mocker.patch('pkg_resources.get_distribution', return_value=None)
    # The call is expected to raise, so asserting on its return value inside
    # the raises-block could never execute; just invoke it.
    with pytest.raises(OpenCEError):
        opence._main(["build", build_feedstock.COMMAND])
| 38.321569 | 179 | 0.668236 |
8d0dd4e0c18b974e06afa1a347ff5d8ec8393474 | 344 | py | Python | mmdet/models/roi_heads/sar_modules/__init__.py | DatDoc/mmdetection | 697966f84901587627799e84c98c6e0a6df427b4 | [
"Apache-2.0"
] | null | null | null | mmdet/models/roi_heads/sar_modules/__init__.py | DatDoc/mmdetection | 697966f84901587627799e84c98c6e0a6df427b4 | [
"Apache-2.0"
] | null | null | null | mmdet/models/roi_heads/sar_modules/__init__.py | DatDoc/mmdetection | 697966f84901587627799e84c98c6e0a6df427b4 | [
"Apache-2.0"
] | null | null | null | from .spatial_relation_module import SpatialRelationModule
# from .diseases_relation_module import DiseasesRelationModule
from .contextual_relation_module import ContextualRelationModule
__all__ = ['SpatialRelationModule', 'ContextualRelationModule']
# __all__ = ['SpatialRelationModule', 'DiseasesRelationModule', 'ContextualRelationModule'] | 49.142857 | 91 | 0.860465 |
a50060d1e28d17813fba602a4aeb64b106ae646f | 615 | py | Python | flask-aws/lib/python2.7/site-packages/ebcli/__init__.py | AccelAI/accel.ai | c380a6473ce30cbbb22f59e6e860fb2854f8d2b4 | [
"MIT"
] | 1 | 2017-11-08T08:15:45.000Z | 2017-11-08T08:15:45.000Z | flask-aws/lib/python2.7/site-packages/ebcli/__init__.py | AccelAI/accel.ai | c380a6473ce30cbbb22f59e6e860fb2854f8d2b4 | [
"MIT"
] | null | null | null | flask-aws/lib/python2.7/site-packages/ebcli/__init__.py | AccelAI/accel.ai | c380a6473ce30cbbb22f59e6e860fb2854f8d2b4 | [
"MIT"
] | 4 | 2016-10-12T23:54:55.000Z | 2020-07-25T23:28:25.000Z | #!/usr/bin/env python
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
__version__ = '3.9.0'
| 41 | 79 | 0.744715 |
78b5dd418431435a3e0fdb35a3029a09e8bceeeb | 2,896 | py | Python | variantcall/call/snake/helpers/utils.py | owensgl/haploblocks_2019 | 1b59b2ed8a3c512270cd6e0aec23cedca868d778 | [
"MIT"
] | null | null | null | variantcall/call/snake/helpers/utils.py | owensgl/haploblocks_2019 | 1b59b2ed8a3c512270cd6e0aec23cedca868d778 | [
"MIT"
] | null | null | null | variantcall/call/snake/helpers/utils.py | owensgl/haploblocks_2019 | 1b59b2ed8a3c512270cd6e0aec23cedca868d778 | [
"MIT"
] | null | null | null |
import os, os.path
import errno
import tempfile
import hashlib
import io
import traceback
class Cas(object):
    """ Content Addressible Storage.

    Storage device/directory where all files are stored under their content address (hash).

    put(file) -> key
    get(key) -> file

    two files with the same content will have the same key
    """
    def __init__(self, cachedir):
        # Directory holding one file per object, named "<TYPE>_<hexdigest>".
        self.cachedir = cachedir

    def _key(self, digest_type, hexdigest):
        """External key form: bare hex for sha1, '<TYPE>_<hex>' for other digests."""
        if digest_type == "sha1":
            return hexdigest.lower()
        return digest_type.upper() + "_" + hexdigest.lower()

    def _cache_entry(self, digest_type, digest):
        """On-disk path for a digest; always uses the '<TYPE>_<hex>' form."""
        return os.path.join(self.cachedir, digest_type.strip().upper() + "_" + digest.strip().lower())

    def _get(self, digest_type, hexdigest):
        # Content is written as raw bytes by _put(), so read it back in binary
        # mode. (Bug fix: opening in text mode "r" broke retrieval of binary
        # content via decode errors / newline translation.)
        return open(self._cache_entry(digest_type, hexdigest), "rb")

    def get(self, name):
        """gets an open binary file based on key. raises error if file is not found."""
        toks = name.split("_", 1)
        if len(toks) == 1:
            # Bare hex keys are sha1 (see _key()).
            digest_type, hexdigest = "sha1", toks[0]
        else:
            digest_type, hexdigest = toks
        return self._get(digest_type, hexdigest)

    def put_text(self, text, digest_type='sha1'):
        """stores text string (UTF-8 encoded). returns a key that can be used to retrieve content later"""
        with io.BytesIO(text.encode('utf-8')) as datafile:
            return self._put(datafile, digest_type=digest_type)

    def put_stream(self, stream, digest_type='sha1'):
        """stores file-like object stream.
        returns a key that can be used to retrieve content later"""
        return self._put(stream, digest_type=digest_type)

    def _put(self, stream, digest_type):
        """Hash *stream* while spooling it to a temp file in cachedir, then move
        the temp file to its content-addressed name. Returns the external key."""
        digest_obj = getattr(hashlib, digest_type)()
        with tempfile.NamedTemporaryFile(prefix="tmp.digest.", dir=self.cachedir, delete=False) as digest_fd:
            chunk = stream.read(8*1024)
            while chunk:
                digest_obj.update(chunk)
                digest_fd.write(chunk)
                chunk = stream.read(8*1024)
        hexdigest = digest_obj.hexdigest()
        dst_name = self._cache_entry(digest_type, hexdigest)
        # Identical content already stored: discard the temp copy instead of
        # renaming, which also avoids modifying the entry's timestamps.
        if not os.path.exists(dst_name):
            os.rename(digest_fd.name, dst_name)
        else:
            os.remove(digest_fd.name)
        return self._key(digest_type, hexdigest)
def log_exc(f, logger):
    """Wrap *f* so any exception it raises is logged, then re-raised unchanged."""
    def wrapper(*args, **kwargs):
        try:
            result = f(*args, **kwargs)
        except Exception as exc:
            # Log the error with up to 10 stack frames, then propagate as-is.
            logger.error("%s: %s" % (exc, traceback.format_exc(10)))
            raise
        return result
    return wrapper
def mkdirp(pathname, mode=0o755):
    """mkdir -p <pathname>: create the directory (and any missing parents).

    An already-existing directory is tolerated; any other OSError (permission
    denied, path component is a file, ...) is re-raised.
    """
    try:
        os.makedirs(pathname, mode)
    except OSError as ose:
        # Bug fix: the original did `if ose.errno != errno.EEXIST: pass`,
        # which silently swallowed *every* error. Only EEXIST is benign.
        if ose.errno != errno.EEXIST:
            raise
| 31.824176 | 109 | 0.615677 |
8a0f0509873a984f071605a47e2050f6cee62799 | 2,789 | py | Python | iast/views/project_detail.py | zu1kbackup/DongTai-webapi | ef8df93386e010e0113d0c307f0a8146b55a2fdb | [
"Apache-2.0"
] | null | null | null | iast/views/project_detail.py | zu1kbackup/DongTai-webapi | ef8df93386e010e0113d0c307f0a8146b55a2fdb | [
"Apache-2.0"
] | null | null | null | iast/views/project_detail.py | zu1kbackup/DongTai-webapi | ef8df93386e010e0113d0c307f0a8146b55a2fdb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:owefsad
# software: PyCharm
# project: lingzhi-webapi
from dongtai.endpoint import R
from dongtai.endpoint import UserEndPoint
from dongtai.models.agent import IastAgent
from dongtai.models.project import IastProject
from dongtai.utils import const
from django.utils.translation import gettext_lazy as _
from iast.base.project_version import get_project_version, ProjectsVersionDataSerializer
from iast.utils import extend_schema_with_envcheck, get_response_serializer
from rest_framework import serializers
class ProjectsResponseDataSerializer(serializers.Serializer):
    """Schema of the project-detail response body (used for API docs).

    NOTE(review): declares ``agent_ids`` but the view below returns ``agents``
    — confirm which name the API actually emits.
    """
    name = serializers.CharField(help_text=_('The name of project'))
    agent_ids = serializers.CharField(help_text=_(
        'The id corresponding to the agent, use, for segmentation.'))
    mode = serializers.ChoiceField(['插桩模式'],
                                   help_text=_('The mode of project'))
    scan_id = serializers.IntegerField(
        help_text=_("The id corresponding to the scanning strategy."))
    versionData = ProjectsVersionDataSerializer(
        help_text=_('Version information about the project'))
    id = serializers.IntegerField(help_text=_("The id of the project"))
# Response wrapper for schema generation: maps status codes to localized
# messages (201 success, 203 no permission) around the data serializer above.
_ResponseSerializer = get_response_serializer(
    ProjectsResponseDataSerializer(help_text=''),
    status_msg_keypair=(
        ((201, _('success')), ''),
        ((203, _('no permission')), ''),
    ))
class ProjectDetail(UserEndPoint):
    # Route name for this endpoint.
    name = "api-v1-project-<id>"
    description = _("View item details")

    @extend_schema_with_envcheck(
        tags=[_('Project')],
        summary=_('Projects Detail'),
        description=
        _("Get project information by project id, including the current version information of the project."
          ),
        response_schema=_ResponseSerializer,
    )
    def get(self, request, id):
        """Return detail for one project, restricted to projects the requesting user may access."""
        auth_users = self.get_auth_users(request.user)
        # Filtering by auth_users enforces access control: a project the user
        # cannot see behaves the same as a non-existent one (203 below).
        project = IastProject.objects.filter(user__in=auth_users, id=id).first()
        if project:
            # Only agents currently online/running are listed.
            relations = IastAgent.objects.filter(bind_project_id=project.id, online=const.RUNNING)
            agents = [{"id": relation.id, "name": relation.token} for relation in relations]
            # scan_id 0 signals "no scanning strategy configured".
            if project.scan:
                scan_id = project.scan.id
            else:
                scan_id = 0
            current_project_version = get_project_version(project.id, auth_users)
            return R.success(data={
                "name": project.name,
                "id": project.id,
                "mode": project.mode,
                "scan_id": scan_id,
                "agents": agents,
                "versionData": current_project_version,
            })
        else:
            return R.failure(status=203, msg=_('no permission'))
| 37.186667 | 108 | 0.661886 |
27d86b6cff56fba8ce9310f669fbe17415f0e792 | 1,055 | py | Python | test/test_api.py | CjwRiver/apiAutoTest | 35f1c2475e76dd34089e2cee33b351a1ca97c168 | [
"MIT"
] | null | null | null | test/test_api.py | CjwRiver/apiAutoTest | 35f1c2475e76dd34089e2cee33b351a1ca97c168 | [
"MIT"
] | null | null | null | test/test_api.py | CjwRiver/apiAutoTest | 35f1c2475e76dd34089e2cee33b351a1ca97c168 | [
"MIT"
] | null | null | null | #!/usr/bin/env/ python3
# -*- coding:utf-8 -*-
"""
@project: apiAutoTest
@author: cjw
@file: test_api.py
@ide: PyCharm
@time: 2020/11/22
@desc: 测试方法
"""
from .conftest import pytest
from api.base_requests import BaseRequest
from tools.data_process import DataProcess
from time import sleep
# https://www.cnblogs.com/shouhu/p/12392917.html
# reruns 重试次数 reruns_delay 次数之间的延时设置(单位:秒)
# 失败重跑,会影响总测试时长,如不需要 将 @pytest.mark.flaky(reruns=3, reruns_delay=5) 注释即可
# @pytest.mark.flaky(reruns=2, reruns_delay=1)
# def test_main(cases, get_db): # 使用数据库功能(包含sql查询,数据备份,数据恢复)
# # 此处的cases入参来自与 conftest.py 文件中 cases函数,与直接使用 @pytest.mark.parametrize
# # 有着差不多的效果
# # 发送请求
# response, expect, sql = BaseRequest.send_request(cases)
# # 执行sql
# DataProcess.handle_sql(sql, get_db)
# # 断言操作
# DataProcess.assert_result(response, expect)
def test_main(cases):  # database features disabled for this variant
    """Send one data-driven API request case and assert on its response."""
    # Send the request; `sql` is returned but unused here because the
    # database verification step is disabled (see commented variant above).
    response, expect, sql = BaseRequest.send_request(cases)
    # Compare the actual response against the expectation.
    DataProcess.assert_result(response, expect)
    # Pause before the next case. NOTE(review): the original comment said
    # "one minute", but the actual delay is 2 seconds.
    sleep(2)
| 27.051282 | 77 | 0.708057 |
c4e57be74596254afd172b2a9fa931bcebd90598 | 1,460 | py | Python | planet/management/commands/syncrss.py | nakagami/planetplanet | c2148166b41260ca00c5a917d9923bde44c3c5c6 | [
"MIT"
] | 4 | 2018-02-05T16:08:59.000Z | 2021-05-16T15:17:35.000Z | planet/management/commands/syncrss.py | nakagami/planetplanet | c2148166b41260ca00c5a917d9923bde44c3c5c6 | [
"MIT"
] | null | null | null | planet/management/commands/syncrss.py | nakagami/planetplanet | c2148166b41260ca00c5a917d9923bde44c3c5c6 | [
"MIT"
] | 1 | 2020-01-19T17:15:10.000Z | 2020-01-19T17:15:10.000Z | ##############################################################################
# Copyright (c) 2011 Hajime Nakagami <nakagami@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys, urllib2, datetime
import feedparser
from django.core.management.base import BaseCommand
from planet.models import update_rss, Feed, Entry
class Command(BaseCommand):
    """`manage.py syncrss`: fetch all registered feeds and update entries."""
    def handle(self, *args, **kwargs):
        # Delegate to the model-layer updater; an optional HTTP proxy may be
        # supplied via kwargs (None when absent).
        update_rss(proxy=kwargs.get('proxy'))
| 50.344828 | 79 | 0.733562 |
3d8f32af658c1c1aef87d42019ac4693da86b8b4 | 2,032 | py | Python | tests/core/vm/test_modexp_precompile.py | jin10086/py-evm | da04e8de42fdbf3bc5ca596f5f6b3d810c1afea8 | [
"MIT"
] | 5 | 2018-09-28T20:01:42.000Z | 2022-02-22T19:54:46.000Z | tests/core/vm/test_modexp_precompile.py | jin10086/py-evm | da04e8de42fdbf3bc5ca596f5f6b3d810c1afea8 | [
"MIT"
] | null | null | null | tests/core/vm/test_modexp_precompile.py | jin10086/py-evm | da04e8de42fdbf3bc5ca596f5f6b3d810c1afea8 | [
"MIT"
] | 1 | 2019-01-29T21:09:31.000Z | 2019-01-29T21:09:31.000Z | import pytest
from eth.precompiles.modexp import (
_modexp,
_compute_modexp_gas_fee,
)
from eth.utils.hexadecimal import (
decode_hex,
)
EIP198_VECTOR_A = decode_hex(
"0000000000000000000000000000000000000000000000000000000000000001"
"0000000000000000000000000000000000000000000000000000000000000020"
"0000000000000000000000000000000000000000000000000000000000000020"
"03"
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e"
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"
)
EIP198_VECTOR_B = decode_hex(
"0000000000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000000000000000000000000000000020"
"0000000000000000000000000000000000000000000000000000000000000020"
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e"
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"
)
EIP198_VECTOR_C = decode_hex(
"0000000000000000000000000000000000000000000000000000000000000000"
"0000000000000000000000000000000000000000000000000000000000000020"
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd"
)
@pytest.mark.parametrize(
'data,expected',
(
(EIP198_VECTOR_A, 13056),
(
EIP198_VECTOR_C,
10684346944173007063723051170445283632835119638284563472873463025465780712173320789629146724657549280936306536701227228889744512638312451529980055895215896, # noqa: E501
),
),
)
def test_modexp_gas_fee_calculation(data, expected):
    """Gas fee computed from the raw call data must match the EIP-198 vector."""
    assert _compute_modexp_gas_fee(data) == expected
@pytest.mark.parametrize(
'data,expected',
(
(EIP198_VECTOR_A, 1),
(EIP198_VECTOR_B, 0),
(EIP198_VECTOR_C, 0),
),
)
def test_modexp_result(data, expected):
    """Output of the modexp precompile must match the EIP-198 vector."""
    assert _modexp(data) == expected
| 31.75 | 182 | 0.801673 |
f339fc3c43f25075cdac5dc94e2801f35ec5a1cd | 2,352 | py | Python | python/o80_pam/o80_robot_mirroring.py | intelligent-soft-robots/o80_pam | 3491dcdace61f58e0cf31149184593da3cd2f017 | [
"BSD-3-Clause"
] | null | null | null | python/o80_pam/o80_robot_mirroring.py | intelligent-soft-robots/o80_pam | 3491dcdace61f58e0cf31149184593da3cd2f017 | [
"BSD-3-Clause"
] | 2 | 2021-02-17T12:55:44.000Z | 2021-05-27T14:10:57.000Z | python/o80_pam/o80_robot_mirroring.py | intelligent-soft-robots/o80_pam | 3491dcdace61f58e0cf31149184593da3cd2f017 | [
"BSD-3-Clause"
] | null | null | null | import o80
import o80_pam
# convenience class for sending mirroring
# (i.e. imposing joint positions and velocities)
# to a robot
class o80RobotMirroring:
    """Convenience wrapper around an o80 front end for mirroring a 4-DOF robot
    (imposing joint positions and velocities)."""
    def __init__(self, segment_id, frontend=None, burster=None):
        # Create a front end bound to the shared-memory segment unless the
        # caller injected one.
        if frontend is None:
            self._frontend = o80_pam.MirrorRobotFrontEnd(segment_id)
        else:
            self._frontend = frontend
        self._state = o80.State2d(0, 0)
        # The burster drives stepping; default to the front end itself.
        if burster is None:
            self._burster = self._frontend
        else:
            self._burster = burster
    def reset(self):
        """
        uses o80 frontend to send to the backend in overwrite mode
        a command that request the desired states to be the first states
        the backend experienced, i.e. it resets the robot to its
        original state"""
        self._frontend.add_reinit_command()
        self._frontend.pulse()
    def read(self):
        # Latest observation without exchanging new commands.
        return self._frontend.latest()
    def burst(self, nb_iterations):
        # Step the backend by the given number of iterations.
        return self._burster.burst(nb_iterations)
    def get(self):
        """Return (positions, velocities), each a list over the 4 DOFs."""
        states = self._frontend.pulse().get_observed_states()
        positions = [states.get(dof).get(0) for dof in range(4)]
        velocities = [states.get(dof).get(1) for dof in range(4)]
        return positions, velocities
    def set(
        self,
        joint_positions,
        joint_velocities,
        duration_ms=None,
        nb_iterations=None,
        wait=False,
        burst=False,
    ):
        """Command desired joint positions/velocities in overwrite mode.

        duration_ms: optional duration over which to reach the state.
        nb_iterations: NOTE(review) converted to `iteration` below but never
            passed to add_command -- appears to be dead code; confirm intent.
        wait: if True, block until the command has been executed.
        burst: if truthy, step the backend (once for True, or `burst` times
            when an int) instead of pulsing.
        """
        if duration_ms is not None:
            duration = o80.Duration_us.milliseconds(duration_ms)
        else:
            duration = None
        if nb_iterations is not None:
            iteration = o80.Iteration(nb_iterations, True, True)
        else:
            iteration = None
        if duration:
            self._frontend.add_command(
                joint_positions, joint_velocities, duration, o80.Mode.OVERWRITE
            )
        else:
            self._frontend.add_command(
                joint_positions, joint_velocities, o80.Mode.OVERWRITE
            )
        if wait:
            self._frontend.pulse_and_wait()
        else:
            if burst:
                if type(burst) == type(True):
                    self._burster.burst(1)
                else:
                    self._burster.burst(burst)
            else:
                self._frontend.pulse()
| 28.337349 | 79 | 0.582908 |
6cab32ff64248f56a312b98dfa01b3bbef7a15a9 | 2,040 | py | Python | 064_Dense_Depth/nyu/01_float32/20_tensorrt_inf_test.py | IgiArdiyanto/PINTO_model_zoo | 9247b56a7dff37f28a8a7822a7ef4dd9adf7234d | [
"MIT"
] | 1,529 | 2019-12-11T13:36:23.000Z | 2022-03-31T18:38:27.000Z | 064_Dense_Depth/nyu/01_float32/20_tensorrt_inf_test.py | IgiArdiyanto/PINTO_model_zoo | 9247b56a7dff37f28a8a7822a7ef4dd9adf7234d | [
"MIT"
] | 200 | 2020-01-06T09:24:42.000Z | 2022-03-31T17:29:08.000Z | 064_Dense_Depth/nyu/01_float32/20_tensorrt_inf_test.py | IgiArdiyanto/PINTO_model_zoo | 9247b56a7dff37f28a8a7822a7ef4dd9adf7234d | [
"MIT"
] | 288 | 2020-02-21T14:56:02.000Z | 2022-03-30T03:00:35.000Z | ### tensorflow==2.3.1
### https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/6.0/GA_6.0.1.5/local_repos/nv-tensorrt-repo-ubuntu1804-cuda10.1-trt6.0.1.5-ga-20190913_1-1_amd64.deb
# os="ubuntu1804"
# tag="cuda10.1-trt6.0.1.5-ga-20190913"
# sudo dpkg -i nv-tensorrt-repo-${os}-${tag}_1-1_amd64.deb
# sudo apt-key add /var/nv-tensorrt-repo-${tag}/7fa2af80.pub
# sudo apt-get update
# sudo apt-get install tensorrt
# sudo apt-get install python3-libnvinfer-dev
# python3-libnvinfer
# sudo apt-get install uff-converter-tf
# sudo apt-get install onnx-graphsurgeon
import tensorflow as tf
import numpy as np
import time
def input_fn_480x640():
    """Yield one zero-filled NHWC batch of shape (1, 480, 640, 3), float32.

    Used by the TF-TRT converter to build engines for this input shape.
    """
    batch_shape = [1, 480, 640, 3]
    yield [np.zeros(batch_shape).astype(np.float32)]
# Build and serialize a TF-TRT engine at FP32 precision.
params = tf.experimental.tensorrt.ConversionParams(precision_mode='FP32', maximum_cached_engines=1000)
converter = tf.experimental.tensorrt.Converter(input_saved_model_dir='saved_model_nyu_480x640', conversion_params=params)
converter.convert()
converter.build(input_fn=input_fn_480x640)
converter.save('tensorrt_saved_model_nyu_480x640_float32')
# Repeat the conversion at FP16 precision.
params = tf.experimental.tensorrt.ConversionParams(precision_mode='FP16', maximum_cached_engines=1000)
converter = tf.experimental.tensorrt.Converter(input_saved_model_dir='saved_model_nyu_480x640', conversion_params=params)
converter.convert()
converter.build(input_fn=input_fn_480x640)
converter.save('tensorrt_saved_model_nyu_480x640_float16')
# Reload the FP16 model and time two inferences: the first includes engine
# warm-up, the second reflects steady-state latency.
model = tf.saved_model.load('tensorrt_saved_model_nyu_480x640_float16', tags=[tf.saved_model.SERVING])
infer = model.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
infer.inputs[0].shape
x = np.random.uniform(size=(1, 480, 640, 3)).astype(np.float32)
start = time.perf_counter()
infer(tf.convert_to_tensor(x))
end = time.perf_counter()
print('@@@@@@@@@@@@@@ First Inference')
print('elapsed time:', end - start)
start = time.perf_counter()
infer(tf.convert_to_tensor(x))
end = time.perf_counter()
print('@@@@@@@@@@@@@@ Second Inference')
print('elapsed time:', end - start)
| 36.428571 | 174 | 0.779412 |
b2d7e7e6978d0150b5238100b0b8157e5f082d10 | 1,455 | py | Python | pcat2py/class/25006946-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | pcat2py/class/25006946-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | pcat2py/class/25006946-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | #!/usr/bin/python
################################################################################
# 25006946-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
    """PCAT finding: STIG check for the Outlook 2007 add-in trust policy.

    The system is compliant when the AddinTrust registry value equals 1.
    """
    def __init__(self):
        self.output = []
        self.is_compliant = False
        self.uuid = "25006946-5cc5-11e4-af55-00155d01fe08"

    def check(self, cli):
        """Read the registry value via `cli`, record evidence and compliance."""
        value = cli.get_reg_dword(
            r'HKCU:\Software\Policies\Microsoft\Office\12.0\Outlook\Security',
            'AddinTrust')
        # Human-readable record of what was inspected and what was found.
        self.output = [
            r'HKCU:\Software\Policies\Microsoft\Office\12.0\Outlook\Security',
            ('AddinTrust=' + str(value))
        ]
        self.is_compliant = (value == 1)
        return self.is_compliant

    def fix(self, cli):
        """Create the policy key hierarchy and force AddinTrust to 1."""
        for command in (
                r"New-Item -path 'HKCU:\Software\Policies\Microsoft\Office\12.0'",
                r"New-Item -path 'HKCU:\Software\Policies\Microsoft\Office\12.0\Outlook'",
                r"New-Item -path 'HKCU:\Software\Policies\Microsoft\Office\12.0\Outlook\Security'",
                r"Set-ItemProperty -path 'HKCU:\Software\Policies\Microsoft\Office\12.0\Outlook\Security' -name 'AddinTrust' -value 1 -Type DWord"):
            cli.powershell(command)
| 38.289474 | 154 | 0.600687 |
63d823f07d55925665898cdeb4de77acd990a3eb | 61 | py | Python | dist/js/credentials.py | Cookie419/html5-boilerplate | 1b3efe90f95d053573d604eb53409c7dfbb2d68b | [
"MIT"
] | null | null | null | dist/js/credentials.py | Cookie419/html5-boilerplate | 1b3efe90f95d053573d604eb53409c7dfbb2d68b | [
"MIT"
] | 116 | 2020-08-31T03:12:55.000Z | 2022-03-15T03:04:56.000Z | dist/js/credentials.py | Cookie419/html5-boilerplate | 1b3efe90f95d053573d604eb53409c7dfbb2d68b | [
"MIT"
] | null | null | null | user = "eduardo@10daysofxamarin.blog"
password = "password_1" | 30.5 | 37 | 0.786885 |
fa9de039caa0bd7820c888bad8aa7f503e3888ff | 307 | py | Python | setup.py | ctgk/bayes | 96eab9305eaeecc5a5b032cdf92a8285de4f60bf | [
"MIT"
] | 21 | 2019-01-08T05:58:41.000Z | 2021-11-26T14:24:11.000Z | setup.py | ctgk/bayes | 96eab9305eaeecc5a5b032cdf92a8285de4f60bf | [
"MIT"
] | null | null | null | setup.py | ctgk/bayes | 96eab9305eaeecc5a5b032cdf92a8285de4f60bf | [
"MIT"
] | 11 | 2019-05-04T13:44:19.000Z | 2021-08-05T04:26:19.000Z | from setuptools import setup, find_packages
# Packaging metadata for the bayesnet library (setuptools entry point).
setup(
    name="bayesnet",
    version="0.0.1",
    description="Bayesian method library",
    author="ctgk",
    python_requires=">=3.6",
    install_requires=["numpy", "scipy"],
    packages=find_packages(exclude=["test", "test.*"]),
    test_suite="test"
)
| 21.928571 | 55 | 0.651466 |
8a67fcfbf12577c3ead694ab77d224584cd69b98 | 5,575 | py | Python | torch_geometric/datasets/pcpnet_dataset.py | lsj2408/pytorch_geometric | 21cc1efd7c3b2912f4c2c98ddd5e9065a9aef6d4 | [
"MIT"
] | null | null | null | torch_geometric/datasets/pcpnet_dataset.py | lsj2408/pytorch_geometric | 21cc1efd7c3b2912f4c2c98ddd5e9065a9aef6d4 | [
"MIT"
] | null | null | null | torch_geometric/datasets/pcpnet_dataset.py | lsj2408/pytorch_geometric | 21cc1efd7c3b2912f4c2c98ddd5e9065a9aef6d4 | [
"MIT"
] | null | null | null | import os
import os.path as osp
import torch
from torch_geometric.data import (Data, InMemoryDataset, download_url,
extract_zip)
from torch_geometric.io import read_txt_array
class PCPNetDataset(InMemoryDataset):
    r"""The PCPNet dataset from the `"PCPNet: Learning Local Shape Properties
    from Raw Point Clouds" <https://arxiv.org/abs/1710.04954>`_ paper,
    consisting of 30 shapes, each given as a point cloud, densely sampled with
    100k points.
    For each shape, surface normals and local curvatures are given as node
    features.

    Args:
        root (string): Root directory where the dataset should be saved.
        category (string): The training set category (one of :obj:`"NoNoise"`,
            :obj:`"Noisy"`, :obj:`"VarDensity"`, :obj:`"NoisyAndVarDensity"`
            for :obj:`split="train"` or :obj:`split="val"`,
            or one of :obj:`"All"`, :obj:`"LowNoise"`, :obj:`"MedNoise"`,
            :obj:`"HighNoise"`, :obj:`"VarDensityStriped"`,
            :obj:`"VarDensityGradient"` for :obj:`split="test"`).
        split (string): If :obj:`"train"`, loads the training dataset.
            If :obj:`"val"`, loads the validation dataset.
            If :obj:`"test"`, loads the test dataset. (default: :obj:`"train"`)
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
        pre_filter (callable, optional): A function that takes in an
            :obj:`torch_geometric.data.Data` object and returns a boolean
            value, indicating whether the data object should be included in the
            final dataset. (default: :obj:`None`)
    """
    url = 'http://geometry.cs.ucl.ac.uk/projects/2018/pcpnet/pclouds.zip'
    # Each category maps to the split-list text file enumerating its shapes.
    category_files_train = {
        'NoNoise': 'trainingset_no_noise.txt',
        'Noisy': 'trainingset_whitenoise.txt',
        'VarDensity': 'trainingset_vardensity.txt',
        'NoisyAndVarDensity': 'trainingset_vardensity_whitenoise.txt'
    }
    category_files_val = {
        'NoNoise': 'validationset_no_noise.txt',
        'Noisy': 'validationset_whitenoise.txt',
        'VarDensity': 'validationset_vardensity.txt',
        'NoisyAndVarDensity': 'validationset_vardensity_whitenoise.txt'
    }
    category_files_test = {
        'All': 'testset_all.txt',
        'NoNoise': 'testset_no_noise.txt',
        'LowNoise': 'testset_low_noise.txt',
        'MedNoise': 'testset_med_noise.txt',
        'HighNoise': 'testset_high_noise.txt',
        'VarDensityStriped': 'testset_vardensity_striped.txt',
        'VarDensityGradient': 'testset_vardensity_gradient.txt'
    }
    def __init__(self, root, category, split='train', transform=None,
                 pre_transform=None, pre_filter=None):
        # Validate that the category is legal for the requested split.
        assert split in ['train', 'val', 'test']
        if split == 'train':
            assert category in self.category_files_train.keys()
        elif split == 'val':
            assert category in self.category_files_val.keys()
        else:
            assert category in self.category_files_test.keys()
        self.category = category
        self.split = split
        super().__init__(root, transform, pre_transform, pre_filter)
        # Load the pre-processed, collated tensors from disk.
        self.data, self.slices = torch.load(self.processed_paths[0])
    @property
    def raw_file_names(self):
        # The raw "file" is the split list enumerating the shape names.
        if self.split == 'train':
            return self.category_files_train[self.category]
        elif self.split == 'val':
            return self.category_files_val[self.category]
        else:
            return self.category_files_test[self.category]
    @property
    def processed_file_names(self):
        return self.split + '_' + self.category + '.pt'
    def download(self):
        path = download_url(self.url, self.raw_dir)
        extract_zip(path, self.raw_dir)
        os.unlink(path)
    def process(self):
        # Read shape names from the split list; the trailing empty entry
        # after the final newline is dropped.
        path_file = self.raw_paths
        with open(path_file[0], "r") as f:
            filenames = f.read().split('\n')[:-1]
        data_list = []
        for filename in filenames:
            pos_path = osp.join(self.raw_dir, filename + '.xyz')
            normal_path = osp.join(self.raw_dir, filename + '.normals')
            curv_path = osp.join(self.raw_dir, filename + '.curv')
            idx_path = osp.join(self.raw_dir, filename + '.pidx')
            pos = read_txt_array(pos_path)
            normals = read_txt_array(normal_path)
            curv = read_txt_array(curv_path)
            # Node features: surface normals concatenated with curvatures.
            normals_and_curv = torch.cat([normals, curv], dim=1)
            test_idx = read_txt_array(idx_path, dtype=torch.long)
            data = Data(pos=pos, x=normals_and_curv)
            data.test_idx = test_idx
            if self.pre_filter is not None and not self.pre_filter(data):
                continue
            if self.pre_transform is not None:
                data = self.pre_transform(data)
            data_list.append(data)
        torch.save(self.collate(data_list), self.processed_paths[0])
    def __repr__(self):
        return '{}({}, category={})'.format(self.__class__.__name__, len(self),
                                            self.category)
| 41.917293 | 79 | 0.624036 |
107edab9aca25f41df4396b7c3042658aec5c2c8 | 4,415 | py | Python | kubic_user.py | cinxdy/TIBigdataOpenAPI_server | 8691ca0feb9ca259730f1fc2144b7e29beb95ca6 | [
"MIT"
] | null | null | null | kubic_user.py | cinxdy/TIBigdataOpenAPI_server | 8691ca0feb9ca259730f1fc2144b7e29beb95ca6 | [
"MIT"
] | null | null | null | kubic_user.py | cinxdy/TIBigdataOpenAPI_server | 8691ca0feb9ca259730f1fc2144b7e29beb95ca6 | [
"MIT"
] | null | null | null | from flask import session
from pymongo import MongoClient
from bson.objectid import ObjectId
from secrets import token_urlsafe
from passlib.hash import pbkdf2_sha512
from datetime import datetime
from dateutil.relativedelta import relativedelta
#import logging
# Local MongoDB connection; the `user` database stores API registrations.
client = MongoClient('localhost',27017)
db = client.user
# Maximum number of API calls permitted per key before it is throttled.
trafficLimit = 3000
# def getEmail():
# email_logined = "21800409@handong.edu"
# email_logined = session['id']
# return email_logined
# return session['id']
# email_logined = getEmail()
def countAPI():
    """Return how many API apps the logged-in user has registered.

    NOTE(review): Collection.count() is deprecated in modern pymongo;
    count_documents() is the replacement.
    """
    count = db.apiUser.count({"user_email": session['id']})
    return count
def generateCode():
    """Create a fresh URL-safe API key together with its PBKDF2-SHA512 hash.

    Returns:
        (plaintext key, hashed key) -- only the hash should be persisted.
    """
    plain_key = token_urlsafe(16)
    hashed_key = pbkdf2_sha512.hash(plain_key)
    return plain_key, hashed_key
def registerAPI(app_type, app_name, app_purpose):
    """Register a new API app for the logged-in user.

    Only the hash of the verification key is stored; the plaintext key is
    returned exactly once so it can be shown to the user.
    """
    today = datetime.today()
    key, hashKey = generateCode()
    post = {
        "app_type": app_type,
        "app_name" : app_name,
        "app_purpose" : app_purpose,
        "user_email" : session['id'],
        "veri_code" : hashKey,
        "reporting_date" : today,
        # Keys are valid for one year from registration.
        "expiration_date" : (today+relativedelta(years=1)),
        "traffic":0
    }
    db.apiUser.insert_one(post)
    return key
def preRegisterAPI(email, app_name, app_purpose):
    """Store a pre-registration request (pending approval) and return its key id."""
    today = datetime.today()
    # key, hashKey = generateCode()
    key = token_urlsafe(16)
    post = {
        # "app_type": app_type,
        "app_name" : app_name,
        "app_purpose" : app_purpose,
        "email" : email,
        # The plaintext key doubles as the record identifier until approval.
        "key_id" : key,
        "reporting_date" : today,
        # "expiration_date" : (today+relativedelta(years=1)),
        # "traffic":0
    }
    db.preApiUser.insert_one(post)
    return key
def getPreuserInfoByKey(key):
    """Return (email, app_name, app_purpose) for a pre-registration key.

    Yields (None, None, None) when the key is missing or unknown.
    """
    if key is None:
        return None, None, None
    record = db.preApiUser.find_one({"key_id": key})
    if record is None:
        return None, None, None
    return record['email'], record['app_name'], record['app_purpose']
def getDocListPreUser():
    """Return all pre-registration requests not yet accepted (accept != 1)."""
    docList = db.preApiUser.find({"accept": {'$ne': 1}})
    return docList
def updatePreuserInfoByKey(key, accept, reason):
    """Record the admin decision (accept flag and reason) on a pre-registration.

    NOTE(review): Collection.update() is deprecated in modern pymongo;
    update_one() is the equivalent here.
    """
    post = {
        'accept': accept,
        'reason': reason
    }
    db.preApiUser.update({"key_id": key}, {'$set': post})
def reissueAPI(_id):
    """Rotate an app's verification key and extend its validity by one year.

    Returns the new plaintext key.
    """
    today = datetime.today()
    key, hashKey = generateCode()
    post = {
        # "app_name" : "testtesttest",
        "veri_code" : hashKey,
        "reporting_date" : today,
        "expiration_date" : (today+relativedelta(years=1)),
    }
    db.apiUser.update({"_id": ObjectId(_id)}, {'$set': post})
    # print("reissue> app_name", post['app_name'],"key", key)
    return key
def deleteAPI(_id):
    """Delete the API registration with the given id.

    Returns:
        True when the removal succeeded, False when it failed.
    """
    try:
        db.apiUser.remove({"_id": ObjectId(_id)})
    # Was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; catch only genuine runtime errors.
    except Exception:
        return False
    return True
def getDocByEmail():
    """Return a cursor over all API apps owned by the logged-in user."""
    docList = db.apiUser.find({"user_email": session['id']})
    return docList
def getDocById(_id):
    """Fetch a single API app document by its ObjectId string (or None)."""
    doc = db.apiUser.find_one({"_id": ObjectId(_id)})
    return doc
# def findHash():
# doc = getDocByEmail()
# hashKeyList = [item['veri_code'] for item in doc]
# return hashKeyList
def verification(serviceKey):
    """Match a plaintext service key against every stored key hash.

    Returns the owning document's _id on the first match, False otherwise.
    NOTE(review): this scans all registrations and runs one PBKDF2 verify
    per row, i.e. O(n) slow hashes per request.
    """
    docList = db.apiUser.find({},{"veri_code":1})
    # print(hashKeyList)
    # hashKey=pbkdf2_sha512.hash(serviceKey)
    for doc in docList:
        if(pbkdf2_sha512.verify(serviceKey, doc['veri_code'])):
            # doc = db.apiUser.find_one({"veri_code": hashKey})
            return doc['_id']
    # hashKeyList = findHash()
    # for hashKey in hashKeyList:
    #     if(pbkdf2_sha512.verify(serviceKey, hashKey)):
    #         doc = db.apiUser.find_one({"veri_code": hashKey})
    #         return doc['_id']
    return False
def limitTraffic(_id):
    """Return True while the app's recorded traffic is within the quota."""
    return getDocById(_id)['traffic'] <= trafficLimit
def limitDate(_id):
    """Return True while the app's key has not yet expired."""
    return getDocById(_id)['expiration_date'] >= datetime.today()
def raiseTraffic(_id, numOfCnt):
    """Increase the app's traffic counter by `numOfCnt`.

    Uses an atomic $inc instead of the previous fetch-then-$set sequence,
    which could lose updates under concurrent requests; the leftover debug
    prints of the whole document are also removed.
    """
    db.apiUser.update({"_id": ObjectId(_id)}, {'$inc': {"traffic": numOfCnt}})
def getMyDocByEmail():
doc = db.mydocs.find_one({"userEmail": session['id']})
print(doc)
print("myDoc:", doc['savedDocIds'])
return doc['savedDocIds'] | 27.253086 | 74 | 0.629672 |
904c5c7776a90125c8a3c96c27ba781c2f0da3ed | 1,198 | py | Python | conanfile.py | extcpp/base | 26b1384cbbd5cb3171e4cd22c8eb5f09ec0c5732 | [
"MIT"
] | null | null | null | conanfile.py | extcpp/base | 26b1384cbbd5cb3171e4cd22c8eb5f09ec0c5732 | [
"MIT"
] | 22 | 2019-10-15T20:47:02.000Z | 2020-01-26T20:26:19.000Z | conanfile.py | extcpp/base | 26b1384cbbd5cb3171e4cd22c8eb5f09ec0c5732 | [
"MIT"
] | 1 | 2020-09-24T08:53:15.000Z | 2020-09-24T08:53:15.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
class ExtbaseConan(ConanFile):
    """Conan recipe for the extcpp 'basics' C++ base library."""
    name = "ext-basics"
    version = "0.0.1"
    license = "MIT"
    author = "Jan Christoph Uhde <jan@uhdejc.com>"
    url = "https://github.com/extcpp/basics"
    description = "Extended C++ - base library"
    topics = ("c++", "utils", "library")
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False]}
    default_options = {"shared": True}
    generators = "cmake"
    def source(self):
        # Clone the pinned 'conan' branch including submodules rather than
        # downloading a release tarball.
        self.run("git clone -b conan https://github.com/extcpp/basics.git basics --recurse-submodules")
    def build(self):
        cmake = CMake(self)
        #cmake.verbose = True
        cmake.configure(source_folder="basics")
        cmake.build()
    def package(self):
        """Install / copy artifacts into the package folder."""
        cmake = CMake(self)
        #cmake.verbose = True
        # Disable tests, warnings and examples for the packaged build.
        cmake.definitions["LIBEXT_TESTS"] = False
        cmake.definitions["LIBEXT_WARNINGS"] = False
        cmake.definitions["LIBEXT_EXAMPLES"] = False
        cmake.configure()
        cmake.install()
    def package_info(self):
        # NOTE(review): header_only() strips settings from the package id;
        # confirm this is intended given the recipe compiles a library.
        self.info.header_only()
| 29.219512 | 103 | 0.610184 |
1fb9384f1c71b14c6a090ca3f56ac163bb98c8eb | 1,456 | py | Python | resources/usleep_dataset_prep/convert_dod_datasets.py | learning310/U-Time | f7c8e3f1368f43226872a69b0fbb8c29990e4bd9 | [
"MIT"
] | 138 | 2019-11-20T02:31:17.000Z | 2022-03-23T04:31:51.000Z | resources/usleep_dataset_prep/convert_dod_datasets.py | amiyapatanaik/U-Time | a9ed4892da77d165a71dbfef1d069d782c909757 | [
"MIT"
] | 46 | 2019-12-04T03:13:28.000Z | 2022-03-31T13:10:48.000Z | resources/usleep_dataset_prep/convert_dod_datasets.py | amiyapatanaik/U-Time | a9ed4892da77d165a71dbfef1d069d782c909757 | [
"MIT"
] | 42 | 2019-11-26T16:02:26.000Z | 2022-01-06T11:01:32.000Z | import os
import h5py
from glob import glob
import json
import numpy as np
from utime.utils.scriptutils.extract import to_h5_file
def convert_h5_file(h5_path):
    """Convert one DOD .h5 recording into signals.h5 + hypnogram.npy.

    Output goes to a sibling directory named after the input file; the
    conversion is skipped when both outputs already exist.
    """
    out_dir = h5_path.replace(".h5", "")
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    out_h5_path = os.path.join(out_dir, "signals.h5")
    out_hyp_path = os.path.join(out_dir, "hypnogram.npy")
    if os.path.exists(out_h5_path) and os.path.exists(out_hyp_path):
        return
    with h5py.File(h5_path, "r") as in_f:
        # The 'description' attribute is a JSON list of channel records,
        # each with an in-file 'path' and sampling frequency 'fs'.
        description = json.loads(in_f.attrs['description'])
        data = []
        sample_rates = []
        channel_names = []
        for channel in description:
            sample_rates.append(int(channel['fs']))
            data.append(np.array(in_f[channel["path"]]))
            channel_names.append(channel["path"].split("/")[-1].replace("_", "-"))
        # All channels must share one sampling rate to live in one H5 file.
        assert np.all(np.array(sample_rates) == sample_rates[0])
        sample_rate = sample_rates[0]
        # Stack to (n_samples, n_channels).
        data = np.array(data).T
        print(channel_names)
        to_h5_file(
            out_path=out_h5_path,
            data=data,
            sample_rate=sample_rate,
            channel_names=channel_names,
            date=None
        )
        # Save hypnogram
        hyp = np.array(in_f['hypnogram'])
        np.save(out_hyp_path, hyp)
if __name__ == "__main__":
    # Convert every DOD recording found under the dod_* folders.
    for h5_path in glob("dod_*/*h5"):
        print(h5_path)
        convert_h5_file(h5_path)
| 27.471698 | 82 | 0.607143 |
449875ae8bc5427f34a8427b6fe513fb9dc54a83 | 789 | py | Python | models/modules/se.py | vuiseng9/CalibTIP | 69077c92611b079234706784c344e8c9156f3283 | [
"MIT"
] | 61 | 2020-06-14T20:00:05.000Z | 2022-03-08T10:18:41.000Z | models/modules/se.py | vuiseng9/CalibTIP | 69077c92611b079234706784c344e8c9156f3283 | [
"MIT"
] | 5 | 2020-09-01T06:53:41.000Z | 2022-01-28T07:40:45.000Z | models/modules/se.py | vuiseng9/CalibTIP | 69077c92611b079234706784c344e8c9156f3283 | [
"MIT"
] | 18 | 2020-06-28T10:30:58.000Z | 2022-02-28T02:04:15.000Z | import torch
import torch.nn as nn
class SEBlock(nn.Module):
    """Squeeze-and-Excitation channel-attention block.

    Globally average-pools the input, feeds the pooled vector through a
    bottleneck MLP (reduction factor `ratio`) ending in a sigmoid, and
    scales the input channels by the resulting gate.
    """
    def __init__(self, in_channels, out_channels=None, ratio=16):
        super(SEBlock, self).__init__()
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.ratio = ratio
        self.relu = nn.ReLU(True)
        self.global_pool = nn.AdaptiveAvgPool2d(1)
        # Gate network: C -> C // ratio -> out_channels, squashed to (0, 1).
        self.transform = nn.Sequential(
            nn.Linear(in_channels, in_channels // ratio),
            nn.ReLU(inplace=True),
            nn.Linear(in_channels // ratio, out_channels),
            nn.Sigmoid()
        )

    def forward(self, x):
        batch = x.size(0)
        pooled = self.global_pool(x).view(batch, -1)
        gate = self.transform(pooled)
        return x * gate.view(batch, -1, 1, 1)
| 31.56 | 65 | 0.598226 |
9f6067a06f527a967de6caf245653ff70a5b069a | 51,589 | py | Python | tensorflow/python/ipu/ipu_estimator.py | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | [
"Apache-2.0"
] | 74 | 2020-07-06T17:11:39.000Z | 2022-01-28T06:31:28.000Z | tensorflow/python/ipu/ipu_estimator.py | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | [
"Apache-2.0"
] | 9 | 2020-10-13T23:25:29.000Z | 2022-02-10T06:54:48.000Z | tensorflow/python/ipu/ipu_estimator.py | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | [
"Apache-2.0"
] | 12 | 2020-07-08T07:27:17.000Z | 2021-12-27T08:54:27.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""
IPUEstimator
~~~~~~~~~~~~
"""
import abc
import collections
import itertools
import threading
from six.moves import _thread
import six
from tensorflow.compiler.plugin.poplar.driver.config_pb2 import IpuOptions
from tensorflow.compiler.plugin.poplar.ops import gen_pop_datastream_ops
from tensorflow.compiler.plugin.poplar.ops import gen_sendrecv_ops
from tensorflow.python.client import session as session_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ipu import config as ipu_config
from tensorflow.python.ipu import ipu_compiler
from tensorflow.python.ipu import ipu_infeed_queue
from tensorflow.python.ipu import ipu_outfeed_queue
from tensorflow.python.ipu import ipu_run_config
from tensorflow.python.ipu import loops
from tensorflow.python.ipu import ops as ipu_ops
from tensorflow.python.ipu import utils as ipu_utils
from tensorflow.python.ipu.ipu_multi_worker_strategy import IPUMultiWorkerStrategyV1
from tensorflow.python.ipu.scopes import ipu_scope
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
# Initial value for the loss accumulator threaded through the on-device loops.
_INITIAL_LOSS = 0.0
# Reserved `params` keys used internally to pass the input function and the
# per-replica batch size to the wrapped `model_fn`.
_INPUT_FN_KEY = "input_fn"
_BATCH_SIZE_KEY = "batch_size"
# Op type names matched when scanning graphs for illegal global-step
# increments and for cross-replica reductions.
_ASSIGN_ADD_OP = "AssignAddVariableOp"
_CROSS_REPLICA_SUM_OP = "IpuCrossReplicaSum"
_CROSS_REPLICA_MEAN_OP = "IpuCrossReplicaMean"
_RESOURCE_UPDATE_OP = "ResourceUpdate"
# Device string for ops that must be placed on the CPU host.
_HOST_DEVICE = "/device:CPU:0"
# Keys that cannot be used in the `params` dictionary passed to the
# IPUEstimator
_RESERVED_PARAMS_KEYS = [_INPUT_FN_KEY]
def _validate_function_call_spec(call_spec, name, allow_dict_arg=False):
if call_spec is not None:
if not isinstance(call_spec, tuple):
raise TypeError("`{}` must be a tuple".format(name))
if len(call_spec) != 2:
raise ValueError("`{}` must have two elements".format(name))
if not callable(call_spec[0]):
raise TypeError("first element in `{}` must be callable".format(name))
allowed_arg_types = (list,)
if allow_dict_arg:
allowed_arg_types += (dict,)
if not isinstance(call_spec[1], allowed_arg_types):
raise TypeError("second element in `{}` must be a {}".format(
name, " or ".join(t.__name__ for t in allowed_arg_types)))
class IPUEstimatorSpec(
    collections.namedtuple('IPUEstimatorSpec', [
        'mode', 'predictions', 'loss', 'train_op', 'eval_metric_ops',
        'eval_metrics', 'host_call', 'training_hooks', 'evaluation_hooks',
        'prediction_hooks'
    ])):
  """Ops and objects returned from a `model_fn` and passed to `IPUEstimator`.

  This mirrors `EstimatorSpec` and adds two IPU-specific fields:
  `eval_metrics` and `host_call`. When neither is needed, a plain
  `EstimatorSpec` can be returned to the `IPUEstimator` instead.

  `eval_metrics` is a `(function, tensors)` tuple, where `tensors` is either
  a list of `tf.Tensor` or a dict from strings to `tf.Tensor`. The tensors
  are transferred from the IPU to the CPU host where the function is called
  with them; the function must return a dict of metrics.

  Exactly one of `eval_metrics` and `eval_metric_ops` must be provided during
  evaluation. The major difference is that `eval_metric_ops` executes
  directly on the IPU, while `eval_metrics` executes on the CPU host using
  the provided function. Example:

  .. code-block:: python

    def my_metrics_fn(features, labels):
      return {
          "accuracy": tf.metrics.accuracy(labels, features),
          "precision": tf.metrics.precision(labels, features),
          "recall": tf.metrics.recall(labels, features),
      }

    eval_metrics = (my_metrics_fn, [features, labels])
    spec = IPUEstimatorSpec(mode, loss=loss, eval_metrics=eval_metrics)

  `host_call` is also a `(function, tensors)` tuple. It only works for
  training: the function is executed on the CPU for every training step, with
  the tensors transferred from the IPU to the CPU host. This can be used
  e.g. for doing all-reduce of the gradients and weight updates on the host
  during distributed training with the `IPUMultiWorkerStrategyV1`. Example:

  .. code-block:: python

    def my_host_fn(*host_gradients):
      # This will all-reduce the gradients and update the weights on the host.
      return optimizer.apply_gradients(zip(host_gradients, variables))

    train_op = tf.identity(loss)
    grads_and_vars = optimizer.compute_gradients(loss, var_list=variables)
    gradients = [g for (g, _) in grads_and_vars]
    host_call = (my_host_fn, gradients)
    spec = IPUEstimatorSpec(mode=mode,
                            loss=loss,
                            train_op=train_op,
                            host_call=host_call)

  See full example: :any:`distributed_training`.

  The hook arguments (`training_hooks`, `evaluation_hooks`,
  `prediction_hooks`) take instances of `tf.estimator.SessionRunHook`. To log
  tensor values from within the `model_fn`, use the
  :class:`~tensorflow.python.ipu.ipu_session_run_hooks.IPULoggingTensorHook`.

  For documentation of the remaining arguments, see `EstimatorSpec`.
  """
  def __new__(cls,
              mode,
              predictions=None,
              loss=None,
              train_op=None,
              eval_metric_ops=None,
              eval_metrics=None,
              host_call=None,
              training_hooks=None,
              evaluation_hooks=None,
              prediction_hooks=None):
    # Reuse the canonical `EstimatorSpec` validators (evaluated left to
    # right, same order as upstream) so error behaviour is identical.
    # pylint: disable=protected-access
    fields = dict(
        mode=mode,
        train_op=model_fn_lib._validate_estimator_spec_train_op(
            train_op, mode),
        loss=model_fn_lib._validate_estimator_spec_loss(loss, mode),
        predictions=model_fn_lib._validate_estimator_spec_predictions(
            predictions, mode),
        training_hooks=model_fn_lib._validate_estimator_spec_hooks(
            training_hooks),
        evaluation_hooks=model_fn_lib._validate_estimator_spec_hooks(
            evaluation_hooks),
        prediction_hooks=model_fn_lib._validate_estimator_spec_hooks(
            prediction_hooks),
        eval_metric_ops=model_fn_lib._validate_eval_metric_ops(
            eval_metric_ops),
        eval_metrics=eval_metrics,
        host_call=host_call)
    # pylint: enable=protected-access
    _validate_function_call_spec(host_call, "host_call")
    _validate_function_call_spec(eval_metrics,
                                 "eval_metrics",
                                 allow_dict_arg=True)
    return super().__new__(cls, **fields)
class _IPUConfigureIPUSystemHook(session_run_hook.SessionRunHook):
  """Hook that configures the IPU system before the session starts running.

  Also cross-checks the number of IPUs selected in `IpuOptions` against the
  replica/shard counts from the `IPURunConfig`.
  """
  def __init__(self, config, host_device=_HOST_DEVICE):
    # `config` is an `IPURunConfig`; its `ipu_options` holds the device
    # configuration, either as an `IPUConfig` or as a raw `IpuOptions` proto.
    if not isinstance(config.ipu_options, (IpuOptions, ipu_config.IPUConfig)):
      raise Exception("`config.ipu_options` must be an IPUConfig or IpuOptions"
                      " instance")
    self._config = config.ipu_options
    # Normalize to the `IpuOptions` protobuf representation.
    if isinstance(self._config, ipu_config.IPUConfig):
      self._config = self._config._create_protobuf()  # pylint: disable=protected-access
    self._run_config = config
    self._host_device = host_device
  def begin(self):
    # Configure the system first; the device query below is only valid
    # afterwards.
    ipu_utils.configure_ipu_system(self._config, self._host_device)
    # Only validate the device count when a specific device configuration
    # index was selected for this ordinal.
    if self._config.device_config[self._run_config.ordinal].cfg_index:
      num_configured_devices = ipu_utils.get_num_of_ipus_in_device(
          '/device:IPU:{}'.format(self._run_config.ordinal))
      num_devices = self._run_config.num_shards * self._run_config.num_replicas
      if num_devices != num_configured_devices:
        raise ValueError('`IPURunConfig` configured with {} devices'
                         ' ({} num_replicas times {} num_shards),'
                         ' but `IpuOptions` configured with {} devices'.format(
                             num_devices, self._run_config.num_replicas,
                             self._run_config.num_shards,
                             num_configured_devices))
class _IPUInfeedLifecycleHook(session_run_hook.SessionRunHook):
  """Manages the life cycle of the infeed's dataset iterator.

  Initializes the iterator when the session is created and deletes it when
  the session ends — or, as a fallback, when this hook is garbage collected.
  """
  def __init__(self, infeed):
    self._infeed = infeed
    # Tracks whether the iterator currently exists and needs deletion.
    self._should_delete = False
  def after_create_session(self, session, coord):
    session.run(self._infeed.initializer)
    self._should_delete = True
  def end(self, session):
    # Normal shutdown path: delete the iterator in the running session.
    session.run(self._infeed.deleter)
    self._should_delete = False
  def _run_delete_op_in_new_graph_and_session(self):
    # Fallback path: the original session/graph may be gone, so build the
    # delete op in a fresh private graph and session.
    g = ops.Graph()
    with g.as_default(), ops.device(_HOST_DEVICE):
      delete_op = gen_pop_datastream_ops.ipu_delete_dataset_iterator(
          feed_id=self._infeed._id)  # pylint: disable=protected-access
    with session_lib.Session(graph=g) as sess:
      sess.run(delete_op)
  def __del__(self):
    if self._should_delete:
      # We may end up here if the session exited abnormally, such
      # as if an exception was raised or the generator returned
      # by `predict()` was deleted, since these scenarios will
      # not trigger the `end()` callback above.
      self._run_delete_op_in_new_graph_and_session()
class _IPUOutfeedLifecycleHook(session_run_hook.SessionRunHook):
  """Deletes the outfeed buffers when this hook is garbage collected."""
  def __init__(self, outfeed):
    self._outfeed = outfeed
    # Becomes True once the outfeed has actually been allocated.
    self._should_delete = False
  def after_run(self, run_context, run_values):
    # The outfeed is allocated when the engine is executed.
    self._should_delete = True
  def _run_delete_op_in_new_graph_and_session(self):
    # The original session/graph may already be gone, so build the delete op
    # in a fresh private graph and session.
    g = ops.Graph()
    with g.as_default(), ops.device(_HOST_DEVICE):
      delete_op = gen_pop_datastream_ops.ipu_delete_outfeed(
          feed_id=self._outfeed._feed_name)  # pylint: disable=protected-access
    with session_lib.Session(graph=g) as sess:
      sess.run(delete_op)
  def __del__(self):
    if self._should_delete:
      self._run_delete_op_in_new_graph_and_session()
class _IPUGlobalStepCounterAndStopHook(session_run_hook.SessionRunHook):
  """Increments the global step by `iterations_per_loop` per session run.

  Each `session.run` executes `iterations_per_loop` on-device steps, so the
  global step is advanced by that amount afterwards, and a stop is requested
  once the final step is reached. Exactly one of `num_steps` (relative) or
  `final_step` (absolute) must be given.
  """
  def __init__(self, iterations_per_loop, num_steps, final_step):
    if num_steps is None and final_step is None:
      raise ValueError("One of `num_steps` or `final_step` must be specified.")
    if num_steps is not None and final_step is not None:
      raise ValueError(
          "Only one of `num_steps` or `final_step` can be specified.")
    self._iterations_per_loop = iterations_per_loop
    self._num_steps = num_steps
    self._final_step = final_step
  def after_create_session(self, session, coord):
    # Resolve a relative `num_steps` into an absolute final step based on
    # the current value of the global step.
    global_step = session.run(self._global_step_tensor)
    if self._final_step is None:
      self._final_step = global_step + self._num_steps
  def begin(self):
    self._global_step_tensor = training_util.get_global_step()
    # The increment runs on the host; the IPUEstimator owns the global step.
    with ops.device(_HOST_DEVICE):
      self._increment_op = self._global_step_tensor.assign_add(
          self._iterations_per_loop)
  def after_run(self, run_context, run_values):
    global_step = run_context.session.run(self._increment_op)
    if global_step >= self._final_step:
      run_context.request_stop()
def _call_input_fn(input_fn, mode, params, config, input_context):
  """Calls `input_fn` on the host device with only the args it accepts."""
  accepted_args = function_utils.fn_args(input_fn)
  # Pass each optional argument only if the input_fn declares it.
  kwargs = {
      key: value
      for key, value in (("mode", mode), ("params", params),
                         ("config", config))
      if key in accepted_args
  }
  if input_context and "input_context" in accepted_args:
    kwargs["input_context"] = input_context
  with ops.device(_HOST_DEVICE):
    return input_fn(**kwargs)
def _validate_global_step_not_incremented():
  """Raises if any op in the default graph increments the global step.

  The IPUEstimator increments the global step itself (once per loop), so a
  `model_fn` must not pass `global_step` to e.g. `Optimizer.minimize()`.
  """
  graph_ops = ops.get_default_graph().get_operations()
  offending = (op for op in graph_ops
               if op.type == _ASSIGN_ADD_OP
               and "global_step" in op.inputs[0].name)
  if any(offending):
    raise ValueError(
        "Illegal increment of the `global_step` variable in the `model_fn`. "
        "This is usually caused by passing it as an argument to the "
        "`Optimizer.minimize()` function. Please remove this argument as "
        "the IPUEstimator itself is responsible for incrementing it.")
def _validate_replicated_training_graph():
  """Verifies that a replicated training graph reduces across replicas.

  A replicated training graph must contain a cross-replica sum or mean
  (normally inserted by `CrossReplicaOptimizer`), either directly in the
  default graph or inside its resource update `FuncGraph`.

  Raises:
    ValueError: If no cross-replica reduction op is found.
  """
  def has_cross_replica_reduce_op(g):
    return any(
        op.type == _CROSS_REPLICA_SUM_OP or op.type == _CROSS_REPLICA_MEAN_OP
        for op in g.get_operations())
  graph = ops.get_default_graph()
  if has_cross_replica_reduce_op(graph):
    return
  # Also check inside the resource update `FuncGraph` if there is one.
  for op in graph.get_operations():
    if op.type == _RESOURCE_UPDATE_OP:
      resource_update_graph = graph._get_function(  # pylint: disable=protected-access
          op.get_attr("to_apply").name).graph
      if has_cross_replica_reduce_op(resource_update_graph):
        return
  # Bug fix: the original message was missing the space before "operations",
  # producing e.g. "... or IpuCrossReplicaMeanoperations were found".
  raise ValueError(
      "This is not a valid replicated training graph because no " +
      _CROSS_REPLICA_SUM_OP + " or " + _CROSS_REPLICA_MEAN_OP +
      " operations were found. Did you remember to use the " +
      "`tensorflow.python.ipu.optimizers.CrossReplicaOptimizer`?")
def _add_send_to_host_ops(tensors, ipu_device):
  """Adds an IPU-to-host send op per tensor; returns matching recv attrs."""
  recv_ops_attrs = []
  for tensor in tensors:
    model_fn_lib._check_is_tensor_or_operation(  # pylint: disable=protected-access
        tensor, "`host_call` argument")
    send_attrs = {
        "tensor_name": tensor.name,
        "send_device": ipu_device,
        "send_device_incarnation": 0,
        "recv_device": _HOST_DEVICE,
    }
    gen_sendrecv_ops.ipu_send_to_host(tensor, **send_attrs)
    # The recv op additionally needs the tensor's dtype as `T`.
    recv_ops_attrs.append(dict(send_attrs, T=tensor.dtype))
  return recv_ops_attrs
def _add_recv_at_host_ops(recv_ops_attrs):
  """Creates one host-side receive op per attribute dict."""
  return [
      gen_sendrecv_ops.ipu_recv_at_host(**attrs) for attrs in recv_ops_attrs
  ]
def _unpack_features_and_labels(args, kwargs):
if args and kwargs:
raise ValueError("Invalid dataset with both tuple and keywords")
if not args and not kwargs:
raise ValueError("Invalid dataset with neither tuple nor keywords")
if args:
if len(args) == 1:
features = args[0]
labels = None
elif len(args) == 2:
features, labels = args
else:
raise ValueError(
"Invalid dataset tuple, expected 1 or 2 elements, got {}".format(
len(args)))
else:
features = kwargs
labels = None
return features, labels
def _extract_metric_values(eval_dict):
  """Maps each metric name to its `update_op` for outfeeding.

  The `value_tensor` of each `(value_tensor, update_op)` pair is discarded:
  it is unspecified whether it evaluates before or after the `update_op`
  (e.g. there are no control dependencies between the `assign_add()` update
  ops and the tensor in `metrics_impl.mean()`), whereas the `update_op` is
  guaranteed to return the updated value.
  """
  metric_values = {}
  # Sort metrics lexicographically so the graph is identical every time.
  for name in sorted(eval_dict):
    _, update_op = eval_dict[name]
    model_fn_lib._check_is_tensor(update_op, "update_op")  # pylint: disable=protected-access
    metric_values[name] = update_op
  return metric_values
@six.add_metaclass(abc.ABCMeta)
class _ModelFnWrapperBase:
  """Interface for wrapping the user-provided `model_fn` in a loop.

  Implementations create the per-mode on-device loops and translate the
  compiled loop outputs into the values an `EstimatorSpec` requires.
  """
  # `abc.abstractproperty` is deprecated since Python 3.3; the stacked
  # `@property`/`@abc.abstractmethod` form below is the modern equivalent.
  @property
  @abc.abstractmethod
  def captured_hooks(self):
    """The `SessionRunHook`s captured while tracing the `model_fn`."""
    raise NotImplementedError()
  @abc.abstractmethod
  def create_training_loop(self):
    """Returns a callable that builds the on-device training loop."""
    raise NotImplementedError()
  @abc.abstractmethod
  def get_training_loss_and_op(self, compiled_training_loop):
    """Returns `(loss, train_op)` extracted from the compiled loop."""
    raise NotImplementedError()
  @abc.abstractmethod
  def create_evaluation_loop(self):
    """Returns a callable that builds the on-device evaluation loop."""
    raise NotImplementedError()
  @abc.abstractmethod
  def get_evaluation_loss_and_metrics(self, compiled_evaluation_loop):
    """Returns `(loss, eval_metric_ops)` extracted from the compiled loop."""
    raise NotImplementedError()
  @abc.abstractmethod
  def create_prediction_loop(self):
    """Returns a callable that builds the on-device prediction loop."""
    raise NotImplementedError()
  @abc.abstractmethod
  def get_predictions(self, compiled_prediction_loop):
    """Returns the predictions dequeued from the compiled loop."""
    raise NotImplementedError()
  @staticmethod
  @abc.abstractmethod
  def need_outfeed(mode):
    """Returns whether `mode` requires an outfeed queue."""
    raise NotImplementedError()
class _ModelFnWrapper(_ModelFnWrapperBase):
  """Wraps the user `model_fn` in on-device train/eval/predict loops.

  The loop bodies dequeue `(features, labels)` from the infeed queue, call
  the `model_fn`, and (for evaluation and prediction) enqueue results on the
  outfeed queue for the host to consume. Hooks, `host_call` and
  `eval_metrics` functions found on the returned spec are captured while the
  loop is traced and surfaced to the caller afterwards.
  """
  def __init__(self, model_fn, config, params, infeed_queue, outfeed_queue):
    self._model_fn = model_fn
    self._config = config
    self._params = params
    self._infeed_queue = infeed_queue
    self._outfeed_queue = outfeed_queue
    self._iterations_per_loop = config.ipu_run_config.iterations_per_loop
    self._replication_factor = config.ipu_run_config.num_replicas
    self._num_shards = config.ipu_run_config.num_shards
    self._ipu_device = "/device:IPU:{}".format(config.ipu_run_config.ordinal)
    # State captured from the (IPU)EstimatorSpec while tracing the model_fn.
    self._captured_hooks = []
    self._captured_host_call_fn = None
    self._captured_host_call_args = None
    self._captured_eval_metrics_fn = None
  @staticmethod
  def need_outfeed(mode):
    """Returns whether `mode` requires an outfeed queue."""
    # No outfeed for training
    return mode != model_fn_lib.ModeKeys.TRAIN
  def _loop_replica_mean(self, loop_sum):
    """Returns the per-iteration (and per-replica) mean of `loop_sum`."""
    if self._replication_factor == 1:
      return loop_sum / self._iterations_per_loop
    loop_replica_mean = ipu_ops.cross_replica_ops.cross_replica_mean(loop_sum)
    return loop_replica_mean / self._iterations_per_loop
  def _capture_hooks(self, hooks):
    # The model_fn is traced once per loop build; capturing twice would mean
    # the spec was processed more than once.
    if hooks:
      assert not self._captured_hooks, "Can only capture hooks once"
      self._captured_hooks = hooks
  @property
  def captured_hooks(self):
    """The `SessionRunHook`s captured from the spec."""
    return self._captured_hooks
  def _capture_host_call(self, host_call):
    # Stores the host_call function and inserts the IPU-to-host send ops for
    # its tensor arguments; the matching recv ops are added later on host.
    if host_call:
      assert self._captured_host_call_fn is None, \
          "Can only capture host_call once"
      self._captured_host_call_fn, tensors = host_call
      self._captured_host_call_args = _add_send_to_host_ops(
          tensors, self._ipu_device)
  def _capture_eval_metrics_fn(self, metrics_fn):
    # Remembers the host-side metrics function from `eval_metrics`.
    assert metrics_fn is not None
    assert self._captured_eval_metrics_fn is None, \
        "Can only capture eval_metrics_fn once"
    self._captured_eval_metrics_fn = metrics_fn
  def _received_host_call_args(self):
    # Host-side receive ops matching the send ops added on the IPU.
    return _add_recv_at_host_ops(self._captured_host_call_args)
  def create_training_loop(self):
    """Returns a function building the repeated on-device training loop."""
    def training_step(total_loss, *args, **kwargs):
      features, labels = _unpack_features_and_labels(args, kwargs)
      estimator_spec = self._call_model_fn(features, labels,
                                           model_fn_lib.ModeKeys.TRAIN)
      loss = estimator_spec.loss
      if loss is None:
        raise ValueError("EstimatorSpec must contain loss when training")
      train_op = estimator_spec.train_op
      if train_op is None:
        raise ValueError("EstimatorSpec must contain train_op when training")
      self._capture_hooks(estimator_spec.training_hooks)
      if isinstance(estimator_spec, IPUEstimatorSpec):
        self._capture_host_call(estimator_spec.host_call)
      # training_step will be run by xla.compile(). xla.compile() only supports
      # tensor output while train_op can be either an operation or a tensor.
      # Even though xla.compile() automatically adds operation-typed train_op as
      # control dependency of other tensor outputs, it doesn't do so for
      # tensor-typed train_op. Thus, we need to set it explicitly here.
      with ops.control_dependencies([train_op]):
        total_loss += math_ops.cast(loss, dtypes.float32)
      _validate_global_step_not_incremented()
      if self._replication_factor > 1:
        _validate_replicated_training_graph()
      return total_loss
    def training_loop():
      if self._iterations_per_loop == 1:
        # Simplify the graph by avoiding the loop.
        inputs = self._infeed_queue._dequeue()  # pylint: disable=protected-access
        args, kwargs = loops._body_arguments(inputs)  # pylint: disable=protected-access
        total_loss = training_step(_INITIAL_LOSS, *args, **kwargs)
        return total_loss
      total_loss = loops.repeat(self._iterations_per_loop,
                                training_step,
                                inputs=[_INITIAL_LOSS],
                                infeed_queue=self._infeed_queue)
      if self._captured_host_call_fn is not None:
        raise ValueError(
            "host_call is not allowed for iterations_per_loop > 1")
      return self._loop_replica_mean(total_loss)
    return training_loop
  def get_training_loss_and_op(self, compiled_training_loop):
    """Returns `(loss, train_op)` for the compiled training loop."""
    loss = compiled_training_loop[0]
    if self._captured_host_call_fn is None:
      train_op = loss
    else:
      # The base class will run both `train_op` and `loss`.
      # Let `train_op` be the return value from the host call.
      # If there is a dependency on the `loss` calculated on
      # the IPU, they will be sequenced. Otherwise they might
      # run in parallel on the IPU and CPU.
      with ops.device(_HOST_DEVICE):
        train_op = _call_host_fn(self._captured_host_call_fn,
                                 self._received_host_call_args())
    return loss, train_op
  def create_evaluation_loop(self):
    """Returns a function building the repeated on-device evaluation loop."""
    def evaluation_step(total_loss, *args, **kwargs):
      features, labels = _unpack_features_and_labels(args, kwargs)
      estimator_spec = self._call_model_fn(features, labels,
                                           model_fn_lib.ModeKeys.EVAL)
      loss = estimator_spec.loss
      if loss is None:
        raise ValueError("EstimatorSpec must contain loss when evaluating")
      eval_metric_ops = estimator_spec.eval_metric_ops
      # Plain EstimatorSpec has no `eval_metrics` attribute, hence getattr.
      eval_metrics = getattr(estimator_spec, "eval_metrics", None)
      if not eval_metric_ops and not eval_metrics:
        raise ValueError(
            "EstimatorSpec must contain either eval_metric_ops or "
            "eval_metrics when evaluating")
      if eval_metric_ops and eval_metrics:
        raise ValueError(
            "EstimatorSpec cannot contain both eval_metric_ops and "
            "eval_metrics")
      self._capture_hooks(estimator_spec.evaluation_hooks)
      if eval_metric_ops:
        # Metrics computed on the IPU: outfeed their update values.
        outfeed_values = _extract_metric_values(eval_metric_ops)
      else:
        # Metrics computed on the host: outfeed the function's inputs.
        metrics_fn, outfeed_values = eval_metrics
        self._capture_eval_metrics_fn(metrics_fn)
      total_loss += math_ops.cast(loss, dtypes.float32)
      outfeed = self._outfeed_queue.enqueue(outfeed_values)
      return total_loss, outfeed
    def evaluation_loop():
      total_loss = loops.repeat(self._iterations_per_loop,
                                evaluation_step,
                                inputs=[_INITIAL_LOSS],
                                infeed_queue=self._infeed_queue)
      return self._loop_replica_mean(total_loss)
    return evaluation_loop
  def get_evaluation_loss_and_metrics(self, compiled_evaluation_loop):
    """Returns `(loss, eval_metric_ops)` for the compiled evaluation loop."""
    loss = compiled_evaluation_loop[0]
    with ops.device(_HOST_DEVICE):
      if self._captured_eval_metrics_fn is not None:
        # Calculate metrics on the host. Control dependency on the loop needed
        # since the metric *ops* on the host must see all the enqueued inputs.
        # The metric *tensors* on the host are idempotent and will not trigger
        # another execution of the dequeue op when evaluated later.
        with ops.control_dependencies(compiled_evaluation_loop):
          inputs = self._outfeed_queue.dequeue(wait_for_completion=True)
          args, kwargs = loops._body_arguments(inputs)  # pylint: disable=protected-access
        metric_ops = self._captured_eval_metrics_fn(*args, **kwargs)
      else:
        # Metrics already calculated on IPU. Aggregate on the host. We can
        # *not* have a control dependency on the loop here as the metric
        # tensors must be idempotent, i.e. they must support evaluation
        # without triggering a new execution of the dequeue op, and our
        # pass-through metric tensors below have a data dependency on the
        # dequeue op. The metric tensors are evaluated in a separate
        # execution so they are guaranteed to see all the enqueued inputs.
        metrics = self._outfeed_queue.dequeue(wait_for_completion=True)
        metric_ops = {}
        for metric_name, metric_tensor in six.iteritems(metrics):
          # The outfeed outputs all values, but we only need the last one (the
          # most recent aggregated value) when they are calculated on IPU.
          last_metric_tensor = metric_tensor[-1]
          # For replicated graphs the tensor will have an additional replica
          # dimension, so we reduce over this dimension (if it exists).
          # Note: mean is not always correct, e.g. for root_mean_squared_error,
          # workaround is to use `eval_metrics` on host to get correct aggregation.
          # Use no-op as the update_op since updating is done inside the loop.
          metric_ops[metric_name] = (math_ops.reduce_mean(last_metric_tensor),
                                     control_flow_ops.no_op())
    return loss, metric_ops
  def create_prediction_loop(self):
    """Returns a function building the repeated on-device prediction loop."""
    def prediction_step(*args, **kwargs):
      features, _ = _unpack_features_and_labels(args, kwargs)
      labels = None  # Do not provide labels for prediction
      estimator_spec = self._call_model_fn(features, labels,
                                           model_fn_lib.ModeKeys.PREDICT)
      predictions = estimator_spec.predictions
      if predictions is None:
        raise ValueError(
            "EstimatorSpec must contain predictions when predicting")
      self._capture_hooks(estimator_spec.prediction_hooks)
      outfeed = self._outfeed_queue.enqueue(predictions)
      return outfeed
    def prediction_loop():
      return loops.repeat(self._iterations_per_loop,
                          prediction_step,
                          infeed_queue=self._infeed_queue)
    return prediction_loop
  def get_predictions(self, compiled_prediction_loop):
    """Dequeues the predictions produced by the compiled prediction loop."""
    with ops.device(_HOST_DEVICE):
      # The dequeue must not run before the loop has enqueued everything.
      with ops.control_dependencies([compiled_prediction_loop]):
        predictions = self._outfeed_queue.dequeue(wait_for_completion=True)
    return predictions
  def _call_model_fn(self, features, labels, mode):
    """Calls the user `model_fn` with only the args it accepts."""
    model_fn_args = function_utils.fn_args(self._model_fn)
    kwargs = {}
    if "labels" in model_fn_args:
      kwargs["labels"] = labels
    else:
      if labels is not None:
        raise ValueError(
            "model_fn does not take labels, but input_fn returns labels.")
    if "mode" in model_fn_args:
      kwargs["mode"] = mode
    if "params" in model_fn_args:
      kwargs["params"] = self._params
    if "config" in model_fn_args:
      kwargs["config"] = self._config
    estimator_spec = self._model_fn(features=features, **kwargs)
    valid_classes = (IPUEstimatorSpec, model_fn_lib.EstimatorSpec)
    if not isinstance(estimator_spec, valid_classes):
      raise ValueError("`model_fn` must return {}".format(" or ".join(
          [cls.__name__ for cls in valid_classes])))
    return estimator_spec
def _call_host_fn(host_call_fn, host_call_args):
  """Runs the captured `host_call` function on its received tensors."""
  assert host_call_fn is not None
  assert host_call_args is not None
  result = host_call_fn(*host_call_args)
  model_fn_lib._check_is_tensor_or_operation(  # pylint: disable=protected-access
      result, "`host_call` return value")
  return result
def _get_input_context():
  """Returns the worker's input context under IPUMultiWorkerStrategyV1.

  Returns `None` when no such strategy is active.
  """
  strategy = distribution_strategy_context.get_strategy()
  if not isinstance(strategy, IPUMultiWorkerStrategyV1):
    return None
  return strategy.extended._make_input_context()  # pylint: disable=protected-access
def _augment_model_fn(model_fn, wrapper_class, ipu_device):
  """Wraps the `model_fn`, feeds it with queues, and returns a new
  `model_fn` that returns a regular `EstimatorSpec`. This `model_fn` wraps
  all the IPU support and can be passed to the regular `Estimator` class."""
  def _model_fn(features, labels, mode, config, params):
    # The Estimator-provided features/labels are ignored: data is streamed
    # through the IPU infeed queue built from the input_fn below.
    del features, labels  # We call the input_fn directly from here instead
    input_fn = params[_INPUT_FN_KEY]
    input_context = _get_input_context()
    dataset = _call_input_fn(input_fn, mode, params, config, input_context)
    # DatasetV1 (the current alias of Dataset) inherits
    # from DatasetV2, so this allows both.
    if not isinstance(dataset, dataset_ops.DatasetV2):
      raise ValueError("input_fn must return Dataset")
    hooks = []
    infeed_queue = ipu_infeed_queue.IPUInfeedQueue(
        dataset, prefetch_depth=config.ipu_run_config.prefetch_depth)
    hooks.append(_IPUInfeedLifecycleHook(infeed_queue))
    # Only evaluation and prediction need an outfeed queue.
    if not wrapper_class.need_outfeed(mode):
      outfeed_queue = None
    else:
      outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue(
          outfeed_mode=ipu_outfeed_queue.IPUOutfeedMode.ALL)
      hooks.append(_IPUOutfeedLifecycleHook(outfeed_queue))
    if config.ipu_run_config.ipu_options is not None:
      hooks.append(
          _IPUConfigureIPUSystemHook(config.ipu_run_config,
                                     host_device=_HOST_DEVICE))
    wrapped_model_fn = wrapper_class(model_fn, config, params, infeed_queue,
                                     outfeed_queue)
    # Build the loop for the requested mode.
    if mode == model_fn_lib.ModeKeys.TRAIN:
      loop = wrapped_model_fn.create_training_loop()
    elif mode == model_fn_lib.ModeKeys.EVAL:
      loop = wrapped_model_fn.create_evaluation_loop()
    elif mode == model_fn_lib.ModeKeys.PREDICT:
      loop = wrapped_model_fn.create_prediction_loop()
    else:
      raise ValueError("Unknown mode: {}".format(mode))
    # Compile the loop for the IPU with XLA.
    with ipu_scope(ipu_device):
      compiled_loop = ipu_compiler.compile(loop)
    if config.ipu_run_config.compile_summary:
      raise NotImplementedError(
          "Generating compilation summaries for the IPUEstimator through"
          " IPURunConfig.compile_summary is deprecated, is non-functional and"
          " will be removed in a future release. Use the PopVision suite of"
          " analysis tools to profile IPU programs.")
    ipu_utils.move_variable_initialization_to_cpu()
    # Hooks captured from the user's spec run alongside the lifecycle hooks.
    hooks.extend(wrapped_model_fn.captured_hooks)
    if mode == model_fn_lib.ModeKeys.TRAIN:
      loss, train_op = wrapped_model_fn.get_training_loss_and_op(compiled_loop)
      return model_fn_lib.EstimatorSpec(mode=mode,
                                        loss=loss,
                                        train_op=train_op,
                                        training_hooks=hooks)
    elif mode == model_fn_lib.ModeKeys.PREDICT:
      predictions = wrapped_model_fn.get_predictions(compiled_loop)
      return model_fn_lib.EstimatorSpec(mode=mode,
                                        predictions=predictions,
                                        prediction_hooks=hooks)
    elif mode == model_fn_lib.ModeKeys.EVAL:
      loss, eval_metric_ops = wrapped_model_fn.get_evaluation_loss_and_metrics(
          compiled_loop)
      return model_fn_lib.EstimatorSpec(mode=mode,
                                        loss=loss,
                                        eval_metric_ops=eval_metric_ops,
                                        evaluation_hooks=hooks)
    else:
      raise ValueError("Unknown mode: {}".format(mode))
  return _model_fn
def _calc_batch_size(global_batch_size, num_workers, num_replicas, name):
if global_batch_size is None:
return None
if global_batch_size < 1:
raise ValueError("{} (got {}) must be positive".format(
name, global_batch_size))
batch_size, remainder = divmod(global_batch_size, num_workers * num_replicas)
if remainder != 0:
raise ValueError(
"{} (got {}) must be divisible by num_workers * num_replicas ({} * {})"
.format(name, global_batch_size, num_workers, num_replicas))
return batch_size
class _IPUEstimatorBase(estimator_lib.Estimator):
  def __init__(self,
               model_fn,
               model_dir=None,
               config=None,
               params=None,
               warm_start_from=None,
               train_batch_size=None,
               eval_batch_size=None,
               predict_batch_size=None):
    """Initializes the estimator and derives per-replica batch sizes.

    The `*_batch_size` arguments are *global* batch sizes; each is divided
    evenly between the workers and replicas, and the per-replica result is
    later passed to `input_fn` via `params["batch_size"]`.
    """
    # Base Estimator does not allow for overriding public APIs as of June 2019
    estimator_lib.Estimator._assert_members_are_not_overridden = lambda _: None
    if config is None or not isinstance(config, ipu_run_config.RunConfig):
      raise ValueError(
          "`config` must be provided with type `ipu_run_config.RunConfig`")
    if params is not None and not isinstance(params, dict):
      raise ValueError('`params` is expected to be of type `dict`')
    if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
      raise ValueError('{} are reserved keys but existed in params {}.'.format(
          _RESERVED_PARAMS_KEYS, params))
    self._any_batch_size_provided = ((train_batch_size is not None)
                                     or (eval_batch_size is not None)
                                     or (predict_batch_size is not None))
    # A user-provided params["batch_size"] would be silently overwritten by
    # _setup_params(), so reject the combination up front.
    if (self._any_batch_size_provided and params is not None
        and _BATCH_SIZE_KEY in params):
      raise ValueError(
          "{} cannot be passed in params when a batch size argument is passed".
          format(_BATCH_SIZE_KEY))
    # pylint: disable=protected-access
    num_train_workers = config._train_distribute.num_replicas_in_sync if \
        config._train_distribute else 1
    num_eval_workers = config._eval_distribute.num_replicas_in_sync if \
        config._eval_distribute else 1
    # pylint: enable=protected-access
    num_replicas = config.ipu_run_config.num_replicas
    self._ipu_device = "/device:IPU:{}".format(config.ipu_run_config.ordinal)
    self._batch_size_for_train = _calc_batch_size(train_batch_size,
                                                  num_train_workers,
                                                  num_replicas,
                                                  "train_batch_size")
    self._batch_size_for_eval = _calc_batch_size(eval_batch_size,
                                                 num_eval_workers,
                                                 num_replicas,
                                                 "eval_batch_size")
    # Prediction is not distributed, hence a single worker.
    self._batch_size_for_predict = _calc_batch_size(predict_batch_size, 1,
                                                    num_replicas,
                                                    "predict_batch_size")
    self._validate_config(config)
    super().__init__(model_fn=model_fn,
                     model_dir=model_dir,
                     config=config,
                     params=params,
                     warm_start_from=warm_start_from)
def _setup_params(self, input_fn, batch_size):
self._params[_INPUT_FN_KEY] = input_fn
if self._any_batch_size_provided:
if batch_size is not None:
if "params" not in function_utils.fn_args(input_fn):
raise ValueError(
"input_fn must have params argument to receive params['{}']".
format(_BATCH_SIZE_KEY))
self._params[_BATCH_SIZE_KEY] = batch_size
else:
# Remove any value left from previous call.
self._params.pop(_BATCH_SIZE_KEY, None)
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
"""Trains a model given training data `input_fn`.
Args:
input_fn: A function that provides input data for training as minibatches.
The function should return a `tf.data.Dataset` object. The outputs of
the `Dataset` object must be a tuple `(features, labels)` where
* `features` is a `tf.Tensor` or a dictionary of string feature name to `Tensor`
* `labels` is a `Tensor` or a dictionary of string label name to `Tensor`
Both `features` and `labels` are consumed by `model_fn`.
hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
callbacks inside the training loop.
steps: Number of steps for which to train the model. `steps` works
incrementally. If you call two times `train(steps=10)` then training
occurs in total 20 steps. If you don't want to have incremental behavior
please set `max_steps` instead. If set, `max_steps` must be `None`.
max_steps: Number of total steps for which to train model. If set,
`steps` must be `None`. Two calls to `train(steps=100)` means 200
training iterations. On the other hand, two calls to `train(max_steps=100)`
means that the second call will not do any iteration since first call did all
100 steps.
saving_listeners: list of `CheckpointSaverListener` objects. Used for
callbacks that run immediately before or after checkpoint savings.
Returns:
`self`, for chaining.
"""
self._validate_steps(steps)
self._setup_params(input_fn, self._batch_size_for_train)
return super().train(input_fn=input_fn,
hooks=hooks,
steps=steps,
max_steps=max_steps,
saving_listeners=saving_listeners)
def _convert_train_steps_to_hooks(self, steps, max_steps):
return [
_IPUGlobalStepCounterAndStopHook(
self._config.ipu_run_config.iterations_per_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
return self._convert_train_steps_to_hooks(steps, max_steps=None)
def _validate_steps(self, steps):
iterations_per_loop = self.config.ipu_run_config.iterations_per_loop
if steps is not None and steps % iterations_per_loop != 0:
raise ValueError(
"steps ({}) must be a multiple of iterations_per_loop ({})".format(
steps, iterations_per_loop))
def _validate_config(self, config):
is_distributed = config._train_distribute or config._eval_distribute # pylint: disable=protected-access
if is_distributed and config.ipu_run_config.iterations_per_loop > 1:
raise NotImplementedError(
"iterations_per_loop > 1 (got {}) not supported with distribution".
format(config.ipu_run_config.iterations_per_loop))
  def _create_global_step(self, graph):
    """Creates the global step as a host-placed, IPU-cached resource variable."""
    # Overridden to make sure it is a resource variable and placed on the host,
    # while being cached on the IPU. It must be a resource variable for the
    # _validate_global_step_not_incremented() check to work, otherwise it fails
    # too early. It must be cached on the IPU in order to be readable from the
    # model function.
    graph = graph or ops.get_default_graph()
    if training_util.get_global_step(graph) is not None:
      raise ValueError('"global_step" already exists.')
    with graph.as_default() as g, g.name_scope(None), ops.device(_HOST_DEVICE):
      return variable_scope.get_variable(
          ops.GraphKeys.GLOBAL_STEP,
          shape=[],
          dtype=dtypes.int64,
          initializer=init_ops.zeros_initializer(),
          trainable=False,
          use_resource=True,
          caching_device=self._ipu_device,
          aggregation=variables.VariableAggregation.ONLY_FIRST_REPLICA,
          collections=[
              ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP
          ])
def _flatten_predictions(self, predictions):
for nested_predictions in predictions:
if isinstance(nested_predictions, dict):
for i in range(self._extract_batch_length(nested_predictions)):
yield {
key: value[i]
for key, value in six.iteritems(nested_predictions)
}
else:
for prediction in nested_predictions:
yield prediction
def evaluate(self,
input_fn,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
"""Evaluates the model given evaluation data `input_fn`.
Args:
input_fn: A function that constructs the input data for evaluation.
The function should return a `tf.data.Dataset` object. The outputs of
the `Dataset` object must be a tuple `(features, labels)` where
* `features` is a `tf.Tensor` or a dictionary of string feature name to `Tensor`
* `labels` is a `Tensor` or a dictionary of string label name to `Tensor`
Both `features` and `labels` are consumed by `model_fn`.
steps: Number of steps for which to evaluate model.
hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
callbacks inside the evaluation call.
checkpoint_path: Path of a specific checkpoint to evaluate. If `None`, the
latest checkpoint in `model_dir` is used. If there are no checkpoints
in `model_dir`, evaluation is run with newly initialized `Variables`
instead of ones restored from checkpoint.
name: Name of the evaluation if user needs to run multiple evaluations on
different data sets, such as on training data vs test data. Metrics for
different evaluations are saved in separate folders, and appear
separately in tensorboard.
Returns:
A dict containing the evaluation metrics specified in `model_fn` keyed by
name, as well as an entry `global_step` which contains the value of the
global step for which this evaluation was performed.
"""
self._validate_steps(steps)
self._setup_params(input_fn, self._batch_size_for_eval)
return super().evaluate(input_fn=input_fn,
hooks=hooks,
steps=steps,
checkpoint_path=checkpoint_path,
name=name)
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True,
num_predictions=None):
"""Yields predictions for given features.
Args:
input_fn: A function that constructs the features. The function should
return a `tf.data.Dataset` object. The outputs of the `Dataset` object
should be one of the following:
* features: A `Tensor` or a dictionary of string feature name to
`Tensor`. features are consumed by `model_fn`.
* A tuple, in which case the first item is extracted as features.
predict_keys: list of `str`, name of the keys to predict. It is used if
the `tf.estimator.EstimatorSpec.predictions` is a `dict`. If
`predict_keys` is used then rest of the predictions will be filtered
from the dictionary. If `None`, returns all.
hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
callbacks inside the prediction call.
checkpoint_path: Path of a specific checkpoint to predict. If `None`, the
latest checkpoint in `model_dir` is used. If there are no checkpoints
in `model_dir`, prediction is run with newly initialized `Variables`
instead of ones restored from checkpoint.
yield_single_examples: If `False`, yields the whole batch as returned by
the `model_fn` instead of decomposing the batch into individual
elements. This is useful if `model_fn` returns some tensors whose first
dimension is not equal to the batch size.
num_predictions: If not `None`, the generator will raise `StopIteration`
after yielding this number of predictions. This allows draining the
generator by using :code:`list(predictions)`. If `None`, the returned
generator is infinite and will trigger a fatal error if you try to
consume more predictions from it than what is actually generated,
instead of raising the `StopIteration` exception. This is caused by
the current behaviour when requesting to run a loop on the IPU for
more iterations than there are elements remaining in the dataset.
In this case you cannot drain it by using :code:`list(predictions)`,
you have to consume the expected number of elements yourself, e.g.
using :code:`[next(predictions) for _ in range(num_predictions)]`.
Yields:
Evaluated values of `predictions` tensors.
"""
self._setup_params(input_fn, self._batch_size_for_predict)
predictions = super().predict(input_fn=input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples)
# If yield_single_examples == True, the base class has
# already flattened the outermost iterations_per_loop
# dimension, but we also want to flatten the batch dimension.
# If however yield_single_examples == False, we need to
# flatten the iterations_per_loop dimension ourselves.
# So in both cases we need to flatten the output here.
flat_predictions = self._flatten_predictions(predictions)
# Raise StopIteration after num_predictions (if not None)
return itertools.islice(flat_predictions, num_predictions)
class IPUEstimator(_IPUEstimatorBase):
"""Estimator with IPU support.
IPUEstimator handles many of the details of running on IPUs, such as
placement of operations and tensors, graph compilation and usage of
data feeds. It also provides a simple way to use multiple IPUs in the
form of either data parallelism or model parallelism.
The data parallelism is based on graph replication. One batch from the
dataset returned by the `input_fn` (of size `batch_size`) is sent to each
replica, giving an effective batch size of `num_replicas * batch_size`.
The only change needed to the `model_fn` is that the optimizer should be
wrapped in a
:class:`~tensorflow.python.ipu.optimizers.CrossReplicaOptimizer`
in order to average the gradients across the replicas.
This can also be combined with distributed multi-worker training using the
:class:`~tensorflow.python.ipu.ipu_multi_worker_strategy.IPUMultiWorkerStrategyV1`,
giving a total effective batch size of
`num_workers * num_replicas * batch_size`.
The desired global batch size can be passed as `train_batch_size`,
`eval_batch_size` and `predict_batch_size`, and the local batch size will be
calculated based on the number of replicas and the number of distributed
workers and passed to the `input_fn` and `model_fn` in
`params['batch_size']`. If the `input_fn` returns a dataset batched with
`dataset.batch(params['batch_size'], drop_remainder=True)`, the global batch
size will be as desired.
The model parallelism supported by this class is basic sharding. Consider
using the
:class:`~tensorflow.python.ipu.ipu_pipeline_estimator.IPUPipelineEstimator`
to get pipelined execution.
For efficiency, it supports compiling a graph that contains multiple
iterations of the training/prediction/evaluation loop, which will be
fully executed on the IPU before yielding back to the TensorFlow
Python runtime on the CPU.
See https://tensorflow.org/guide/estimators for general information
about estimators.
Args:
model_fn: The model function. Refer to
https://github.com/tensorflow/docs/blob/master/site/en/r1/guide/custom_estimators.md#write-a-model-function
for details on how to write this function.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `PathLike` object, the
path will be resolved. If `None`, the model_dir in `config` will be used
if set. If both are set, they must be same. If both are `None`, a
temporary directory will be used.
config: A :class:`~tensorflow.python.ipu.ipu_run_config.RunConfig` object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings`
object to fully configure warm-starting. If the string
filepath is provided instead of a
`tf.estimator.WarmStartSettings`, then all variables are
warm-started, and it is assumed that vocabularies
and `tf.Tensor` names are unchanged.
train_batch_size: If not None, an int representing the global training
batch size. This global batch size is transformed to a local batch size
passed as `params['batch_size']` to the `input_fn` and `model_fn` during
training. Must be divisible by the number of replicas multiplied by the
number of distributed workers.
eval_batch_size: If not None, an int representing the global evaluation
batch size. Same behaviour as train_batch_size, only during evaluation.
predict_batch_size: If not None, an int representing the global prediction
batch size. Same behaviour as train_batch_size, only during prediction.
"""
def __init__(self,
model_fn,
model_dir=None,
config=None,
params=None,
warm_start_from=None,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None):
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
ipu_device = "/device:IPU:{}".format(config.ipu_run_config.ordinal)
model_function = _augment_model_fn(model_fn, _ModelFnWrapper, ipu_device)
super().__init__(model_fn=model_function,
model_dir=model_dir,
config=config,
params=params,
warm_start_from=warm_start_from,
train_batch_size=train_batch_size,
eval_batch_size=eval_batch_size,
predict_batch_size=predict_batch_size)
| 41.604032 | 113 | 0.69162 |
e4c5d6ca1d9c0b827dc238f0e8a3d31b74bb84fc | 3,458 | py | Python | sonnet/src/conformance/distribute_test.py | chentingpc/sonnet | adae0b2e1700037bab289437f778ec4192b9881e | [
"Apache-2.0"
] | 1 | 2021-05-03T20:22:20.000Z | 2021-05-03T20:22:20.000Z | sonnet/src/conformance/distribute_test.py | chentingpc/sonnet | adae0b2e1700037bab289437f778ec4192b9881e | [
"Apache-2.0"
] | null | null | null | sonnet/src/conformance/distribute_test.py | chentingpc/sonnet | adae0b2e1700037bab289437f778ec4192b9881e | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests Sonnet and TF Distribution Strategy."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
from absl.testing import parameterized
import sonnet as snt
from sonnet.src import replicator_test_utils as replicator_utils
from sonnet.src import test_utils
from sonnet.src.conformance import descriptors
from sonnet.src.conformance import goldens
import tensorflow as tf
from typing import Callable, Tuple
class TpuReplicatorTest(test_utils.TestCase, parameterized.TestCase):
@test_utils.combined_named_parameters(goldens.named_goldens(),
replicator_utils.named_replicators())
def test_variable_creation_in_replica_context(self, golden, replicator_fn):
tf.random.set_seed(None)
replicator = replicator_fn()
with replicator.scope():
mod = golden.create_module()
@tf.function
def forward():
step = lambda: golden.create_all_variables(mod)
return replicator.experimental_run_v2(step)
# TODO(b/132329316) Remove when `xla.compile` allows tf.device(TPU).
with tf.device(None):
variables_per_replica = forward()
self.assertLen(variables_per_replica, golden.num_variables)
for per_replica_variable in variables_per_replica:
self.assertSameValuePerReplica(replicator, per_replica_variable)
def assertSameValuePerReplica(self, replicator, per_replica):
per_replica = replicator.experimental_local_results(per_replica)
first_replica = per_replica[0]
for nth_replica in per_replica[1:]:
self.assertAllEqual(first_replica, nth_replica)
@test_utils.combined_named_parameters(descriptors.RNN_CORES,
test_utils.named_bools("dynamic"),
replicator_utils.named_replicators())
def test_unroll(
self,
core_fn: Callable[[], snt.RNNCore],
input_shape: Tuple[int],
dtype: tf.DType,
dynamic: bool,
replicator_fn: tf.distribute.Strategy,
):
replicator = replicator_fn()
with replicator.scope():
core = core_fn()
def forward():
unroll = snt.dynamic_unroll if dynamic else snt.static_unroll
sequence = tf.ones((1,) + input_shape, dtype)
state = core.initial_state(input_shape[0])
return unroll(core, sequence, state)
# TODO(b/132329316) Remove when `xla.compile` allows tf.device(TPU).
with tf.device(None):
out_sequence, final_state = replicator.experimental_run_v2(forward)
self.assertSameValuePerReplica(replicator, out_sequence)
self.assertSameValuePerReplica(replicator, final_state)
if __name__ == "__main__":
# tf.enable_v2_behavior()
tf.test.main()
| 36.787234 | 78 | 0.716599 |
a1e40a7e0222a9506bc22818eb00744f41ac156f | 207 | py | Python | src/snowflake/connector/feature.py | stevebeck89/snowflake-connector-python | 739f9c2604116e1060268c437c1696c4e38862e2 | [
"Apache-2.0"
] | null | null | null | src/snowflake/connector/feature.py | stevebeck89/snowflake-connector-python | 739f9c2604116e1060268c437c1696c4e38862e2 | [
"Apache-2.0"
] | 4 | 2021-11-22T17:24:18.000Z | 2022-01-31T17:22:21.000Z | src/snowflake/connector/feature.py | stevebeck89/snowflake-connector-python | 739f9c2604116e1060268c437c1696c4e38862e2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All right reserved.
#
# Feature flags
feature_use_pyopenssl = True # use pyopenssl API or openssl command
| 23 | 70 | 0.714976 |
4e19ba370e7fffb78aed7a3e554710f88d65498a | 41,648 | py | Python | pysnmp-with-texts/H3C-STORAGE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/H3C-STORAGE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/H3C-STORAGE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module H3C-STORAGE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/H3C-STORAGE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:23:53 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint")
entPhysicalIndex, entPhysicalName, PhysicalIndex, entPhysicalDescr = mibBuilder.importSymbols("ENTITY-MIB", "entPhysicalIndex", "entPhysicalName", "PhysicalIndex", "entPhysicalDescr")
h3cDiskPowerOffReason, = mibBuilder.importSymbols("H3C-DISK-MIB", "h3cDiskPowerOffReason")
h3cEntityExtCriticalLowerTemperatureThreshold, h3cEntityExtPhysicalIndex, h3cEntityExtShutdownLowerTemperatureThreshold, h3cEntityExtTemperature, h3cEntityExtOperStatus = mibBuilder.importSymbols("H3C-ENTITY-EXT-MIB", "h3cEntityExtCriticalLowerTemperatureThreshold", "h3cEntityExtPhysicalIndex", "h3cEntityExtShutdownLowerTemperatureThreshold", "h3cEntityExtTemperature", "h3cEntityExtOperStatus")
h3cRaidUuid, h3cRaidRunState, h3cRaidHideState, h3cRaidName = mibBuilder.importSymbols("H3C-RAID-MIB", "h3cRaidUuid", "h3cRaidRunState", "h3cRaidHideState", "h3cRaidName")
H3cStorageActionType, H3cStorageEnableState, H3cStorageCapableState, H3cStorageLedStateType, H3cSoftwareInfoString, h3cStorageRef, H3cWwpnListType = mibBuilder.importSymbols("H3C-STORAGE-REF-MIB", "H3cStorageActionType", "H3cStorageEnableState", "H3cStorageCapableState", "H3cStorageLedStateType", "H3cSoftwareInfoString", "h3cStorageRef", "H3cWwpnListType")
InetAddressType, InetAddress = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType", "InetAddress")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, ModuleIdentity, NotificationType, TimeTicks, Counter64, Gauge32, MibIdentifier, ObjectIdentity, Bits, IpAddress, iso, Integer32, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "ModuleIdentity", "NotificationType", "TimeTicks", "Counter64", "Gauge32", "MibIdentifier", "ObjectIdentity", "Bits", "IpAddress", "iso", "Integer32", "Counter32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
h3cStorageMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1))
if mibBuilder.loadTexts: h3cStorageMIB.setLastUpdated('200709041452Z')
if mibBuilder.loadTexts: h3cStorageMIB.setOrganization('H3C Technologies Co., Ltd.')
if mibBuilder.loadTexts: h3cStorageMIB.setContactInfo('Platform Team H3C Technologies Co., Ltd. Hai-Dian District Beijing P.R. China Http://www.h3c.com Zip:100085')
if mibBuilder.loadTexts: h3cStorageMIB.setDescription('This MIB describes the general information of storage device, such as the property, configuration and management.')
h3cStorageMibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1))
h3cStorageServerInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1))
h3cStoragePhysicalInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2))
h3cStorageServerCapability = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 1))
h3cRaidCapability = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 1, 1), H3cStorageCapableState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRaidCapability.setStatus('current')
if mibBuilder.loadTexts: h3cRaidCapability.setDescription('This object identifies if the agent support raid management.')
h3cFcCapability = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 1, 2), H3cStorageCapableState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cFcCapability.setStatus('current')
if mibBuilder.loadTexts: h3cFcCapability.setDescription('This object identifies if the agent support FC(fiber channel) management.')
h3cNasCapability = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 1, 3), H3cStorageCapableState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cNasCapability.setStatus('current')
if mibBuilder.loadTexts: h3cNasCapability.setDescription('This object identifies if the agent support NAS(Network Attached Storage).')
h3cAdaptiveRepCapability = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 1, 4), H3cStorageCapableState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cAdaptiveRepCapability.setStatus('current')
if mibBuilder.loadTexts: h3cAdaptiveRepCapability.setDescription('This object identifies if the agent support adaptive replication. With adaptive replication option, data from the primary resource is continuously replicated to a replication resource unless the system determines it is not practical or possible, such as when there is insufficient bandwidth. For continuous replication to occur, a continuous replication resource is used to stage the data being replicated from the primary resource. Similar to a cache, as soon as data comes into the continuous replication resource, it is written to the replication resource.')
h3cRemoteRepCapability = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 1, 5), H3cStorageCapableState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRemoteRepCapability.setStatus('current')
if mibBuilder.loadTexts: h3cRemoteRepCapability.setDescription('This object identifies if the agent support remote replication.')
h3cSafeCacheCapability = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 1, 6), H3cStorageCapableState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSafeCacheCapability.setStatus('current')
if mibBuilder.loadTexts: h3cSafeCacheCapability.setDescription('This object identifies if the agent support safe cache.')
h3cSyncMirrorCapability = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 1, 7), H3cStorageCapableState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSyncMirrorCapability.setStatus('current')
if mibBuilder.loadTexts: h3cSyncMirrorCapability.setDescription('This object identifies if the agent support synchronized mirror.')
h3cAsyncMirrorCapability = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 1, 8), H3cStorageCapableState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cAsyncMirrorCapability.setStatus('current')
if mibBuilder.loadTexts: h3cAsyncMirrorCapability.setDescription('This object identifies if the agent support asynchronous mirror.')
h3cTimeMarkCapability = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 1, 9), H3cStorageCapableState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cTimeMarkCapability.setStatus('current')
if mibBuilder.loadTexts: h3cTimeMarkCapability.setDescription('This object identifies if the agent support time mark.')
h3cSseCapability = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 1, 10), H3cStorageCapableState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSseCapability.setStatus('current')
if mibBuilder.loadTexts: h3cSseCapability.setDescription('This object identifies if the agent support SSE(Storage Service Enable).')
h3cStorageTargetConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 2))
h3ciSCSITargetEnable = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 2, 1), H3cStorageEnableState().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3ciSCSITargetEnable.setStatus('current')
if mibBuilder.loadTexts: h3ciSCSITargetEnable.setDescription('This object identifies if the agent support iSCSI target.')
h3cFcTargetEnable = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 2, 2), H3cStorageEnableState()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cFcTargetEnable.setStatus('current')
if mibBuilder.loadTexts: h3cFcTargetEnable.setDescription('This object identifies if the agent support FC target.')
h3cStorageServerPhysInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 3))
h3cServerLocationLedState = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 3, 1), H3cStorageLedStateType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cServerLocationLedState.setStatus('current')
if mibBuilder.loadTexts: h3cServerLocationLedState.setDescription("This object identifies the state of storage device's location led.")
h3cServerResetButtonState = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 3, 2), H3cStorageEnableState().clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cServerResetButtonState.setStatus('current')
if mibBuilder.loadTexts: h3cServerResetButtonState.setDescription('This object identifies if the reset button is enable.')
h3cServerPowerButtonState = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 3, 3), H3cStorageEnableState().clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cServerPowerButtonState.setStatus('current')
if mibBuilder.loadTexts: h3cServerPowerButtonState.setDescription('This object identifies if the power button is enable.')
h3cServerPowerState = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 1, 3, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("online", 1), ("onlinebypass", 2), ("onbattery", 3), ("unknown", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cServerPowerState.setStatus('current')
if mibBuilder.loadTexts: h3cServerPowerState.setDescription("This object identifies the state of power supply in the server, and is valid when UPS has been configured successful. The value 'online' means power supply is well-balanced. The value 'onlinebypass' means supply power without ups. The value 'onbattery' means supply power with battery. The value 'unknown' means the power supply state is unknown.")
h3cDeuTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 1), )
if mibBuilder.loadTexts: h3cDeuTable.setStatus('current')
if mibBuilder.loadTexts: h3cDeuTable.setDescription('This table contains some information of a particular DEU(Disk Enclosure Unit).')
h3cDeuEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 1, 1), ).setIndexNames((0, "H3C-STORAGE-MIB", "h3cDeuIndex"))
if mibBuilder.loadTexts: h3cDeuEntry.setStatus('current')
if mibBuilder.loadTexts: h3cDeuEntry.setDescription('An entry containing management information applicable to a particular DEU.')
h3cDeuIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: h3cDeuIndex.setStatus('current')
if mibBuilder.loadTexts: h3cDeuIndex.setDescription('This object identifies the index of h3cDeuTable. The DEU identified by a particular value of this index is the same object as identified by the same value of entPhysicalIndex.')
h3cDeuIDLed = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 1, 1, 2), H3cStorageLedStateType().clone('off')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cDeuIDLed.setStatus('current')
if mibBuilder.loadTexts: h3cDeuIDLed.setDescription("This object identifies the state of storage DEU's led.")
h3cDeuDiskScan = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 1, 1, 3), H3cStorageActionType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cDeuDiskScan.setStatus('current')
if mibBuilder.loadTexts: h3cDeuDiskScan.setDescription("The operation of scanning the table. When the value of this object is set to 'action', the operation of disk scanning is touched off.")
h3cStorageInterfaceTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 2), )
if mibBuilder.loadTexts: h3cStorageInterfaceTable.setStatus('current')
if mibBuilder.loadTexts: h3cStorageInterfaceTable.setDescription('This table describes the interface information of the network.')
h3cStorageInterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 2, 1), ).setIndexNames((0, "H3C-STORAGE-MIB", "h3cStorageInterfaceIndex"))
if mibBuilder.loadTexts: h3cStorageInterfaceEntry.setStatus('current')
if mibBuilder.loadTexts: h3cStorageInterfaceEntry.setDescription('An entry containing management information applicable to a particular interface.')
h3cStorageInterfaceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 2, 1, 1), Integer32())
if mibBuilder.loadTexts: h3cStorageInterfaceIndex.setStatus('current')
if mibBuilder.loadTexts: h3cStorageInterfaceIndex.setDescription('This object identifies the index of H3cStorageInterfaceEntry. The interface identified by this index is the same interface as identified by the same value of ifIndex.')
h3cStorageInterfaceGateway = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 2, 1, 2), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cStorageInterfaceGateway.setStatus('current')
if mibBuilder.loadTexts: h3cStorageInterfaceGateway.setDescription('This object identifies the gateway address of interface.')
h3cStorageInterfaceGatewayType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 2, 1, 3), InetAddressType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cStorageInterfaceGatewayType.setStatus('current')
if mibBuilder.loadTexts: h3cStorageInterfaceGatewayType.setDescription('This object identifies the gateway address type of interface.')
h3cStorageInterfaceMTU = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1500, 9000))).clone(namedValues=NamedValues(("mtu1", 1500), ("mtu2", 9000)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cStorageInterfaceMTU.setStatus('current')
if mibBuilder.loadTexts: h3cStorageInterfaceMTU.setDescription('This object identifies the MTU of interface.')
h3cBondingTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 3), )
if mibBuilder.loadTexts: h3cBondingTable.setStatus('current')
if mibBuilder.loadTexts: h3cBondingTable.setDescription('This table describes the information of the Bonding interface.')
h3cBondingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 3, 1), ).setIndexNames((0, "H3C-STORAGE-MIB", "h3cBondingIndex"))
if mibBuilder.loadTexts: h3cBondingEntry.setStatus('current')
if mibBuilder.loadTexts: h3cBondingEntry.setDescription('An entry containing management information applicable to a particular Bonding interface.')
h3cBondingIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 3, 1, 1), Integer32())
if mibBuilder.loadTexts: h3cBondingIndex.setStatus('current')
if mibBuilder.loadTexts: h3cBondingIndex.setDescription('This object identifies the index of h3cBondingEntry. The interface identified by this index is the same interface as identified by the same value of ifIndex.')
h3cBondingPortList = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 3, 1, 2), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cBondingPortList.setStatus('current')
# ------------------------------------------------------------------------
# H3C-STORAGE-MIB object definitions.
# NOTE(review): this section looks auto-generated by a pysnmp MIB compiler
# (mibdump / build-pysnmp-mib) from the vendor's SMI module -- confirm, and
# prefer regenerating from the .mib source over hand-editing these lines.
# All OIDs live under enterprises.h3c (1.3.6.1.4.1.2011.10.10.1.1).
# ------------------------------------------------------------------------
if mibBuilder.loadTexts: h3cBondingPortList.setDescription('An entry contains management information applicable to a particular bonding interface. Every two bytes describe the ifIndex of an interface, and an interface belongs to one bonding group only. An interface should be deleted from the portlist of the bonding group it belongs to before it is added to another bonding group.')
# --- SCSI/FC adapter table: one row per adapter, indexed by
# --- (entPhysicalIndex, h3cAdapterNumber); FC-only columns are invalid
# --- when h3cAdapterType != fc (see h3cAdapterType description).
h3cScsiAdapterTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 4), )
if mibBuilder.loadTexts: h3cScsiAdapterTable.setStatus('current')
if mibBuilder.loadTexts: h3cScsiAdapterTable.setDescription('This table containing some information of the SCSI(Small Computer System interface) adapter.')
h3cScsiAdapterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 4, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"), (0, "H3C-STORAGE-MIB", "h3cAdapterNumber"))
if mibBuilder.loadTexts: h3cScsiAdapterEntry.setStatus('current')
if mibBuilder.loadTexts: h3cScsiAdapterEntry.setDescription('An entry containing management information applicable to a particular SCSI adapter.')
h3cAdapterNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 4, 1, 1), Integer32())
if mibBuilder.loadTexts: h3cAdapterNumber.setStatus('current')
if mibBuilder.loadTexts: h3cAdapterNumber.setDescription('This object identifies the index of the adapter .')
h3cAdapterDesc = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 4, 1, 2), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cAdapterDesc.setStatus('current')
if mibBuilder.loadTexts: h3cAdapterDesc.setDescription('This object describes some information of the adapter.')
h3cAdapterType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("scsi", 1), ("fc", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cAdapterType.setStatus('current')
if mibBuilder.loadTexts: h3cAdapterType.setDescription("This object identifies the type of adapter. If the value of this object is not equal to 'fc', the associated object(such as h3cFcAdapterMode, h3cFcAdapterIWWPNs,h3cFcAdapterTWWPNs,h3cFcAdapterPortState,h3cFcAdapterModeSwitch) will be invalid.")
h3cFcAdapterMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("initiator", 1), ("target", 2), ("dual", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cFcAdapterMode.setStatus('current')
if mibBuilder.loadTexts: h3cFcAdapterMode.setDescription('This object identifies the mode type of adapter.')
h3cFcAdapterInitiatorWwpnName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 4, 1, 5), H3cWwpnListType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cFcAdapterInitiatorWwpnName.setStatus('current')
if mibBuilder.loadTexts: h3cFcAdapterInitiatorWwpnName.setDescription("This object describes the FC's adapter WWPN(World Wide Port Name) numbers.")
h3cFcAdapterTargetWwpnName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 4, 1, 6), H3cWwpnListType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cFcAdapterTargetWwpnName.setStatus('current')
if mibBuilder.loadTexts: h3cFcAdapterTargetWwpnName.setDescription("This object describes the FC's target WWPN numbers.")
h3cFcAdapterPortState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 4, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("linkup", 1), ("linkdown", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cFcAdapterPortState.setStatus('current')
if mibBuilder.loadTexts: h3cFcAdapterPortState.setDescription("This object identifies the state of adapter's interface.")
h3cFcAdapterModeSwitch = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 4, 1, 8), H3cStorageEnableState().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cFcAdapterModeSwitch.setStatus('current')
if mibBuilder.loadTexts: h3cFcAdapterModeSwitch.setDescription('This object identifies if the adapter work on a target mode.')
# --- Entity-extended voltage table: per-entity voltage reading plus
# --- writable warning / critical / shutdown thresholds (low and high).
h3cExtVoltageTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 5), )
if mibBuilder.loadTexts: h3cExtVoltageTable.setStatus('current')
if mibBuilder.loadTexts: h3cExtVoltageTable.setDescription('This table contains some entity extend information of the voltage.')
h3cExtVoltageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 5, 1), ).setIndexNames((0, "H3C-STORAGE-MIB", "h3cExtVoltagePhysicalIndex"))
if mibBuilder.loadTexts: h3cExtVoltageEntry.setStatus('current')
if mibBuilder.loadTexts: h3cExtVoltageEntry.setDescription('An entry containing management information applicable to the voltage.')
h3cExtVoltagePhysicalIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 5, 1, 1), PhysicalIndex()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: h3cExtVoltagePhysicalIndex.setStatus('current')
if mibBuilder.loadTexts: h3cExtVoltagePhysicalIndex.setDescription('This object identifies the entity index of voltage.')
h3cExtVoltagePhysicalName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 5, 1, 2), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cExtVoltagePhysicalName.setStatus('current')
if mibBuilder.loadTexts: h3cExtVoltagePhysicalName.setDescription('This object identifies the entity name of voltage.')
h3cExtVoltage = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 5, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cExtVoltage.setStatus('current')
if mibBuilder.loadTexts: h3cExtVoltage.setDescription('The voltage for the entity.')
h3cExtVoltageLowThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 5, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cExtVoltageLowThreshold.setStatus('current')
if mibBuilder.loadTexts: h3cExtVoltageLowThreshold.setDescription('The low-threshold for the voltage. When voltage is lower than low-threshold, a notification will be sent.')
h3cExtVoltageHighThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 5, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cExtVoltageHighThreshold.setStatus('current')
if mibBuilder.loadTexts: h3cExtVoltageHighThreshold.setDescription('The high-threshold for the voltage. When voltage is higher than high-threshold, a notification will be sent.')
h3cExtCriticalVoltageLowThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 5, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cExtCriticalVoltageLowThreshold.setStatus('current')
if mibBuilder.loadTexts: h3cExtCriticalVoltageLowThreshold.setDescription('The critical low threshold for voltage. If the voltage is lower too much than the entity needs, the entity will halt.')
h3cExtCriticalVoltageHighThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 5, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cExtCriticalVoltageHighThreshold.setStatus('current')
if mibBuilder.loadTexts: h3cExtCriticalVoltageHighThreshold.setDescription('The critical high threshold for voltage. If the voltage is higher too much than the entity needs, the entity will halt.')
h3cExtShutdownVoltageLowThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 5, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cExtShutdownVoltageLowThreshold.setStatus('current')
if mibBuilder.loadTexts: h3cExtShutdownVoltageLowThreshold.setDescription('The voltage is lower than the critical low threshold. If the voltage is lower too much than the entity needs, the entity halts now.')
h3cExtShutdownVoltageHighThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 2, 5, 1, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cExtShutdownVoltageHighThreshold.setStatus('current')
if mibBuilder.loadTexts: h3cExtShutdownVoltageHighThreshold.setDescription('The voltage is higher than the critical high threshold. If the voltage is higher too much than the entity needs, the entity halts now.')
# --- Trap subtree: ...3.0 holds the notification types, ...3.1 holds
# --- objects carried only inside notifications (accessible-for-notify).
h3cStorageTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3))
h3cStorageTrapsPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0))
h3cStorageTrapsObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 1))
h3cSoftwareInfoString = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 1, 1), H3cSoftwareInfoString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cSoftwareInfoString.setStatus('current')
if mibBuilder.loadTexts: h3cSoftwareInfoString.setDescription("This object identifies if the software's information.")
# --- Notification (trap) definitions: temperature/voltage threshold
# --- events, RAID lifecycle events, battery and disk events.
h3cStorCriticalLowerTemperatureThresholdNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 1)).setObjects(("H3C-ENTITY-EXT-MIB", "h3cEntityExtPhysicalIndex"), ("ENTITY-MIB", "entPhysicalName"), ("H3C-ENTITY-EXT-MIB", "h3cEntityExtTemperature"), ("H3C-ENTITY-EXT-MIB", "h3cEntityExtCriticalLowerTemperatureThreshold"))
if mibBuilder.loadTexts: h3cStorCriticalLowerTemperatureThresholdNotification.setStatus('current')
if mibBuilder.loadTexts: h3cStorCriticalLowerTemperatureThresholdNotification.setDescription('The h3cStorCriticalLowerTemperatureThresholdNotification indicates the temperature is lower than the threshold. If the temperature is lower too much than the entity needs, the entity will halt.')
h3cStorTemperatureTooLow = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 2)).setObjects(("H3C-ENTITY-EXT-MIB", "h3cEntityExtPhysicalIndex"), ("ENTITY-MIB", "entPhysicalName"), ("H3C-ENTITY-EXT-MIB", "h3cEntityExtTemperature"), ("H3C-ENTITY-EXT-MIB", "h3cEntityExtShutdownLowerTemperatureThreshold"))
if mibBuilder.loadTexts: h3cStorTemperatureTooLow.setStatus('current')
if mibBuilder.loadTexts: h3cStorTemperatureTooLow.setDescription('The h3cStorTemperatureTooLow indicates the temperature is lower than the threshold. If the temperature is lower too much than the entity needs, the entity halts now.')
h3cExtVoltageLowThresholdNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 3)).setObjects(("H3C-STORAGE-MIB", "h3cExtVoltagePhysicalIndex"), ("H3C-STORAGE-MIB", "h3cExtVoltagePhysicalName"), ("H3C-STORAGE-MIB", "h3cExtVoltage"), ("H3C-STORAGE-MIB", "h3cExtVoltageLowThreshold"))
if mibBuilder.loadTexts: h3cExtVoltageLowThresholdNotification.setStatus('current')
if mibBuilder.loadTexts: h3cExtVoltageLowThresholdNotification.setDescription('The h3cExtVoltageLowThresholdNotification indicates the Voltage is lower than the low-threshold.')
h3cExtVoltageHighThresholdNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 4)).setObjects(("H3C-STORAGE-MIB", "h3cExtVoltagePhysicalIndex"), ("H3C-STORAGE-MIB", "h3cExtVoltagePhysicalName"), ("H3C-STORAGE-MIB", "h3cExtVoltage"), ("H3C-STORAGE-MIB", "h3cExtVoltageHighThreshold"))
if mibBuilder.loadTexts: h3cExtVoltageHighThresholdNotification.setStatus('current')
if mibBuilder.loadTexts: h3cExtVoltageHighThresholdNotification.setDescription('The h3cExtVoltageHighThresholdNotification indicates the Voltage is lower than the high-threshold.')
h3cExtCriticalVoltageLowThresholdNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 5)).setObjects(("H3C-STORAGE-MIB", "h3cExtVoltagePhysicalIndex"), ("H3C-STORAGE-MIB", "h3cExtVoltagePhysicalName"), ("H3C-STORAGE-MIB", "h3cExtVoltage"), ("H3C-STORAGE-MIB", "h3cExtCriticalVoltageLowThreshold"))
if mibBuilder.loadTexts: h3cExtCriticalVoltageLowThresholdNotification.setStatus('current')
if mibBuilder.loadTexts: h3cExtCriticalVoltageLowThresholdNotification.setDescription('The h3cExtCriticalVoltageLowThresholdNotification indicates the Voltage is lower than the critical low threshold.')
h3cExtCriticalVoltageHighThresholdNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 6)).setObjects(("H3C-STORAGE-MIB", "h3cExtVoltagePhysicalIndex"), ("H3C-STORAGE-MIB", "h3cExtVoltagePhysicalName"), ("H3C-STORAGE-MIB", "h3cExtVoltage"), ("H3C-STORAGE-MIB", "h3cExtCriticalVoltageHighThreshold"))
if mibBuilder.loadTexts: h3cExtCriticalVoltageHighThresholdNotification.setStatus('current')
if mibBuilder.loadTexts: h3cExtCriticalVoltageHighThresholdNotification.setDescription('The h3cExtCriticalVoltageHighThresholdNotification indicates the Voltage is higher than the critical high threshold.')
h3cExtVoltageTooLow = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 7)).setObjects(("H3C-STORAGE-MIB", "h3cExtVoltagePhysicalIndex"), ("H3C-STORAGE-MIB", "h3cExtVoltagePhysicalName"), ("H3C-STORAGE-MIB", "h3cExtVoltage"), ("H3C-STORAGE-MIB", "h3cExtShutdownVoltageLowThreshold"))
if mibBuilder.loadTexts: h3cExtVoltageTooLow.setStatus('current')
if mibBuilder.loadTexts: h3cExtVoltageTooLow.setDescription('The h3cExtVoltageTooLowNotification indicates the Voltage is lower than the critical low threshold, and the entity halt now!')
h3cExtVoltageTooHigh = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 8)).setObjects(("H3C-STORAGE-MIB", "h3cExtVoltagePhysicalIndex"), ("H3C-STORAGE-MIB", "h3cExtVoltagePhysicalName"), ("H3C-STORAGE-MIB", "h3cExtVoltage"), ("H3C-STORAGE-MIB", "h3cExtShutdownVoltageHighThreshold"))
if mibBuilder.loadTexts: h3cExtVoltageTooHigh.setStatus('current')
if mibBuilder.loadTexts: h3cExtVoltageTooHigh.setDescription('The h3cExtVoltageTooHighNotification indicates the Voltage is higher than the critical high threshold, and the entity halt now!')
h3cExtBatteryStateNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 9)).setObjects(("H3C-ENTITY-EXT-MIB", "h3cEntityExtPhysicalIndex"), ("ENTITY-MIB", "entPhysicalName"), ("H3C-ENTITY-EXT-MIB", "h3cEntityExtOperStatus"))
if mibBuilder.loadTexts: h3cExtBatteryStateNotification.setStatus('current')
if mibBuilder.loadTexts: h3cExtBatteryStateNotification.setDescription("The h3cExtBatteryStateNotification indicates the raid battery entity's state changes.")
h3cDiskIOErrorNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 10)).setObjects(("ENTITY-MIB", "entPhysicalDescr"))
if mibBuilder.loadTexts: h3cDiskIOErrorNotification.setStatus('current')
if mibBuilder.loadTexts: h3cDiskIOErrorNotification.setDescription('The h3cDiskIOErrorNotification indicates that a disk occurred I/O error.')
h3cRaidCreateNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 11)).setObjects(("H3C-RAID-MIB", "h3cRaidUuid"), ("H3C-RAID-MIB", "h3cRaidName"))
if mibBuilder.loadTexts: h3cRaidCreateNotification.setStatus('current')
if mibBuilder.loadTexts: h3cRaidCreateNotification.setDescription('The h3cRaidCreateNotification indicates that a array is created.')
h3cRaidDeleteNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 12)).setObjects(("H3C-RAID-MIB", "h3cRaidUuid"), ("H3C-RAID-MIB", "h3cRaidName"))
if mibBuilder.loadTexts: h3cRaidDeleteNotification.setStatus('current')
if mibBuilder.loadTexts: h3cRaidDeleteNotification.setDescription('The h3cRaidDeleteStateNotification indicates that a array is deleted.')
h3cRaidHideStateNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 13)).setObjects(("H3C-RAID-MIB", "h3cRaidUuid"), ("H3C-RAID-MIB", "h3cRaidName"), ("H3C-RAID-MIB", "h3cRaidHideState"))
if mibBuilder.loadTexts: h3cRaidHideStateNotification.setStatus('current')
if mibBuilder.loadTexts: h3cRaidHideStateNotification.setDescription('The h3cRaidFailedNotification indicates that a array hide or unhide.')
h3cRaidRunStateNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 14)).setObjects(("H3C-RAID-MIB", "h3cRaidUuid"), ("H3C-RAID-MIB", "h3cRaidName"), ("H3C-RAID-MIB", "h3cRaidRunState"))
if mibBuilder.loadTexts: h3cRaidRunStateNotification.setStatus('current')
if mibBuilder.loadTexts: h3cRaidRunStateNotification.setDescription('The h3cRaidDegradeNotification indicates that a array state change.')
h3cRaidImportNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 15)).setObjects(("H3C-RAID-MIB", "h3cRaidUuid"), ("H3C-RAID-MIB", "h3cRaidName"))
if mibBuilder.loadTexts: h3cRaidImportNotification.setStatus('current')
if mibBuilder.loadTexts: h3cRaidImportNotification.setDescription('The h3cRaidFailedNotification indicates that a thrid array is imported.')
h3cRaidRebuildStartNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 16)).setObjects(("H3C-RAID-MIB", "h3cRaidUuid"), ("H3C-RAID-MIB", "h3cRaidName"))
if mibBuilder.loadTexts: h3cRaidRebuildStartNotification.setStatus('current')
if mibBuilder.loadTexts: h3cRaidRebuildStartNotification.setDescription('The h3cRaidRebuildStartNotification indicates that a array start to rebuild.')
h3cRaidRebuildFinishNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 17)).setObjects(("H3C-RAID-MIB", "h3cRaidUuid"), ("H3C-RAID-MIB", "h3cRaidName"))
if mibBuilder.loadTexts: h3cRaidRebuildFinishNotification.setStatus('current')
if mibBuilder.loadTexts: h3cRaidRebuildFinishNotification.setDescription('The h3cRaidRebuildStartNotification indicates that a array fininsh rebuild.')
h3cRaidRebuildPauseNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 18)).setObjects(("H3C-RAID-MIB", "h3cRaidUuid"), ("H3C-RAID-MIB", "h3cRaidName"))
if mibBuilder.loadTexts: h3cRaidRebuildPauseNotification.setStatus('current')
if mibBuilder.loadTexts: h3cRaidRebuildPauseNotification.setDescription('The h3cRaidRebuildPauseNotification indicates that pause a array rebuilding.')
h3cRaidRebuildInterruptNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 19)).setObjects(("H3C-RAID-MIB", "h3cRaidUuid"), ("H3C-RAID-MIB", "h3cRaidName"))
if mibBuilder.loadTexts: h3cRaidRebuildInterruptNotification.setStatus('current')
if mibBuilder.loadTexts: h3cRaidRebuildInterruptNotification.setDescription('The h3cRaidRebuildInterruptNotification indicates that interrupt a array rebuilding.')
h3cSoftwareModuleFailNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 20)).setObjects(("H3C-STORAGE-MIB", "h3cSoftwareInfoString"))
if mibBuilder.loadTexts: h3cSoftwareModuleFailNotification.setStatus('current')
if mibBuilder.loadTexts: h3cSoftwareModuleFailNotification.setDescription('The h3cSoftwareModuleFailNotification indicates that software module(s) fail.')
h3cRaidBatteryExpiredNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 21))
if mibBuilder.loadTexts: h3cRaidBatteryExpiredNotification.setStatus('current')
if mibBuilder.loadTexts: h3cRaidBatteryExpiredNotification.setDescription('The h3cRaidBatteryExpiredNotification indicates RAID Battery lifetime had expired.')
h3cRaidBatteryWillExpireNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 22))
if mibBuilder.loadTexts: h3cRaidBatteryWillExpireNotification.setStatus('current')
if mibBuilder.loadTexts: h3cRaidBatteryWillExpireNotification.setDescription('The h3cRaidBatteryWillExpireNotification indicates RAID Battery lifetime will expire.')
h3cLvOnlineFailNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 23)).setObjects(("H3C-RAID-MIB", "h3cRaidUuid"), ("H3C-RAID-MIB", "h3cRaidName"))
if mibBuilder.loadTexts: h3cLvOnlineFailNotification.setStatus('current')
if mibBuilder.loadTexts: h3cLvOnlineFailNotification.setDescription('The h3cLvOnlineFailNotification indicates lv(s) on vd fail to online.')
h3cLvOfflineFailNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 24)).setObjects(("H3C-RAID-MIB", "h3cRaidUuid"), ("H3C-RAID-MIB", "h3cRaidName"))
if mibBuilder.loadTexts: h3cLvOfflineFailNotification.setStatus('current')
if mibBuilder.loadTexts: h3cLvOfflineFailNotification.setDescription('The h3cLvOfflineFailNotification indicates lv(s) on vd fail to offline.')
h3cRaidRunNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 25)).setObjects(("H3C-RAID-MIB", "h3cRaidUuid"), ("H3C-RAID-MIB", "h3cRaidName"))
if mibBuilder.loadTexts: h3cRaidRunNotification.setStatus('current')
if mibBuilder.loadTexts: h3cRaidRunNotification.setDescription('The h3cRaidRunNotification indicates that a array is running.')
h3cExtVoltageNormal = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 26)).setObjects(("H3C-STORAGE-MIB", "h3cExtVoltagePhysicalIndex"), ("H3C-STORAGE-MIB", "h3cExtVoltagePhysicalName"), ("H3C-STORAGE-MIB", "h3cExtVoltage"), ("H3C-STORAGE-MIB", "h3cExtVoltageLowThreshold"), ("H3C-STORAGE-MIB", "h3cExtVoltageHighThreshold"))
if mibBuilder.loadTexts: h3cExtVoltageNormal.setStatus('current')
if mibBuilder.loadTexts: h3cExtVoltageNormal.setDescription('The h3cStorVoltageRecoveryNotification indicates the Voltage has recovered in reference.')
h3cDiskPowerOnNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 27)).setObjects(("ENTITY-MIB", "entPhysicalDescr"))
if mibBuilder.loadTexts: h3cDiskPowerOnNotification.setStatus('current')
if mibBuilder.loadTexts: h3cDiskPowerOnNotification.setDescription('The h3cDiskPowerOnNotification indicates that a disk is power on.')
h3cDiskPowerOffNotification = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 10, 1, 1, 3, 0, 28)).setObjects(("ENTITY-MIB", "entPhysicalDescr"), ("H3C-DISK-MIB", "h3cDiskPowerOffReason"))
if mibBuilder.loadTexts: h3cDiskPowerOffNotification.setStatus('current')
if mibBuilder.loadTexts: h3cDiskPowerOffNotification.setDescription('The h3cDiskPowerOffNotification indicates that a disk is power off.')
# --- Export every symbol so other compiled MIB modules can import them by
# --- name via mibBuilder; PYSNMP_MODULE_ID marks the module identity.
mibBuilder.exportSymbols("H3C-STORAGE-MIB", h3cFcTargetEnable=h3cFcTargetEnable, h3cFcAdapterInitiatorWwpnName=h3cFcAdapterInitiatorWwpnName, h3cExtVoltageLowThresholdNotification=h3cExtVoltageLowThresholdNotification, h3cStorageInterfaceMTU=h3cStorageInterfaceMTU, h3cBondingTable=h3cBondingTable, h3cStoragePhysicalInfo=h3cStoragePhysicalInfo, h3cExtCriticalVoltageHighThresholdNotification=h3cExtCriticalVoltageHighThresholdNotification, h3cRaidDeleteNotification=h3cRaidDeleteNotification, h3cDeuTable=h3cDeuTable, h3cAdaptiveRepCapability=h3cAdaptiveRepCapability, h3cBondingPortList=h3cBondingPortList, h3cExtVoltageTable=h3cExtVoltageTable, h3cDeuDiskScan=h3cDeuDiskScan, h3cExtVoltageNormal=h3cExtVoltageNormal, h3cDeuIndex=h3cDeuIndex, h3cExtVoltageLowThreshold=h3cExtVoltageLowThreshold, h3cStorageInterfaceTable=h3cStorageInterfaceTable, h3cScsiAdapterEntry=h3cScsiAdapterEntry, h3cRemoteRepCapability=h3cRemoteRepCapability, h3cAdapterNumber=h3cAdapterNumber, h3cExtShutdownVoltageHighThreshold=h3cExtShutdownVoltageHighThreshold, h3cExtVoltageTooLow=h3cExtVoltageTooLow, h3cStorageServerInfo=h3cStorageServerInfo, h3cStorageTrapsObjects=h3cStorageTrapsObjects, h3cFcAdapterPortState=h3cFcAdapterPortState, h3cLvOnlineFailNotification=h3cLvOnlineFailNotification, h3cRaidHideStateNotification=h3cRaidHideStateNotification, h3cTimeMarkCapability=h3cTimeMarkCapability, h3cSafeCacheCapability=h3cSafeCacheCapability, h3cAdapterDesc=h3cAdapterDesc, h3cDeuEntry=h3cDeuEntry, h3cServerPowerState=h3cServerPowerState, h3cStorageInterfaceGateway=h3cStorageInterfaceGateway, h3cExtCriticalVoltageHighThreshold=h3cExtCriticalVoltageHighThreshold, h3cRaidRebuildFinishNotification=h3cRaidRebuildFinishNotification, h3cRaidRebuildInterruptNotification=h3cRaidRebuildInterruptNotification, h3cStorTemperatureTooLow=h3cStorTemperatureTooLow, h3cBondingIndex=h3cBondingIndex, h3cRaidRunStateNotification=h3cRaidRunStateNotification, h3cExtVoltagePhysicalName=h3cExtVoltagePhysicalName, 
h3cDiskPowerOnNotification=h3cDiskPowerOnNotification, h3cServerLocationLedState=h3cServerLocationLedState, h3cExtVoltagePhysicalIndex=h3cExtVoltagePhysicalIndex, h3cSoftwareInfoString=h3cSoftwareInfoString, h3cExtVoltageHighThresholdNotification=h3cExtVoltageHighThresholdNotification, h3cDiskPowerOffNotification=h3cDiskPowerOffNotification, h3cStorageTrapsPrefix=h3cStorageTrapsPrefix, h3cAdapterType=h3cAdapterType, h3cDeuIDLed=h3cDeuIDLed, PYSNMP_MODULE_ID=h3cStorageMIB, h3cFcAdapterMode=h3cFcAdapterMode, h3cStorageInterfaceGatewayType=h3cStorageInterfaceGatewayType, h3cExtVoltageTooHigh=h3cExtVoltageTooHigh, h3cStorageMIB=h3cStorageMIB, h3cExtBatteryStateNotification=h3cExtBatteryStateNotification, h3cRaidCreateNotification=h3cRaidCreateNotification, h3cFcAdapterTargetWwpnName=h3cFcAdapterTargetWwpnName, h3cExtVoltageHighThreshold=h3cExtVoltageHighThreshold, h3cBondingEntry=h3cBondingEntry, h3cFcCapability=h3cFcCapability, h3cFcAdapterModeSwitch=h3cFcAdapterModeSwitch, h3cExtVoltage=h3cExtVoltage, h3cRaidImportNotification=h3cRaidImportNotification, h3cRaidRebuildStartNotification=h3cRaidRebuildStartNotification, h3cStorCriticalLowerTemperatureThresholdNotification=h3cStorCriticalLowerTemperatureThresholdNotification, h3cServerResetButtonState=h3cServerResetButtonState, h3cAsyncMirrorCapability=h3cAsyncMirrorCapability, h3cStorageTargetConfig=h3cStorageTargetConfig, h3cLvOfflineFailNotification=h3cLvOfflineFailNotification, h3ciSCSITargetEnable=h3ciSCSITargetEnable, h3cExtCriticalVoltageLowThreshold=h3cExtCriticalVoltageLowThreshold, h3cStorageTraps=h3cStorageTraps, h3cRaidBatteryExpiredNotification=h3cRaidBatteryExpiredNotification, h3cExtShutdownVoltageLowThreshold=h3cExtShutdownVoltageLowThreshold, h3cStorageServerCapability=h3cStorageServerCapability, h3cRaidRebuildPauseNotification=h3cRaidRebuildPauseNotification, h3cStorageServerPhysInfo=h3cStorageServerPhysInfo, h3cExtVoltageEntry=h3cExtVoltageEntry, h3cRaidCapability=h3cRaidCapability, 
h3cServerPowerButtonState=h3cServerPowerButtonState, h3cSyncMirrorCapability=h3cSyncMirrorCapability, h3cStorageInterfaceEntry=h3cStorageInterfaceEntry, h3cDiskIOErrorNotification=h3cDiskIOErrorNotification, h3cNasCapability=h3cNasCapability, h3cScsiAdapterTable=h3cScsiAdapterTable, h3cExtCriticalVoltageLowThresholdNotification=h3cExtCriticalVoltageLowThresholdNotification, h3cSoftwareModuleFailNotification=h3cSoftwareModuleFailNotification, h3cStorageMibObjects=h3cStorageMibObjects, h3cStorageInterfaceIndex=h3cStorageInterfaceIndex, h3cRaidBatteryWillExpireNotification=h3cRaidBatteryWillExpireNotification, h3cRaidRunNotification=h3cRaidRunNotification, h3cSseCapability=h3cSseCapability)
| 149.275986 | 4,660 | 0.799054 |
113746225da4f1ba8c46dd591c7557e380d51a1d | 11,153 | py | Python | models/networks.py | BynaryCobweb/joliGAN | a712b540b61f09691bb99406a49646dc8746cb7f | [
"MIT"
] | null | null | null | models/networks.py | BynaryCobweb/joliGAN | a712b540b61f09691bb99406a49646dc8746cb7f | [
"MIT"
] | null | null | null | models/networks.py | BynaryCobweb/joliGAN | a712b540b61f09691bb99406a49646dc8746cb7f | [
"MIT"
] | null | null | null | import torch.nn as nn
import functools
from torch.optim import lr_scheduler
import math
import torchvision.models as models
from .modules.utils import spectral_norm,init_net,init_weights,get_norm_layer
from .modules.resnet_architecture.resnet_generator import ResnetGenerator
from .modules.resnet_architecture.mobile_resnet_generator import MobileResnetGenerator,MobileResnetGenerator_attn
from .modules.unet_architecture.unet_generator import UnetGenerator
from .modules.resnet_architecture.resnet_generator import ResnetGenerator_attn
from .modules.discriminators import NLayerDiscriminator
from .modules.discriminators import PixelDiscriminator
from .modules.classifiers import Classifier, VGG16_FCN8s, torch_model,model_classes
from .modules.UNet_classification import UNet
from .modules.classifiers import Classifier_w
from .modules.fid.pytorch_fid.inception import InceptionV3
from .modules.stylegan_networks import StyleGAN2Discriminator, StyleGAN2Generator, TileStyleGAN2Discriminator
from .modules.cut_networks import PatchSampleF
class BaseNetwork(nn.Module):
    """Common base class for the networks defined in this module.

    Exists mainly to give every network a uniform hook for registering
    model-specific command-line options.
    """

    def __init__(self):
        # No state of its own; just set up the nn.Module machinery.
        super().__init__()

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Return *parser* unchanged; subclasses may add options to it.

        Parameters:
            parser   -- the option parser to (optionally) extend
            is_train -- whether options are being built for training
        """
        return parser
class Identity(nn.Module):
    """No-op module that returns its input unchanged.

    Handy as a placeholder where a layer (e.g. a normalization layer)
    is optional.
    """

    def forward(self, x):
        """Return *x* as-is."""
        return x
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, use_spectral=False, init_type='normal', init_gain=0.02, gpu_ids=[], decoder=True, wplus=True, wskip=False, init_weight=True, img_size=128, img_size_dec=128,nb_attn = 10,nb_mask_input=1,padding_type='reflect',opt=None):
    """Create a generator

    Parameters:
        input_nc (int) -- the number of channels in input images
        output_nc (int) -- the number of channels in output images
        ngf (int) -- the number of filters in the last conv layer
        netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
                      (also accepted here: resnet_12blocks | resnet_3blocks |
                      mobile_resnet_9blocks | mobile_resnet_3blocks | resnet_attn |
                      mobile_resnet_attn | stylegan2 | smallstylegan2)
        norm (str) -- the name of normalization layers used in the network: batch | instance | none
        use_dropout (bool) -- if use dropout layers.
        use_spectral (bool) -- if use spectral norm.
        init_type (str) -- the name of our initialization method.
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
        decoder, wplus, wskip -- extra flags; only wplus is consumed here
                      (forwarded to the mobile resnet generators); decoder and
                      wskip are unused in this function -- presumably kept for
                      option-parsing compatibility, TODO confirm.
        init_weight (bool) -- whether init_net should (re)initialize weights;
                      always disabled for stylegan2 variants (see return).
        img_size, img_size_dec (int) -- image sizes forwarded to mobile resnets.
        nb_attn, nb_mask_input (int) -- attention-generator parameters.
        padding_type (str) -- convolution padding mode for resnet variants.
        opt -- full option namespace, forwarded to stylegan2 generators.

    Returns a generator

    Our current implementation provides two types of generators:
        U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
        The original U-Net paper: https://arxiv.org/abs/1505.04597

        Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
        Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
        We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).

    The generator has been initialized by <init_net>. It uses RELU for non-linearity.
    """
    net = None
    norm_layer = get_norm_layer(norm_type=norm)
    # Dispatch on the architecture name; each branch builds an uninitialized
    # network, and init_net (at the bottom) moves it to the GPUs and applies
    # weight initialization.
    if netG == 'resnet_9blocks':
        net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, use_spectral=use_spectral, n_blocks=9, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, padding_type=padding_type)
    elif netG == 'resnet_6blocks':
        net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, use_spectral=use_spectral, n_blocks=6, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, padding_type=padding_type)
    elif netG == 'resnet_12blocks':
        net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, use_spectral=use_spectral, n_blocks=12, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, padding_type=padding_type)
    elif netG == 'resnet_3blocks':
        net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, use_spectral=use_spectral, n_blocks=3, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, padding_type=padding_type)
    elif netG == 'mobile_resnet_9blocks':
        net = MobileResnetGenerator(input_nc, output_nc, ngf=ngf, norm_layer=norm_layer,
                                    dropout_rate=0.0, n_blocks=9, wplus=wplus,
                                    init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids,
                                    img_size=img_size, img_size_dec=img_size_dec)
    elif netG == 'mobile_resnet_3blocks':
        net = MobileResnetGenerator(input_nc, output_nc, ngf=ngf, norm_layer=norm_layer,
                                    dropout_rate=0.0, n_blocks=3, wplus=wplus,
                                    init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids,
                                    img_size=img_size, img_size_dec=img_size_dec)
    elif netG == 'unet_128':
        # NOTE(review): unet branches ignore use_spectral/padding_type -- confirm intended.
        net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    elif netG == 'unet_256':
        net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    elif netG == 'resnet_attn':
        net = ResnetGenerator_attn(input_nc, output_nc, ngf, n_blocks=9, use_spectral=use_spectral,nb_attn = nb_attn,nb_mask_input=nb_mask_input,padding_type=padding_type)
    elif netG == 'mobile_resnet_attn':
        net = MobileResnetGenerator_attn(input_nc, output_nc, ngf, n_blocks=9, use_spectral=use_spectral,nb_attn = nb_attn,nb_mask_input=nb_mask_input,padding_type=padding_type)
    elif netG == 'stylegan2':
        net = StyleGAN2Generator(input_nc, output_nc,ngf, use_dropout=use_dropout, opt=opt)
    elif netG == 'smallstylegan2':
        net = StyleGAN2Generator(input_nc, output_nc,ngf, use_dropout=use_dropout, n_blocks=2, opt=opt)
    else:
        raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
    # stylegan2 variants manage their own weight init, so init_weight is
    # forced off for them.
    return init_net(net, init_type, init_gain, gpu_ids,init_weight=init_weight and ('stylegan2' not in netG))
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', use_dropout=False, use_spectral=False, init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[],opt=None):
    """Create a discriminator

    Parameters:
        input_nc (int)     -- the number of channels in input images
        ndf (int)          -- the number of filters in the first conv layer
        netD (str)         -- the architecture's name: basic | n_layers | pixel
                              (also accepted: any name containing 'stylegan2',
                              or a torchvision model name listed in model_classes)
        n_layers_D (int)   -- the number of conv layers in the discriminator; effective when netD=='n_layers'
        norm (str)         -- the type of normalization layers used in the network.
        use_dropout (bool) -- whether to use dropout layers
        use_spectral(bool) -- whether to use spectral norm
        init_type (str)    -- the name of the initialization method.
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        no_antialias (bool)-- forwarded to the stylegan2 discriminator.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
        opt                -- full option namespace; opt.crop_size is read in
                              the torchvision branch, so opt must not be None there.

    Returns a discriminator

    Our current implementation provides three types of discriminators:
        [basic]: 'PatchGAN' classifier described in the original pix2pix paper.
        It can classify whether 70x70 overlapping patches are real or fake.
        Such a patch-level discriminator architecture has fewer parameters
        than a full-image discriminator and can work on arbitrarily-sized images
        in a fully convolutional fashion.

        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
        with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)

        [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
        It encourages greater color diversity but has no effect on spatial statistics.

    The discriminator has been initialized by <init_net>. It uses Leakly RELU for non-linearity.
    """
    net = None
    norm_layer = get_norm_layer(norm_type=norm)
    if netD == 'basic':  # default PatchGAN classifier
        net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_dropout=use_dropout, use_spectral=use_spectral)
    elif netD == 'n_layers':  # more options
        net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_dropout=use_dropout, use_spectral=use_spectral)
    elif netD == 'pixel':  # classify if each pixel is real or fake
        net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
    elif 'stylegan2' in netD:  # global D from sty2 repo
        net = StyleGAN2Discriminator(input_nc, ndf, n_layers_D, no_antialias=no_antialias, opt=opt)
    elif netD in model_classes :  # load torchvision model
        # Binary real/fake classification, so a single output class.
        nclasses=1
        template=netD
        net = torch_model(input_nc, ndf, nclasses,opt.crop_size, template, pretrained=False)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
    # stylegan2 discriminators manage their own weight init, so skip it.
    return init_net(net, init_type, init_gain, gpu_ids,init_weight= 'stylegan2' not in netD)
def define_C(input_nc, ndf, img_size, init_type='normal', init_gain=0.02, gpu_ids=None, nclasses=10, template='basic', pretrained=False):
    """Create and initialize a classifier network.

    Parameters:
        input_nc (int)    -- number of channels of the input images
        ndf (int)         -- number of filters in the first layer
        img_size (int)    -- size of the input images
        init_type (str)   -- name of the weight initialization method
        init_gain (float) -- scaling factor for normal, xavier and orthogonal
        gpu_ids (list)    -- which GPUs the network runs on (default: none)
        nclasses (int)    -- number of output classes
        template (str)    -- 'basic' for the built-in Classifier, otherwise the
                             name of a torchvision model loaded via torch_model
        pretrained (bool) -- load pretrained weights (only used by torch_model)

    Returns the classifier initialized by <init_net>.
    """
    # Bug fix: the previous default `gpu_ids=[]` is a mutable default
    # argument, shared across all calls of this function.
    if gpu_ids is None:
        gpu_ids = []
    if template == 'basic':
        netC = Classifier(input_nc, ndf, nclasses, img_size)
    else:
        netC = torch_model(input_nc, ndf, nclasses, img_size, template, pretrained)
    return init_net(netC, init_type, init_gain, gpu_ids)
def define_f(input_nc, nclasses, init_type='normal', init_gain=0.02, gpu_ids=None, fs_light=False):
    """Create and initialize a segmentation network f.

    Parameters:
        input_nc (int)    -- number of channels of the input images
                             (only used by the UNet backbone)
        nclasses (int)    -- number of output classes
        init_type (str)   -- name of the weight initialization method
        init_gain (float) -- scaling factor for normal, xavier and orthogonal
        gpu_ids (list)    -- which GPUs the network runs on (default: none)
        fs_light (bool)   -- if True, use the lighter UNet backbone instead of
                             the VGG16-based FCN-8s

    Returns the network initialized by <init_net>.
    """
    # Bug fix: avoid the shared mutable default argument `gpu_ids=[]`.
    if gpu_ids is None:
        gpu_ids = []
    if not fs_light:
        net = VGG16_FCN8s(nclasses, pretrained=False, weights_init=None,
                          output_last_ft=False)
    else:
        net = UNet(classes=nclasses, input_nc=input_nc)
    return init_net(net, init_type, init_gain, gpu_ids)
def define_classifier_w(pretrained=False, weights_init='', init_type='normal', init_gain=0.02, gpu_ids=None, init_weight=True, img_size_dec=256):
    """Create and initialize a Classifier_w network.

    Parameters:
        pretrained (bool)  -- accepted for signature compatibility; not used
                              by this function
        weights_init (str) -- accepted for signature compatibility; not used
                              by this function
        init_type (str)    -- name of the weight initialization method
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal
        gpu_ids (list)     -- which GPUs the network runs on (default: none)
        init_weight (bool) -- whether <init_net> should initialize the weights
        img_size_dec (int) -- decoder image size passed to Classifier_w

    Returns the network initialized by <init_net>.
    """
    # Bug fix: avoid the shared mutable default argument `gpu_ids=[]`.
    if gpu_ids is None:
        gpu_ids = []
    net = Classifier_w(img_size_dec=img_size_dec)
    return init_net(net, init_type, init_gain, gpu_ids, init_weight=init_weight)
def define_inception(device,dims):
    """Build an InceptionV3 network on *device*.

    The output block is the one mapped to by
    ``InceptionV3.BLOCK_INDEX_BY_DIM[dims]``, i.e. it is selected by the
    desired feature dimensionality *dims*.
    """
    return InceptionV3([InceptionV3.BLOCK_INDEX_BY_DIM[dims]]).to(device)
def define_F(input_nc, netF, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=None, opt=None):
    """Create and initialize a feature projection network.

    Parameters:
        netF (str)        -- one of 'global_pool' | 'reshape' | 'sample' |
                             'mlp_sample' | 'strided_conv'
        init_type (str)   -- name of the weight initialization method
        init_gain (float) -- scaling factor for normal, xavier and orthogonal
        gpu_ids (list)    -- which GPUs the network runs on (default: none)
        opt               -- options object; opt.netF_nc supplies the channel
                             count for the PatchSampleF variants
        input_nc, norm, use_dropout, no_antialias -- accepted for signature
                             compatibility; not used by this function

    Returns the network initialized by <init_net>.

    Raises:
        NotImplementedError -- if <netF> is not one of the names above.
    """
    # Bug fix: avoid the shared mutable default argument `gpu_ids=[]`.
    if gpu_ids is None:
        gpu_ids = []
    if netF == 'global_pool':
        net = PoolingF()
    elif netF == 'reshape':
        net = ReshapeF()
    elif netF == 'sample':
        net = PatchSampleF(use_mlp=False, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
    elif netF == 'mlp_sample':
        # Same sampler as 'sample', but with an MLP head enabled.
        net = PatchSampleF(use_mlp=True, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
    elif netF == 'strided_conv':
        net = StridedConvF(init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('projection model name [%s] is not recognized' % netF)
    return init_net(net, init_type, init_gain, gpu_ids)
| 59.010582 | 296 | 0.722586 |
15cb51d43aaa5d81ba623bdf391eadda23ca03a2 | 989 | py | Python | Spider/ButterBetter/searchText.py | GWU-CS2021/CS6221-ButterBetter | 4a5ef3dede059c245eee2c6e6fba2336ea74dc85 | [
"MIT"
] | null | null | null | Spider/ButterBetter/searchText.py | GWU-CS2021/CS6221-ButterBetter | 4a5ef3dede059c245eee2c6e6fba2336ea74dc85 | [
"MIT"
] | null | null | null | Spider/ButterBetter/searchText.py | GWU-CS2021/CS6221-ButterBetter | 4a5ef3dede059c245eee2c6e6fba2336ea74dc85 | [
"MIT"
] | null | null | null | import requests
import tools
import json
def search_by_text_to_json(API_KEY, text, out_file, r = None):
    """Query the Google Places Text Search API and save the raw JSON reply.

    Args:
        API_KEY: Google Maps Platform API key.
        text: free-form search text; spaces are percent-encoded as %20.
        out_file: path the raw JSON response text is written to (UTF-8).
        r: optional search radius; the '&radius=' parameter is only added
            when r is truthy (None and 0 are both omitted).

    Returns:
        The raw JSON response text (previously this function returned None;
        returning the text makes it consistent with search_by_token_to_json
        and matches the commented-out return in the original code).
    """
    head = 'https://maps.googleapis.com/maps/api/place/textsearch/json?query='
    text = text.replace(' ', '%20')
    radius = '&radius=' + str(r) if r else ''
    url = head + text + radius + '&key=' + API_KEY
    response = requests.request("GET", url).text
    # Bug fix: use a context manager so the file is closed even if the
    # write raises (the old open/write/close leaked the handle on error).
    with open(out_file, 'w', encoding='utf-8') as f:
        f.write(response)
    return response
def search_by_token_to_json(API_KEY, token, out_file):
    """Fetch the next results page of a Places Text Search and save it.

    Args:
        API_KEY: Google Maps Platform API key.
        token: 'next_page_token' value from a previous Text Search response.
        out_file: path the raw JSON response text is written to (UTF-8).

    Returns:
        The raw JSON response text.
    """
    head = 'https://maps.googleapis.com/maps/api/place/textsearch/json?pagetoken='
    url = head + token + '&key=' + API_KEY
    response = requests.request("GET", url).text
    # Bug fix: use a context manager so the file is closed even if the
    # write raises (the old open/write/close leaked the handle on error).
    with open(out_file, 'w', encoding='utf-8') as f:
        f.write(response)
    return response
| 26.026316 | 82 | 0.629929 |
17de18897e413317a15d427ab745a0e2bbe4cdcc | 5,978 | py | Python | Robocop.py | leo60228/everestbot | 53bbf6a5b42a71889ed3de02cc65f22fedddf313 | [
"MIT"
] | 1 | 2020-08-30T22:16:13.000Z | 2020-08-30T22:16:13.000Z | Robocop.py | leo60228/everestbot | 53bbf6a5b42a71889ed3de02cc65f22fedddf313 | [
"MIT"
] | null | null | null | Robocop.py | leo60228/everestbot | 53bbf6a5b42a71889ed3de02cc65f22fedddf313 | [
"MIT"
] | null | null | null | import os
import asyncio
import sys
import logging
import logging.handlers
import traceback
import aiohttp
import config
import discord
from discord.ext import commands
script_name = os.path.basename(__file__).split('.')[0]
log_file_name = f"{script_name}.log"
# Limit of discord (non-nitro) is 8MB (not MiB)
max_file_size = 1000 * 1000 * 8
backup_count = 3
file_handler = logging.handlers.RotatingFileHandler(
filename=log_file_name, maxBytes=max_file_size, backupCount=backup_count)
stdout_handler = logging.StreamHandler(sys.stdout)
log_format = logging.Formatter(
'[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s')
file_handler.setFormatter(log_format)
stdout_handler.setFormatter(log_format)
log = logging.getLogger('discord')
log.setLevel(logging.INFO)
log.addHandler(file_handler)
log.addHandler(stdout_handler)
def get_prefix(bot, message):
    """Resolve the accepted command prefixes: the configured prefixes,
    plus a direct mention of the bot."""
    matcher = commands.when_mentioned_or(*config.prefixes)
    return matcher(bot, message)
# JSON state files the bot expects to exist (created empty at startup if
# missing; see the bottom of this file).
wanted_jsons = ["data/restrictions.json",
                "data/robocronptab.json",
                "data/userlog.json"]
# Cog extensions loaded under the __main__ guard below.
initial_extensions = ['cogs.common',
                      'cogs.basic',
                      'cogs.meme',
                      'cogs.everestpins']
bot = commands.Bot(command_prefix=get_prefix,
                   description=config.bot_description, pm_help=True)
# Stash shared state on the bot object so cogs can reach it via self.bot.
bot.log = log
bot.loop = asyncio.get_event_loop()
bot.config = config
bot.script_name = script_name
bot.wanted_jsons = wanted_jsons
if __name__ == '__main__':
    # Load every cog; a failing extension is logged but does not stop the
    # remaining extensions from loading.
    for extension in initial_extensions:
        try:
            bot.load_extension(extension)
        except Exception as e:
            log.error(f'Failed to load extension {extension}.')
            # Bug fix: traceback.print_exc() prints to stderr and returns
            # None, so log.error() used to record the literal string "None".
            # format_exc() returns the traceback text so it lands in the log.
            log.error(traceback.format_exc())
@bot.event
async def on_ready():
    """Set up shared state once the gateway connection is established.

    Creates the shared aiohttp session, caches the application info,
    logs the login details and advertises the help command as presence.
    """
    # Bug fix: the User-Agent previously ended with a stray apostrophe
    # (f"{script_name}/1.0'"); send a well-formed product token instead.
    aioh = {"User-Agent": f"{script_name}/1.0"}
    bot.aiosession = aiohttp.ClientSession(headers=aioh)
    bot.app_info = await bot.application_info()
    log.info(f'\nLogged in as: {bot.user.name} - '
             f'{bot.user.id}\ndpy version: {discord.__version__}\n')
    game_name = f"{config.prefixes[0]}help"
    activity = discord.Activity(name=game_name, type=discord.ActivityType.listening)
    await bot.change_presence(activity=activity)
@bot.event
async def on_command(ctx):
    """Log every invoked command with its author and where it came from."""
    msg = ctx.message
    if ctx.guild:
        where = (f"on \"{ctx.channel.name}\" ({ctx.channel.id}) "
                 f"at \"{ctx.guild.name}\" ({ctx.guild.id})")
    else:
        where = f"on DMs ({ctx.channel.id})"
    log.info(f"{msg.author} ({msg.author.id}): \"{msg.content}\" " + where)
@bot.event
async def on_error(event_method, *args, **kwargs):
    """Log the active exception raised inside any event handler."""
    log.error("Error on {}: {}".format(event_method, sys.exc_info()))
@bot.event
async def on_command_error(ctx, error):
    """Turn command errors into user-facing replies.

    Logs every error, then answers with a specific message per error
    class. Falls through to a usage hint for argument errors. Silently
    ignores unknown commands.
    """
    error_text = str(error)
    log.error(f"Error with \"{ctx.message.content}\" from "
              f"\"{ctx.message.author} ({ctx.message.author.id}) "
              f"of type {type(error)}: {error_text}")
    if isinstance(error, commands.NoPrivateMessage):
        return await ctx.send("This command doesn't work on DMs.")
    elif isinstance(error, commands.MissingPermissions):
        roles_needed = '\n- '.join(error.missing_perms)
        return await ctx.send(f"{ctx.author.mention}: You don't have the right"
                              " permissions to run this command. You need: "
                              f"```- {roles_needed}```")
    elif isinstance(error, commands.BotMissingPermissions):
        roles_needed = '\n-'.join(error.missing_perms)
        return await ctx.send(f"{ctx.author.mention}: Bot doesn't have "
                              "the right permissions to run this command. "
                              "Please add the following roles: "
                              f"```- {roles_needed}```")
    elif isinstance(error, commands.CommandOnCooldown):
        return await ctx.send(f"{ctx.author.mention}: You're being "
                              "ratelimited. Try in "
                              f"{error.retry_after:.1f} seconds.")
    # NOTE(review): this branch presumably must come after the more
    # specific permission checks above — confirm against discord.py's
    # exception hierarchy.
    elif isinstance(error, commands.CheckFailure):
        return await ctx.send(f"{ctx.author.mention}: Check failed. "
                              "You might not have the right permissions "
                              "to run this command.")
    # Matching on the error text: raised when the bot cannot DM the user
    # (blocked, or DMs disabled).
    elif isinstance(error, commands.CommandInvokeError) and\
            ("Cannot send messages to this user" in error_text):
        return await ctx.send(f"{ctx.author.mention}: I can't DM you.\n"
                              "You might have me blocked or have DMs "
                              f"blocked globally or for {ctx.guild.name}.\n"
                              "Please resolve that, then "
                              "run the command again.")
    elif isinstance(error, commands.CommandNotFound):
        # Nothing to do when command is not found.
        return
    # Remaining cases are argument problems: reply with a usage hint.
    help_text = f"Usage of this command is: ```{ctx.prefix}"\
                f"{ctx.command.signature}```\nPlease see `{ctx.prefix}help "\
                f"{ctx.command.name}` for more info about this command."
    if isinstance(error, commands.BadArgument):
        return await ctx.send(f"{ctx.author.mention}: You gave incorrect "
                              f"arguments. {help_text}")
    elif isinstance(error, commands.MissingRequiredArgument):
        return await ctx.send(f"{ctx.author.mention}: You gave incomplete "
                              f"arguments. {help_text}")
@bot.event
async def on_message(message):
    """Ignore messages authored by bots; dispatch everything else."""
    if not message.author.bot:
        await bot.invoke(await bot.get_context(message))
# Make sure the data directory and every expected state file exist before
# the bot starts; missing files are seeded with an empty JSON object.
if not os.path.exists("data"):
    os.makedirs("data")
for wanted_json in wanted_jsons:
    if not os.path.exists(wanted_json):
        with open(wanted_json, "w") as f:
            f.write("{}")
# Blocking call: runs the bot until it is stopped.
bot.run(config.token, bot=True, reconnect=True, loop=bot.loop)
acee2165f4e9de57208a3aef2ca7e8fc1b042b5b | 51,276 | py | Python | scripts/runtests.py | msakai/chainer-compiler | 77190561408911b33904a20c47f734f38790cfdf | [
"MIT"
] | null | null | null | scripts/runtests.py | msakai/chainer-compiler | 77190561408911b33904a20c47f734f38790cfdf | [
"MIT"
] | null | null | null | scripts/runtests.py | msakai/chainer-compiler | 77190561408911b33904a20c47f734f38790cfdf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import copy
import glob
import multiprocessing
import os
import re
import sys
import subprocess
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(project_root)
import ch2o_tests
import elichika_tests
import gen_backprop_tests_oc
import gen_backprop_tests_pc
import gen_chainercv_model_tests
import gen_extra_test
import gen_large_tests_oc
import onnx_chainer_tests
import onnx_real_tests
from test_case import TestCase
# Command-line interface of the test runner.
parser = argparse.ArgumentParser(description='Run tests for chainer_compiler')
parser.add_argument('test_filter', default=None, nargs='?',
                    help='A regular expression to filter tests')
parser.add_argument('--all', '-a', action='store_true',
                    help='Run all tests')
parser.add_argument('--build_dir', '-b', default=None,
                    help='The build directory')
parser.add_argument('--jobs', '-j', type=int,
                    default=multiprocessing.cpu_count(),
                    help='Number of parallel jobs')
parser.add_argument('--show_log', action='store_true',
                    help='Show logs')
parser.add_argument('--skip_build', action='store_true',
                    help='Skip the build before running tests')
parser.add_argument('--use_gpu', '-g', action='store_true',
                    help='Run heavy tests with GPU')
parser.add_argument('--device', '-d', default=None,
                    help='ChainerX device to be used')
parser.add_argument('--use_gpu_all', '-G', action='store_true',
                    help='Run all tests with GPU')
parser.add_argument('--failed', action='store_true',
                    help='Run tests which failed last time')
parser.add_argument('--failure_log', default='out/failed_tests.log',
                    help='The file where names of failed tests are stored')
parser.add_argument('--fuse', action='store_true', help='Enable fusion')
parser.add_argument('--ngraph', action='store_true', help='Enable nGraph')
parser.add_argument('--snpe', action='store_true', help='Enable SNPE')
parser.add_argument('--computation_order', default=None,
                    help='Force setting --computation_order flag')
parser.add_argument('--cache', action='store_true', help='Enable model caching')
parser.add_argument('--verbose', action='store_true',
                    help='Run tests with --verbose flag')
args = parser.parse_args()
# ANSI escape codes used for colored pass/fail output.
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
RESET = '\033[0m'
# Locations of the ONNX backend test data bundled as a submodule.
ONNX_TEST_DATA = 'third_party/onnx/onnx/backend/test/data'
NODE_TEST = os.path.join(ONNX_TEST_DATA, 'node')
SIMPLE_TEST = os.path.join(ONNX_TEST_DATA, 'simple')
# ChainerX does not support 1D conv/pool.
fail_1d_conv_pool = args.use_gpu_all
TEST_CASES = [
TestCase(NODE_TEST, 'test_identity'),
TestCase(NODE_TEST, 'test_add'),
TestCase(NODE_TEST, 'test_add_bcast'),
TestCase(NODE_TEST, 'test_sub'),
TestCase(NODE_TEST, 'test_sub_bcast'),
TestCase(NODE_TEST, 'test_sub_example'),
TestCase(NODE_TEST, 'test_mul'),
TestCase(NODE_TEST, 'test_mul_bcast'),
TestCase(NODE_TEST, 'test_mul_example'),
TestCase(NODE_TEST, 'test_div'),
TestCase(NODE_TEST, 'test_div_bcast'),
TestCase(NODE_TEST, 'test_div_example'),
TestCase(NODE_TEST, 'test_mod_broadcast'),
# TODO(hamaji): Support fmod.
# TestCase(NODE_TEST, 'test_mod_int64_fmod'),
# TestCase(NODE_TEST, 'test_mod_mixed_sign_float16'),
# TestCase(NODE_TEST, 'test_mod_mixed_sign_float32'),
# TestCase(NODE_TEST, 'test_mod_mixed_sign_float64'),
TestCase(NODE_TEST, 'test_mod_mixed_sign_int16'),
TestCase(NODE_TEST, 'test_mod_mixed_sign_int32'),
TestCase(NODE_TEST, 'test_mod_mixed_sign_int64'),
TestCase(NODE_TEST, 'test_mod_mixed_sign_int8'),
# TODO(hamaji): Unsupported dtypes.
# TestCase(NODE_TEST, 'test_mod_uint16'),
# TestCase(NODE_TEST, 'test_mod_uint32'),
# TestCase(NODE_TEST, 'test_mod_uint64'),
TestCase(NODE_TEST, 'test_mod_uint8'),
TestCase(NODE_TEST, 'test_pow'),
TestCase(NODE_TEST, 'test_pow_bcast_array'),
TestCase(NODE_TEST, 'test_pow_bcast_scalar'),
TestCase(NODE_TEST, 'test_pow_example'),
TestCase(NODE_TEST, 'test_and2d'),
TestCase(NODE_TEST, 'test_and3d'),
TestCase(NODE_TEST, 'test_and4d'),
TestCase(NODE_TEST, 'test_and_bcast3v1d'),
TestCase(NODE_TEST, 'test_and_bcast3v2d'),
TestCase(NODE_TEST, 'test_and_bcast4v2d'),
TestCase(NODE_TEST, 'test_and_bcast4v3d'),
TestCase(NODE_TEST, 'test_and_bcast4v4d'),
TestCase(NODE_TEST, 'test_or2d'),
TestCase(NODE_TEST, 'test_or4d'),
TestCase(NODE_TEST, 'test_or_bcast3v1d'),
TestCase(NODE_TEST, 'test_or3d'),
TestCase(NODE_TEST, 'test_or_bcast4v2d'),
TestCase(NODE_TEST, 'test_or_bcast3v2d'),
TestCase(NODE_TEST, 'test_or_bcast4v3d'),
TestCase(NODE_TEST, 'test_or_bcast4v4d'),
TestCase(NODE_TEST, 'test_xor2d'),
TestCase(NODE_TEST, 'test_xor3d'),
TestCase(NODE_TEST, 'test_xor_bcast3v1d'),
TestCase(NODE_TEST, 'test_xor4d'),
TestCase(NODE_TEST, 'test_xor_bcast3v2d'),
TestCase(NODE_TEST, 'test_xor_bcast4v2d'),
TestCase(NODE_TEST, 'test_xor_bcast4v4d'),
TestCase(NODE_TEST, 'test_xor_bcast4v3d'),
TestCase(NODE_TEST, 'test_neg'),
TestCase(NODE_TEST, 'test_neg_example'),
TestCase(NODE_TEST, 'test_reciprocal'),
TestCase(NODE_TEST, 'test_reciprocal_example'),
TestCase(NODE_TEST, 'test_exp'),
TestCase(NODE_TEST, 'test_exp_example'),
TestCase(NODE_TEST, 'test_log'),
TestCase(NODE_TEST, 'test_log_example'),
TestCase(NODE_TEST, 'test_sqrt'),
TestCase(NODE_TEST, 'test_sqrt_example'),
TestCase(NODE_TEST, 'test_sin'),
TestCase(NODE_TEST, 'test_sin_example'),
TestCase(NODE_TEST, 'test_cos'),
TestCase(NODE_TEST, 'test_cos_example'),
TestCase(NODE_TEST, 'test_tan'),
TestCase(NODE_TEST, 'test_tan_example'),
TestCase(NODE_TEST, 'test_sinh'),
TestCase(NODE_TEST, 'test_sinh_example'),
TestCase(NODE_TEST, 'test_cosh'),
TestCase(NODE_TEST, 'test_cosh_example'),
TestCase(NODE_TEST, 'test_tanh'),
TestCase(NODE_TEST, 'test_tanh_example'),
TestCase(NODE_TEST, 'test_asin'),
TestCase(NODE_TEST, 'test_asin_example'),
TestCase(NODE_TEST, 'test_acos'),
TestCase(NODE_TEST, 'test_acos_example'),
TestCase(NODE_TEST, 'test_atan'),
TestCase(NODE_TEST, 'test_atan_example'),
TestCase(NODE_TEST, 'test_asinh'),
TestCase(NODE_TEST, 'test_asinh_example'),
TestCase(NODE_TEST, 'test_acosh'),
TestCase(NODE_TEST, 'test_acosh_example'),
# TODO(hamaji): Enable these tests once atanh is implemented.
# TestCase(NODE_TEST, 'test_atanh'),
# TestCase(NODE_TEST, 'test_atanh_example'),
TestCase(NODE_TEST, 'test_erf'),
TestCase(NODE_TEST, 'test_abs'),
TestCase(NODE_TEST, 'test_relu'),
TestCase(NODE_TEST, 'test_elu'),
TestCase(NODE_TEST, 'test_elu_default'),
TestCase(NODE_TEST, 'test_elu_example'),
TestCase(NODE_TEST, 'test_leakyrelu'),
TestCase(NODE_TEST, 'test_leakyrelu_default'),
TestCase(NODE_TEST, 'test_leakyrelu_example'),
TestCase(NODE_TEST, 'test_selu'),
TestCase(NODE_TEST, 'test_selu_default'),
TestCase(NODE_TEST, 'test_selu_example'),
TestCase(NODE_TEST, 'test_sigmoid'),
TestCase(NODE_TEST, 'test_sigmoid_example'),
TestCase(NODE_TEST, 'test_floor'),
TestCase(NODE_TEST, 'test_floor_example'),
TestCase(NODE_TEST, 'test_ceil'),
TestCase(NODE_TEST, 'test_ceil_example'),
TestCase(NODE_TEST, 'test_sign'),
TestCase(NODE_TEST, 'test_not_2d'),
TestCase(NODE_TEST, 'test_not_3d'),
TestCase(NODE_TEST, 'test_not_4d'),
TestCase(NODE_TEST, 'test_equal'),
TestCase(NODE_TEST, 'test_equal_bcast'),
TestCase(NODE_TEST, 'test_greater'),
TestCase(NODE_TEST, 'test_greater_bcast'),
TestCase(NODE_TEST, 'test_less'),
TestCase(NODE_TEST, 'test_less_bcast'),
TestCase(NODE_TEST, 'test_constant'),
TestCase(NODE_TEST, 'test_constantofshape_float_ones'),
TestCase(NODE_TEST, 'test_constantofshape_int_zeros'),
TestCase(NODE_TEST, 'test_onehot_with_axis'),
TestCase(NODE_TEST, 'test_onehot_with_negative_axis'),
TestCase(NODE_TEST, 'test_onehot_without_axis'),
TestCase(NODE_TEST, 'test_eyelike_populate_off_main_diagonal'),
TestCase(NODE_TEST, 'test_eyelike_with_dtype'),
TestCase(NODE_TEST, 'test_eyelike_without_dtype'),
TestCase(NODE_TEST, 'test_cast_DOUBLE_to_FLOAT'),
TestCase(NODE_TEST, 'test_cast_DOUBLE_to_FLOAT16'),
TestCase(NODE_TEST, 'test_cast_FLOAT16_to_DOUBLE'),
TestCase(NODE_TEST, 'test_cast_FLOAT16_to_FLOAT'),
TestCase(NODE_TEST, 'test_cast_FLOAT_to_DOUBLE'),
TestCase(NODE_TEST, 'test_cast_FLOAT_to_FLOAT16'),
TestCase(NODE_TEST, 'test_matmul_2d'),
TestCase(NODE_TEST, 'test_matmul_3d'),
TestCase(NODE_TEST, 'test_matmul_4d'),
TestCase(NODE_TEST, 'test_basic_conv_with_padding'),
TestCase(NODE_TEST, 'test_basic_conv_without_padding'),
TestCase(NODE_TEST, 'test_conv_with_strides_no_padding'),
TestCase(NODE_TEST, 'test_conv_with_strides_padding'),
TestCase(NODE_TEST, 'test_conv_with_strides_and_asymmetric_padding'),
TestCase(NODE_TEST, 'test_convtranspose'),
TestCase(NODE_TEST, 'test_convtranspose_1d', fail=fail_1d_conv_pool),
TestCase(NODE_TEST, 'test_convtranspose_3d'),
TestCase(NODE_TEST, 'test_convtranspose_kernel_shape'),
TestCase(NODE_TEST, 'test_convtranspose_output_shape'),
# TODO(hamaji): output_pads is not handled yet.
# TestCase(NODE_TEST, 'test_convtranspose_pad'),
TestCase(NODE_TEST, 'test_convtranspose_pads'),
TestCase(NODE_TEST, 'test_convtranspose_with_kernel'),
TestCase(NODE_TEST, 'test_constant_pad'),
# TODO(hamaji): auto_pad is not supported.
TestCase(NODE_TEST, 'test_maxpool_1d_default', fail=fail_1d_conv_pool),
TestCase(NODE_TEST, 'test_maxpool_2d_ceil'),
TestCase(NODE_TEST, 'test_maxpool_2d_default'),
TestCase(NODE_TEST, 'test_maxpool_2d_pads'),
TestCase(NODE_TEST, 'test_maxpool_2d_precomputed_pads'),
TestCase(NODE_TEST, 'test_maxpool_2d_precomputed_same_upper'),
TestCase(NODE_TEST, 'test_maxpool_2d_precomputed_strides'),
TestCase(NODE_TEST, 'test_maxpool_2d_strides'),
TestCase(NODE_TEST, 'test_maxpool_3d_default'),
TestCase(NODE_TEST, 'test_averagepool_1d_default', fail=fail_1d_conv_pool),
TestCase(NODE_TEST, 'test_averagepool_2d_default'),
TestCase(NODE_TEST, 'test_averagepool_2d_precomputed_pads'),
TestCase(NODE_TEST, 'test_averagepool_2d_precomputed_pads_count_include_pad'),
TestCase(NODE_TEST, 'test_averagepool_2d_precomputed_strides'),
TestCase(NODE_TEST, 'test_averagepool_2d_strides'),
TestCase(NODE_TEST, 'test_averagepool_2d_pads'),
TestCase(NODE_TEST, 'test_averagepool_2d_pads_count_include_pad'),
TestCase(NODE_TEST, 'test_averagepool_3d_default'),
TestCase(NODE_TEST, 'test_globalmaxpool'),
TestCase(NODE_TEST, 'test_globalmaxpool_precomputed'),
TestCase(NODE_TEST, 'test_globalaveragepool'),
TestCase(NODE_TEST, 'test_globalaveragepool_precomputed'),
TestCase(NODE_TEST, 'test_upsample_nearest'),
# TODO(take-cheeze): Other Resize-11 tests
TestCase(NODE_TEST, 'test_resize_upsample_scales_nearest'),
# The second ROI values mismatch. Let the test pass with
# ridiculously large tolerance.
TestCase(NODE_TEST, 'test_roialign', rtol=0.5, atol=0.5),
TestCase(NODE_TEST, 'test_shape'),
TestCase(NODE_TEST, 'test_shape_example'),
TestCase(NODE_TEST, 'test_size'),
TestCase(NODE_TEST, 'test_size_example'),
TestCase(NODE_TEST, 'test_reshape_extended_dims'),
TestCase(NODE_TEST, 'test_reshape_negative_dim'),
TestCase(NODE_TEST, 'test_reshape_negative_extended_dims'),
TestCase(NODE_TEST, 'test_reshape_one_dim'),
TestCase(NODE_TEST, 'test_reshape_reduced_dims'),
TestCase(NODE_TEST, 'test_reshape_reordered_all_dims'),
TestCase(NODE_TEST, 'test_reshape_reordered_last_dims'),
# TODO(hamaji): Support zero dims in reshape op.
TestCase(NODE_TEST, 'test_reshape_zero_dim', fail=True),
TestCase(NODE_TEST, 'test_reshape_zero_and_negative_dim', fail=True),
TestCase(NODE_TEST, 'test_expand_dim_changed'),
TestCase(NODE_TEST, 'test_expand_dim_unchanged'),
TestCase(NODE_TEST, 'test_squeeze'),
TestCase(NODE_TEST, 'test_squeeze_negative_axes'),
TestCase(NODE_TEST, 'test_unsqueeze_axis_0'),
TestCase(NODE_TEST, 'test_unsqueeze_axis_1'),
TestCase(NODE_TEST, 'test_unsqueeze_axis_2'),
TestCase(NODE_TEST, 'test_unsqueeze_axis_3'),
TestCase(NODE_TEST, 'test_unsqueeze_negative_axes'),
TestCase(NODE_TEST, 'test_unsqueeze_two_axes'),
TestCase(NODE_TEST, 'test_unsqueeze_three_axes'),
TestCase(NODE_TEST, 'test_unsqueeze_unsorted_axes'),
TestCase(NODE_TEST, 'test_flatten_axis0'),
TestCase(NODE_TEST, 'test_flatten_axis1'),
TestCase(NODE_TEST, 'test_flatten_axis2'),
TestCase(NODE_TEST, 'test_flatten_axis3'),
TestCase(NODE_TEST, 'test_flatten_default_axis'),
TestCase(NODE_TEST, 'test_flatten_negative_axis1'),
TestCase(NODE_TEST, 'test_flatten_negative_axis2'),
TestCase(NODE_TEST, 'test_flatten_negative_axis3'),
TestCase(NODE_TEST, 'test_flatten_negative_axis4'),
TestCase(NODE_TEST, 'test_slice'),
TestCase(NODE_TEST, 'test_slice_default_axes'),
TestCase(NODE_TEST, 'test_slice_default_steps'),
TestCase(NODE_TEST, 'test_slice_end_out_of_bounds'),
TestCase(NODE_TEST, 'test_slice_neg'),
TestCase(NODE_TEST, 'test_slice_neg_steps'),
TestCase(NODE_TEST, 'test_slice_negative_axes'),
TestCase(NODE_TEST, 'test_slice_start_out_of_bounds'),
TestCase(NODE_TEST, 'test_gather_0'),
TestCase(NODE_TEST, 'test_gather_1'),
TestCase(NODE_TEST, 'test_gather_elements_0'),
TestCase(NODE_TEST, 'test_gather_elements_1'),
TestCase(NODE_TEST, 'test_gather_elements_negative_indices'),
TestCase(NODE_TEST, 'test_gather_negative_indices'),
TestCase(NODE_TEST, 'test_gathernd_example_int32'),
TestCase(NODE_TEST, 'test_gathernd_example_float32'),
TestCase(NODE_TEST, 'test_scatter_with_axis'),
TestCase(NODE_TEST, 'test_scatter_without_axis'),
TestCase(NODE_TEST, 'test_scatter_elements_with_axis'),
TestCase(NODE_TEST, 'test_scatter_elements_with_negative_indices'),
TestCase(NODE_TEST, 'test_scatter_elements_without_axis'),
TestCase(NODE_TEST, 'test_scatternd'),
TestCase(NODE_TEST, 'test_concat_1d_axis_0'),
TestCase(NODE_TEST, 'test_concat_1d_axis_negative_1'),
TestCase(NODE_TEST, 'test_concat_2d_axis_0'),
TestCase(NODE_TEST, 'test_concat_2d_axis_1'),
TestCase(NODE_TEST, 'test_concat_2d_axis_negative_1'),
TestCase(NODE_TEST, 'test_concat_2d_axis_negative_2'),
TestCase(NODE_TEST, 'test_concat_3d_axis_0'),
TestCase(NODE_TEST, 'test_concat_3d_axis_1'),
TestCase(NODE_TEST, 'test_concat_3d_axis_2'),
TestCase(NODE_TEST, 'test_concat_3d_axis_negative_1'),
TestCase(NODE_TEST, 'test_concat_3d_axis_negative_2'),
TestCase(NODE_TEST, 'test_concat_3d_axis_negative_3'),
TestCase(NODE_TEST, 'test_split_equal_parts_1d'),
TestCase(NODE_TEST, 'test_split_equal_parts_2d'),
TestCase(NODE_TEST, 'test_split_equal_parts_default_axis'),
TestCase(NODE_TEST, 'test_split_variable_parts_1d'),
TestCase(NODE_TEST, 'test_split_variable_parts_2d'),
TestCase(NODE_TEST, 'test_split_variable_parts_default_axis'),
TestCase(NODE_TEST, 'test_transpose_all_permutations_0'),
TestCase(NODE_TEST, 'test_transpose_all_permutations_1'),
TestCase(NODE_TEST, 'test_transpose_all_permutations_2'),
TestCase(NODE_TEST, 'test_transpose_all_permutations_3'),
TestCase(NODE_TEST, 'test_transpose_all_permutations_4'),
TestCase(NODE_TEST, 'test_transpose_all_permutations_5'),
TestCase(NODE_TEST, 'test_transpose_default'),
TestCase(NODE_TEST, 'test_depthtospace_crd_mode'),
TestCase(NODE_TEST, 'test_depthtospace_crd_mode_example'),
TestCase(NODE_TEST, 'test_depthtospace_dcr_mode'),
TestCase(NODE_TEST, 'test_depthtospace_example'),
TestCase(NODE_TEST, 'test_gemm_all_attributes'),
TestCase(NODE_TEST, 'test_gemm_alpha'),
TestCase(NODE_TEST, 'test_gemm_beta'),
TestCase(NODE_TEST, 'test_gemm_default_matrix_bias'),
TestCase(NODE_TEST, 'test_gemm_default_no_bias'),
TestCase(NODE_TEST, 'test_gemm_default_scalar_bias'),
TestCase(NODE_TEST, 'test_gemm_default_single_elem_vector_bias'),
TestCase(NODE_TEST, 'test_gemm_default_vector_bias'),
TestCase(NODE_TEST, 'test_gemm_default_zero_bias'),
TestCase(NODE_TEST, 'test_gemm_transposeA'),
TestCase(NODE_TEST, 'test_gemm_transposeB'),
TestCase(NODE_TEST, 'test_rnn_seq_length'),
TestCase(NODE_TEST, 'test_simple_rnn_defaults'),
TestCase(NODE_TEST, 'test_simple_rnn_with_initial_bias'),
TestCase(NODE_TEST, 'test_gru_defaults'),
TestCase(NODE_TEST, 'test_gru_seq_length'),
TestCase(NODE_TEST, 'test_gru_with_initial_bias'),
TestCase(NODE_TEST, 'test_lstm_defaults'),
TestCase(NODE_TEST, 'test_lstm_with_initial_bias'),
TestCase(NODE_TEST, 'test_lstm_with_peepholes', rtol=5e-2),
TestCase(NODE_TEST, 'test_softmax_axis_0'),
TestCase(NODE_TEST, 'test_softmax_axis_1'),
TestCase(NODE_TEST, 'test_softmax_axis_2'),
TestCase(NODE_TEST, 'test_softmax_default_axis'),
TestCase(NODE_TEST, 'test_softmax_example'),
TestCase(NODE_TEST, 'test_softmax_large_number'),
TestCase(NODE_TEST, 'test_softmax_negative_axis'),
TestCase(NODE_TEST, 'test_logsoftmax_axis_0'),
TestCase(NODE_TEST, 'test_logsoftmax_axis_1'),
TestCase(NODE_TEST, 'test_logsoftmax_axis_2'),
TestCase(NODE_TEST, 'test_logsoftmax_default_axis'),
TestCase(NODE_TEST, 'test_logsoftmax_example_1'),
TestCase(NODE_TEST, 'test_logsoftmax_large_number', rtol=5e-3),
TestCase(NODE_TEST, 'test_logsoftmax_negative_axis'),
TestCase(NODE_TEST, 'test_softplus'),
TestCase(NODE_TEST, 'test_softplus_example'),
TestCase(NODE_TEST, 'test_softsign'),
TestCase(NODE_TEST, 'test_softsign_example'),
TestCase(NODE_TEST, 'test_sum_example'),
TestCase(NODE_TEST, 'test_sum_one_input'),
TestCase(NODE_TEST, 'test_sum_two_inputs'),
TestCase(NODE_TEST, 'test_mean_example'),
TestCase(NODE_TEST, 'test_mean_one_input'),
TestCase(NODE_TEST, 'test_mean_two_inputs'),
TestCase(NODE_TEST, 'test_max_example'),
TestCase(NODE_TEST, 'test_max_one_input'),
TestCase(NODE_TEST, 'test_max_two_inputs'),
TestCase(NODE_TEST, 'test_min_example'),
TestCase(NODE_TEST, 'test_min_one_input'),
TestCase(NODE_TEST, 'test_min_two_inputs'),
TestCase(NODE_TEST, 'test_clip'),
TestCase(NODE_TEST, 'test_clip_default_inbounds'),
TestCase(NODE_TEST, 'test_clip_default_max'),
TestCase(NODE_TEST, 'test_clip_default_min'),
TestCase(NODE_TEST, 'test_clip_example'),
TestCase(NODE_TEST, 'test_clip_inbounds'),
TestCase(NODE_TEST, 'test_clip_outbounds'),
TestCase(NODE_TEST, 'test_clip_splitbounds'),
TestCase(NODE_TEST, 'test_argmax_default_axis_example'),
TestCase(NODE_TEST, 'test_argmax_default_axis_random'),
TestCase(NODE_TEST, 'test_argmax_keepdims_example'),
TestCase(NODE_TEST, 'test_argmax_keepdims_random'),
TestCase(NODE_TEST, 'test_argmax_negative_axis_keepdims_example'),
TestCase(NODE_TEST, 'test_argmax_negative_axis_keepdims_random'),
TestCase(NODE_TEST, 'test_argmax_no_keepdims_example'),
TestCase(NODE_TEST, 'test_argmax_no_keepdims_random'),
TestCase(NODE_TEST, 'test_argmin_default_axis_example'),
TestCase(NODE_TEST, 'test_argmin_default_axis_random'),
TestCase(NODE_TEST, 'test_argmin_keepdims_example'),
TestCase(NODE_TEST, 'test_argmin_keepdims_random'),
TestCase(NODE_TEST, 'test_argmin_negative_axis_keepdims_example'),
TestCase(NODE_TEST, 'test_argmin_negative_axis_keepdims_random'),
TestCase(NODE_TEST, 'test_argmin_no_keepdims_example'),
TestCase(NODE_TEST, 'test_argmin_no_keepdims_random'),
TestCase(NODE_TEST, 'test_hardmax_axis_0'),
TestCase(NODE_TEST, 'test_hardmax_axis_1'),
TestCase(NODE_TEST, 'test_hardmax_axis_2'),
TestCase(NODE_TEST, 'test_hardmax_default_axis'),
TestCase(NODE_TEST, 'test_hardmax_example'),
TestCase(NODE_TEST, 'test_hardmax_negative_axis'),
TestCase(NODE_TEST, 'test_hardmax_one_hot'),
TestCase(NODE_TEST, 'test_reduce_l1_default_axes_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_l1_default_axes_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_l1_do_not_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_l1_do_not_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_l1_keep_dims_example'),
TestCase(NODE_TEST, 'test_reduce_l1_keep_dims_random'),
TestCase(NODE_TEST, 'test_reduce_l1_negative_axes_keep_dims_example'),
TestCase(NODE_TEST, 'test_reduce_l1_negative_axes_keep_dims_random'),
TestCase(NODE_TEST, 'test_reduce_l2_default_axes_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_l2_default_axes_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_l2_do_not_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_l2_do_not_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_l2_keep_dims_example'),
TestCase(NODE_TEST, 'test_reduce_l2_keep_dims_random'),
TestCase(NODE_TEST, 'test_reduce_l2_negative_axes_keep_dims_example'),
TestCase(NODE_TEST, 'test_reduce_l2_negative_axes_keep_dims_random'),
TestCase(NODE_TEST, 'test_reduce_log_sum'),
TestCase(NODE_TEST, 'test_reduce_log_sum_asc_axes'),
TestCase(NODE_TEST, 'test_reduce_log_sum_default'),
TestCase(NODE_TEST, 'test_reduce_log_sum_desc_axes'),
TestCase(NODE_TEST, 'test_reduce_log_sum_exp_default_axes_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_log_sum_exp_default_axes_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_log_sum_exp_do_not_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_log_sum_exp_do_not_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_log_sum_exp_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_log_sum_exp_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_log_sum_exp_negative_axes_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_log_sum_exp_negative_axes_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_log_sum_negative_axes'),
TestCase(NODE_TEST, 'test_reduce_max_default_axes_keepdim_example'),
TestCase(NODE_TEST, 'test_reduce_max_default_axes_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_max_do_not_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_max_do_not_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_max_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_max_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_max_negative_axes_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_max_negative_axes_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_mean_default_axes_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_mean_default_axes_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_mean_do_not_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_mean_do_not_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_mean_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_mean_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_mean_negative_axes_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_mean_negative_axes_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_min_default_axes_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_min_default_axes_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_min_do_not_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_min_do_not_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_min_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_min_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_min_negative_axes_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_min_negative_axes_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_prod_default_axes_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_prod_default_axes_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_sum_default_axes_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_sum_default_axes_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_sum_do_not_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_sum_do_not_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_sum_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_sum_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_sum_negative_axes_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_sum_negative_axes_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_sum_square_default_axes_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_sum_square_default_axes_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_sum_square_do_not_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_sum_square_do_not_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_sum_square_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_sum_square_keepdims_random'),
TestCase(NODE_TEST, 'test_reduce_sum_square_negative_axes_keepdims_example'),
TestCase(NODE_TEST, 'test_reduce_sum_square_negative_axes_keepdims_random'),
TestCase(NODE_TEST, 'test_batchnorm_example'),
TestCase(NODE_TEST, 'test_batchnorm_epsilon'),
TestCase(NODE_TEST, 'test_lrn'),
TestCase(NODE_TEST, 'test_lrn_default'),
TestCase(NODE_TEST, 'test_dropout_default'),
TestCase(NODE_TEST, 'test_dropout_random'),
TestCase(NODE_TEST, 'test_isnan'),
TestCase(NODE_TEST, 'test_isinf'),
TestCase(NODE_TEST, 'test_isinf_negative'),
TestCase(NODE_TEST, 'test_isinf_positive'),
TestCase(NODE_TEST, 'test_where_example'),
TestCase(NODE_TEST, 'test_where_long_example'),
TestCase(NODE_TEST, 'test_nonzero_example'),
TestCase(NODE_TEST, 'test_nonmaxsuppression_suppress_by_IOU'),
TestCase(NODE_TEST, 'test_nonmaxsuppression_center_point_box_format'),
TestCase(NODE_TEST, 'test_nonmaxsuppression_flipped_coordinates'),
TestCase(NODE_TEST, 'test_nonmaxsuppression_identical_boxes'),
TestCase(NODE_TEST, 'test_nonmaxsuppression_single_box'),
TestCase(NODE_TEST, 'test_nonmaxsuppression_suppress_by_IOU_and_scores'),
TestCase(NODE_TEST, 'test_nonmaxsuppression_two_batches'),
TestCase(NODE_TEST, 'test_nonmaxsuppression_limit_output_size'),
TestCase(NODE_TEST, 'test_nonmaxsuppression_two_classes'),
TestCase(NODE_TEST, 'test_top_k'),
TestCase(NODE_TEST, 'test_top_k_negative_axis'),
TestCase(NODE_TEST, 'test_top_k_smallest'),
TestCase(NODE_TEST, 'test_quantizelinear'),
TestCase(NODE_TEST, 'test_dequantizelinear'),
TestCase(NODE_TEST, 'test_qlinearmatmul_2D'),
TestCase(NODE_TEST, 'test_qlinearmatmul_3D'),
TestCase(NODE_TEST, 'test_qlinearconv'),
TestCase(NODE_TEST, 'test_convinteger_with_padding'),
TestCase(NODE_TEST, 'test_basic_convinteger'),
TestCase(NODE_TEST, 'test_matmulinteger'),
TestCase(NODE_TEST, 'test_round'),
TestCase(NODE_TEST, 'test_bitshift_left_uint8'),
TestCase(NODE_TEST, 'test_bitshift_right_uint8'),
# TODO(take-cheeze): Support larger unsigned int types
# TestCase(NODE_TEST, 'test_bitshift_left_uint64'),
# TestCase(NODE_TEST, 'test_bitshift_left_uint32'),
# TestCase(NODE_TEST, 'test_bitshift_left_uint16'),
# TestCase(NODE_TEST, 'test_bitshift_right_uint64'),
# TestCase(NODE_TEST, 'test_bitshift_right_uint32'),
# TestCase(NODE_TEST, 'test_bitshift_right_uint16'),
TestCase(NODE_TEST, 'test_scan9_sum'),
TestCase(NODE_TEST, 'test_dynamicquantizelinear'),
TestCase(NODE_TEST, 'test_dynamicquantizelinear_max_adjusted'),
TestCase(NODE_TEST, 'test_dynamicquantizelinear_min_adjusted'),
TestCase(NODE_TEST, 'test_dynamicquantizelinear_expanded'),
TestCase(NODE_TEST, 'test_dynamicquantizelinear_max_adjusted_expanded'),
TestCase(NODE_TEST, 'test_dynamicquantizelinear_min_adjusted_expanded'),
TestCase(NODE_TEST, 'test_mvn'),
TestCase(NODE_TEST, 'test_mvn_expanded'),
TestCase(SIMPLE_TEST, 'test_sign_model'),
TestCase(SIMPLE_TEST, 'test_single_relu_model'),
# TODO(hamaji): Come up with a good way to handle ONNX's shape
# inference for sequence types.
TestCase(SIMPLE_TEST, 'test_sequence_model1', skip_shape_inference=True),
TestCase(SIMPLE_TEST, 'test_sequence_model2', skip_shape_inference=True),
TestCase(SIMPLE_TEST, 'test_sequence_model3', skip_shape_inference=True),
TestCase(SIMPLE_TEST, 'test_sequence_model4', skip_shape_inference=True),
TestCase(SIMPLE_TEST, 'test_sequence_model5', skip_shape_inference=True),
TestCase(SIMPLE_TEST, 'test_sequence_model6', skip_shape_inference=True),
TestCase(SIMPLE_TEST, 'test_sequence_model7', skip_shape_inference=True),
TestCase(SIMPLE_TEST, 'test_expand_shape_model1'),
TestCase(SIMPLE_TEST, 'test_expand_shape_model2'),
TestCase(SIMPLE_TEST, 'test_expand_shape_model3'),
TestCase(SIMPLE_TEST, 'test_expand_shape_model4'),
]
# Model/operator test cases exported from PyTorch and distributed with the
# official ONNX test data.  `fail=True` marks known failures that are skipped
# unless --all is given; `fail_1d_conv_pool` is a flag computed earlier for
# configurations lacking 1-D conv/pool support.
TEST_CASES += [
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_AvgPool1d'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_AvgPool1d_stride'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_AvgPool2d'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_AvgPool2d_stride'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_AvgPool3d'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_AvgPool3d_stride'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_AvgPool3d_stride1_pad0_gpu_input'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_BatchNorm1d_3d_input_eval', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_BatchNorm2d_eval', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_BatchNorm2d_momentum_eval', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_BatchNorm3d_eval', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_BatchNorm3d_momentum_eval', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_ConstantPad2d'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv1d', fail=fail_1d_conv_pool),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv1d_dilated', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv1d_groups', fail=fail_1d_conv_pool),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv1d_pad1', fail=fail_1d_conv_pool),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv1d_pad1size1', fail=fail_1d_conv_pool),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv1d_pad2', fail=fail_1d_conv_pool),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv1d_pad2size1', fail=fail_1d_conv_pool),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv1d_stride', fail=fail_1d_conv_pool),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_depthwise'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_depthwise_padded'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_depthwise_strided'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_depthwise_with_multiplier'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_dilated', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_groups'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_groups_thnn', rtol=2e-4),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_no_bias'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_padding'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv2d_strided'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv3d'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv3d_dilated', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv3d_dilated_strided', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv3d_groups'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv3d_no_bias'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv3d_stride'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Conv3d_stride_padding'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_ConvTranspose2d', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_ConvTranspose2d_no_bias', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_ELU'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Embedding'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Embedding_sparse'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_GLU'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_GLU_dim'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_LeakyReLU'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_LeakyReLU_with_negval'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Linear', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Linear_no_bias'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_LogSoftmax'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_MaxPool1d', fail=fail_1d_conv_pool),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_MaxPool1d_stride', fail=fail_1d_conv_pool),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_MaxPool2d'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_MaxPool3d'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_MaxPool3d_stride'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_MaxPool3d_stride_padding'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_PReLU_1d', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_PReLU_1d_multiparam', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_PReLU_2d', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_PReLU_2d_multiparam', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_PReLU_3d', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_PReLU_3d_multiparam', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_PixelShuffle'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_PoissonNLLLLoss_no_reduce'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_ReLU'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_ReflectionPad2d', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_ReplicationPad2d', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_SELU'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Sigmoid'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Softmax'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Softmin'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Softplus'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Softsign', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_Tanh'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_ZeroPad2d'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_log_softmax_dim3'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_log_softmax_lastdim'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_softmax_functional_dim3'),
    TestCase(ONNX_TEST_DATA, 'pytorch-converted/test_softmax_lastdim'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_add_broadcast', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_add_size1_broadcast', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_add_size1_right_broadcast', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_add_size1_singleton_broadcast', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_addconstant', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_addmm', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_basic'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_chunk'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_clip'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_concat2'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_conv'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_convtranspose', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_exp'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_flatten'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_index'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_max'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_maxpool'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_min'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_mm', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_non_float_params'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_pad', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_params'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_permute2'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_pow', equal_nan=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_reduced_mean'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_reduced_mean_keepdim'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_reduced_sum'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_reduced_sum_keepdim'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_repeat', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_repeat_dim_overflow', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_selu'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_sqrt', equal_nan=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_symbolic_override', fail=True),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_symbolic_override_nested'),
    TestCase(ONNX_TEST_DATA, 'pytorch-operator/test_operator_view', fail=True),
]
# Directories already covered by the explicitly registered test cases above.
TEST_PATHS = set()
for test_case in TEST_CASES:
    TEST_PATHS.add(test_case.test_dir)
# With --all, additionally run every model.onnx found in the ONNX test data
# that is not listed explicitly, registering each as an expected failure.
if args.all:
    models = glob.glob(os.path.join(ONNX_TEST_DATA, '*/*/model.onnx'))
    for onnx in sorted(models):
        path = os.path.dirname(onnx)
        if path not in TEST_PATHS:
            case = TestCase(os.path.dirname(path), os.path.basename(path),
                            fail=True)
            TEST_CASES.append(case)
# Everything registered up to this point comes from the official ONNX data;
# remember the count for the final summary message.
num_official_onnx_tests = len(TEST_CASES)
# Append project-generated test suites (backprop, extra, large, model tests).
for backprop_test in gen_backprop_tests_oc.get_backprop_tests():
    assert os.path.exists(backprop_test.test_dir)
    TEST_CASES.append(backprop_test)
for backprop_test in gen_backprop_tests_pc.get_backprop_tests():
    assert os.path.exists(backprop_test.test_dir)
    TEST_CASES.append(backprop_test)
for test in gen_extra_test.get_tests():
    assert os.path.exists(test.test_dir), test.test_dir
    TEST_CASES.append(test)
# Large tests live under 'out' and are GPU-only.
for name, _, _, kwargs in gen_large_tests_oc.get_large_tests():
    dirname = 'out'
    TEST_CASES.append(TestCase(dirname, name, want_gpu=True, **kwargs))
TEST_CASES.append(TestCase('out', 'backprop_test_mnist_mlp'))
TEST_CASES.append(TestCase('data', 'shufflenet', want_gpu=True))
TEST_CASES.append(TestCase('data', 'mnist'))
TEST_CASES.extend(ch2o_tests.get())
TEST_CASES.extend(elichika_tests.get())
TEST_CASES.extend(onnx_chainer_tests.get())
TEST_CASES.extend(onnx_real_tests.get())
TEST_CASES.extend(gen_chainercv_model_tests.get_tests())
# Derive additional variants of every backprop test case: two-phase backprop,
# computation-order (recomputation) scheduling, and GPU runs.  Variants are
# collected into new_tests first so iteration over TEST_CASES stays safe.
new_tests = []
for test in TEST_CASES:
    if not test.is_backprop:
        continue
    # TODO(mkusumoto): remove this "if" after fixing issue
    if not test.name.startswith('large_oc'):
        new_test = copy.copy(test)
        new_test.name = test.name + '_two_phase'
        new_test.is_backprop_two_phase = True
        new_tests.append(new_test)
    # TODO(hamaji): Temporarily disabled due to shape inference change in ONNX.
    if test.name.startswith('backprop_test_oc_split_2'):
        continue
    # TODO(hamaji): Unexpected shape will appear due to broadcast.
    if test.name.startswith('backprop_test_oc_pow_const'):
        continue
    if test.fixed_batch_norm:
        continue
    # computation_order is supported in limited test cases
    if test.name.startswith('backprop_test_oc'):
        for two_phase in [False, True]:
            new_test = copy.copy(test)
            new_test.name = test.name + '_computation_order'
            new_test.computation_order = 'dummy'
            new_test.is_backprop_two_phase = two_phase
            new_tests.append(new_test)
    # add more tests for computation_order using CustomPolicy
    if test.name.startswith('backprop_test_oc_tanh2') or\
       test.name.startswith('backprop_test_oc_mul2') or\
       test.name.startswith('backprop_test_oc_max_pool2'):
        # Each string is an explicit schedule: CF=compute forward,
        # BF=backward, FFo/FFt=forget output/temporary, per node index.
        order_strings = [
            'CF0,CF1,BF1,BF0',
            'CF0,CF1,FFo0,CF1,BF1,BF0',
            'CF0,CF1,FFo0,FFt0,CF0,CF1,BF1,BF0',
            'CF0,CF1,FFt0,FFo0,CF0,CF1,BF1,BF0',
            'CF0,CF1,FFt0,CF0,BF1,BF0',
            'CF0,CF1,FFt0,CF0,FFt0,CF0,BF1,BF0',
            'CF0,CF1,FFt0,CF0,FFo0,CF1,BF1,BF0',
            'CF0,CF1,FFt0,CF0,FFo0,CF1,FFt0,CF0,BF1,BF0',
            'CF0,CF1,BF1,FFt0,CF0,BF0',
        ]
        if not test.name.startswith('backprop_test_oc_max_pool2'):
            order_strings.append(
                'CF0,CF1,BF1,FFt0,CF0,FFo0,FFt0,CF0,CF1,BF0'
            )
        for order_string in order_strings:
            for two_phase in [False, True]:
                new_test = copy.copy(test)
                new_test.name = test.name + '_custom_computation_order_'\
                    + order_string
                new_test.computation_order = 'custom_' + order_string
                new_test.is_backprop_two_phase = two_phase
                new_tests.append(new_test)
    if test.name.startswith('backprop_test_oc_branched_conv'):
        order_string = 'CF0,CF1,CF2,CF3,BF3,FFt2,BF1,CF2,BF2,BF0'
        for two_phase in [False, True]:
            new_test = copy.copy(test)
            new_test.name = test.name + '_custom_computation_order_'\
                + order_string
            new_test.computation_order = 'custom_' + order_string
            new_test.is_backprop_two_phase = two_phase
            new_tests.append(new_test)
    # run gpu test for the test cases of onnx_chainer
    # NOTE: We don't add tests for float16 case because they fail with --fuse
    # option. We may resolve this in future.
    if test.name.startswith('backprop_test_oc') and\
       not test.name.endswith('float16'):
        new_test = copy.copy(test)
        new_test.want_gpu = True
        new_tests.append(new_test)
for test in new_tests:
    TEST_CASES.append(test)
# Mark tests known to fail under the nGraph backend.
if args.ngraph:
    # TODO(hamaji): Triage these failures.
    ngraph_blacklist = [
        'extra_test_loop_scan_out',
        'extra_backprop_test_need_stack_loop',
        'ch2o_node_Linear_backprop',
        'ch2o_node_Linear_backprop_diversed',
        'backprop_test_oc_mul_same_float32_two_phase',
        'backprop_test_oc_mul_same_float64_two_phase',
        'backprop_test_oc_sigmoid_float64_two_phase',
        'extra_backprop_test_need_stack_loop_two_phase',
        'test_gemm_default_no_bias',
    ]
    for test in TEST_CASES:
        if test.name in ngraph_blacklist:
            test.fail = True
        if '_float16' in test.name:
            # TODO(hamaji): Skip float16 tests since nGraph
            # automatically promote float16 to float32.
            test.fail = True
        if test.name.endswith('_sigmoid_float64'):
            # TODO(hamaji): nGraph seems not to support fp64 sigmoid.
            test.fail = True
        if re.search(r'grouped_conv_.*float64', test.name):
            test.fail = True
# With --failed, rerun only the tests named in the previous failure log.
if args.failed:
    if not os.path.exists(args.failure_log):
        raise RuntimeError('No failure log in %s' % args.failure_log)
    failed_test_names = set()
    with open(args.failure_log, 'rb') as f:
        for line in f:
            # Failure log entries look like "=== <test name> ===".
            if line.startswith(b'=== '):
                matched = re.match(r'=== (\S+) ===', line.decode())
                if matched:
                    failed_test_names.add(matched.group(1))
    TEST_CASES = [case for case in TEST_CASES
                  if case.name in failed_test_names]
# Optional regex filter on test names.
if args.test_filter is not None:
    reg = re.compile(args.test_filter)
    TEST_CASES = [case for case in TEST_CASES if reg.search(case.name)]
# Unless --all, drop tests marked as expected failures.
if not args.all:
    TEST_CASES = [case for case in TEST_CASES if not case.fail]
def _start_output(msg):
if sys.stdout.isatty():
if len(msg) > 75:
msg = msg[:36] + '...' + msg[-36:]
sys.stdout.write('\r' + ' ' * 78 + '\r' + msg)
else:
sys.stdout.write(msg)
class TestRunner(object):
    """Runs test cases as subprocesses, up to N at a time, via os.wait()."""

    def __init__(self, test_cases, show_log):
        # test_cases: list of TestCase objects with .args already prepared.
        # show_log: when true, dump each test's stderr log even on success.
        self.test_cases = test_cases
        self.tested = []
        self.failed = []
        self.show_log = show_log

    def run(self, num_parallel_jobs):
        """Execute all test cases with at most num_parallel_jobs in flight."""
        # Reversed so pop() takes tests in their original order.
        tests = list(reversed(self.test_cases))
        procs = {}  # pid -> (test_case, Popen, open log file)
        while tests or procs:
            # Launch another test if there is both work and capacity.
            if tests and len(procs) < num_parallel_jobs:
                test_case = tests.pop()
                if num_parallel_jobs == 1:
                    _start_output('%s... ' % test_case.name)
                # stderr goes to the per-test log file; stdout is discarded
                # into a pipe.
                log_file = open(test_case.log_filename, 'wb')
                proc = subprocess.Popen(test_case.args,
                                        stdout=subprocess.PIPE,
                                        stderr=log_file)
                procs[proc.pid] = (test_case, proc, log_file)
                continue
            # Otherwise wait for any child to finish.
            assert procs
            pid, status = os.wait()
            assert pid in procs
            test_case, proc, log_file = procs[pid]
            del procs[pid]
            log_file.close()
            if num_parallel_jobs != 1:
                _start_output('%s... ' % test_case.name)
            self.tested.append(test_case)
            if status == 0:
                if test_case.fail:
                    # Passed even though it was marked as a known failure.
                    sys.stdout.write('%sOK (unexpected)%s\n' % (YELLOW, RESET))
                else:
                    sys.stdout.write('%sOK%s' % (GREEN, RESET))
                    if not sys.stdout.isatty():
                        sys.stdout.write('\n')
            else:
                self.failed.append(test_case)
                sys.stdout.write('%sFAIL%s: %s\n' %
                                 (RED, RESET, test_case.repro_cmdline()))
            # Dump the captured log on failure (or always with --show_log).
            if status != 0 or self.show_log:
                sys.stdout.buffer.write(test_case.log_read())
                if status != 0:
                    sys.stdout.write('%s$%s %s\n' %
                                     (RED, RESET, test_case.repro_cmdline()))
            sys.stdout.flush()
        _start_output('')
        sys.stdout.write('\n')
def main():
    """Build the project (unless skipped), prepare per-test command lines,
    run CPU tests in parallel and GPU tests serially, and write a failure
    log consumable by --failed."""
    if not args.skip_build:
        if os.path.exists('Makefile'):
            subprocess.check_call(['make', '-j4'])
        elif os.path.exists('build.ninja'):
            subprocess.check_call('ninja')
    # Locate the build directory if not given explicitly.
    if args.build_dir is None:
        if os.path.exists('build/CMakeCache.txt'):
            args.build_dir = 'build'
        elif os.path.exists('CMakeCache.txt'):
            args.build_dir = '.'
        else:
            args.build_dir = 'build'
    run_onnx = os.path.join(args.build_dir, 'tools/run_onnx')
    run_onnx_menoh = os.path.join(args.build_dir, 'menoh/run_onnx_menoh')
    tested = []
    failed = []
    tests = []
    gpu_tests = []
    for test_case in TEST_CASES:
        # Plain ONNX node tests go through the Menoh runner; anything
        # needing extra features uses run_onnx directly.
        runner = run_onnx_menoh
        if (test_case.is_backprop or
                test_case.is_backprop_two_phase or
                test_case.equal_nan or
                test_case.skip_shape_inference or
                test_case.skip_runtime_type_check or
                test_case.want_gpu or
                test_case.computation_order or
                not test_case.test_dir.startswith(NODE_TEST)):
            runner = run_onnx
        # Assemble the command line from the test case's options.
        test_case.args = [runner, '--test', test_case.test_dir]
        test_case.args.append('--compiler_log')
        is_gpu = False
        if test_case.rtol is not None:
            test_case.args += ['--rtol', str(test_case.rtol)]
        if test_case.atol is not None:
            test_case.args += ['--atol', str(test_case.atol)]
        if test_case.equal_nan:
            test_case.args += ['--equal_nan']
        if test_case.skip_shape_inference:
            test_case.args.append('--skip_inference')
        if test_case.skip_runtime_type_check:
            test_case.args.append('--skip_runtime_type_check')
        if test_case.fixed_batch_norm:
            test_case.args.append('--fixed_batch_norm')
        if test_case.is_backprop_two_phase:
            test_case.args.append('--backprop_two_phase')
        elif test_case.is_backprop:
            test_case.args.append('--backprop')
        # Per-test computation order overrides the global flag.
        if test_case.computation_order:
            test_case.args.append(
                '--computation_order=' + test_case.computation_order)
        elif args.computation_order:
            test_case.args.append(
                '--computation_order=' + args.computation_order)
        if test_case.backend is not None:
            test_case.args.append('--backend')
            test_case.args.append(test_case.backend)
        if args.verbose:
            test_case.args.append('--verbose')
        device = args.device
        if test_case.want_gpu or args.use_gpu_all:
            # GPU-only tests are skipped entirely unless a GPU was requested.
            if not args.use_gpu and not args.use_gpu_all:
                continue
            if device is None:
                device = 'cuda'
            is_gpu = True
        if device is not None:
            test_case.args.extend(['-d', device])
        if args.fuse:
            test_case.args.append('--fuse_operations')
            if is_gpu:
                test_case.args.append('--use_nvrtc')
        if args.ngraph:
            test_case.args.append('--fuse_operations')
            test_case.args.append('--use_ngraph')
        if args.snpe:
            test_case.args.append('--use_snpe')
        if args.cache:
            test_case.args.append('--use_cached_model')
        if is_gpu:
            gpu_tests.append(test_case)
        else:
            tests.append(test_case)
    print('Testing %d tests with %s and %s' %
          (len(tests + gpu_tests), run_onnx, run_onnx_menoh))
    for test in tests + gpu_tests:
        test.prepare()
    # CPU tests run with args.jobs workers; GPU tests run one at a time.
    for tests, num_jobs in [(tests, args.jobs), (gpu_tests, 1)]:
        runner = TestRunner(tests, args.show_log)
        runner.run(num_jobs)
        tested += runner.tested
        failed += runner.failed
    if failed:
        # Record failures in the format parsed by the --failed option.
        with open(args.failure_log, 'wb') as f:
            for test in failed:
                f.write(('=== %s ===\n' % test.name).encode())
                f.write(('$ %s\n' % test.repro_cmdline()).encode())
                f.write(test.log_read())
                f.write('\n'.encode())
        print('%d/%d tests failed! (see %s)' %
              (len(failed), len(tested), args.failure_log))
        sys.exit(1)
    else:
        print('ALL %d tests OK! (%d from ONNX)' %
              (len(tested), num_official_onnx_tests))


main()
| 47.83209 | 104 | 0.729211 |
8358025f44deefc0a0672046d8e61c3fa4cd94ea | 703 | py | Python | detectron2/evaluation/__init__.py | MargeryLab/BMaskR-CNN | 41f63d301d6be7fa30ba281a5a0f727fbca6ad2a | [
"Apache-2.0"
] | null | null | null | detectron2/evaluation/__init__.py | MargeryLab/BMaskR-CNN | 41f63d301d6be7fa30ba281a5a0f727fbca6ad2a | [
"Apache-2.0"
] | null | null | null | detectron2/evaluation/__init__.py | MargeryLab/BMaskR-CNN | 41f63d301d6be7fa30ba281a5a0f727fbca6ad2a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator
from .coco_evaluation import COCOEvaluator
from .rotated_coco_evaluation import RotatedCOCOEvaluator
from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset
from .lvis_evaluation import LVISEvaluator
from .panoptic_evaluation import COCOPanopticEvaluator
from .pascal_voc_evaluation import PascalVOCDetectionEvaluator
from .sem_seg_evaluation import SemSegEvaluator
from .testing import print_csv_format, verify_results
__all__ = [k for k in globals().keys() if not k.startswith("_")]
| 54.076923 | 100 | 0.846373 |
618c9c463fdd10e3847017004d70ddf007108f6e | 13,367 | py | Python | code/fusion_training_data_maker.py | tafseer-nayeem/CSL-Journal2019 | 1af3cec14b313397703f3e8e3d3e616c9a99b895 | [
"MIT"
] | 2 | 2020-01-15T09:28:19.000Z | 2021-01-24T10:54:52.000Z | code/fusion_training_data_maker.py | tafseer-nayeem/CSL-Journal2019 | 1af3cec14b313397703f3e8e3d3e616c9a99b895 | [
"MIT"
] | null | null | null | code/fusion_training_data_maker.py | tafseer-nayeem/CSL-Journal2019 | 1af3cec14b313397703f3e8e3d3e616c9a99b895 | [
"MIT"
] | 1 | 2021-03-07T12:36:06.000Z | 2021-03-07T12:36:06.000Z | import sys
import os
import shutil
import errno
import numpy as np
import nltk
import string
import re
import codecs
from sklearn.metrics import jaccard_similarity_score
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
reload(sys)
sys.setdefaultencoding('utf8')
lowest_threshold = 3.0 #Lowest taken threshold for relatedness score. If the related score is greater than this threshold we will consider the similarity between Sentence A & B
similarity_threshold = 0.25 #To find at least one common word between two sentences
MAX_INPUT_SENTENCE = 5 #There will be MAX_INPUT_SENTENCE sentences in the input pair
sick_file = 'SICK/SICK.txt' #SICK file location
MAX_JACCARD = 0.0
MIN_JACCARD = 1.0
#cnn = "cnn/stories/" #cnn story files location
cnn = "cnn/stories/"
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words("english"))
puncset = set(string.punctuation)
puncset.remove('-')
SRCDIR = os.path.dirname(os.path.realpath(__file__))
'''
if os.path.exists('lineNumber.txt') :
os.remove('lineNumber.txt')
if os.path.exists('all_train.txt') :
os.remove('all_train.txt')
'''
def getEnglishStopWords():
    '''
    Return the NLTK English stop-word list as a set,
    for use in NLP pre-processing.
    '''
    return set(stopwords.words("english"))
def _build_stop_words_set():
    '''
    Build the set of SMART stop words to ignore, one word per line in
    'smartstop.txt' next to this script.
    source: http://jmlr.org/papers/volume5/lewis04a/a11-smart-stop-list/english.stop
    '''
    stop_path = os.path.join(SRCDIR, 'smartstop.txt')
    # Use a context manager so the file handle is closed; the previous
    # version leaked an open handle.
    with open(stop_path, 'r') as stop_file:
        return set(stop_file.read().splitlines())
SMARTSTOPWORDS = _build_stop_words_set()
def stem_tokens(tokens):
    """Apply the module-level Porter stemmer to each token, preserving order."""
    return [stemmer.stem(token) for token in tokens]
def tokenizer1(text):
    """Lowercase letters-only tokens, lemmatized first as nouns then as verbs."""
    letters = re.sub('[^a-zA-Z]', ' ', text).lower()
    lemmas = []
    for word in letters.split():
        as_noun = lemmatizer.lemmatize(word, pos='n')
        lemmas.append(lemmatizer.lemmatize(as_noun, pos='v'))
    return lemmas
def tokenizer2(text):
    """Remove every punctuation character, word-tokenize, then Porter-stem."""
    no_punct = "".join(ch for ch in text if ch not in string.punctuation)
    return stem_tokens(nltk.word_tokenize(no_punct))
def tokenizer3(text):
    """Word-tokenize first, drop tokens that are punctuation, then Porter-stem."""
    kept = []
    for tok in nltk.word_tokenize(text):
        if tok not in string.punctuation:
            kept.append(tok)
    return stem_tokens(kept)
def tokenizer4(text):
    '''Return a bag of words for the sentence: runs of word characters or
    apostrophes, minus the SMART stop-word list.'''
    words = re.findall(r"[\w']+", text)
    return {w for w in words if w not in SMARTSTOPWORDS}
def tokenizer5(doc):
    """Lowercase, ASCII-fold (Python 2 str decode/encode), word-tokenize,
    then drop NLTK stop words and any non-alphabetic token."""
    lowered = doc.lower()
    ascii_text = lowered.decode('unicode_escape').encode('ascii','ignore')
    tokens = nltk.word_tokenize(ascii_text)
    return [w for w in tokens if w not in stop_words and w.isalpha()]
def tokenizer6(sentence):
    """Punctuation-free lowercase tokens minus SMART stop words, lemmatized
    (noun pass then verb pass), keeping only purely alphabetic lemmas.
    Uses the Python 2 two-argument form of str.translate."""
    cleaned = sentence.translate(None, string.punctuation).lower()
    result = []
    for word in cleaned.split():
        if word in SMARTSTOPWORDS:
            continue
        lemma = lemmatizer.lemmatize(word, pos='n')
        lemma = lemmatizer.lemmatize(lemma, pos='v')
        if lemma.isalpha():
            result.append(lemma)
    return result
def tokenizer7(sentence):
    """Like tokenizer6 but strips punctuation via puncset (which keeps '-',
    so hyphenated words survive) and does NOT drop non-alphabetic lemmas."""
    kept_chars = [ch for ch in sentence if ch not in puncset]
    words = "".join(kept_chars).lower().split()
    words = [w for w in words if w not in SMARTSTOPWORDS]
    words = [lemmatizer.lemmatize(w, pos='n') for w in words]
    return [lemmatizer.lemmatize(w, pos='v') for w in words]
def letter_only_function(sentence):
    """Replace every non-letter character with a space and lowercase the result."""
    return re.sub("[^a-zA-Z]", " ", sentence).lower()
def jaccard_similarity(string1, string2):
    """Jaccard index |A&B| / |A|B| over the unique elements of the two
    iterables; returns 0.0 when both are empty."""
    set1, set2 = set(string1), set(string2)
    union = set1 | set2
    if not union:
        return 0.0
    return len(set1 & set2) / float(len(union))
def similarity(s1, s2):
    """Best Jaccard similarity between s1 and s2 across three different
    tokenizations (tokenizer5, tokenizer6 and tokenizer7)."""
    scores = []
    for tokenize in (tokenizer5, tokenizer6, tokenizer7):
        scores.append(jaccard_similarity(tokenize(s1), tokenize(s2)))
    return max(scores)
def read_sick():
    # Scan the tab-separated SICK corpus and, over all sentence pairs whose
    # human relatedness score (column 4) exceeds lowest_threshold, return the
    # minimum observed Jaccard similarity, ignoring pairs whose similarity is
    # below similarity_threshold.  Python 2 code (print statements, raw_input).
    with open(sick_file) as fp:
        minimum_similarity = 1.0 #assuming minimum similarity between two sentences
        # First readline() consumes the header row.
        line = fp.readline()
        cnt = 0
        while line:
            #print "Line {}: {}".format(cnt, line.strip())
            line = fp.readline()
            splitted_line = line.split("\t")
            # A line with no tabs means EOF / malformed tail.
            if len(splitted_line) == 1:
                break
            #print splitted_line[4]
            if float(splitted_line[4]) > lowest_threshold:
                # Columns 1 and 2 are sentence A and sentence B.
                temp_similarity = similarity(letter_only_function(splitted_line[1]), letter_only_function(splitted_line[2]))
                if temp_similarity < similarity_threshold :
                    # NOTE(review): this `continue` also skips the cnt
                    # increment below, so cnt undercounts skipped pairs.
                    continue
                elif temp_similarity < minimum_similarity:
                    print temp_similarity
                    minimum_similarity = temp_similarity
            print "End of Processing Line :: %d" %(cnt)
            cnt += 1
            #if cnt == 10:
            #    break
        print "\n>>>>>>>>>>>>>>>>END OF SICK DATASET READING<<<<<<<<<<<<<<<<\n"
        print "For minimum relatedness score of :: %f" %(lowest_threshold)
        print "Minimum similarity found in Sick DATASET :: %f" %(minimum_similarity)
        raw_input("Press Enter to continue...") #For Python 2
        #input("Press Enter to continue...") #For Python 3
        return minimum_similarity
def choose_top_sentences(test_url):
    # Rewrite the story file at test_url in place so it keeps only the
    # MAX_INPUT_SENTENCE source sentences most similar to the highlight,
    # followed by the highlight itself.
    with open(test_url) as story_file:
        flag = 0
        highlight = []
        for line in story_file:
            line = line.rstrip("\n\r")
            # After the first "@highlight" marker, every subsequent line
            # overwrites `highlight`, so this keeps the file's LAST line.
            if flag == 1:
                highlight=line
            if line == "@highlight":
                flag = 1
    # Second pass: collect the body sentences (raw lines, newlines kept)
    # that appear before the first "@highlight".
    sentences = []
    with open(test_url) as story_file:
        for line in story_file:
            temp_line = line.rstrip("\n\r")
            if temp_line == "@highlight":
                break
            sentences.append(line)
    # The original file is deleted and rewritten below.
    os.remove(test_url)
    sen_similarity = np.zeros(len(sentences))
    flag_sen = np.zeros(len(sentences))
    i = 0
    for s in sentences:
        sen_similarity[i] = similarity(s, highlight)
        i += 1
    # Similarities in descending order; used to pick the top sentences.
    sorted_sim = np.sort(sen_similarity)[::-1]
    #print sen_similarity
    #print sorted_sim
    sen_counter = 0
    for i in range(0,len(sorted_sim)):
        if sen_counter == MAX_INPUT_SENTENCE:
            break
        # NOTE(review): duplicate similarity values make this inner loop
        # re-flag the same indices and over-increment sen_counter, so more
        # than MAX_INPUT_SENTENCE sentences can be selected — confirm intent.
        for j in range(0,len(sen_similarity)):
            if sorted_sim[i] == sen_similarity[j] and sen_similarity[j] != 0:
                flag_sen[j]=1
                sen_counter += 1
    # Rewrite the file: selected sentences (original order), a blank line,
    # then the highlight block.  `file` shadows the Python 2 builtin.
    file = open(test_url, "w")
    for i in range(0,len(flag_sen)):
        if flag_sen[i] == 1:
            file.write(sentences[i])
    file.write("\n")
    file.write("@highlight\n\n")
    file.write(highlight)
# Build one training sample per highlight of every CNN story: sentences that
# are similar enough to the highlight (per the threshold from read_sick())
# are kept as the source, the highlight itself becomes the target.
def make_from_cnn():
	in_folder = "input" #input file directory
	out_folder = "output" #output file directory
	max_input_line = 0
	number_of_sample = 0
	number_of_highlight = 0
	lines = []
	try:
		os.makedirs(in_folder)
		os.makedirs(out_folder)
	except OSError as e:
		if e.errno != errno.EEXIST:
			raise
	else :
		# NOTE(review): this `else` runs when makedirs just SUCCEEDED (i.e.
		# the folders did NOT pre-exist), yet it warns about existing
		# folders; the EEXIST case above is silently swallowed instead.
		# The branch logic looks inverted -- confirm intent.
		inString = raw_input("There is already folder named 'input' or 'output'!!!\n If you proceed your existing input/output directory will be deleted. \n If you want to proceed enter 'Y' or enter 'N'...")
		if inString == 'Y' or inString == 'y':
			shutil.rmtree(in_folder, ignore_errors=False, onerror=None)
			shutil.rmtree(out_folder, ignore_errors=False, onerror=None)
			os.makedirs(in_folder)
			os.makedirs(out_folder)
		else :
			sys.exit('Try Again!!')
	# Minimum acceptable similarity, calibrated on the SICK dataset.
	threshold = read_sick()
	all_files = os.listdir(cnn)
	# Start the two bookkeeping files fresh; ignore "file not found".
	try :
		os.remove("lineNumber.txt")
		os.remove("all_train.txt")
	except :
		pass
	line_count_file = open("lineNumber.txt", "a")
	source_url_list = open("all_train.txt", "a")
	for story in all_files:
		#print "Processing :: %s" %(story)
		story_url = story
		story = cnn + story
		# Collect the highlight lines: each "@highlight" marker is followed
		# (two lines later) by the highlight text itself.
		with open(story) as story_file:
			count = 0
			temp_count = -1
			highlight = []
			for line in story_file:
				line = line.rstrip("\n\r")
				count = count + 1
				if line == "@highlight":
					temp_count = count + 2
				if count == temp_count:
					temp_count = -1
					highlight.append(line)
					#print line
		#print highlight
		#print count
		cnt = 0
		number_of_highlight = number_of_highlight + len(highlight)
		#lemmatize noun verb
		#stop word bad dibo
		#input_url = cnn + "input/" + str(cnt) + ".txt"
		# One output sample per highlight of this story.
		for reference in highlight:
			input_line_count = 0
			temp_reference = reference
			output_url = out_folder + "/" + story_url + str(cnt) + ".story"
			input_url = in_folder + "/" + story_url + str(cnt) + ".story"
			file_name = story_url + str(cnt) + ".story"
			with open(story) as story_file:
				for line in story_file:
					reference = letter_only_function(reference)
					line = line.rstrip("\n\r")
					if line == "@highlight":
						break
					try :
						tokenized_sentences = nltk.tokenize.sent_tokenize(line)
					except:
						continue
					for sen in tokenized_sentences:
						letter_only_sen = letter_only_function(sen)
						try:
							sen_similarity = similarity(reference, letter_only_sen)
						except:
							continue
						# Track the extreme Jaccard scores seen over the run.
						# NOTE(review): `global MAX_JACCARD`/`global MIN_JACCARD`
						# appear AFTER the names are read in this function;
						# Python 3 rejects that with a SyntaxError and Python 2
						# only warns -- this file is Python 2 code.
						if sen_similarity > 0.0 and sen_similarity < 1.0:
							if sen_similarity >= MAX_JACCARD :
								global MAX_JACCARD
								MAX_JACCARD = sen_similarity
							if sen_similarity <= MIN_JACCARD :
								global MIN_JACCARD
								MIN_JACCARD = sen_similarity
						if sen_similarity < similarity_threshold :
							continue
						if len(letter_only_sen) < len(reference) :
							continue
						elif sen_similarity > threshold:
							# Similar enough: append the raw sentence to the
							# sample's input file.
							temp_file = open(input_url, "a")
							#letter_only_sen = letter_only_sen + " @len" + str(len(letter_only_sen.split())) + "\n"			#saves file with length.
							sen = sen + '\n'			#saves file without length
							lines.append(sen)
							input_line_count += 1
							#IF you want to save into file while checking each line
							#temp_file = open(input_url, "a")
							temp_file.write(sen)
							temp_file.close()
							#print input_url
							#print reference + "\n" + line
							#print similarity(reference, letter_only_sen)
							#print "+++++++++++++++++++\n"
					line = letter_only_function(line)
					'''
					try :
						temp_similarity = similarity(reference, line)
					except :
						continue
					if temp_similarity < similarity_threshold :
						continue
					elif temp_similarity > threshold:
						line = line + " @len" + str(len(line.split())) + "\n"			#saves file with length.
						#line = line + "\n"			#saves file without length
						lines.append(line)
						input_line_count += 1
						#IF you want to save into file while checking each line
						input_url = in_folder + "/" + story_url + ".in" + str(cnt)
						temp_file = open(input_url, "a")
						temp_file.write(line)
						temp_file.close()
						#print input_url
						#print reference + "\n" + line
					'''
					'''
					input_url = in_folder + "/" + story_url + ".in" + str(cnt)
					temp_file = open(input_url, "w")
					temp_file.write(lines)
					temp_file.close()
					'''
			line_count_file.write(str(input_line_count)+" "+input_url+" "+output_url+"\n")
			#temp_file.write(reference)			#saves without length
			# Only finalize samples that actually collected sentences.
			if os.path.exists(input_url) and os.path.getsize(input_url) > 0:
				#print "size 0000000000000000000000000000000"
				#temp_file = open(output_url, "w")
				temp_file = open(input_url, "a")
				temp_reference = "\n@highlight\n\n" + temp_reference
				#temp_file.write(reference + " @len" + str(len(reference.split())))			#saves with length
				temp_file.write(temp_reference)
				temp_file.close()
				choose_top_sentences(input_url) #Comment this line, if you want all the sentences in the source
				input_url = cnn + file_name + "\n"
				source_url_list.write("tmp_dir/"+input_url)
				number_of_sample += 1
				# Progress indicator on a single rewritten terminal line.
				sys.stdout.write("\r\x1b[KNumber of Sample Created:"+str(number_of_sample).__str__()+" ::: Number of Highlights:"+str(number_of_highlight).__str__())
				sys.stdout.flush()
				if input_line_count > max_input_line :
					max_input_line = input_line_count
					max_line_file_name = input_url
			input_line_count = 0
			cnt = cnt + 1
	line_count_file.close()
	source_url_list.close()
	print "\n\n"
	print max_input_line
	print max_line_file_name
# Entry point: build the filtered CNN dataset, then report the extreme
# Jaccard similarity scores observed during processing.
def main():
	make_from_cnn()
	# MAX_JACCARD / MIN_JACCARD are module globals updated inside
	# make_from_cnn() (Python 2 print statements).
	print "Maximum Similarity Found in DATASET :: " + str(MAX_JACCARD)
	print "Minimum Similarity Found in DATASET :: " + str(MIN_JACCARD)
if __name__ == "__main__":
	main()
| 29.638581 | 202 | 0.678163 |
547d85936142da49fca8cacb81438f560e7eea49 | 259 | py | Python | frappe_training/frappe_training/doctype/check8/check8.py | sivaranjanipalanivel/training | b177c56a319c07dc3467ce3113e332ecee9b81fa | [
"MIT"
] | null | null | null | frappe_training/frappe_training/doctype/check8/check8.py | sivaranjanipalanivel/training | b177c56a319c07dc3467ce3113e332ecee9b81fa | [
"MIT"
] | null | null | null | frappe_training/frappe_training/doctype/check8/check8.py | sivaranjanipalanivel/training | b177c56a319c07dc3467ce3113e332ecee9b81fa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021, valiantsystems and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class Check8(Document):
	"""Frappe DocType controller for Check8.

	All behavior is inherited from Document; no custom server-side logic.
	"""
	pass
| 23.545455 | 53 | 0.776062 |
48d5d7a6e363f58564c31890adc012846ad9e88c | 2,676 | py | Python | env/lib/python2.7/site-packages/django/contrib/gis/gdal/__init__.py | diego-d5000/MisValesMd | b641782bc2546776e9f55f452ec7fb48100dc482 | [
"MIT"
] | null | null | null | env/lib/python2.7/site-packages/django/contrib/gis/gdal/__init__.py | diego-d5000/MisValesMd | b641782bc2546776e9f55f452ec7fb48100dc482 | [
"MIT"
] | null | null | null | env/lib/python2.7/site-packages/django/contrib/gis/gdal/__init__.py | diego-d5000/MisValesMd | b641782bc2546776e9f55f452ec7fb48100dc482 | [
"MIT"
] | null | null | null | """
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
from django.contrib.gis.gdal.error import (check_err, GDALException,
OGRException, OGRIndexError, SRSException) # NOQA
from django.contrib.gis.gdal.geomtype import OGRGeomType # NOQA
__all__ = [
'check_err', 'GDALException', 'OGRException', 'OGRIndexError',
'SRSException', 'OGRGeomType', 'HAS_GDAL',
]
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
from django.contrib.gis.gdal.driver import Driver # NOQA
from django.contrib.gis.gdal.datasource import DataSource # NOQA
from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, GDAL_VERSION # NOQA
from django.contrib.gis.gdal.raster.source import GDALRaster # NOQA
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform # NOQA
from django.contrib.gis.gdal.geometries import OGRGeometry # NOQA
HAS_GDAL = True
__all__ += [
'Driver', 'DataSource', 'gdal_version', 'gdal_full_version',
'GDAL_VERSION', 'SpatialReference', 'CoordTransform', 'OGRGeometry',
]
except GDALException:
HAS_GDAL = False
try:
from django.contrib.gis.gdal.envelope import Envelope
__all__ += ['Envelope']
except ImportError:
# No ctypes, but don't raise an exception.
pass
| 39.940299 | 102 | 0.738416 |
347c406f342d4898e61fbd42882c3113a85e2302 | 13,102 | py | Python | dosagelib/cmd.py | frater/dosage | 8a2537055a9c14d5bdd0aa7fafdb5c9f757c7bc7 | [
"MIT"
] | 98 | 2015-04-14T02:11:36.000Z | 2022-02-02T03:32:58.000Z | dosagelib/cmd.py | frater/dosage | 8a2537055a9c14d5bdd0aa7fafdb5c9f757c7bc7 | [
"MIT"
] | 199 | 2015-04-09T01:09:42.000Z | 2022-03-25T14:28:51.000Z | dosagelib/cmd.py | frater/dosage | 8a2537055a9c14d5bdd0aa7fafdb5c9f757c7bc7 | [
"MIT"
] | 68 | 2015-04-11T17:15:54.000Z | 2022-03-20T17:45:43.000Z | # SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2020 Tobias Gruetzmacher
import argparse
import contextlib
import os
import platform
from pathlib import Path
import appdirs
from . import events, configuration, singleton, director
from . import AppName, __version__
from .output import out
from .scraper import scrapers as allscrapers
from .util import internal_error, strlimit
class ArgumentParser(argparse.ArgumentParser):
    """Custom argument parser."""
    def print_help(self, file=None):
        """Paginate help message on TTYs.

        The ``file`` parameter is accepted for signature compatibility with
        argparse.ArgumentParser.print_help() but is ignored; output always
        goes through the global ``out`` writer, wrapped in a pager.
        """
        with out.pager():
            out.info(self.format_help())
Examples = """\
EXAMPLES
List available comics:
dosage -l
Get the latest comic of for example CalvinAndHobbes and save it in the "Comics"
directory:
dosage CalvinAndHobbes
If you already have downloaded several comics and want to get the latest
strips of all of them:
dosage --continue @
"""
# Making our config roaming seems sensible
userdirs = appdirs.AppDirs(appname=AppName, appauthor=False, roaming=True)
def setup_options():
    """Construct the command-line option parser.

    @return: new option parser
    @rtype argparse.ArgumentParser
    """
    parser = ArgumentParser(
        description="A comic downloader and archiver.",
        epilog=Examples,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help='provides verbose output, use multiple times for more verbosity')
    parser.add_argument('-n', '--numstrips', action='store', type=int, default=0,
                        help='traverse and retrieve the given number of comic strips;'
                        ' use --all to retrieve all comic strips')
    parser.add_argument('-a', '--all', action='store_true',
                        help='traverse and retrieve all comic strips')
    parser.add_argument('-c', '--continue', action='store_true', dest='cont',
                        help='traverse and retrieve comic strips until an existing one is found')
    parser.add_argument('-b', '--basepath', action='store', default='Comics',
                        metavar='PATH',
                        help='set the path to create invidivual comic directories in, default is Comics')
    parser.add_argument('--baseurl', action='store', metavar='PATH',
                        help='the base URL of your comics directory (for RSS, HTML, etc.);'
                        ' this should correspond to --base-path')
    parser.add_argument('-l', '--list', action='store_true',
                        help='list available comic modules')
    parser.add_argument('--singlelist', action='store_true',
                        help='list available comic modules in a single column list')
    parser.add_argument('--version', action='store_true',
                        help='display the version number')
    parser.add_argument('--vote', action='store_true',
                        help='vote for the selected comics')
    parser.add_argument('-m', '--modulehelp', action='store_true',
                        help='display help for comic modules')
    parser.add_argument('-t', '--timestamps', action='store_true',
                        help='print timestamps for all output at any info level')
    parser.add_argument('-o', '--output', action='append', dest='handler',
                        choices=events.getHandlerNames(),
                        help='sets output handlers for downloaded comics')
    parser.add_argument('--no-downscale', action='store_false',
                        dest='allowdownscale',
                        help='prevent downscaling when using html or rss handler')
    parser.add_argument('-p', '--parallel', action='store', type=int, default=1,
                        help='fetch comics in parallel. Specify the number of connections')
    parser.add_argument('--adult', action='store_true',
                        help='confirms that you are old enough to view adult content')
    parser.add_argument('--allow-multiple', action='store_true',
                        help='allows multiple instances to run at the same time.'
                        ' Use if you know what you are doing.')
    # used for development testing prev/next matching
    parser.add_argument('--dry-run', action='store_true',
                        help=argparse.SUPPRESS)
    # multimatch is only used for development, eg. testing if all comics of
    # a scripted plugin are working
    parser.add_argument('--multimatch', action='store_true',
                        help=argparse.SUPPRESS)
    # List all comic modules, even those normally suppressed, because they
    # are not "real" (moved & removed)
    parser.add_argument('--list-all', action='store_true',
                        help=argparse.SUPPRESS)
    parser.add_argument('comic', nargs='*',
                        help='comic module name (including case insensitive substrings)')
    # Shell completion support is optional; silently skip if argcomplete
    # is not installed.
    try:
        import argcomplete
        argcomplete.autocomplete(parser)
    except ImportError:
        pass
    return parser
def display_version(verbose):
    """Display application name, version, copyright and license.

    With ``verbose`` truthy, additionally query the online updater and
    report whether a newer release is available.  Always returns 0.
    """
    print(configuration.App)
    print("Using Python {} ({}) on {}".format(platform.python_version(),
          platform.python_implementation(), platform.platform()))
    print(configuration.Copyright)
    print(configuration.Freeware)
    print("For support see", configuration.SupportUrl)
    if verbose:
        # search for updates
        from .updater import check_update
        # result is True when the check itself worked; value then holds
        # either None (up to date) or a (version, url) pair.
        result, value = check_update()
        if result:
            if value:
                version, url = value
                if url is None:
                    # current version is newer than online version
                    text = ('Detected local or development version %(currentversion)s. '
                            'Available version of %(app)s is %(version)s.')
                else:
                    # display update link
                    text = ('A new version %(version)s of %(app)s is '
                            'available at %(url)s.')
                attrs = dict(version=version, app=AppName,
                             url=url, currentversion=__version__)
                print(text % attrs)
            else:
                # The update check failed; value carries the error message.
                if value is None:
                    value = 'invalid update file syntax'
                text = ('An error occured while checking for an '
                        'update of %(app)s: %(error)s.')
                attrs = dict(error=value, app=AppName)
                print(text % attrs)
    return 0
def set_output_info(options):
    """Propagate the verbosity count and timestamp flag to the global writer."""
    out.level = 0
    out.level = out.level + options.verbose
    out.timestamps = options.timestamps
def display_help(options):
    """Print help for every selected comic module.

    Returns the number of modules whose help could not be shown, or 2
    when the selection itself is invalid.
    """
    failure_count = 0
    try:
        matches = director.getScrapers(options.comic, options.basepath, listing=True)
        for scraperobj in matches:
            failure_count = failure_count + display_comic_help(scraperobj)
    except ValueError as msg:
        out.exception(msg)
        return 2
    return failure_count
def display_comic_help(scraperobj):
    """Print help for one comic module; return 0 on success, 1 on error.

    The output context is switched to the scraper's name for the duration
    of the call and restored afterwards.
    """
    saved_context = out.context
    out.context = scraperobj.name
    try:
        out.info('URL: {}'.format(scraperobj.url))
        out.info('Language: {}'.format(scraperobj.language()))
        if scraperobj.adult:
            out.info(u"Adult comic, use option --adult to fetch.")
        reasons = scraperobj.getDisabledReasons()
        if reasons:
            out.info(u"Disabled: " + " ".join(reasons.values()))
        helptext = scraperobj.help
        if helptext:
            for helpline in helptext.splitlines():
                out.info(helpline)
    except ValueError as msg:
        out.exception(msg)
        return 1
    else:
        return 0
    finally:
        out.context = saved_context
def vote_comics(options):
    """Cast a vote for every comic module matching the selection.

    Returns the number of failed votes (an invalid selection counts as
    one failure).
    """
    failure_count = 0
    try:
        matches = director.getScrapers(
            options.comic, options.basepath, options.adult, options.multimatch)
        for scraperobj in matches:
            failure_count = failure_count + vote_comic(scraperobj)
    except ValueError as msg:
        out.exception(msg)
        failure_count += 1
    return failure_count
def vote_comic(scraperobj):
    """Vote for one comic scraper; return 1 on failure, 0 on success."""
    saved_context = out.context
    out.context = scraperobj.name
    result = 0
    try:
        scraperobj.vote()
        out.info(u'Vote submitted.')
    except Exception as msg:
        out.exception(msg)
        result = 1
    finally:
        out.context = saved_context
    return result
def run(options):
    """Execute comic commands.

    Dispatches on the parsed options: version/list/help/vote actions run
    and return immediately; otherwise the selected comics are fetched.
    Returns a process exit code (0 for success).
    """
    set_output_info(options)
    # ensure only one instance of dosage is running
    if not options.allow_multiple:
        singleton.SingleInstance()
    if options.version:
        return display_version(options.verbose)
    if options.list:
        return do_list()
    if options.singlelist or options.list_all:
        return do_list(column_list=False, verbose=options.verbose,
                       listall=options.list_all)
    # after this a list of comic strips is needed
    if not options.comic:
        out.warn(u'No comics specified, bailing out!')
        return 1
    # Pull in user-provided comic modules before resolving names.
    add_user_scrapers()
    if options.modulehelp:
        return display_help(options)
    if options.vote:
        return vote_comics(options)
    return director.getComics(options)
def add_user_scrapers():
    """Add extra comic modules from the user data directory. This uses two
    different locations: The "system-native" location and paths matching the
    XDG basedir spec. While XDG isn't a thing on macOS and Windows, some users
    (and developers) like to use these paths cross-platform, therefore we
    support both."""
    # A set: on Linux both lookups yield the same path and collapse to one.
    dirs = set()
    dirs.add(userdirs.user_data_dir)
    with xdg_system():
        dirs.add(userdirs.user_data_dir)
    # Rebind to a generator of the per-directory "plugins" subfolders.
    dirs = (Path(x) / 'plugins' for x in dirs)
    for d in dirs:
        allscrapers.adddir(d)
@contextlib.contextmanager
def xdg_system():
    """Context manager that temporarily forces appdirs to report the platform
    as "linux2", so that path lookups follow the XDG base dir spec.
    """
    saved_system = appdirs.system
    try:
        appdirs.system = 'linux2'
        yield
    finally:
        appdirs.system = saved_system
def do_list(column_list=True, verbose=False, listall=False):
    """List available comics.

    Output goes through a pager.  column_list selects the multi-column
    layout; verbose only applies to the single-column layout; listall
    includes normally-suppressed (moved/removed) modules.  Returns 0.
    """
    add_user_scrapers()
    with out.pager():
        out.info(u'Available comic scrapers:')
        out.info(u'Comics tagged with [{}] require age confirmation'
                 ' with the --adult option.'.format(TAG_ADULT))
        out.info(u'Non-english comics are tagged with [%s].' % TAG_LANG)
        scrapers = sorted(allscrapers.get(listall),
                          key=lambda s: s.name.lower())
        if column_list:
            num, disabled = do_column_list(scrapers)
        else:
            num, disabled = do_single_list(scrapers, verbose=verbose)
        out.info(u'%d supported comics.' % num)
        # Explain any [dis:REASON] tags that the listing produced.
        if disabled:
            out.info('')
            out.info(u'Some comics are disabled, they are tagged with'
                     ' [{}:REASON], where REASON is one of:'.format(TAG_DISABLED))
            for k in disabled:
                out.info(u' %-10s %s' % (k, disabled[k]))
    return 0
def do_single_list(scrapers, verbose=False):
    """Print scraper names one per line.

    Returns a (count, disabled-reasons) pair; the reasons dict is filled
    as a side effect of tagging each name.
    """
    reasons = {}
    for scraper in scrapers:
        if not verbose:
            out.info(get_tagged_scraper_name(scraper, reasons=reasons))
        else:
            display_comic_help(scraper)
    # NOTE(review): the original returned len(scrapers) + 1; preserved as-is.
    return len(scrapers) + 1, reasons
def do_column_list(scrapers):
    """Print scraper names in as many columns as fit the output width.

    Returns a (count, disabled-reasons) pair; the reasons dict is filled
    as a side effect of tagging each name.
    """
    reasons = {}
    width = out.width
    # limit name length so at least two columns are there
    name_limit = (width // 2) - 8
    tagged = [get_tagged_scraper_name(scraper, limit=name_limit, reasons=reasons)
              for scraper in scrapers]
    total = len(tagged)
    column_width = max(len(name) for name in tagged)
    per_row = max(width // (column_width + 1), 1)
    for start in range(0, total, per_row):
        row = tagged[start:start + per_row]
        out.info(u''.join(name.ljust(column_width) for name in row))
    return total, reasons
TAG_ADULT = "adult"
TAG_LANG = "lang"
TAG_DISABLED = "dis"
def get_tagged_scraper_name(scraperobj, limit=None, reasons=None):
    """Return the scraper name, optionally truncated, with status tags.

    Tags cover adult content, non-English language, and disabled reasons.
    When ``reasons`` is a dict, the scraper's disabled reasons are merged
    into it as a side effect.
    """
    markers = []
    if scraperobj.adult:
        markers.append(TAG_ADULT)
    if scraperobj.lang != "en":
        markers.append("%s:%s" % (TAG_LANG, scraperobj.lang))
    disabled = scraperobj.getDisabledReasons()
    if disabled and reasons is not None:
        reasons.update(disabled)
    markers.extend("%s:%s" % (TAG_DISABLED, reason) for reason in disabled)
    suffix = " [" + ", ".join(markers) + "]" if markers else ""
    name = scraperobj.name if limit is None else strlimit(scraperobj.name, limit)
    return name + suffix
def main(args=None):
    """Parse options and execute commands; return a process exit code."""
    try:
        parser = setup_options()
        options = parser.parse_args(args=args)
        options.basepath = os.path.expanduser(options.basepath)
        exit_code = run(options)
    except KeyboardInterrupt:
        print("Aborted.")
        exit_code = 1
    except Exception:
        internal_error()
        exit_code = 2
    return exit_code
| 35.22043 | 94 | 0.637613 |
28aa67c4149f1e781eb6fa7dde087d92514c1c20 | 21,239 | py | Python | keylime/elchecking/tests.py | utkarsh2102/keylime | a2a8ef810d35f29a39fc43e9a7e8ff2e6b9fffd5 | [
"Apache-2.0"
] | null | null | null | keylime/elchecking/tests.py | utkarsh2102/keylime | a2a8ef810d35f29a39fc43e9a7e8ff2e6b9fffd5 | [
"Apache-2.0"
] | null | null | null | keylime/elchecking/tests.py | utkarsh2102/keylime | a2a8ef810d35f29a39fc43e9a7e8ff2e6b9fffd5 | [
"Apache-2.0"
] | null | null | null | import abc
import re
import typing
# This module defines the abstraction of a Test (of JSON data)
# and several specific test classes.
# A Test can be used multiple times, even concurrently.
# Data is the type of Python data that corresponds to JSON values.
Data = typing.Union[int, float, str, bool, typing.Tuple['Data', ...],
                    typing.Mapping[str, 'Data'], None]
# Globals is a dict of variables for communication among tests.
# There is a distinct dict for each top-level use of a test.
# Tests mutate this dict (DelayToFields.initialize_globals assigns keys
# and DelayedField appends to lists stored here), so the accurate type is
# MutableMapping rather than the read-only Mapping.
Globals = typing.MutableMapping[str, Data]
# PCR_Contents maps digest name to map from PCR index to PCR value.
# Here digest name is something like 'sha256'.
# Each PCR index is a decimal string, so that this can be JSON data
PCR_Contents = typing.Mapping[str, typing.Mapping[str, int]]
class Test(metaclass=abc.ABCMeta):
    """Test is something that can examine a value and either approve it or give a reason for rejection"""
    @abc.abstractmethod
    def why_not(self, globs: Globals, subject: Data) -> str:
        """Test the given value, return empty string for pass, explanation for fail.

        The explanation is (except in deliberate exceptions) English that
        makes a sentence when placed after a noun phrase identifying the subject.
        The test can read and write in the given globs dict.
        Subclasses must override this method; the empty string is the only
        value that signals acceptance.
        """
        raise NotImplementedError
# type_test constructs a test of data type that is expected to pass
def type_test(t) -> typing.Callable[[typing.Any], bool]:
    """Build a checker verifying that a value is an instance of type ``t``.

    The returned callable yields True on success and raises Exception
    with a descriptive message when the value has the wrong type."""
    def check(v: typing.Any) -> bool:
        if not isinstance(v, t):
            raise Exception(f'{v!r} is a {type(v)} rather than a {t}')
        return True
    return check
def list_test(elt_test: typing.Callable[[typing.Any], bool]) -> typing.Callable[[typing.Any], bool]:
    """Return a checker for lists whose every element passes ``elt_test``."""
    def check(dat: typing.Any) -> bool:
        type_test(list)(dat)
        for member in dat:
            elt_test(member)
        return True
    return check
def dict_test(dom_test: typing.Callable[[typing.Any], bool], rng_test: typing.Callable[[typing.Any], bool]) -> typing.Callable[[typing.Any], bool]:
    """Return a checker for dicts whose keys pass ``dom_test`` and values pass ``rng_test``."""
    def check(dat: typing.Any) -> bool:
        type_test(dict)(dat)
        for key, value in dat.items():
            dom_test(key)
            rng_test(value)
        return True
    return check
def obj_test(**field_tests: typing.Callable[[typing.Any], bool]) -> typing.Callable[[typing.Any], bool]:
    """Return a checker for dicts with str keys and per-field value checkers.

    Every key named in ``field_tests`` must be present; extra keys are
    allowed but must still be strings."""
    def check(dat: typing.Any) -> bool:
        type_test(dict)(dat)
        key_check = type_test(str)
        for key, value in dat.items():
            key_check(key)
            if key in field_tests:
                field_tests[key](value)
        missing = set(field_tests.keys()) - set(dat.keys())
        if missing:
            raise Exception(f'{dat!r} lacks fields {missing}')
        return True
    return check
class AcceptAll(Test):
    """A test that approves every subject unconditionally."""
    def why_not(self, _: Globals, subject: Data) -> str:
        """Always pass; the empty string signals acceptance."""
        return ""
class RejectAll(Test):
    """A test that rejects every subject with a fixed explanation."""
    def __init__(self, why: str):
        """Store the rejection reason; it must be truthy."""
        super().__init__()
        if why:
            self.why = why
        else:
            raise Exception(f'the truth value of {why!r} is false')
    def why_not(self, _: Globals, subject: Data) -> str:
        """Always fail, returning the configured reason."""
        return self.why
class And(Test):
    """Conjunction of subsidiary tests.

    The tests run in order, stopping at the first failure."""
    def __init__(self, *tests: Test):
        super().__init__()
        check = type_test(Test)
        for member in tests:
            check(member)
        self.tests = tests
    def why_not(self, globs: Globals, subject: Data) -> str:
        """Return the first failure reason, or '' when every test passes."""
        for member in self.tests:
            why = member.why_not(globs, subject)
            if why:
                return why
        return ''
class Or(Test):
    """Disjunction of subsidiary tests.

    The tests run in order, stopping at the first success."""
    def __init__(self, *tests: Test):
        super().__init__()
        check = type_test(Test)
        for member in tests:
            check(member)
        self.tests = tests
    def why_not(self, globs: Globals, subject: Data) -> str:
        """Return '' if any test passes; otherwise all reasons, bracketed."""
        if not self.tests:
            return 'does not pass empty disjunction'
        collected = []
        for member in self.tests:
            why = member.why_not(globs, subject)
            if not why:
                return ''
            collected.append(why)
        return '[' + ', '.join(collected) + ']'
class Dispatcher(Test):
    """Apply a specific test for each key tuple.

    This kind of test applies when the subject is a dict and
    it is desired to apply a different test depending on
    the value(s) of one or more entries."""
    def __init__(self, key_names: typing.Tuple[str, ...]):
        """Initialize a Dispatcher Test.

        key_names identifies the subject dict entries that determine
        which subsidiary test to apply."""
        super().__init__()
        if len(key_names) < 1:
            raise Exception('Dispatcher given empty list of key names')
        list(map(type_test(str), key_names))
        self.key_names = key_names
        # maps a tuple of key values to the Test registered via set()
        self.tests = dict()
    def set(self, key_vals: typing.Tuple[str, ...], test: Test) -> None:
        """Set the test for the given value tuple.

        Raises on a length mismatch with key_names or on a duplicate
        registration for the same value tuple."""
        if len(key_vals) != len(self.key_names):
            raise Exception(
                f'{key_vals!a} does not match length of {self.key_names}')
        if key_vals in self.tests:
            raise Exception(f'multiple tests for {key_vals!a}')
        self.tests[key_vals] = test
    def why_not(self, globs: Globals, subject: Data) -> str:
        """Look up the test matching the subject's key values and delegate."""
        if not isinstance(subject, dict):
            return 'is not a dict'
        # Build the dispatch tuple from the subject's entries.
        key_vals = tuple()
        for kn in self.key_names:
            if kn not in subject:
                return f'has no {kn}'
            key_vals += (subject[kn],)
        test = self.tests.get(key_vals)
        if test is None:
            return f'has unexpected {self.key_names} combination {key_vals}'
        return test.why_not(globs, subject)
class FieldTest(Test):
    """Applies a subsidiary test to one named field of a dict subject."""
    def __init__(self, field_name: str, field_test: Test, show_name: bool = True):
        super().__init__()
        type_test(str)(field_name)
        type_test(Test)(field_test)
        self.field_name = field_name
        self.field_test = field_test
        self.show_name = show_name
    def why_not(self, globs: Globals, subject: Data) -> str:
        """Delegate to field_test, optionally prefixing failures with the field name."""
        if not isinstance(subject, dict):
            return 'is not a dict'
        if self.field_name not in subject:
            return f'has no {self.field_name!a} field'
        why = self.field_test.why_not(globs, subject[self.field_name])
        if why and self.show_name:
            why = self.field_name + ' ' + why
        return why
class FieldsTest(And):
    """Conjunction of FieldTests, one per keyword argument."""
    def __init__(self, **fields: Test):
        field_tests = []
        for name, test in fields.items():
            field_tests.append(FieldTest(name, test))
        super().__init__(*field_tests)
class IterateTest(Test):
    """Applies a subsidiary test to every member of a list subject."""
    def __init__(self, elt_test: Test, show_elt: bool = False):
        super().__init__()
        self.elt_test = elt_test
        self.show_elt = show_elt
    def why_not(self, globs: Globals, subject: Data) -> str:
        """Fail on the first element rejected by elt_test."""
        if not isinstance(subject, list):
            return 'is not a list'
        for position, member in enumerate(subject):
            why = self.elt_test.why_not(globs, member)
            if why:
                if self.show_elt:
                    return f'{member!a} ' + why
                return f'[{position}] ' + why
        return ''
class TupleTest(Test):
    """Applies a fixed sequence of tests to corresponding list members.

    The tests run in order, stopping at the first failure; with pad=True
    a short subject is extended with None values."""
    def __init__(self, *member_tests: Test, pad: bool = False):
        super().__init__()
        checker = type_test(Test)
        for member in member_tests:
            checker(member)
        self.member_tests = member_tests
        self.pad = pad
    def why_not(self, globs: Globals, subject: Data) -> str:
        if not isinstance(subject, list):
            return 'is not a list'
        subject_len = len(subject)
        test_len = len(self.member_tests)
        # NOTE: these messages keep their historical leading space.
        if subject_len > test_len:
            return f' is longer ({subject_len}) than the applicable tests ({test_len})'
        if subject_len < test_len and not self.pad:
            return f' is shorter ({subject_len}) than the applicable tests ({test_len})'
        for idx, member in enumerate(self.member_tests):
            value = subject[idx] if idx < subject_len else None
            why = member.why_not(globs, value)
            if why:
                return f'[{idx}] ' + why
        return ''
class DelayedField(Test):
    """Accepts any subject, stashing it in a global list for later testing."""
    def __init__(self, delayer: 'DelayToFields', field_name: str):
        super().__init__()
        self.delayer = delayer
        self.field_name = field_name
    def why_not(self, globs: Globals, subject: Data) -> str:
        """Append the subject to the accumulator list kept in globs."""
        accumulator = globs[self.field_name]
        if isinstance(accumulator, list):
            accumulator.append(subject)
            return ''
        return f'malformed test: global {self.field_name} is not a list'
class DelayInitializer(Test):
    """Resets the accumulator globals of a DelayToFields, accepting any subject."""
    def __init__(self, delayer: 'DelayToFields'):
        super().__init__()
        self.delayer = delayer
    def why_not(self, globs, subject):
        """Initialize the delayer's globals; never rejects."""
        self.delayer.initialize_globals(globs)
        return ''
class DelayToFields(Test):
    """A test to apply after stashing fields to test.

    For each field, accumulates a list of values
    in a correspondingly-named global.
    As a test, ignores the given subject and instead applies the
    configured fields_test to the record of accumulated value lists.
    """
    def __init__(self, fields_test: Test, *field_names: str):
        super().__init__()
        self.field_names = field_names
        self.fields_test = fields_test
    def initialize_globals(self, globs: Globals) -> None:
        """Initialize for a new pass over data.

        Resets every accumulator global to an empty list."""
        for field_name in self.field_names:
            globs[field_name] = []
    def get_initializer(self) -> DelayInitializer:
        """Get a Test that accepts the subject and initializes the relevant globals"""
        return DelayInitializer(self)
    def get(self, field_name: str) -> DelayedField:
        """Return a Test that adds the subject to the list stashed for later evaluation"""
        if field_name not in self.field_names:
            raise Exception(f'{field_name} not in {self.field_names}')
        return DelayedField(self, field_name)
    def why_not(self, globs: Globals, subject: Data) -> str:
        """Test the stashed field values.

        The subject is ignored; fields_test receives a dict mapping each
        field name to its accumulated list (None if never initialized)."""
        delayed = dict()
        for field_name in self.field_names:
            delayed[field_name] = globs.get(field_name, None)
        return self.fields_test.why_not(globs, delayed)
class IntEqual(Test):
    """Accepts exactly one int value."""
    def __init__(self, expected: int):
        super().__init__()
        type_test(int)(expected)
        self.expected = expected
    def why_not(self, _: Globals, subject: Data) -> str:
        if not isinstance(subject, int):
            # message keeps the historical wording
            return 'is not a int'
        return '' if subject == self.expected else f'is not {self.expected}'
class StringEqual(Test):
    """Accepts exactly one string value."""
    def __init__(self, expected: str):
        super().__init__()
        type_test(str)(expected)
        self.expected = expected
    def why_not(self, _: Globals, subject: Data) -> str:
        if not isinstance(subject, str):
            return 'is not a str'
        return '' if subject == self.expected else f'is not {self.expected!a}'
class RegExp(Test):
    """Accepts strings that fully match a regular expression."""
    def __init__(self, pattern: str, flags=0):
        super().__init__()
        self.regexp = re.compile(pattern, flags)
    def why_not(self, _: Globals, subject: Data) -> str:
        if not isinstance(subject, str):
            return 'is not a str'
        matched = self.regexp.fullmatch(subject)
        return '' if matched else f'does not match {self.regexp.pattern}'
# hash algorithm -> hash value in hex (sans leading 0x)
Digest = typing.Mapping[str, str]
class DigestsTest(Test):
    """Tests whether subject has a digest that is in a list of good ones"""
    def __init__(self, good_digests_list: typing.Iterable[Digest]):
        """good_digests_list is a list of good {alg:hash}.

        The entries are inverted into self.good_digests so membership
        checks are per-algorithm set lookups."""
        super().__init__()
        self.good_digests = dict()
        'map from alg to set of good digests'
        for good_digests in good_digests_list:
            type_test(dict)(good_digests)
            for alg, hash_val in good_digests.items():
                if alg in self.good_digests:
                    self.good_digests[alg].add(hash_val)
                else:
                    self.good_digests[alg] = set((hash_val,))
    def why_not(self, _: Globals, subject: Data) -> str:
        """Accept if any entry of subject['Digests'] matches a good digest.

        Every entry is shape-validated ({'AlgorithmId': str, 'Digest': str})
        even when its algorithm is not among the good ones."""
        if not isinstance(subject, dict):
            return 'is not a dict'
        if 'Digests' not in subject:
            return 'has no Digests'
        digest_list = subject['Digests']
        if not isinstance(digest_list, list):
            return 'Digests is not a list'
        for idx, subject_digest in enumerate(digest_list):
            if not isinstance(subject_digest, dict):
                return f'Digests[{idx}] is {subject_digest!r}, not a dict'
            if 'AlgorithmId' not in subject_digest:
                return f'digest {idx} has no AlgorithmId'
            alg = subject_digest['AlgorithmId']
            if not isinstance(alg, str):
                return f'Digests[{idx}].AlgorithmId is {alg!r}, not a str'
            if 'Digest' not in subject_digest:
                return f'digest {idx} has no Digest'
            hash_val = subject_digest['Digest']
            if not isinstance(hash_val, str):
                return f'Digests[{idx}].Digest is {hash_val!r}, not a str'
            # Unknown algorithms are skipped rather than rejected.
            if alg not in self.good_digests:
                continue
            if hash_val in self.good_digests[alg]:
                return ''
        return f'has no digest approved by {self.good_digests}'
class DigestTest(DigestsTest):
    """Tests whether subject has a digest that equals a given one"""
    def __init__(self, good_digest: Digest):
        # Single-digest convenience wrapper over DigestsTest.
        super().__init__([good_digest])
StrOrRE = typing.Union[str, typing.Pattern]
class VariableTest(Test):
    """Test whether a given variable has value passing given test"""
    def __init__(self, variable_name: str, unicode_name: StrOrRE, data_test: Test):
        """variable_name and unicode_name are as in the parsed event; data_test applies to VariableData"""
        super().__init__()
        self.variable_name = variable_name
        if not isinstance(unicode_name, (str, typing.Pattern)):
            raise Exception(
                f'unicode_name={unicode_name!r} is neither a str nor an re.Pattern')
        self.unicode_name = unicode_name
        self.data_test = data_test
    def why_not(self, globs: Globals, subject: Data) -> str:
        """Return '' if subject.Event names the expected variable and its
        VariableData passes data_test, otherwise a reason string."""
        if not isinstance(subject, dict):
            return 'is not a dict'
        if 'Event' not in subject:
            return 'has no Event field'
        evt = subject['Event']
        if not isinstance(evt, dict):
            return 'Event is not a dict'
        if 'VariableName' not in evt:
            return 'Event has no VariableName field'
        variable_name = evt['VariableName']
        if variable_name != self.variable_name:
            return f'Event.VariableName is {variable_name} rather than {self.variable_name}'
        if 'UnicodeName' not in evt:
            return 'Event has no UnicodeName field'
        unicode_name = evt['UnicodeName']
        if 'VariableData' not in evt:
            return 'Event has no VariableData field'
        if not isinstance(unicode_name, str):
            return 'Event.UnicodeName is not a str'
        variable_data = evt['VariableData']
        # unicode_name matcher: exact comparison for a str, full regex match
        # for a compiled pattern (see __init__ type check).
        if isinstance(self.unicode_name, str):
            if unicode_name != self.unicode_name:
                return f'Event.UnicodeName is {unicode_name} rather than {self.unicode_name}'
        elif not self.unicode_name.fullmatch(unicode_name):
            return f'Event.UnicodeName, {unicode_name}, does not match {self.unicode_name.pattern}'
        return self.data_test.why_not(globs, variable_data)
class VariableDispatch(FieldTest):
    """Dispatch to a per-variable test, keyed on the variable's identity."""

    def __init__(self):
        self.vd = Dispatcher(('VariableName', 'UnicodeName'))
        super().__init__('Event', self.vd)

    def set(self, variable_name: str, unicode_name: str, data_test: Test) -> None:
        """Register data_test as the test for one specific variable."""
        key = (variable_name, unicode_name)
        self.vd.set(key, FieldTest('VariableData', data_test))
# A Signature is a mapping with the following fields:
# - SignatureOwner: a string UUID identifying the owner
# - SignatureData: a hex string without leading 0x
Signature = typing.Mapping[str, str]
class SignatureTest(And):
    """Compares the subject against one particular signature."""

    def __init__(self, owner: str, data: str):
        """owner is SignatureOwner, data is SignatureData"""
        owner_test = FieldTest('SignatureOwner', StringEqual(owner))
        data_test = FieldTest('SignatureData', StringEqual(data))
        super().__init__(owner_test, data_test)
class SignatureSetMember(Or):
    """Passes when the subject matches any signature in the given list."""

    def __init__(self, sigs: typing.Iterable[Signature]):
        member_tests = []
        for sig in sigs:
            member_tests.append(
                SignatureTest(sig['SignatureOwner'], sig['SignatureData']))
        super().__init__(*member_tests)
class KeySubset(IterateTest):
    """Checks SignatureType equals `sig_type` and that every entry of Keys is
    a member of the given signature set (applied through IterateTest)."""
    def __init__(self, sig_type: str, keys: typing.Iterable[typing.Mapping[str, str]]):
        super().__init__(And(
            FieldTest('SignatureType', StringEqual(sig_type)),
            FieldTest('Keys', IterateTest(SignatureSetMember(keys)))))
class FieldsMismatchError(Exception):
    """Raised when the expected and actual sets of field names differ."""

    def __init__(self, expected, actual):
        """expected/actual are sets of field-name strings."""
        super().__init__(expected, actual)
        type_test(set)(expected)
        type_test(set)(actual)
        for name in expected:
            type_test(str)(name)
        for name in actual:
            type_test(str)(name)
        self.expected = expected
        self.actual = actual

    def __str__(self):
        return f'expected fields {self.expected} but got {self.actual}'
class SupersetOfDicts(Test):
    """Tests that the subject is a list of dicts with at least certain members

    All dicts must have the same field names"""
    @staticmethod
    def dict_to_tuple(it: dict, field_names: typing.Tuple[str]) -> typing.Tuple:
        """Project dict `it` onto `field_names` as a tuple in that order.

        Raises FieldsMismatchError if `it`'s key set differs from
        `field_names`.
        """
        actual_keys = set(it.keys())
        expected_keys = set(field_names)
        if actual_keys != expected_keys:
            raise FieldsMismatchError(expected_keys, actual_keys)
        return tuple(it.get(field_name) for field_name in field_names)
    def __init__(self, reqs: typing.Iterable[dict], field_names: typing.Tuple[str]):
        """reqs are the required dicts; field_names their common key set."""
        # Fixed: initialize the Test base class, as every other Test subclass
        # in this module does.
        super().__init__()
        list(map(type_test(dict), reqs))
        type_test(tuple)(field_names)
        list(map(type_test(str), field_names))
        self.field_names = field_names
        # Store requirements as a set of tuples for fast superset checking.
        self.reqs = {SupersetOfDicts.dict_to_tuple(
            req, field_names) for req in reqs}
    def why_not(self, globs: Globals, subject: Data) -> str:
        """Return '' if subject contains every required field combination."""
        if not isinstance(subject, list):
            return 'is not a list'
        actual = set()
        for elt in subject:
            if not isinstance(elt, dict):
                return f'member {elt} is not a dict'
            try:
                tup = SupersetOfDicts.dict_to_tuple(elt, self.field_names)
            except FieldsMismatchError:
                return f'member {elt!r} does not have the right set of field names {self.field_names}'
            actual.add(tup)
        missing = self.reqs - actual
        if not missing:
            return ''
        return f'lacks {self.field_names} combinations {missing}'
class KeySuperset(TupleTest):
    """Tests that there is one Keys dict containing at least certain members"""

    def __init__(self, sig_type: str, keys: typing.Iterable[Signature]):
        type_check = FieldTest('SignatureType', StringEqual(sig_type))
        keys_check = FieldTest(
            'Keys', SupersetOfDicts(keys, ('SignatureOwner', 'SignatureData')))
        super().__init__(And(type_check, keys_check))
| 35.695798 | 147 | 0.6247 |
124c66bfdeee2d79b2637a40a0d2f27b133fc43e | 449 | py | Python | vehicles/views/tools.py | kackey0-1/drf-sample | 914907320bc317240b4d7c07968b6d4ea80b4511 | [
"MIT"
] | null | null | null | vehicles/views/tools.py | kackey0-1/drf-sample | 914907320bc317240b4d7c07968b6d4ea80b4511 | [
"MIT"
] | 6 | 2021-03-30T12:05:07.000Z | 2021-04-05T14:21:46.000Z | vehicles/views/tools.py | kackey0-1/drf-sample | 914907320bc317240b4d7c07968b6d4ea80b4511 | [
"MIT"
] | null | null | null | from rest_framework.decorators import api_view
from rest_framework.response import Response
from vehicles.models import Tool
from vehicles.serializers import ToolSerializer
@api_view(['GET'])
def list_tools(request):
    """GET endpoint returning a fixed list of tools serialized as JSON.

    Serializes two in-memory Tool instances with ToolSerializer (many=True)
    and responds with them under a 'tools' key.
    """
    tools = [
        Tool('hammer', 'Mastercraft'),
        Tool('wrench', 'Husky'),
    ]
    serializer = ToolSerializer(tools, many=True)
    content = {
        'tools': serializer.data,
    }
    return Response(content)
| 21.380952 | 49 | 0.688196 |
32426589939455553d2c076a8c116fb01d312728 | 233 | py | Python | tests/test_wikipedia.py | simplythebex/hypermodern-python-practice | 48d3cce5e100bc57dfa894d3afbfc69e55ec9ab2 | [
"MIT"
] | null | null | null | tests/test_wikipedia.py | simplythebex/hypermodern-python-practice | 48d3cce5e100bc57dfa894d3afbfc69e55ec9ab2 | [
"MIT"
] | null | null | null | tests/test_wikipedia.py | simplythebex/hypermodern-python-practice | 48d3cce5e100bc57dfa894d3afbfc69e55ec9ab2 | [
"MIT"
] | null | null | null | from src.hypermodern_python_practice import wikipedia
def test_random_page_uses_given_language(mock_requests_get):
    """random_page(language=...) should query that language's Wikipedia host."""
    wikipedia.random_page(language="de")
    # mock_requests_get is a fixture (presumably patching requests.get); the
    # first positional argument of its recorded call is the requested URL.
    args, _ = mock_requests_get.call_args
    assert "de.wikipedia.org" in args[0] | 38.833333 | 60 | 0.828326 |
0a788b5f5f88047e86e644cc6873409598dc4c7d | 7,719 | py | Python | qiskit_machine_learning/datasets/dataset_helper.py | declanmillar/qiskit-machine-learning | 3bf98b67618885bd09f4abbff7e381537a646fac | [
"Apache-2.0"
] | null | null | null | qiskit_machine_learning/datasets/dataset_helper.py | declanmillar/qiskit-machine-learning | 3bf98b67618885bd09f4abbff7e381537a646fac | [
"Apache-2.0"
] | null | null | null | qiskit_machine_learning/datasets/dataset_helper.py | declanmillar/qiskit-machine-learning | 3bf98b67618885bd09f4abbff7e381537a646fac | [
"Apache-2.0"
] | 1 | 2022-01-24T15:26:13.000Z | 2022-01-24T15:26:13.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Data set helper """
from copy import deepcopy
from typing import Dict, List, Tuple
import numpy as np
from sklearn import preprocessing
def discretize_and_truncate(
    data,
    min_max_bin_centers,
    num_qubits,
    return_data_grid_elements=False,
    return_prob=False,
    prob_non_zero=True,
):
    """
    Discretize & truncate classical data to enable digital encoding in qubit registers
    whereby the data grid is ``[[grid elements dim 0], ..., [grid elements dim k]]``.
    For each dimension ``k``, the domain is split into ``(2 ** num_qubits[k])`` bins equally spaced
    and equally sized, each centered in
    ``min_max_bin_centers[k, 0], ..., min_max_bin_centers[k, 1]``. Bins have size equal to
    ``(min_max_bin_centers[k, 1] - min_max_bin_centers[k, 0]) / (2 ** num_qubits[k] - 1)``.
    Notice that:
    * Every sample in data that falls out of the bins is discarded.
    * The leftmost bin extends both to the left and to the right around its center,
      therefore ``min_max_bin_centers[k, 0]`` is not the left bound for truncation, but only
      the center of the leftmost bin. Similar considerations hold for ``min_max_bin_centers[k, 1]``
      on the right.
    Args:
        data (list or array or np.array): training data (int or float) of dimension ``k``.
        min_max_bin_centers (list or array or np.ndarray): ``k`` min/max data values
            ``[[min_center_0, max_center_0],...,[min_center_k-1, max_center_k-1]]``.
            If univariate data: ``[min_center_0, max_center_0]``.
        num_qubits (list or array or np.array): ``k`` numbers of qubits to determine
            representation resolution, i.e. n qubits enable the representation of 2**n
            values ``[num_qubits_0,..., num_qubits_k-1]``.
        return_data_grid_elements (Bool): if ``True`` - return an array with the data grid
            elements.
        return_prob (Bool): if ``True`` - return a normalized frequency count of the discretized and
            truncated data samples.
        prob_non_zero (Bool): if ``True`` - set 0 values in the prob_data to ``10^-10`` to avoid
            potential problems when using the probabilities in loss functions - division by 0.
    Returns:
        array: discretized and truncated data.
        array: data grid ``[[grid elements dim 0],..., [grid elements dim k]]``.
        array: grid elements, ``Product_j=0^k-1 2**num_qubits_j`` element vectors.
        array: data probability, normalized frequency count sorted from smallest to biggest element.
    """
    # Truncate the data
    # NOTE(review): the tuple indexing min_max_bin_centers[j, 0] below assumes
    # an ndarray; a 2-D python list input is not converted here -- confirm
    # that callers pass arrays for the multivariate case.
    if np.ndim(min_max_bin_centers) == 1:
        min_max_bin_centers = np.reshape(min_max_bin_centers, (1, len(min_max_bin_centers)))
    data = data.reshape((len(data), len(num_qubits)))
    temp = []
    for i, data_sample in enumerate(data):
        append = True
        for j, entry in enumerate(data_sample):
            # Discard the sample if any coordinate falls more than half a bin
            # width outside [min_center, max_center] in that dimension.
            if entry < min_max_bin_centers[j, 0] - 0.5 / (2 ** num_qubits[j] - 1) * (
                min_max_bin_centers[j, 1] - min_max_bin_centers[j, 0]
            ):
                append = False
            if entry > min_max_bin_centers[j, 1] + 0.5 / (2 ** num_qubits[j] - 1) * (
                min_max_bin_centers[j, 1] - min_max_bin_centers[j, 0]
            ):
                append = False
        if append:
            temp.append(list(data_sample))
    data = np.array(temp, dtype=float)
    # Fit the data to the data element grid
    for j, prec in enumerate(num_qubits):
        data_row = data[:, j]  # dim j of all data samples
        # prepare element grid for dim j
        elements_current_dim = np.linspace(
            min_max_bin_centers[j, 0], min_max_bin_centers[j, 1], (2**prec)
        )
        # find index for data sample in grid: shifting the values left by half
        # a bin width makes searchsorted snap each value to the nearest center
        index_grid = np.searchsorted(
            elements_current_dim,
            data_row - (elements_current_dim[1] - elements_current_dim[0]) * 0.5,
        )
        for k, index in enumerate(index_grid):
            data[k, j] = elements_current_dim[index]
        # Accumulate the per-dimension grids (data_grid) and the cartesian
        # product of all grid points seen so far (grid_elements).
        if j == 0:
            if len(num_qubits) > 1:
                data_grid = [elements_current_dim]
            else:
                data_grid = elements_current_dim
            grid_elements = elements_current_dim
        elif j == 1:
            temp = []
            for grid_element in grid_elements:
                for element_current in elements_current_dim:
                    temp.append([grid_element, element_current])
            grid_elements = temp
            data_grid.append(elements_current_dim)
        else:
            temp = []
            for grid_element in grid_elements:
                for element_current in elements_current_dim:
                    temp.append(grid_element + [element_current])
            grid_elements = deepcopy(temp)
            data_grid.append(elements_current_dim)
    data_grid = np.array(data_grid, dtype=object)
    data = np.reshape(data, (len(data), len(data[0])))
    if return_prob:
        # Normalized frequency count: one slot per grid element.
        if np.ndim(data) > 1:
            prob_data = np.zeros(int(np.prod(np.power(np.ones(len(data[0])) * 2, num_qubits))))
        else:
            prob_data = np.zeros(int(np.prod(np.power(np.array([2]), num_qubits))))
        for data_element in data:
            for i, element in enumerate(grid_elements):
                if all(data_element == element):
                    prob_data[i] += 1 / len(data)
        if prob_non_zero:
            # add epsilon to avoid 0 entries which can be problematic in loss functions (division)
            prob_data = [1e-10 if x == 0 else x for x in prob_data]
        if return_data_grid_elements:
            return data, data_grid, grid_elements, prob_data
        else:
            return data, data_grid, prob_data
    else:
        if return_data_grid_elements:
            return data, data_grid, grid_elements
        else:
            return data, data_grid
def features_and_labels_transform(
    dataset: Dict[str, np.ndarray], class_labels: List[str], one_hot: bool = True
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Converts a dataset into arrays of features and labels.

    Args:
        dataset: A dictionary in the format of {'A': numpy.ndarray, 'B': numpy.ndarray, ...}
        class_labels: A list of classes in the dataset
        one_hot (bool): if True - return one-hot encoded label

    Returns:
        A tuple of features as np.ndarray, label as np.ndarray
    """
    features = np.concatenate(list(dataset.values()))

    raw_labels = []
    for category, samples in dataset.items():
        raw_labels.extend([category] * samples.shape[0])

    if not raw_labels:
        # Empty dataset: no samples at all, return a zero-row label array.
        return features, np.zeros((0, len(class_labels)))

    if one_hot:
        encoder = preprocessing.OneHotEncoder()
        encoder.fit(np.array(class_labels).reshape(-1, 1))
        labels = encoder.transform(np.array(raw_labels).reshape(-1, 1))
        # OneHotEncoder may yield a sparse matrix; densify for a uniform API.
        if not isinstance(labels, np.ndarray):
            labels = np.array(labels.todense())
    else:
        encoder = preprocessing.LabelEncoder()
        encoder.fit(np.array(class_labels))
        labels = encoder.transform(np.array(raw_labels))

    return features, labels
| 40.203125 | 100 | 0.629615 |
53f68cdc695b4885dd12aa02bcfdf9c854a29fcb | 367 | py | Python | profit-loss.py | TheVishwajeetJagtap/Python-Day-3-04-03-21- | af790c3b97863dcd1d06ed9af43890a40ff6e0f0 | [
"MIT"
] | null | null | null | profit-loss.py | TheVishwajeetJagtap/Python-Day-3-04-03-21- | af790c3b97863dcd1d06ed9af43890a40ff6e0f0 | [
"MIT"
] | null | null | null | profit-loss.py | TheVishwajeetJagtap/Python-Day-3-04-03-21- | af790c3b97863dcd1d06ed9af43890a40ff6e0f0 | [
"MIT"
] | null | null | null | actual_cost=float(input("Enter Actual Cost of Product: "))
selling_cost=float(input("Enter Selling Cost of Product: "))
if actual_cost > selling_cost:
amt = actual_cost - selling_cost
print("Total Loss is : ",amt)
elif actual_cost < selling_cost:
amt = selling_cost - actual_cost
print("Total Profit is : ",amt)
else:
print("No Profit No Loss!")
| 30.583333 | 60 | 0.705722 |
0e61b9ad878606b3ae550d5c68286c4fd6c79c04 | 39 | py | Python | demo/m3_ext_demo/m3_ext_demo/routes.py | barsgroup/m3-ext | 15cd6772a5e664431e363ee9755c5fbf9a535c21 | [
"MIT"
] | 1 | 2020-06-03T18:26:16.000Z | 2020-06-03T18:26:16.000Z | demo/m3_ext_demo/m3_ext_demo/routes.py | barsgroup/m3-ext | 15cd6772a5e664431e363ee9755c5fbf9a535c21 | [
"MIT"
] | null | null | null | demo/m3_ext_demo/m3_ext_demo/routes.py | barsgroup/m3-ext | 15cd6772a5e664431e363ee9755c5fbf9a535c21 | [
"MIT"
] | 1 | 2018-04-21T12:13:58.000Z | 2018-04-21T12:13:58.000Z | #coding: utf-8
__author__ = 'prefer'
| 7.8 | 21 | 0.666667 |
fb3e0e286ea720ae84b56599ea622fc06ae73463 | 400 | py | Python | src/bindings/python/src/compatibility/ngraph/exceptions.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 1,127 | 2018-10-15T14:36:58.000Z | 2020-04-20T09:29:44.000Z | src/bindings/python/src/compatibility/ngraph/exceptions.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 439 | 2018-10-20T04:40:35.000Z | 2020-04-19T05:56:25.000Z | src/bindings/python/src/compatibility/ngraph/exceptions.py | ryanloney/openvino-1 | 4e0a740eb3ee31062ba0df88fcf438564f67edb7 | [
"Apache-2.0"
] | 414 | 2018-10-17T05:53:46.000Z | 2020-04-16T17:29:53.000Z | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""ngraph exceptions hierarchy. All exceptions are descendants of NgraphError."""
class NgraphError(Exception):
    """Root of the ngraph exception hierarchy."""
class UserInputError(NgraphError):
    """Raised when the user provided unexpected or invalid input."""
class NgraphTypeError(NgraphError, TypeError):
    """Type mismatch error (both an NgraphError and a builtin TypeError)."""
| 23.529412 | 81 | 0.7375 |
bd7417f92d58af8c9998e34eac47716bb270748c | 1,467 | py | Python | Testes com Pygame/ex_pygame04_sprites.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | Testes com Pygame/ex_pygame04_sprites.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | Testes com Pygame/ex_pygame04_sprites.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
] | null | null | null | import pygame
import pygame.gfxdraw
pygame.init()
# Shared rendering resources used by every Atom sprite.
FONT = pygame.font.SysFont('Arial', 20)
FONT_COLOR = pygame.Color('black')
# Base sprite image: a 30x30 transparent surface with an antialiased, filled
# green circle of radius 14 centered on it.
ATOM_IMG = pygame.Surface((30, 30), pygame.SRCALPHA)
pygame.gfxdraw.aacircle(ATOM_IMG, 15, 15, 14, (0, 255, 0))
pygame.gfxdraw.filled_circle(ATOM_IMG, 15, 15, 14, (0, 255, 0))
class Atom(pygame.sprite.Sprite):
    """A circle sprite labeled with its element text (e.g. 'Ne', 'C')."""
    def __init__(self, pos, element):
        """Create an atom sprite centered at `pos`, labeled `element`."""
        pygame.sprite.Sprite.__init__(self)
        # We have to make a copy of the image now, because
        # we're modifying it by blitting the text onto it.
        self.image = ATOM_IMG.copy()
        textsurface = FONT.render(element, True, FONT_COLOR)
        # To center the text, set the center of the textrect to
        # the center of the image rect.
        textrect = textsurface.get_rect(center=self.image.get_rect().center)
        self.image.blit(textsurface, textrect)
        self.rect = self.image.get_rect(center=pos)
def main():
    """Run the demo: draw two labeled atom sprites until the window closes."""
    screen = pygame.display.set_mode((640, 480))
    clock = pygame.time.Clock()
    all_sprites = pygame.sprite.Group(
        Atom((150, 200), 'Ne'), Atom((300, 100), 'C'))

    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False

        all_sprites.update()
        screen.fill((40, 50, 60))
        all_sprites.draw(screen)
        pygame.display.flip()
        clock.tick(30)  # cap the frame rate at 30 FPS
if __name__ == '__main__':
    # Run the demo, then shut pygame down cleanly.
    main()
    pygame.quit() | 27.679245 | 76 | 0.630539 |
1b24599ec653ca178785c3c57a1cc6f85312875a | 661 | py | Python | agua/comparators.py | CompileInc/agua | d1b7508b50ca0a53a430cdea4fb745f02377d594 | [
"MIT"
] | 1 | 2017-03-22T10:51:23.000Z | 2017-03-22T10:51:23.000Z | agua/comparators.py | CompileInc/agua | d1b7508b50ca0a53a430cdea4fb745f02377d594 | [
"MIT"
] | 18 | 2017-12-18T16:41:27.000Z | 2021-03-25T21:47:51.000Z | agua/comparators.py | CompileInc/agua | d1b7508b50ca0a53a430cdea4fb745f02377d594 | [
"MIT"
] | null | null | null | CHECK_FUNCTIONS = {}
def register(fn):
CHECK_FUNCTIONS[fn.func_name] = fn
return fn
@register
def exact(value, test_value):
return value == test_value or value is test_value
@register
def approximate(value, test_value, delta):
min_value = float(value) * (1 - delta)
max_value = float(value) * (1 + delta)
return min_value <= float(test_value) <= max_value
@register
def string_similarity(value, test_value, min_score, case_sensitive=True):
import fuzzywuzzy.fuzz
if not case_sensitive:
value = value.lower()
test_value = test_value.lower()
return fuzzywuzzy.fuzz.ratio(value, test_value) >= min_score
| 23.607143 | 73 | 0.701967 |
9f2b35fb4227089ba0c44f33436f3c8fe3a11dd2 | 4,805 | py | Python | ffbinaries/api.py | tropicoo/ffbinaries-api-client | a1d0d60d3b4adbac296044c15d36ec5a1f0fb693 | [
"MIT"
] | 1 | 2019-10-26T14:38:46.000Z | 2019-10-26T14:38:46.000Z | ffbinaries/api.py | tropicoo/ffbinaries-api-client | a1d0d60d3b4adbac296044c15d36ec5a1f0fb693 | [
"MIT"
] | null | null | null | ffbinaries/api.py | tropicoo/ffbinaries-api-client | a1d0d60d3b4adbac296044c15d36ec5a1f0fb693 | [
"MIT"
] | null | null | null | """ffbinaries API Client Module."""
import logging
import posixpath
from multiprocessing import Lock as proc_lock
from threading import Lock as thread_lock
import requests
from ffbinaries.cache import SimpleCache
from ffbinaries.const import HTTP
from ffbinaries.errors import (FFBinariesAPIClientError, NoCacheDataError,
ExpiredCacheDataError)
from ffbinaries.utils import retry
BASE_API_URL = 'https://ffbinaries.com/api'
DEFAULT_API_VERSION = 'v1'
# Endpoint path fragments, joined onto BASE_API_URL/<api_version>.
ENDPOINT_VERSIONS = 'versions'
ENDPOINT_VERSION = 'version'
ENDPOINT_LATEST = '{0}/latest'.format(ENDPOINT_VERSION)
ENDPOINT_EXACT_VERSION = '{0}/{{0}}'.format(ENDPOINT_VERSION)
# Default cache TTL in seconds.
CACHE_AGE = 300
# Locks guarding cache access across processes and threads.
PROC_LOCK = proc_lock()
THREAD_LOCK = thread_lock()
class FFBinariesAPIClient:
    """ffbinaries API Client Class.

    Thin wrapper around the https://ffbinaries.com JSON API with optional
    caching of JSON responses guarded by both a process and a thread lock.
    """
    def __init__(self, use_caching=False, cache_age=CACHE_AGE, log_init=None):
        """
        Args:
            use_caching: if True, cache JSON API responses in a SimpleCache.
            cache_age: cache TTL in seconds.
            log_init: optional (callable, arg) pair invoked to set up logging.
        """
        if log_init is not None and callable(log_init[0]):
            log_init[0](log_init[1])
        self._log = logging.getLogger(self.__class__.__name__)
        self._use_caching = use_caching
        self._cache = SimpleCache(cache_age)
    def _request(self, url, method=HTTP.GET, stream=False, jsonify=False):
        """General Request Method, consulting the cache when applicable."""
        # Cache only JSON-data which should be directly returned to the caller.
        if all([jsonify, self._use_caching, self._valid_for_caching(url)]):
            with THREAD_LOCK, PROC_LOCK:
                try:
                    return self._cache.get(url)
                except (ExpiredCacheDataError, NoCacheDataError):
                    data = self.__make_request(url, method, stream, jsonify)
                    self._cache.add(url, data)
                    return data
        return self.__make_request(url, method, stream, jsonify)
    @retry()
    def __make_request(self, url, method, stream, jsonify):
        """Perform the HTTP request (retried via the decorator)."""
        self._log.debug('%s %s ', method, url)
        response = requests.request(method=method, url=url, stream=stream)
        return response.json() if jsonify else response
    @staticmethod
    def _valid_for_caching(url):
        """Only responses from the ffbinaries API itself are cacheable."""
        return BASE_API_URL in url
    def get_latest_metadata(self, api_ver=DEFAULT_API_VERSION):
        """Return the metadata dict describing the latest release."""
        url = posixpath.join(BASE_API_URL, api_ver, ENDPOINT_LATEST)
        return self._request(url, jsonify=True)
    def get_available_versions_metadata(self, api_ver=DEFAULT_API_VERSION):
        """Return the metadata dict listing all published versions."""
        url = posixpath.join(BASE_API_URL, api_ver, ENDPOINT_VERSIONS)
        return self._request(url, jsonify=True)
    def get_exact_version_metadata(self, version, api_ver=DEFAULT_API_VERSION):
        """Return the metadata dict for one specific version string."""
        url = posixpath.join(BASE_API_URL, api_ver,
                             ENDPOINT_EXACT_VERSION.format(version))
        return self._request(url, jsonify=True)
    def get_latest_version(self):
        """Return the latest published version string.

        Raises:
            FFBinariesAPIClientError: if the response lacks a 'version' key.
        """
        try:
            return self.get_latest_metadata()['version']
        except KeyError as err:
            # Fixed: the adjacent literals previously concatenated to
            # 'publishedversion:' (missing space).
            raise FFBinariesAPIClientError('Failed to get latest published '
                                           'version: {0}'.format(err))
    def get_available_versions(self):
        """Return the list of numeric version strings published by the API."""
        versions = []
        metadata = self.get_available_versions_metadata()
        try:
            versions_view = metadata['versions'].keys()
        except KeyError as err:
            raise FFBinariesAPIClientError('Failed to get available versions:'
                                           ' {0}'.format(err))
        for version in versions_view:
            try:
                # Check if version can be converted to float but use original
                # string version for compatibility with API response.
                float(version)
                versions.append(version)
            except ValueError:
                # Got regular non-float string e.g. 'latest', skip it.
                pass
        return versions
    def download_latest_version(self, component, platform, stream=False):
        """Download the latest build of `component` for `platform`."""
        try:
            url = self.get_latest_metadata()['bin'][platform][component]
        except KeyError as err:
            raise FFBinariesAPIClientError('Failed to download latest version:'
                                           ' {0}'.format(err))
        return self._request(url, stream=stream)
    def download_exact_version(self, component, version, platform,
                               api_ver=DEFAULT_API_VERSION, stream=False):
        """Download a specific version of `component` for `platform`."""
        metadata = self.get_exact_version_metadata(version, api_ver)
        try:
            url = metadata['bin'][platform][component]
        except KeyError as err:
            raise FFBinariesAPIClientError('Failed to download exact version:'
                                           ' {0}'.format(err))
        return self._request(url, stream=stream)
    def show_cache(self):
        """Expose the raw cached items (for debugging/inspection)."""
        return self._cache.get_cached_items()
| 38.44 | 79 | 0.63871 |
b9041d23df17f7411b6ea005380ca163d756e881 | 227,499 | py | Python | tensorflow/python/ops/image_ops_impl.py | onepick/tensorflow | b44dd5e97df2f9aec5ad71e8122be98a59a35057 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/image_ops_impl.py | onepick/tensorflow | b44dd5e97df2f9aec5ad71e8122be98a59a35057 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/image_ops_impl.py | onepick/tensorflow | b44dd5e97df2f9aec5ad71e8122be98a59a35057 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of image ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sort_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
# Register image ops that have no gradient implementation so gradient
# construction through them is rejected rather than silently wrong.
ops.NotDifferentiable('RandomCrop')
# TODO(b/31222613): This op may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable('HSVToRGB')
ops.NotDifferentiable('DrawBoundingBoxes')
ops.NotDifferentiable('SampleDistortedBoundingBox')
ops.NotDifferentiable('SampleDistortedBoundingBoxV2')
# TODO(bsteiner): Implement the gradient function for extract_glimpse
# TODO(b/31222613): This op may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable('ExtractGlimpse')
ops.NotDifferentiable('NonMaxSuppression')
ops.NotDifferentiable('NonMaxSuppressionV2')
ops.NotDifferentiable('NonMaxSuppressionWithOverlaps')
ops.NotDifferentiable('GenerateBoundingBoxProposals')
# pylint: disable=invalid-name
def _assert(cond, ex_type, msg):
  """Polymorphic assert over python booleans and symbolic tensors.

  For a tensor `cond`, returns a one-element list holding a TensorFlow
  `Assert` op. For a plain boolean, raises `ex_type(msg)` when false and
  otherwise returns an empty list.

  Args:
    cond: Something evaluates to a boolean value. May be a tensor.
    ex_type: The exception class to use.
    msg: The error message.

  Returns:
    A list, containing at most one assert op.
  """
  if _is_tensor(cond):
    return [control_flow_ops.Assert(cond, [msg])]
  if not cond:
    raise ex_type(msg)
  return []
def _is_tensor(x):
  """Tells whether `x` is a symbolic tensor-like object.

  Args:
    x: A python object to check.

  Returns:
    `True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`.
  """
  tensor_like_types = (ops.Tensor, variables.Variable)
  return isinstance(x, tensor_like_types)
def _ImageDimensions(image, rank):
  """Returns the dimensions of an image tensor.

  Args:
    image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`.
    rank: The expected rank of the image

  Returns:
    A list of the image's dimensions. Statically known dimensions are python
    integers; the rest are integer scalar tensors.
  """
  shape = image.get_shape()
  if shape.is_fully_defined():
    return shape.as_list()
  # Mix static and dynamic sizes: keep the python int wherever the static
  # shape knows a dimension, otherwise use the runtime size tensor.
  static_dims = shape.with_rank(rank).as_list()
  dynamic_dims = array_ops.unstack(array_ops.shape(image), rank)
  return [
      dynamic if static is None else static
      for static, dynamic in zip(static_dims, dynamic_dims)
  ]
def _Check3DImage(image, require_static=True):
  """Build checks that `image` is a properly shaped 3-D image tensor.

  Args:
    image: 3-D Tensor of shape [height, width, channels]
    require_static: If `True`, requires that all dimensions of `image` are
      known and non-zero.

  Raises:
    ValueError: if `image.shape` is not a 3-vector.

  Returns:
    An empty list when `image` has fully defined dimensions; otherwise a list
    containing a single dynamic assert op.
  """
  try:
    image_shape = image.get_shape().with_rank(3)
  except ValueError:
    raise ValueError("'image' (shape %s) must be three-dimensional." %
                     image.shape)
  fully_defined = image_shape.is_fully_defined()
  if require_static and not fully_defined:
    raise ValueError("'image' (shape %s) must be fully defined." % image_shape)
  if any(x == 0 for x in image_shape):
    raise ValueError("all dims of 'image.shape' must be > 0: %s" % image_shape)
  if fully_defined:
    return []
  # Shape only partially known: defer the positivity check to graph runtime.
  return [
      check_ops.assert_positive(
          array_ops.shape(image),
          ["all dims of 'image.shape' must be > 0."])
  ]
def _Assert3DImage(image):
  """Statically or dynamically verify that `image` is 3-D.

  When the shape is statically known the check happens immediately (raising
  ValueError on failure) and `image` is returned unchanged; otherwise the
  returned tensor carries a control dependency on a runtime shape assertion.

  Args:
    image: 3-D Tensor of shape [height, width, channels]

  Raises:
    ValueError: if `image.shape` is not a 3-vector.

  Returns:
    `image`, possibly wrapped with a control dependency on dynamic checks.
  """
  dynamic_checks = _Check3DImage(image, require_static=False)
  return control_flow_ops.with_dependencies(dynamic_checks, image)
def _AssertAtLeast3DImage(image):
  """Statically or dynamically verify that `image` has rank >= 3.

  When the static shape proves the requirement, `image` is returned
  unchanged; otherwise the result carries a control dependency on runtime
  shape assertions.

  Args:
    image: >= 3-D Tensor of size [*, height, width, depth]

  Raises:
    ValueError: if image.shape is not a [>= 3] vector.

  Returns:
    `image`, possibly wrapped with a control dependency on dynamic checks.
  """
  dynamic_checks = _CheckAtLeast3DImage(image, require_static=False)
  return control_flow_ops.with_dependencies(dynamic_checks, image)
def _CheckAtLeast3DImage(image, require_static=True):
  """Assert that we are working with a properly shaped image.

  Args:
    image: >= 3-D Tensor of size [*, height, width, depth]
    require_static: If `True`, requires that all dimensions of `image` are known
      and non-zero.

  Raises:
    ValueError: if image.shape is not a [>= 3] vector.

  Returns:
    An empty list, if `image` has fully defined dimensions. Otherwise, a list
    containing an assert op is returned.
  """
  static_shape = image.get_shape()
  try:
    # An unknown rank is pinned to exactly 3 here; known ranks only need to
    # be at least 3 (extra leading dims, e.g. batch, are allowed).
    if static_shape.ndims is None:
      image_shape = static_shape.with_rank(3)
    else:
      image_shape = static_shape.with_rank_at_least(3)
  except ValueError:
    raise ValueError("'image' (shape %s) must be at least three-dimensional." %
                     image.shape)
  if require_static and not image_shape.is_fully_defined():
    raise ValueError('\'image\' must be fully defined.')
  if any(x == 0 for x in image_shape[-3:]):
    raise ValueError('inner 3 dims of \'image.shape\' must be > 0: %s' %
                     image_shape)
  if image_shape[-3:].is_fully_defined():
    # Everything relevant was verified statically; no runtime checks needed.
    return []
  return [
      check_ops.assert_positive(
          array_ops.shape(image)[-3:],
          ["inner 3 dims of 'image.shape' "
           'must be > 0.']),
      check_ops.assert_greater_equal(
          array_ops.rank(image),
          3,
          message="'image' must be at least three-dimensional.")
  ]
def _AssertGrayscaleImage(image):
  """Validate that `image` is a properly shaped grayscale image.

  When the shape can be verified statically, `image` is returned unchanged.
  Otherwise the returned tensor carries a control dependency on runtime
  assert ops that check the dynamic shape.

  Args:
    image: >= 2-D Tensor of size [*, 1]

  Raises:
    ValueError: if image.shape is not a [>= 2] vector or if
      last dimension is not size 1.

  Returns:
    `image`, possibly wrapped with dynamic shape-check dependencies.
  """
  dynamic_checks = _CheckGrayscaleImage(image, require_static=False)
  return control_flow_ops.with_dependencies(dynamic_checks, image)
def _CheckGrayscaleImage(image, require_static=True):
  """Assert that we are working with a properly shaped grayscale image.

  Args:
    image: >= 2-D Tensor of size [*, 1]
    require_static: Boolean, whether static shape is required.

  Raises:
    ValueError: if image.shape is not a [>= 2] vector or if
      last dimension is not size 1.

  Returns:
    An empty list, if `image` has fully defined dimensions. Otherwise, a list
    containing an assert op is returned.
  """
  try:
    # An unknown rank is pinned to exactly 2; known ranks only need to be at
    # least 2 (extra leading dims, e.g. batch/height/width, are allowed).
    if image.get_shape().ndims is None:
      image_shape = image.get_shape().with_rank(2)
    else:
      image_shape = image.get_shape().with_rank_at_least(2)
  except ValueError:
    raise ValueError('A grayscale image (shape %s) must be at least '
                     'two-dimensional.' % image.shape)
  if require_static and not image_shape.is_fully_defined():
    raise ValueError('\'image\' must be fully defined.')
  if image_shape.is_fully_defined():
    if image_shape[-1] != 1:
      raise ValueError('Last dimension of a grayscale image should be size 1.')
  if not image_shape.is_fully_defined():
    return [
        check_ops.assert_equal(
            array_ops.shape(image)[-1],
            1,
            message='Last dimension of a grayscale image should be size 1.'),
        # Bug fix: the dynamic rank check previously required rank >= 3,
        # contradicting both the static check above (`with_rank_at_least(2)`)
        # and this assert's own error message.
        check_ops.assert_greater_equal(
            array_ops.rank(image),
            2,
            message='A grayscale image must be at least two-dimensional.')
    ]
  else:
    return []
def fix_image_flip_shape(image, result):
  """Set the shape to 3 dimensional if we don't know anything else.

  Args:
    image: original image size
    result: flipped or transformed image

  Returns:
    An image whose shape is at least (None, None, None).
  """
  original_shape = image.get_shape()
  if original_shape == tensor_shape.unknown_shape():
    # Nothing known about the input: pin the result to rank 3 at least.
    result.set_shape([None] * 3)
  else:
    # Flips preserve the shape, so propagate the input's static shape.
    result.set_shape(original_shape)
  return result
@tf_export('image.random_flip_up_down')
@dispatch.add_dispatch_support
def random_flip_up_down(image, seed=None):
  """Randomly flips an image vertically (upside down).

  With a 1 in 2 chance, outputs the contents of `image` flipped along the first
  dimension, which is `height`. Otherwise, output the image as-is.
  When passing a batch of images, each image will be randomly flipped
  independent of other images.

  Example usage:

  >>> image = np.array([[[1], [2]], [[3], [4]]])
  >>> tf.image.random_flip_up_down(image, 3).numpy().tolist()
  [[[3], [4]], [[1], [2]]]

  Randomly flip multiple images.

  >>> images = np.array(
  ...     [
  ...         [[[1], [2]], [[3], [4]]],
  ...         [[[5], [6]], [[7], [8]]]
  ...     ])
  >>> tf.image.random_flip_up_down(images, 4).numpy().tolist()
  [[[[3], [4]], [[1], [2]]], [[[5], [6]], [[7], [8]]]]

  For producing deterministic results given a `seed` value, use
  `tf.image.stateless_random_flip_up_down`. Unlike using the `seed` param
  with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the
  same results given the same seed independent of how many times the function is
  called, and independent of global seed settings (e.g. tf.random.set_seed).

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor
      of shape `[height, width, channels]`.
    seed: A Python integer. Used to create a random seed. See
      `tf.compat.v1.set_random_seed` for behavior.

  Returns:
    A tensor of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` not supported.
  """
  # Bind the seed once; `_random_flip` draws stateful uniforms through this.
  uniform_sampler = functools.partial(random_ops.random_uniform, seed=seed)
  return _random_flip(image, 0, uniform_sampler, 'random_flip_up_down')
@tf_export('image.random_flip_left_right')
@dispatch.add_dispatch_support
def random_flip_left_right(image, seed=None):
  """Randomly flip an image horizontally (left to right).

  With a 1 in 2 chance, outputs the contents of `image` flipped along the
  second dimension, which is `width`. Otherwise output the image as-is.
  When passing a batch of images, each image will be randomly flipped
  independent of other images.

  Example usage:

  >>> image = np.array([[[1], [2]], [[3], [4]]])
  >>> tf.image.random_flip_left_right(image, 5).numpy().tolist()
  [[[2], [1]], [[4], [3]]]

  Randomly flip multiple images.

  >>> images = np.array(
  ...     [
  ...         [[[1], [2]], [[3], [4]]],
  ...         [[[5], [6]], [[7], [8]]]
  ...     ])
  >>> tf.image.random_flip_left_right(images, 6).numpy().tolist()
  [[[[2], [1]], [[4], [3]]], [[[5], [6]], [[7], [8]]]]

  For producing deterministic results given a `seed` value, use
  `tf.image.stateless_random_flip_left_right`. Unlike using the `seed` param
  with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the
  same results given the same seed independent of how many times the function is
  called, and independent of global seed settings (e.g. tf.random.set_seed).

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor
      of shape `[height, width, channels]`.
    seed: A Python integer. Used to create a random seed. See
      `tf.compat.v1.set_random_seed` for behavior.

  Returns:
    A tensor of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` not supported.
  """
  # Bind the seed once; `_random_flip` draws stateful uniforms through this.
  uniform_sampler = functools.partial(random_ops.random_uniform, seed=seed)
  return _random_flip(image, 1, uniform_sampler, 'random_flip_left_right')
@tf_export('image.stateless_random_flip_left_right', v1=[])
@dispatch.add_dispatch_support
def stateless_random_flip_left_right(image, seed):
  """Randomly flip an image horizontally (left to right) deterministically.

  Guarantees the same results given the same `seed` independent of how many
  times the function is called, and independent of global seed settings (e.g.
  `tf.random.set_seed`).

  Example usage:

  >>> image = np.array([[[1], [2]], [[3], [4]]])
  >>> seed = (2, 3)
  >>> tf.image.stateless_random_flip_left_right(image, seed).numpy().tolist()
  [[[2], [1]], [[4], [3]]]

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor
      of shape `[height, width, channels]`.
    seed: A shape [2] Tensor, the seed to the random number generator. Must have
      dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)

  Returns:
    A tensor of the same type and shape as `image`.
  """
  # Bind the stateless seed once; `_random_flip` draws uniforms through this.
  uniform_sampler = functools.partial(
      stateless_random_ops.stateless_random_uniform, seed=seed)
  return _random_flip(
      image, 1, uniform_sampler, 'stateless_random_flip_left_right')
@tf_export('image.stateless_random_flip_up_down', v1=[])
@dispatch.add_dispatch_support
def stateless_random_flip_up_down(image, seed):
  """Randomly flip an image vertically (upside down) deterministically.

  Guarantees the same results given the same `seed` independent of how many
  times the function is called, and independent of global seed settings (e.g.
  `tf.random.set_seed`).

  Example usage:

  >>> image = np.array([[[1], [2]], [[3], [4]]])
  >>> seed = (2, 3)
  >>> tf.image.stateless_random_flip_up_down(image, seed).numpy().tolist()
  [[[3], [4]], [[1], [2]]]

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor
      of shape `[height, width, channels]`.
    seed: A shape [2] Tensor, the seed to the random number generator. Must have
      dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)

  Returns:
    A tensor of the same type and shape as `image`.
  """
  # Bind the stateless seed once; `_random_flip` draws uniforms through this.
  uniform_sampler = functools.partial(
      stateless_random_ops.stateless_random_uniform, seed=seed)
  return _random_flip(
      image, 0, uniform_sampler, 'stateless_random_flip_up_down')
def _random_flip(image, flip_index, random_func, scope_name):
  """Randomly (50% chance) flip an image along axis `flip_index`.

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor
      of shape `[height, width, channels]`.
    flip_index: Dimension along which to flip the image.
      Vertical is 0, Horizontal is 1.
    random_func: partial function for calling either stateful or stateless
      random ops with `seed` parameter specified.
    scope_name: Name of the scope in which the ops are added.

  Returns:
    A tensor of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` not supported.
  """
  with ops.name_scope(None, scope_name, [image]) as scope:
    image = ops.convert_to_tensor(image, name='image')
    image = _AssertAtLeast3DImage(image)
    shape = image.get_shape()
    # Rank-3 case: a single image, flipped (or not) as a whole via cond.
    def f_rank3():
      uniform_random = random_func(shape=[], minval=0, maxval=1.0)
      mirror_cond = math_ops.less(uniform_random, .5)
      result = control_flow_ops.cond(
          mirror_cond,
          lambda: array_ops.reverse(image, [flip_index]),
          lambda: image,
          name=scope)
      return fix_image_flip_shape(image, result)
    # Rank-4 case: one independent coin flip per image in the batch.
    def f_rank4():
      batch_size = array_ops.shape(image)[0]
      uniform_random = random_func(shape=[batch_size], minval=0, maxval=1.0)
      # Round to {0.0, 1.0} per image; broadcastable over H, W, C.
      flips = math_ops.round(
          array_ops.reshape(uniform_random, [batch_size, 1, 1, 1]))
      flips = math_ops.cast(flips, image.dtype)
      # `flip_index + 1` skips the leading batch dimension.
      flipped_input = array_ops.reverse(image, [flip_index + 1])
      # Arithmetic select: flipped where flips == 1, original where 0.
      return flips * flipped_input + (1 - flips) * image
    if shape.ndims is None:
      # Rank unknown statically: pick the branch at graph execution time.
      rank = array_ops.rank(image)
      return control_flow_ops.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)
    if shape.ndims == 3:
      return f_rank3()
    elif shape.ndims == 4:
      return f_rank4()
    else:
      raise ValueError(
          '\'image\' (shape %s) must have either 3 or 4 dimensions.' % shape)
@tf_export('image.flip_left_right')
@dispatch.add_dispatch_support
def flip_left_right(image):
  """Flip an image horizontally (left to right).

  Outputs the contents of `image` flipped along the width dimension.

  See also `tf.reverse`.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...     [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.flip_left_right(x)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 4.,  5.,  6.],
          [ 1.,  2.,  3.]],
         [[10., 11., 12.],
          [ 7.,  8.,  9.]]], dtype=float32)>

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor
      of shape `[height, width, channels]`.

  Returns:
    A tensor of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` not supported.
  """
  # Width is the second spatial axis of an unbatched [H, W, C] image.
  horizontal_axis = 1
  return _flip(image, horizontal_axis, 'flip_left_right')
@tf_export('image.flip_up_down')
@dispatch.add_dispatch_support
def flip_up_down(image):
  """Flip an image vertically (upside down).

  Outputs the contents of `image` flipped along the height dimension.

  See also `reverse()`.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...     [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.flip_up_down(x)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 7.,  8.,  9.],
          [10., 11., 12.]],
         [[ 1.,  2.,  3.],
          [ 4.,  5.,  6.]]], dtype=float32)>

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor
      of shape `[height, width, channels]`.

  Returns:
    A `Tensor` of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` not supported.
  """
  # Height is the first spatial axis of an unbatched [H, W, C] image.
  vertical_axis = 0
  return _flip(image, vertical_axis, 'flip_up_down')
def _flip(image, flip_index, scope_name):
  """Flip an image either horizontally or vertically.

  Outputs the contents of `image` flipped along the dimension `flip_index`.

  See also `reverse()`.

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor
      of shape `[height, width, channels]`.
    flip_index: 0 For vertical, 1 for horizontal.
    scope_name: string, scope name.

  Returns:
    A `Tensor` of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` not supported.
  """
  with ops.name_scope(None, scope_name, [image]):
    image = ops.convert_to_tensor(image, name='image')
    image = _AssertAtLeast3DImage(image)
    shape = image.get_shape()

    def f_rank3():
      # Restore any static shape information `reverse` may drop.
      return fix_image_flip_shape(image, array_ops.reverse(image, [flip_index]))

    def f_rank4():
      # `flip_index + 1` skips the leading batch dimension.
      return array_ops.reverse(image, [flip_index + 1])

    if shape.ndims is None:
      # Rank unknown statically: pick the branch at graph execution time.
      rank = array_ops.rank(image)
      return control_flow_ops.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)
    elif shape.ndims == 3:
      return f_rank3()
    elif shape.ndims == 4:
      return f_rank4()
    else:
      # Bug fix: the message was missing a space after ')', unlike the
      # identical messages raised by the sibling image ops.
      raise ValueError(
          '\'image\' (shape %s) must have either 3 or 4 dimensions.' % shape)
@tf_export('image.rot90')
@dispatch.add_dispatch_support
def rot90(image, k=1, name=None):
  """Rotate image(s) counter-clockwise by 90 degrees.

  For example:

  >>> a=tf.constant([[[1],[2]],
  ...                [[3],[4]]])
  >>> # rotating `a` counter clockwise by 90 degrees
  >>> a_rot=tf.image.rot90(a)
  >>> print(a_rot[...,0].numpy())
  [[2 4]
   [1 3]]
  >>> # rotating `a` counter clockwise by 270 degrees
  >>> a_rot=tf.image.rot90(a, k=3)
  >>> print(a_rot[...,0].numpy())
  [[3 1]
   [4 2]]

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor
      of shape `[height, width, channels]`.
    k: A scalar integer. The number of times the image is rotated by 90 degrees.
    name: A name for this operation (optional).

  Returns:
    A rotated tensor of the same type and shape as `image`.

  Raises:
    ValueError: if the shape of `image` not supported.
  """
  with ops.name_scope(name, 'rot90', [image, k]) as scope:
    image = ops.convert_to_tensor(image, name='image')
    image = _AssertAtLeast3DImage(image)
    # `k` must be a scalar; reduce it mod 4 since four rotations are identity.
    k = ops.convert_to_tensor(k, dtype=dtypes.int32, name='k')
    k.get_shape().assert_has_rank(0)
    k = math_ops.mod(k, 4)
    shape = image.get_shape()
    if shape.ndims is None:
      # Rank unknown statically: pick the branch at graph execution time.
      rank = array_ops.rank(image)
      def f_rank3():
        return _rot90_3D(image, k, scope)
      def f_rank4():
        return _rot90_4D(image, k, scope)
      return control_flow_ops.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)
    elif shape.ndims == 3:
      return _rot90_3D(image, k, scope)
    elif shape.ndims == 4:
      return _rot90_4D(image, k, scope)
    else:
      raise ValueError(
          '\'image\' (shape %s) must have either 3 or 4 dimensions.' % shape)
def _rot90_3D(image, k, name_scope):
  """Rotate image counter-clockwise by 90 degrees `k` times.

  Args:
    image: 3-D Tensor of shape `[height, width, channels]`.
    k: A scalar integer. The number of times the image is rotated by 90 degrees.
    name_scope: A valid TensorFlow name scope.

  Returns:
    A 3-D tensor of the same type and shape as `image`.
  """
  def _rot90():
    # 90 deg CCW: reverse the columns, then swap the H and W axes.
    return array_ops.transpose(array_ops.reverse_v2(image, [1]), [1, 0, 2])
  def _rot180():
    # 180 deg: reverse both spatial axes; no transpose needed.
    return array_ops.reverse_v2(image, [0, 1])
  def _rot270():
    # 270 deg CCW: swap the H and W axes, then reverse the columns.
    return array_ops.reverse_v2(array_ops.transpose(image, [1, 0, 2]), [1])
  # `k` was reduced mod 4 by the caller; k == 0 falls through to the
  # identity default branch.
  cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180),
           (math_ops.equal(k, 3), _rot270)]
  result = control_flow_ops.case(
      cases, default=lambda: image, exclusive=True, name=name_scope)
  # H and W may have swapped depending on the runtime value of `k`, so only
  # the channel count is known statically.
  result.set_shape([None, None, image.get_shape()[2]])
  return result
def _rot90_4D(images, k, name_scope):
  """Rotate batch of images counter-clockwise by 90 degrees `k` times.

  Args:
    images: 4-D Tensor of shape `[batch, height, width, channels]`.
    k: A scalar integer. The number of times the images are rotated by 90
      degrees.
    name_scope: A valid TensorFlow name scope.

  Returns:
    A 4-D `Tensor` of the same type and shape as `images`.
  """
  def _rot90():
    # 90 deg CCW: reverse the columns, then swap the H and W axes
    # (batch and channel axes stay in place).
    return array_ops.transpose(array_ops.reverse_v2(images, [2]), [0, 2, 1, 3])
  def _rot180():
    # 180 deg: reverse both spatial axes; no transpose needed.
    return array_ops.reverse_v2(images, [1, 2])
  def _rot270():
    # 270 deg CCW: swap the H and W axes, then reverse the columns.
    return array_ops.reverse_v2(array_ops.transpose(images, [0, 2, 1, 3]), [2])
  # `k` was reduced mod 4 by the caller; k == 0 falls through to the
  # identity default branch.
  cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180),
           (math_ops.equal(k, 3), _rot270)]
  result = control_flow_ops.case(
      cases, default=lambda: images, exclusive=True, name=name_scope)
  shape = result.get_shape()
  # H and W may have swapped depending on the runtime value of `k`; keep the
  # statically known batch and channel dims.
  result.set_shape([shape[0], None, None, shape[3]])
  return result
@tf_export('image.transpose', v1=['image.transpose', 'image.transpose_image'])
@dispatch.add_dispatch_support
def transpose(image, name=None):
  """Transpose image(s) by swapping the height and width dimension.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...     [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.transpose(x)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 1.,  2.,  3.],
          [ 7.,  8.,  9.]],
         [[ 4.,  5.,  6.],
          [10., 11., 12.]]], dtype=float32)>

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor
      of shape `[height, width, channels]`.
    name: A name for this operation (optional).

  Returns:
    If `image` was 4-D, a 4-D float Tensor of shape
    `[batch, width, height, channels]`
    If `image` was 3-D, a 3-D float Tensor of shape
    `[width, height, channels]`

  Raises:
    ValueError: if the shape of `image` not supported.

  Usage Example:

  >>> image = [[[1, 2], [3, 4]],
  ...         [[5, 6], [7, 8]],
  ...         [[9, 10], [11, 12]]]
  >>> image = tf.constant(image)
  >>> tf.image.transpose(image)
  <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=
  array([[[ 1,  2],
          [ 5,  6],
          [ 9, 10]],
         [[ 3,  4],
          [ 7,  8],
          [11, 12]]], dtype=int32)>
  """
  with ops.name_scope(name, 'transpose', [image]):
    image = ops.convert_to_tensor(image, name='image')
    image = _AssertAtLeast3DImage(image)
    shape = image.get_shape()
    if shape.ndims is None:
      # Rank unknown statically: pick the permutation at graph execution time.
      rank = array_ops.rank(image)
      def f_rank3():
        # Swap H and W; channels stay last.
        return array_ops.transpose(image, [1, 0, 2], name=name)
      def f_rank4():
        # Swap H and W; batch stays first, channels stay last.
        return array_ops.transpose(image, [0, 2, 1, 3], name=name)
      return control_flow_ops.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)
    elif shape.ndims == 3:
      return array_ops.transpose(image, [1, 0, 2], name=name)
    elif shape.ndims == 4:
      return array_ops.transpose(image, [0, 2, 1, 3], name=name)
    else:
      raise ValueError(
          '\'image\' (shape %s) must have either 3 or 4 dimensions.' % shape)
@tf_export('image.central_crop')
@dispatch.add_dispatch_support
def central_crop(image, central_fraction):
  """Crop the central region of the image(s).

  Remove the outer parts of an image but retain the central region of the image
  along each dimension. If we specify central_fraction = 0.5, this function
  returns the region marked with "X" in the below diagram.

       --------
      |        |
      |  XXXX  |
      |  XXXX  |
      |        |   where "X" is the central 50% of the image.
       --------

  This function works on either a single image (`image` is a 3-D Tensor), or a
  batch of images (`image` is a 4-D Tensor).

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0],
  ...       [7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]],
  ...     [[13.0, 14.0, 15.0],
  ...       [16.0, 17.0, 18.0],
  ...       [19.0, 20.0, 21.0],
  ...       [22.0, 23.0, 24.0]],
  ...     [[25.0, 26.0, 27.0],
  ...       [28.0, 29.0, 30.0],
  ...       [31.0, 32.0, 33.0],
  ...       [34.0, 35.0, 36.0]],
  ...     [[37.0, 38.0, 39.0],
  ...       [40.0, 41.0, 42.0],
  ...       [43.0, 44.0, 45.0],
  ...       [46.0, 47.0, 48.0]]]
  >>> tf.image.central_crop(x, 0.5)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[16., 17., 18.],
          [19., 20., 21.]],
         [[28., 29., 30.],
          [31., 32., 33.]]], dtype=float32)>

  Args:
    image: Either a 3-D float Tensor of shape [height, width, depth], or a 4-D
      Tensor of shape [batch_size, height, width, depth].
    central_fraction: float (0, 1], fraction of size to crop

  Raises:
    ValueError: if central_crop_fraction is not within (0, 1].

  Returns:
    3-D / 4-D float Tensor, as per the input.
  """
  with ops.name_scope(None, 'central_crop', [image]):
    image = ops.convert_to_tensor(image, name='image')
    # NOTE(review): these Python-level comparisons treat `central_fraction`
    # as a static Python number, not a tensor — confirm callers never pass a
    # tensor here.
    if central_fraction <= 0.0 or central_fraction > 1.0:
      raise ValueError('central_fraction must be within (0, 1]')
    if central_fraction == 1.0:
      return image
    # NOTE(review): the return value of _AssertAtLeast3DImage (which carries
    # the dynamic-shape assert as a control dependency) is discarded here —
    # confirm the dynamic check is intentionally dropped.
    _AssertAtLeast3DImage(image)
    rank = image.get_shape().ndims
    if rank != 3 and rank != 4:
      raise ValueError('`image` should either be a Tensor with rank = 3 or '
                       'rank = 4. Had rank = {}.'.format(rank))
    # Helper method to return the `idx`-th dimension of `tensor`, along with
    # a boolean signifying if the dimension is dynamic.
    def _get_dim(tensor, idx):
      static_shape = tensor.get_shape().dims[idx].value
      if static_shape is not None:
        return static_shape, False
      return array_ops.shape(tensor)[idx], True
    # Get the height, width, depth (and batch size, if the image is a 4-D
    # tensor).
    if rank == 3:
      img_h, dynamic_h = _get_dim(image, 0)
      img_w, dynamic_w = _get_dim(image, 1)
      img_d = image.get_shape()[2]
    else:
      img_bs = image.get_shape()[0]
      img_h, dynamic_h = _get_dim(image, 1)
      img_w, dynamic_w = _get_dim(image, 2)
      img_d = image.get_shape()[3]
    # Compute the bounding boxes for the crop. The type and value of the
    # bounding boxes depend on the `image` tensor's rank and whether / not the
    # dimensions are statically defined.
    if dynamic_h:
      img_hd = math_ops.cast(img_h, dtypes.float64)
      bbox_h_start = math_ops.cast((img_hd - img_hd * central_fraction) / 2,
                                   dtypes.int32)
    else:
      # Static dimension: compute the offset with plain Python arithmetic.
      img_hd = float(img_h)
      bbox_h_start = int((img_hd - img_hd * central_fraction) / 2)
    if dynamic_w:
      img_wd = math_ops.cast(img_w, dtypes.float64)
      bbox_w_start = math_ops.cast((img_wd - img_wd * central_fraction) / 2,
                                   dtypes.int32)
    else:
      img_wd = float(img_w)
      bbox_w_start = int((img_wd - img_wd * central_fraction) / 2)
    # Crop symmetrically: the same margin is removed from both sides.
    bbox_h_size = img_h - bbox_h_start * 2
    bbox_w_size = img_w - bbox_w_start * 2
    # -1 in a slice size means "take everything remaining" for that axis
    # (the depth, and the batch in the rank-4 case).
    if rank == 3:
      bbox_begin = array_ops.stack([bbox_h_start, bbox_w_start, 0])
      bbox_size = array_ops.stack([bbox_h_size, bbox_w_size, -1])
    else:
      bbox_begin = array_ops.stack([0, bbox_h_start, bbox_w_start, 0])
      bbox_size = array_ops.stack([-1, bbox_h_size, bbox_w_size, -1])
    image = array_ops.slice(image, bbox_begin, bbox_size)
    # Reshape the `image` tensor to the desired size.
    if rank == 3:
      image.set_shape([
          None if dynamic_h else bbox_h_size,
          None if dynamic_w else bbox_w_size, img_d
      ])
    else:
      image.set_shape([
          img_bs, None if dynamic_h else bbox_h_size,
          None if dynamic_w else bbox_w_size, img_d
      ])
    return image
@tf_export('image.pad_to_bounding_box')
@dispatch.add_dispatch_support
def pad_to_bounding_box(image, offset_height, offset_width, target_height,
                        target_width):
  """Pad `image` with zeros to the specified `height` and `width`.

  Adds `offset_height` rows of zeros on top, `offset_width` columns of
  zeros on the left, and then pads the image on the bottom and right
  with zeros until it has dimensions `target_height`, `target_width`.

  This op does nothing if `offset_*` is zero and the image already has size
  `target_height` by `target_width`.

  Usage Example:

  >>> x = [[[1., 2., 3.],
  ...       [4., 5., 6.]],
  ...       [[7., 8., 9.],
  ...       [10., 11., 12.]]]
  >>> padded_image = tf.image.pad_to_bounding_box(x, 1, 1, 4, 4)
  >>> padded_image
  <tf.Tensor: shape=(4, 4, 3), dtype=float32, numpy=
  array([[[ 0.,  0.,  0.],
  [ 0.,  0.,  0.],
  [ 0.,  0.,  0.],
  [ 0.,  0.,  0.]],
  [[ 0.,  0.,  0.],
  [ 1.,  2.,  3.],
  [ 4.,  5.,  6.],
  [ 0.,  0.,  0.]],
  [[ 0.,  0.,  0.],
  [ 7.,  8.,  9.],
  [10., 11., 12.],
  [ 0.,  0.,  0.]],
  [[ 0.,  0.,  0.],
  [ 0.,  0.,  0.],
  [ 0.,  0.,  0.],
  [ 0.,  0.,  0.]]], dtype=float32)>

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor
      of shape `[height, width, channels]`.
    offset_height: Number of rows of zeros to add on top.
    offset_width: Number of columns of zeros to add on the left.
    target_height: Height of output image.
    target_width: Width of output image.

  Returns:
    If `image` was 4-D, a 4-D float Tensor of shape
    `[batch, target_height, target_width, channels]`
    If `image` was 3-D, a 3-D float Tensor of shape
    `[target_height, target_width, channels]`

  Raises:
    ValueError: If the shape of `image` is incompatible with the `offset_*` or
      `target_*` arguments, or either `offset_height` or `offset_width` is
      negative.
  """
  with ops.name_scope(None, 'pad_to_bounding_box', [image]):
    image = ops.convert_to_tensor(image, name='image')
    # Normalize the input to rank 4; remember whether to squeeze back at the
    # end. An unknown rank is treated as a single (unbatched) image.
    is_batch = True
    image_shape = image.get_shape()
    if image_shape.ndims == 3:
      is_batch = False
      image = array_ops.expand_dims(image, 0)
    elif image_shape.ndims is None:
      is_batch = False
      image = array_ops.expand_dims(image, 0)
      image.set_shape([None] * 4)
    elif image_shape.ndims != 4:
      raise ValueError(
          '\'image\' (shape %s) must have either 3 or 4 dimensions.' %
          image_shape)
    assert_ops = _CheckAtLeast3DImage(image, require_static=False)
    batch, height, width, depth = _ImageDimensions(image, rank=4)
    after_padding_width = target_width - offset_width - width
    after_padding_height = target_height - offset_height - height
    # `_assert` presumably returns static-or-dynamic check ops depending on
    # whether the condition is a Python bool or a tensor (defined elsewhere
    # in this file).
    assert_ops += _assert(offset_height >= 0, ValueError,
                          'offset_height must be >= 0')
    assert_ops += _assert(offset_width >= 0, ValueError,
                          'offset_width must be >= 0')
    assert_ops += _assert(after_padding_width >= 0, ValueError,
                          'width must be <= target - offset')
    assert_ops += _assert(after_padding_height >= 0, ValueError,
                          'height must be <= target - offset')
    image = control_flow_ops.with_dependencies(assert_ops, image)
    # Do not pad on the depth dimensions.
    paddings = array_ops.reshape(
        array_ops.stack([
            0, 0, offset_height, after_padding_height, offset_width,
            after_padding_width, 0, 0
        ]), [4, 2])
    padded = array_ops.pad(image, paddings)
    # Propagate whatever is statically known; tensor-valued targets become
    # unknown (None) dims.
    padded_shape = [
        None if _is_tensor(i) else i
        for i in [batch, target_height, target_width, depth]
    ]
    padded.set_shape(padded_shape)
    if not is_batch:
      padded = array_ops.squeeze(padded, axis=[0])
    return padded
@tf_export('image.crop_to_bounding_box')
@dispatch.add_dispatch_support
def crop_to_bounding_box(image, offset_height, offset_width, target_height,
                         target_width):
  """Crops an image to a specified bounding box.

  This op cuts a rectangular part out of `image`. The top-left corner of the
  returned image is at `offset_height, offset_width` in `image`, and its
  lower-right corner is at
  `offset_height + target_height, offset_width + target_width`.

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor
      of shape `[height, width, channels]`.
    offset_height: Vertical coordinate of the top-left corner of the result in
      the input.
    offset_width: Horizontal coordinate of the top-left corner of the result in
      the input.
    target_height: Height of the result.
    target_width: Width of the result.

  Returns:
    If `image` was 4-D, a 4-D float Tensor of shape
    `[batch, target_height, target_width, channels]`
    If `image` was 3-D, a 3-D float Tensor of shape
    `[target_height, target_width, channels]`

  Raises:
    ValueError: If the shape of `image` is incompatible with the `offset_*` or
      `target_*` arguments, or either `offset_height` or `offset_width` is
      negative, or either `target_height` or `target_width` is not positive.
  """
  with ops.name_scope(None, 'crop_to_bounding_box', [image]):
    image = ops.convert_to_tensor(image, name='image')
    # Normalize the input to rank 4; remember whether to squeeze back at the
    # end. An unknown rank is treated as a single (unbatched) image.
    is_batch = True
    image_shape = image.get_shape()
    if image_shape.ndims == 3:
      is_batch = False
      image = array_ops.expand_dims(image, 0)
    elif image_shape.ndims is None:
      is_batch = False
      image = array_ops.expand_dims(image, 0)
      image.set_shape([None] * 4)
    elif image_shape.ndims != 4:
      raise ValueError(
          '\'image\' (shape %s) must have either 3 or 4 dimensions.' %
          image_shape)
    assert_ops = _CheckAtLeast3DImage(image, require_static=False)
    batch, height, width, depth = _ImageDimensions(image, rank=4)
    # Validate that the requested box lies fully inside the image.
    assert_ops += _assert(offset_width >= 0, ValueError,
                          'offset_width must be >= 0.')
    assert_ops += _assert(offset_height >= 0, ValueError,
                          'offset_height must be >= 0.')
    assert_ops += _assert(target_width > 0, ValueError,
                          'target_width must be > 0.')
    assert_ops += _assert(target_height > 0, ValueError,
                          'target_height must be > 0.')
    assert_ops += _assert(width >= (target_width + offset_width), ValueError,
                          'width must be >= target + offset.')
    assert_ops += _assert(height >= (target_height + offset_height), ValueError,
                          'height must be >= target + offset.')
    image = control_flow_ops.with_dependencies(assert_ops, image)
    # -1 in a slice size means "take everything remaining" along batch/depth.
    cropped = array_ops.slice(
        image, array_ops.stack([0, offset_height, offset_width, 0]),
        array_ops.stack([-1, target_height, target_width, -1]))
    # Propagate whatever is statically known; tensor-valued targets become
    # unknown (None) dims.
    cropped_shape = [
        None if _is_tensor(i) else i
        for i in [batch, target_height, target_width, depth]
    ]
    cropped.set_shape(cropped_shape)
    if not is_batch:
      cropped = array_ops.squeeze(cropped, axis=[0])
    return cropped
@tf_export(
    'image.resize_with_crop_or_pad',
    v1=['image.resize_with_crop_or_pad', 'image.resize_image_with_crop_or_pad'])
@dispatch.add_dispatch_support
def resize_image_with_crop_or_pad(image, target_height, target_width):
  """Crops and/or pads an image to a target width and height.

  Resizes an image to a target width and height by either centrally
  cropping the image or padding it evenly with zeros.

  If `width` or `height` is greater than the specified `target_width` or
  `target_height` respectively, this op centrally crops along that dimension.
  If `width` or `height` is smaller than the specified `target_width` or
  `target_height` respectively, this op centrally pads with 0 along that
  dimension.

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor
      of shape `[height, width, channels]`.
    target_height: Target height.
    target_width: Target width.

  Raises:
    ValueError: if `target_height` or `target_width` are zero or negative.

  Returns:
    Cropped and/or padded image.
    If `images` was 4-D, a 4-D float Tensor of shape
    `[batch, new_height, new_width, channels]`.
    If `images` was 3-D, a 3-D float Tensor of shape
    `[new_height, new_width, channels]`.
  """
  with ops.name_scope(None, 'resize_image_with_crop_or_pad', [image]):
    image = ops.convert_to_tensor(image, name='image')
    image_shape = image.get_shape()
    # Normalize the input to rank 4; remember whether to squeeze back at the
    # end. An unknown rank is treated as a single (unbatched) image.
    is_batch = True
    if image_shape.ndims == 3:
      is_batch = False
      image = array_ops.expand_dims(image, 0)
    elif image_shape.ndims is None:
      is_batch = False
      image = array_ops.expand_dims(image, 0)
      image.set_shape([None] * 4)
    elif image_shape.ndims != 4:
      raise ValueError(
          '\'image\' (shape %s) must have either 3 or 4 dimensions.' %
          image_shape)
    assert_ops = _CheckAtLeast3DImage(image, require_static=False)
    assert_ops += _assert(target_width > 0, ValueError,
                          'target_width must be > 0.')
    assert_ops += _assert(target_height > 0, ValueError,
                          'target_height must be > 0.')
    image = control_flow_ops.with_dependencies(assert_ops, image)
    # `crop_to_bounding_box` and `pad_to_bounding_box` have their own checks.
    # Make sure our checks come first, so that error messages are clearer.
    if _is_tensor(target_height):
      target_height = control_flow_ops.with_dependencies(
          assert_ops, target_height)
    if _is_tensor(target_width):
      target_width = control_flow_ops.with_dependencies(assert_ops,
                                                        target_width)
    # The helpers below work uniformly on Python numbers and tensors: when
    # either operand is a tensor they emit graph ops, otherwise they compute
    # the result statically.
    def max_(x, y):
      if _is_tensor(x) or _is_tensor(y):
        return math_ops.maximum(x, y)
      else:
        return max(x, y)
    def min_(x, y):
      if _is_tensor(x) or _is_tensor(y):
        return math_ops.minimum(x, y)
      else:
        return min(x, y)
    def equal_(x, y):
      if _is_tensor(x) or _is_tensor(y):
        return math_ops.equal(x, y)
      else:
        return x == y
    _, height, width, _ = _ImageDimensions(image, rank=4)
    # A positive diff means the image is too small (pad); a negative diff
    # means it is too large (crop). Offsets center the crop/pad.
    width_diff = target_width - width
    offset_crop_width = max_(-width_diff // 2, 0)
    offset_pad_width = max_(width_diff // 2, 0)
    height_diff = target_height - height
    offset_crop_height = max_(-height_diff // 2, 0)
    offset_pad_height = max_(height_diff // 2, 0)
    # Maybe crop if needed.
    cropped = crop_to_bounding_box(image, offset_crop_height, offset_crop_width,
                                   min_(target_height, height),
                                   min_(target_width, width))
    # Maybe pad if needed.
    resized = pad_to_bounding_box(cropped, offset_pad_height, offset_pad_width,
                                  target_height, target_width)
    # In theory all the checks below are redundant.
    if resized.get_shape().ndims is None:
      raise ValueError('resized contains no shape.')
    _, resized_height, resized_width, _ = _ImageDimensions(resized, rank=4)
    assert_ops = []
    assert_ops += _assert(
        equal_(resized_height, target_height), ValueError,
        'resized height is not correct.')
    assert_ops += _assert(
        equal_(resized_width, target_width), ValueError,
        'resized width is not correct.')
    resized = control_flow_ops.with_dependencies(assert_ops, resized)
    if not is_batch:
      resized = array_ops.squeeze(resized, axis=[0])
    return resized
@tf_export(v1=['image.ResizeMethod'])
class ResizeMethodV1(object):
  """See `v1.image.resize` for details."""
  # Integer method identifiers accepted by the v1 resize API
  # (`tf.compat.v1.image.resize_images`). The v2 API uses the string
  # constants on `ResizeMethod` instead.
  BILINEAR = 0
  NEAREST_NEIGHBOR = 1
  BICUBIC = 2
  AREA = 3
@tf_export('image.ResizeMethod', v1=[])
class ResizeMethod(object):
  """See `tf.image.resize` for details."""
  # String method identifiers accepted by the v2 resize API
  # (`tf.image.resize`). The extra kernels (lanczos*, gaussian,
  # mitchellcubic) are only available through the v2 API.
  BILINEAR = 'bilinear'
  NEAREST_NEIGHBOR = 'nearest'
  BICUBIC = 'bicubic'
  AREA = 'area'
  LANCZOS3 = 'lanczos3'
  LANCZOS5 = 'lanczos5'
  GAUSSIAN = 'gaussian'
  MITCHELLCUBIC = 'mitchellcubic'
def _resize_images_common(images, resizer_fn, size, preserve_aspect_ratio, name,
                          skip_resize_if_same):
  """Core functionality for v1 and v2 resize functions.

  Handles the bookkeeping shared by both resize APIs: batching 3-D inputs,
  validating `size`, optionally shrinking `size` to preserve the aspect
  ratio, skipping the resize when it would be a no-op, and setting the
  static output shape.

  Args:
    images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
      Tensor of shape `[height, width, channels]`.
    resizer_fn: Callable `(images, size)` that performs the actual resize.
    size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.
    preserve_aspect_ratio: If True, rescale `size` so the result fits inside
      it while keeping the source aspect ratio.
    name: A name for the op scope (optional).
    skip_resize_if_same: If True, return `images` unchanged when the target
      size can be statically shown to equal the current size.

  Returns:
    The resized images, with the same rank as the input.

  Raises:
    ValueError: if `images` or `size` has an incompatible shape or type.
  """
  with ops.name_scope(name, 'resize', [images, size]):
    images = ops.convert_to_tensor(images, name='images')
    if images.get_shape().ndims is None:
      raise ValueError('\'images\' contains no shape.')
    # TODO(shlens): Migrate this functionality to the underlying Op's.
    # Temporarily promote 3-D inputs to a singleton batch; undone on return.
    is_batch = True
    if images.get_shape().ndims == 3:
      is_batch = False
      images = array_ops.expand_dims(images, 0)
    elif images.get_shape().ndims != 4:
      raise ValueError('\'images\' must have either 3 or 4 dimensions.')

    # Static (possibly None) input dimensions, used for the no-op check below.
    _, height, width, _ = images.get_shape().as_list()

    try:
      size = ops.convert_to_tensor(size, dtypes.int32, name='size')
    except (TypeError, ValueError):
      raise ValueError('\'size\' must be a 1-D int32 Tensor')
    if not size.get_shape().is_compatible_with([2]):
      raise ValueError('\'size\' must be a 1-D Tensor of 2 elements: '
                       'new_height, new_width')

    if preserve_aspect_ratio:
      # Get the current shapes of the image, even if dynamic.
      _, current_height, current_width, _ = _ImageDimensions(images, rank=4)

      # do the computation to find the right scale and height/width.
      scale_factor_height = (
          math_ops.cast(size[0], dtypes.float32) /
          math_ops.cast(current_height, dtypes.float32))
      scale_factor_width = (
          math_ops.cast(size[1], dtypes.float32) /
          math_ops.cast(current_width, dtypes.float32))
      # The smaller factor guarantees the scaled image fits inside `size`.
      scale_factor = math_ops.minimum(scale_factor_height, scale_factor_width)
      scaled_height_const = math_ops.cast(
          math_ops.round(scale_factor *
                         math_ops.cast(current_height, dtypes.float32)),
          dtypes.int32)
      scaled_width_const = math_ops.cast(
          math_ops.round(scale_factor *
                         math_ops.cast(current_width, dtypes.float32)),
          dtypes.int32)

      # NOTE: Reset the size and other constants used later.
      size = ops.convert_to_tensor([scaled_height_const, scaled_width_const],
                                   dtypes.int32,
                                   name='size')

    # Extract statically-known target dimensions from `size` when possible.
    size_const_as_shape = tensor_util.constant_value_as_shape(size)
    new_height_const = tensor_shape.dimension_at_index(size_const_as_shape,
                                                       0).value
    new_width_const = tensor_shape.dimension_at_index(size_const_as_shape,
                                                      1).value

    # If we can determine that the height and width will be unmodified by this
    # transformation, we avoid performing the resize.
    if skip_resize_if_same and all(
        x is not None
        for x in [new_width_const, width, new_height_const, height]) and (
            width == new_width_const and height == new_height_const):
      if not is_batch:
        images = array_ops.squeeze(images, axis=[0])
      return images

    images = resizer_fn(images, size)

    # NOTE(mrry): The shape functions for the resize ops cannot unpack
    # the packed values in `new_size`, so set the shape here.
    images.set_shape([None, new_height_const, new_width_const, None])

    if not is_batch:
      images = array_ops.squeeze(images, axis=[0])
    return images
@tf_export(v1=['image.resize_images', 'image.resize'])
@dispatch.add_dispatch_support
def resize_images(images,
                  size,
                  method=ResizeMethodV1.BILINEAR,
                  align_corners=False,
                  preserve_aspect_ratio=False,
                  name=None):
  """Resize `images` to `size` using the specified `method`.

  Resized images will be distorted if their original aspect ratio is not
  the same as `size`. To avoid distortions see
  `tf.image.resize_with_pad` or `tf.image.resize_with_crop_or_pad`.

  The `method` can be one of:

  * <b>`tf.image.ResizeMethod.BILINEAR`</b>: [Bilinear interpolation.](
    https://en.wikipedia.org/wiki/Bilinear_interpolation)
  * <b>`tf.image.ResizeMethod.NEAREST_NEIGHBOR`</b>: [
    Nearest neighbor interpolation.](
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation)
  * <b>`tf.image.ResizeMethod.BICUBIC`</b>: [Bicubic interpolation.](
    https://en.wikipedia.org/wiki/Bicubic_interpolation)
  * <b>`tf.image.ResizeMethod.AREA`</b>: Area interpolation.

  The return value has the same type as `images` if `method` is
  `tf.image.ResizeMethod.NEAREST_NEIGHBOR`. It will also have the same type
  as `images` if the size of `images` can be statically determined to be the
  same as `size`, because `images` is returned in this case. Otherwise, the
  return value has type `float32`.

  Args:
    images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
      Tensor of shape `[height, width, channels]`.
    size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new
      size for the images.
    method: ResizeMethod. Defaults to `tf.image.ResizeMethod.BILINEAR`.
    align_corners: bool. If True, the centers of the 4 corner pixels of the
      input and output tensors are aligned, preserving the values at the corner
      pixels. Defaults to `False`.
    preserve_aspect_ratio: Whether to preserve the aspect ratio. If this is set,
      then `images` will be resized to a size that fits in `size` while
      preserving the aspect ratio of the original image. Scales up the image if
      `size` is bigger than the current size of the `image`. Defaults to False.
    name: A name for this operation (optional).

  Raises:
    ValueError: if the shape of `images` is incompatible with the
      shape arguments to this function
    ValueError: if `size` has invalid shape or type.
    ValueError: if an unsupported resize method is specified.

  Returns:
    If `images` was 4-D, a 4-D float Tensor of shape
    `[batch, new_height, new_width, channels]`.
    If `images` was 3-D, a 3-D float Tensor of shape
    `[new_height, new_width, channels]`.
  """

  def resize_fn(images_t, new_size):
    """Legacy resize core function, passed to _resize_images_common."""
    # Accept both the v1 integer constants and the v2 string names for each
    # method, and dispatch to the matching legacy (non-half-pixel) kernel.
    if method in (ResizeMethodV1.BILINEAR, ResizeMethod.BILINEAR):
      return gen_image_ops.resize_bilinear(
          images_t, new_size, align_corners=align_corners)
    if method in (ResizeMethodV1.NEAREST_NEIGHBOR,
                  ResizeMethod.NEAREST_NEIGHBOR):
      return gen_image_ops.resize_nearest_neighbor(
          images_t, new_size, align_corners=align_corners)
    if method in (ResizeMethodV1.BICUBIC, ResizeMethod.BICUBIC):
      return gen_image_ops.resize_bicubic(
          images_t, new_size, align_corners=align_corners)
    if method in (ResizeMethodV1.AREA, ResizeMethod.AREA):
      return gen_image_ops.resize_area(
          images_t, new_size, align_corners=align_corners)
    raise ValueError('Resize method is not implemented: {}'.format(method))

  # The v1 API skips the resize entirely when the target size is statically
  # known to match the input size (preserving the input dtype).
  return _resize_images_common(
      images,
      resize_fn,
      size,
      preserve_aspect_ratio=preserve_aspect_ratio,
      name=name,
      skip_resize_if_same=True)
@tf_export('image.resize', v1=[])
@dispatch.add_dispatch_support
def resize_images_v2(images,
                     size,
                     method=ResizeMethod.BILINEAR,
                     preserve_aspect_ratio=False,
                     antialias=False,
                     name=None):
  """Resize `images` to `size` using the specified `method`.

  Resized images will be distorted if their original aspect ratio is not
  the same as `size`. To avoid distortions see
  `tf.image.resize_with_pad`.

  >>> image = tf.constant([
  ...  [1,0,0,0,0],
  ...  [0,1,0,0,0],
  ...  [0,0,1,0,0],
  ...  [0,0,0,1,0],
  ...  [0,0,0,0,1],
  ... ])
  >>> # Add "batch" and "channels" dimensions
  >>> image = image[tf.newaxis, ..., tf.newaxis]
  >>> image.shape.as_list()  # [batch, height, width, channels]
  [1, 5, 5, 1]
  >>> tf.image.resize(image, [3,5])[0,...,0].numpy()
  array([[0.6666667, 0.3333333, 0.       , 0.       , 0.       ],
         [0.       , 0.       , 1.       , 0.       , 0.       ],
         [0.       , 0.       , 0.       , 0.3333335, 0.6666665]],
        dtype=float32)

  It works equally well with a single image instead of a batch of images:

  >>> tf.image.resize(image[0], [3,5]).shape.as_list()
  [3, 5, 1]

  When `antialias` is true, the sampling filter will anti-alias the input image
  as well as interpolate.  When downsampling an image with [anti-aliasing](
  https://en.wikipedia.org/wiki/Spatial_anti-aliasing) the sampling filter
  kernel is scaled in order to properly anti-alias the input image signal.
  `antialias` has no effect when upsampling an image:

  >>> a = tf.image.resize(image, [5,10])
  >>> b = tf.image.resize(image, [5,10], antialias=True)
  >>> tf.reduce_max(abs(a - b)).numpy()
  0.0

  The `method` argument expects an item from the `image.ResizeMethod` enum, or
  the string equivalent. The options are:

  *   <b>`bilinear`</b>: [Bilinear interpolation.](
    https://en.wikipedia.org/wiki/Bilinear_interpolation) If `antialias` is
    true, becomes a hat/tent filter function with radius 1 when downsampling.
  *   <b>`lanczos3`</b>:  [Lanczos kernel](
    https://en.wikipedia.org/wiki/Lanczos_resampling) with radius 3.
    High-quality practical filter but may have some ringing, especially on
    synthetic images.
  *   <b>`lanczos5`</b>: [Lanczos kernel](
    https://en.wikipedia.org/wiki/Lanczos_resampling) with radius 5.
    Very-high-quality filter but may have stronger ringing.
  *   <b>`bicubic`</b>: [Cubic interpolant](
    https://en.wikipedia.org/wiki/Bicubic_interpolation) of Keys. Equivalent to
    Catmull-Rom kernel. Reasonably good quality and faster than Lanczos3Kernel,
    particularly when upsampling.
  *   <b>`gaussian`</b>: [Gaussian kernel](
    https://en.wikipedia.org/wiki/Gaussian_filter) with radius 3,
    sigma = 1.5 / 3.0.
  *   <b>`nearest`</b>: [Nearest neighbor interpolation.](
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation)
    `antialias` has no effect when used with nearest neighbor interpolation.
  *   <b>`area`</b>: Anti-aliased resampling with area interpolation.
    `antialias` has no effect when used with area interpolation; it
    always anti-aliases.
  *   <b>`mitchellcubic`</b>: Mitchell-Netravali Cubic non-interpolating filter.
    For synthetic images (especially those lacking proper prefiltering), less
    ringing than Keys cubic kernel but less sharp.

  Note: Near image edges the filtering kernel may be partially outside the
  image boundaries. For these pixels, only input pixels inside the image will be
  included in the filter sum, and the output value will be appropriately
  normalized.

  The return value has type `float32`, unless the `method` is
  `ResizeMethod.NEAREST_NEIGHBOR`, then the return dtype is the dtype
  of `images`:

  >>> nn = tf.image.resize(image, [5,7], method='nearest')
  >>> nn[0,...,0].numpy()
  array([[1, 0, 0, 0, 0, 0, 0],
         [0, 1, 1, 0, 0, 0, 0],
         [0, 0, 0, 1, 0, 0, 0],
         [0, 0, 0, 0, 1, 1, 0],
         [0, 0, 0, 0, 0, 0, 1]], dtype=int32)

  With `preserve_aspect_ratio=True`, the aspect ratio is preserved, so `size`
  is the maximum for each dimension:

  >>> max_10_20 = tf.image.resize(image, [10,20], preserve_aspect_ratio=True)
  >>> max_10_20.shape.as_list()
  [1, 10, 10, 1]

  Args:
    images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
      Tensor of shape `[height, width, channels]`.
    size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new
      size for the images.
    method: An `image.ResizeMethod`, or string equivalent. Defaults to
      `bilinear`.
    preserve_aspect_ratio: Whether to preserve the aspect ratio. If this is set,
      then `images` will be resized to a size that fits in `size` while
      preserving the aspect ratio of the original image. Scales up the image if
      `size` is bigger than the current size of the `image`. Defaults to False.
    antialias: Whether to use an anti-aliasing filter when downsampling an
      image.
    name: A name for this operation (optional).

  Raises:
    ValueError: if the shape of `images` is incompatible with the
      shape arguments to this function
    ValueError: if `size` has an invalid shape or type.
    ValueError: if an unsupported resize method is specified.

  Returns:
    If `images` was 4-D, a 4-D float Tensor of shape
    `[batch, new_height, new_width, channels]`.
    If `images` was 3-D, a 3-D float Tensor of shape
    `[new_height, new_width, channels]`.
  """

  def resize_fn(images_t, new_size):
    """Resize core function, passed to _resize_images_common."""

    def scale_and_translate_resize(kernel):
      # Express the resize as a ScaleAndTranslate op with zero translation;
      # this op supports anti-aliasing for every kernel type.
      in_hw = math_ops.cast(
          array_ops.shape(images_t)[1:3], dtype=dtypes.float32)
      out_hw = math_ops.cast(new_size, dtype=dtypes.float32)
      return gen_image_ops.scale_and_translate(
          images_t,
          new_size,
          out_hw / in_hw,
          array_ops.zeros([2]),
          kernel_type=kernel,
          antialias=antialias)

    if method == ResizeMethod.BILINEAR:
      if antialias:
        # Anti-aliased bilinear == triangle (hat/tent) kernel.
        return scale_and_translate_resize('triangle')
      return gen_image_ops.resize_bilinear(
          images_t, new_size, half_pixel_centers=True)
    if method == ResizeMethod.NEAREST_NEIGHBOR:
      # `antialias` is documented to have no effect for nearest neighbor.
      return gen_image_ops.resize_nearest_neighbor(
          images_t, new_size, half_pixel_centers=True)
    if method == ResizeMethod.BICUBIC:
      if antialias:
        return scale_and_translate_resize('keyscubic')
      return gen_image_ops.resize_bicubic(
          images_t, new_size, half_pixel_centers=True)
    if method == ResizeMethod.AREA:
      # Area interpolation always anti-aliases.
      return gen_image_ops.resize_area(images_t, new_size)
    if method in (ResizeMethod.LANCZOS3, ResizeMethod.LANCZOS5,
                  ResizeMethod.GAUSSIAN, ResizeMethod.MITCHELLCUBIC):
      # These kernels are only available through ScaleAndTranslate.
      return scale_and_translate_resize(method)
    raise ValueError('Resize method is not implemented: {}'.format(method))

  return _resize_images_common(
      images,
      resize_fn,
      size,
      preserve_aspect_ratio=preserve_aspect_ratio,
      name=name,
      skip_resize_if_same=False)
def _resize_image_with_pad_common(image, target_height, target_width,
                                  resize_fn):
  """Core functionality for v1 and v2 resize_image_with_pad functions.

  Resizes `image` with `resize_fn` so it fits inside
  `target_height` x `target_width` without changing its aspect ratio, then
  zero-pads it out to exactly the target dimensions.

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
      Tensor of shape `[height, width, channels]`.
    target_height: Target height.
    target_width: Target width.
    resize_fn: Callable `(image, [new_height, new_width])` performing the
      actual resize.

  Returns:
    The resized and padded image(s), with the same rank as the input.

  Raises:
    ValueError: if `target_height` or `target_width` is not positive, or
      `image` has an unsupported rank.
  """
  with ops.name_scope(None, 'resize_image_with_pad', [image]):
    image = ops.convert_to_tensor(image, name='image')
    image_shape = image.get_shape()
    # Temporarily promote 3-D / unknown-rank inputs to a 4-D batch;
    # undone on return.
    is_batch = True
    if image_shape.ndims == 3:
      is_batch = False
      image = array_ops.expand_dims(image, 0)
    elif image_shape.ndims is None:
      is_batch = False
      image = array_ops.expand_dims(image, 0)
      image.set_shape([None] * 4)
    elif image_shape.ndims != 4:
      raise ValueError(
          '\'image\' (shape %s) must have either 3 or 4 dimensions.' %
          image_shape)

    assert_ops = _CheckAtLeast3DImage(image, require_static=False)
    assert_ops += _assert(target_width > 0, ValueError,
                          'target_width must be > 0.')
    assert_ops += _assert(target_height > 0, ValueError,
                          'target_height must be > 0.')

    image = control_flow_ops.with_dependencies(assert_ops, image)

    def max_(x, y):
      # Static Python max when both operands are plain numbers, graph op
      # when either is a tensor.
      if _is_tensor(x) or _is_tensor(y):
        return math_ops.maximum(x, y)
      else:
        return max(x, y)

    _, height, width, _ = _ImageDimensions(image, rank=4)

    # convert values to float, to ease divisions
    f_height = math_ops.cast(height, dtype=dtypes.float32)
    f_width = math_ops.cast(width, dtype=dtypes.float32)
    f_target_height = math_ops.cast(target_height, dtype=dtypes.float32)
    f_target_width = math_ops.cast(target_width, dtype=dtypes.float32)

    # Find the ratio by which the image must be adjusted
    # to fit within the target
    ratio = max_(f_width / f_target_width, f_height / f_target_height)
    resized_height_float = f_height / ratio
    resized_width_float = f_width / ratio
    resized_height = math_ops.cast(
        math_ops.floor(resized_height_float), dtype=dtypes.int32)
    resized_width = math_ops.cast(
        math_ops.floor(resized_width_float), dtype=dtypes.int32)

    # Split the leftover space evenly on both sides of each dimension.
    padding_height = (f_target_height - resized_height_float) / 2
    padding_width = (f_target_width - resized_width_float) / 2
    f_padding_height = math_ops.floor(padding_height)
    f_padding_width = math_ops.floor(padding_width)
    p_height = max_(0, math_ops.cast(f_padding_height, dtype=dtypes.int32))
    p_width = max_(0, math_ops.cast(f_padding_width, dtype=dtypes.int32))

    # Resize first, then pad to meet requested dimensions
    resized = resize_fn(image, [resized_height, resized_width])

    padded = pad_to_bounding_box(resized, p_height, p_width, target_height,
                                 target_width)

    if padded.get_shape().ndims is None:
      raise ValueError('padded contains no shape.')

    _ImageDimensions(padded, rank=4)

    if not is_batch:
      padded = array_ops.squeeze(padded, axis=[0])

    return padded
@tf_export(v1=['image.resize_image_with_pad'])
@dispatch.add_dispatch_support
def resize_image_with_pad_v1(image,
                             target_height,
                             target_width,
                             method=ResizeMethodV1.BILINEAR,
                             align_corners=False):
  """Resizes and pads an image to a target width and height.

  Resizes an image to a target width and height by keeping
  the aspect ratio the same without distortion. If the target
  dimensions don't match the image dimensions, the image
  is resized and then padded with zeroes to match requested
  dimensions.

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
      Tensor of shape `[height, width, channels]`.
    target_height: Target height.
    target_width: Target width.
    method: Method to use for resizing image. See `resize_images()`
    align_corners: bool.  If True, the centers of the 4 corner pixels of the
      input and output tensors are aligned, preserving the values at the corner
      pixels. Defaults to `False`.

  Raises:
    ValueError: if `target_height` or `target_width` are zero or negative.

  Returns:
    Resized and padded image.
    If `images` was 4-D, a 4-D float Tensor of shape
    `[batch, new_height, new_width, channels]`.
    If `images` was 3-D, a 3-D float Tensor of shape
    `[new_height, new_width, channels]`.
  """

  def _legacy_resize(im, new_size):
    # Delegate to the v1 resizer so `method` / `align_corners` semantics
    # match `tf.compat.v1.image.resize_images`.
    return resize_images(im, new_size, method, align_corners=align_corners)

  return _resize_image_with_pad_common(image, target_height, target_width,
                                       _legacy_resize)
@tf_export('image.resize_with_pad', v1=[])
@dispatch.add_dispatch_support
def resize_image_with_pad_v2(image,
                             target_height,
                             target_width,
                             method=ResizeMethod.BILINEAR,
                             antialias=False):
  """Resizes and pads an image to a target width and height.

  Resizes an image to a target width and height by keeping
  the aspect ratio the same without distortion. If the target
  dimensions don't match the image dimensions, the image
  is resized and then padded with zeroes to match requested
  dimensions.

  Args:
    image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
      Tensor of shape `[height, width, channels]`.
    target_height: Target height.
    target_width: Target width.
    method: Method to use for resizing image. See `image.resize()`
    antialias: Whether to use anti-aliasing when resizing. See 'image.resize()'.

  Raises:
    ValueError: if `target_height` or `target_width` are zero or negative.

  Returns:
    Resized and padded image.
    If `images` was 4-D, a 4-D float Tensor of shape
    `[batch, new_height, new_width, channels]`.
    If `images` was 3-D, a 3-D float Tensor of shape
    `[new_height, new_width, channels]`.
  """

  def _v2_resize(im, new_size):
    # Delegate to the v2 resizer so `method` / `antialias` semantics match
    # `tf.image.resize`.
    return resize_images_v2(im, new_size, method, antialias=antialias)

  return _resize_image_with_pad_common(image, target_height, target_width,
                                       _v2_resize)
@tf_export('image.per_image_standardization')
@dispatch.add_dispatch_support
def per_image_standardization(image):
  """Linearly scales each image in `image` to have mean 0 and variance 1.

  For each 3-D image `x` in `image`, computes `(x - mean) / adjusted_stddev`,
  where

  - `mean` is the average of all values in `x`
  - `adjusted_stddev = max(stddev, 1.0/sqrt(N))` is capped away from 0 to
    protect against division by 0 when handling uniform images
  - `N` is the number of elements in `x`
  - `stddev` is the standard deviation of all values in `x`

  Args:
    image: An n-D Tensor with at least 3 dimensions, the last 3 of which are
      the dimensions of each image.

  Returns:
    A `Tensor` with the same shape as `image`.

  Raises:
    ValueError: if the shape of 'image' is incompatible with this function.
  """
  with ops.name_scope(None, 'per_image_standardization', [image]) as scope:
    image = ops.convert_to_tensor(image, name='image')
    image = _AssertAtLeast3DImage(image)
    image = math_ops.cast(image, dtype=dtypes.float32)

    # Each image occupies the trailing three dimensions; statistics are
    # reduced over those, independently per image in any leading dims.
    pixel_count = math_ops.reduce_prod(array_ops.shape(image)[-3:])
    mean = math_ops.reduce_mean(image, axis=[-1, -2, -3], keepdims=True)

    # Apply a minimum normalization that protects us against uniform images.
    stddev = math_ops.reduce_std(image, axis=[-1, -2, -3], keepdims=True)
    stddev_floor = math_ops.rsqrt(math_ops.cast(pixel_count, dtypes.float32))
    safe_stddev = math_ops.maximum(stddev, stddev_floor)

    centered = image - mean
    return math_ops.divide(centered, safe_stddev, name=scope)
@tf_export('image.random_brightness')
@dispatch.add_dispatch_support
def random_brightness(image, max_delta, seed=None):
  """Adjust the brightness of images by a random factor.

  Equivalent to `adjust_brightness()` using a `delta` randomly picked in the
  interval `[-max_delta, max_delta)`.

  For producing deterministic results given a `seed` value, use
  `tf.image.stateless_random_brightness`. Unlike using the `seed` param
  with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the
  same results given the same seed independent of how many times the function
  is called, and independent of global seed settings (e.g. tf.random.set_seed).

  Args:
    image: An image or images to adjust.
    max_delta: float, must be non-negative.
    seed: A Python integer. Used to create a random seed. See
      `tf.compat.v1.set_random_seed` for behavior.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.random_brightness(x, 0.2)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...>

  Returns:
    The brightness-adjusted image(s).

  Raises:
    ValueError: if `max_delta` is negative.
  """
  if max_delta < 0:
    raise ValueError('max_delta must be non-negative.')

  # Draw one scalar offset and apply it uniformly to the whole image batch.
  random_delta = random_ops.random_uniform(
      [], minval=-max_delta, maxval=max_delta, seed=seed)
  return adjust_brightness(image, random_delta)
@tf_export('image.stateless_random_brightness', v1=[])
@dispatch.add_dispatch_support
def stateless_random_brightness(image, max_delta, seed):
  """Adjust the brightness of images by a random factor deterministically.

  Equivalent to `adjust_brightness()` using a `delta` randomly picked in the
  interval `[-max_delta, max_delta)`.

  Guarantees the same results given the same `seed` independent of how many
  times the function is called, and independent of global seed settings (e.g.
  `tf.random.set_seed`).

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> seed = (1, 2)
  >>> tf.image.stateless_random_brightness(x, 0.2, seed)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 1.1376241,  2.1376243,  3.1376243],
          [ 4.1376243,  5.1376243,  6.1376243]],
         [[ 7.1376243,  8.137624 ,  9.137624 ],
          [10.137624 , 11.137624 , 12.137624 ]]], dtype=float32)>

  Args:
    image: An image or images to adjust.
    max_delta: float, must be non-negative.
    seed: A shape [2] Tensor, the seed to the random number generator. Must
      have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)

  Returns:
    The brightness-adjusted image(s).

  Raises:
    ValueError: if `max_delta` is negative.
  """
  if max_delta < 0:
    raise ValueError('max_delta must be non-negative.')

  # Stateless sampling: the same `seed` always yields the same delta.
  random_delta = stateless_random_ops.stateless_random_uniform(
      shape=[], minval=-max_delta, maxval=max_delta, seed=seed)
  return adjust_brightness(image, random_delta)
@tf_export('image.random_contrast')
@dispatch.add_dispatch_support
def random_contrast(image, lower, upper, seed=None):
  """Adjust the contrast of an image or images by a random factor.

  Equivalent to `adjust_contrast()` but uses a `contrast_factor` randomly
  picked in the interval `[lower, upper)`.

  For producing deterministic results given a `seed` value, use
  `tf.image.stateless_random_contrast`. Unlike using the `seed` param
  with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the
  same results given the same seed independent of how many times the function
  is called, and independent of global seed settings (e.g. tf.random.set_seed).

  Args:
    image: An image tensor with 3 or more dimensions.
    lower: float.  Lower bound for the random contrast factor.
    upper: float.  Upper bound for the random contrast factor.
    seed: A Python integer. Used to create a random seed. See
      `tf.compat.v1.set_random_seed` for behavior.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.random_contrast(x, 0.2, 0.5)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...>

  Returns:
    The contrast-adjusted image(s).

  Raises:
    ValueError: if `upper <= lower` or if `lower < 0`.
  """
  # Validate the factor interval eagerly, before building any ops.
  if upper <= lower:
    raise ValueError('upper must be > lower.')
  if lower < 0:
    raise ValueError('lower must be non-negative.')

  factor = random_ops.random_uniform(
      [], minval=lower, maxval=upper, seed=seed)
  return adjust_contrast(image, factor)
@tf_export('image.stateless_random_contrast', v1=[])
@dispatch.add_dispatch_support
def stateless_random_contrast(image, lower, upper, seed):
  """Adjust the contrast of images by a random factor deterministically.

  Guarantees the same results given the same `seed` independent of how many
  times the function is called, and independent of global seed settings (e.g.
  `tf.random.set_seed`).

  Args:
    image: An image tensor with 3 or more dimensions.
    lower: float.  Lower bound for the random contrast factor.
    upper: float.  Upper bound for the random contrast factor.
    seed: A shape [2] Tensor, the seed to the random number generator. Must
      have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.)

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> seed = (1, 2)
  >>> tf.image.stateless_random_contrast(x, 0.2, 0.5, seed)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[3.4605184, 4.4605184, 5.4605184],
          [4.820173 , 5.820173 , 6.820173 ]],
         [[6.179827 , 7.179827 , 8.179828 ],
          [7.5394816, 8.539482 , 9.539482 ]]], dtype=float32)>

  Returns:
    The contrast-adjusted image(s).

  Raises:
    ValueError: if `upper <= lower` or if `lower < 0`.
  """
  # Validate the factor interval eagerly, before building any ops.
  if upper <= lower:
    raise ValueError('upper must be > lower.')
  if lower < 0:
    raise ValueError('lower must be non-negative.')

  # Stateless sampling: the same `seed` always yields the same factor.
  factor = stateless_random_ops.stateless_random_uniform(
      shape=[], minval=lower, maxval=upper, seed=seed)
  return adjust_contrast(image, factor)
@tf_export('image.adjust_brightness')
@dispatch.add_dispatch_support
def adjust_brightness(image, delta):
  """Adjust the brightness of RGB or Grayscale images.

  This is a convenience method that converts RGB images to float
  representation, adjusts their brightness, and then converts them back to the
  original data type. If several adjustments are chained, it is advisable to
  minimize the number of redundant conversions.

  The value `delta` is added to all components of the tensor `image`. `image`
  is converted to `float` and scaled appropriately if it is in fixed-point
  representation, and `delta` is converted to the same data type. For regular
  images, `delta` should be in the range `(-1,1)`, as it is added to the image
  in floating point representation, where pixel values are in the `[0,1)`
  range.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.adjust_brightness(x, delta=0.1)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 1.1,  2.1,  3.1],
          [ 4.1,  5.1,  6.1]],
         [[ 7.1,  8.1,  9.1],
          [10.1, 11.1, 12.1]]], dtype=float32)>

  Args:
    image: RGB image or images to adjust.
    delta: A scalar. Amount to add to the pixel values.

  Returns:
    A brightness-adjusted tensor of the same shape and type as `image`.
  """
  with ops.name_scope(None, 'adjust_brightness', [image, delta]) as name:
    image = ops.convert_to_tensor(image, name='image')
    # Work in float and convert back afterwards so fixed-point inputs keep
    # their original dtype (with saturation on the way back).
    orig_dtype = image.dtype
    already_float = orig_dtype in (dtypes.float16, dtypes.float32)
    flt_image = image if already_float else convert_image_dtype(
        image, dtypes.float32)

    shifted = math_ops.add(
        flt_image, math_ops.cast(delta, flt_image.dtype), name=name)

    return convert_image_dtype(shifted, orig_dtype, saturate=True)
@tf_export('image.adjust_contrast')
@dispatch.add_dispatch_support
def adjust_contrast(images, contrast_factor):
  """Adjust contrast of RGB or grayscale images.

  This is a convenience method that converts RGB images to float
  representation, adjusts their contrast, and then converts them back to the
  original data type. If several adjustments are chained, it is advisable to
  minimize the number of redundant conversions.

  `images` is a tensor of at least 3 dimensions.  The last 3 dimensions are
  interpreted as `[height, width, channels]`.  The other dimensions only
  represent a collection of images, such as
  `[batch, height, width, channels].`

  Contrast is adjusted independently for each channel of each image.

  For each channel, this Op computes the mean of the image pixels in the
  channel and then adjusts each component `x` of each pixel to
  `(x - mean) * contrast_factor + mean`.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.adjust_contrast(x, 2)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[-3.5, -2.5, -1.5],
          [ 2.5,  3.5,  4.5]],
         [[ 8.5,  9.5, 10.5],
          [14.5, 15.5, 16.5]]], dtype=float32)>

  Args:
    images: Images to adjust.  At least 3-D.
    contrast_factor: A float multiplier for adjusting contrast.

  Returns:
    The contrast-adjusted image or images.
  """
  with ops.name_scope(None, 'adjust_contrast',
                      [images, contrast_factor]) as name:
    images = ops.convert_to_tensor(images, name='images')
    # Work in float and convert back afterwards so fixed-point inputs keep
    # their original dtype (with saturation on the way back).
    orig_dtype = images.dtype
    if orig_dtype not in (dtypes.float16, dtypes.float32):
      flt_images = convert_image_dtype(images, dtypes.float32)
    else:
      flt_images = images

    adjusted = gen_image_ops.adjust_contrastv2(
        flt_images, contrast_factor=contrast_factor, name=name)

    return convert_image_dtype(adjusted, orig_dtype, saturate=True)
@tf_export('image.adjust_gamma')
@dispatch.add_dispatch_support
def adjust_gamma(image, gamma=1, gain=1):
  """Performs [Gamma Correction](http://en.wikipedia.org/wiki/Gamma_correction)
  on the input image.

  Also known as Power Law Transform. This function converts the
  input images at first to float representation, then transforms them
  pixelwise according to the equation `Out = gain * In**gamma`,
  and then converts them back to the original data type.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.adjust_gamma(x, 0.2)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[1.       , 1.1486983, 1.2457309],
          [1.319508 , 1.3797297, 1.4309691]],
         [[1.4757731, 1.5157166, 1.5518456],
          [1.5848932, 1.6153942, 1.6437519]]], dtype=float32)>

  Args:
    image : RGB image or images to adjust.
    gamma : A scalar or tensor. Non-negative real number.
    gain  : A scalar or tensor. The constant multiplier.

  Returns:
    A Tensor. A Gamma-adjusted tensor of the same shape and type as `image`.

  Raises:
    ValueError: If gamma is negative.

  Notes:
    For gamma greater than 1, the histogram will shift towards left and
    the output image will be darker than the input image.
    For gamma less than 1, the histogram will shift towards right and
    the output image will be brighter than the input image.

  References:
    [Wikipedia](http://en.wikipedia.org/wiki/Gamma_correction)
  """
  with ops.name_scope(None, 'adjust_gamma', [image, gamma, gain]) as name:
    image = ops.convert_to_tensor(image, name='image')
    # Work in float and convert back afterwards so fixed-point inputs keep
    # their original dtype (with saturation on the way back).
    orig_dtype = image.dtype
    if orig_dtype in (dtypes.float16, dtypes.float32):
      flt_image = image
    else:
      flt_image = convert_image_dtype(image, dtypes.float32)

    # `gamma` may be a tensor in graph mode, so the non-negativity check is
    # attached as a control dependency rather than a Python-level raise.
    assert_op = _assert(gamma >= 0, ValueError,
                        'Gamma should be a non-negative real number.')
    if assert_op:
      gamma = control_flow_ops.with_dependencies(assert_op, gamma)

    # According to the definition of gamma correction: Out = gain * In**gamma.
    corrected = gain * flt_image**gamma

    return convert_image_dtype(corrected, orig_dtype, saturate=True)
@tf_export('image.convert_image_dtype')
@dispatch.add_dispatch_support
def convert_image_dtype(image, dtype, saturate=False, name=None):
  """Convert `image` to `dtype`, scaling its values if needed.

  The operation supports data types (for `image` and `dtype`) of
  `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,
  `float16`, `float32`, `float64`, `bfloat16`.

  Images that are represented using floating point values are expected to have
  values in the range [0,1). Image data stored in integer data types are
  expected to have values in the range `[0,MAX]`, where `MAX` is the largest
  positive representable number for the data type.

  This op converts between data types, scaling the values appropriately before
  casting.

  Usage Example:

  >>> x = [[[1, 2, 3], [4, 5, 6]],
  ...      [[7, 8, 9], [10, 11, 12]]]
  >>> x_int8 = tf.convert_to_tensor(x, dtype=tf.int8)
  >>> tf.image.convert_image_dtype(x_int8, dtype=tf.float16, saturate=False)
  <tf.Tensor: shape=(2, 2, 3), dtype=float16, numpy=
  array([[[0.00787, 0.01575, 0.02362],
          [0.0315 , 0.03937, 0.04724]],
         [[0.0551 , 0.063  , 0.07086],
          [0.07874, 0.0866 , 0.0945 ]]], dtype=float16)>

  Converting integer types to floating point types returns normalized floating
  point values in the range [0, 1); the values are normalized by the `MAX` value
  of the input dtype. Consider the following two examples:

  >>> a = [[[1], [2]], [[3], [4]]]
  >>> a_int8 = tf.convert_to_tensor(a, dtype=tf.int8)
  >>> tf.image.convert_image_dtype(a_int8, dtype=tf.float32)
  <tf.Tensor: shape=(2, 2, 1), dtype=float32, numpy=
  array([[[0.00787402],
          [0.01574803]],
         [[0.02362205],
          [0.03149606]]], dtype=float32)>

  >>> a_int32 = tf.convert_to_tensor(a, dtype=tf.int32)
  >>> tf.image.convert_image_dtype(a_int32, dtype=tf.float32)
  <tf.Tensor: shape=(2, 2, 1), dtype=float32, numpy=
  array([[[4.6566129e-10],
          [9.3132257e-10]],
         [[1.3969839e-09],
          [1.8626451e-09]]], dtype=float32)>

  Despite having identical values of `a` and output dtype of `float32`, the
  outputs differ due to the different input dtypes (`int8` vs. `int32`). This
  is, again, because the values are normalized by the `MAX` value of the input
  dtype.

  Note that converting floating point values to integer type may lose precision.
  In the example below, an image tensor `b` of dtype `float32` is converted to
  `int8` and back to `float32`. The final output, however, is different from
  the original input `b` due to precision loss.

  >>> b = [[[0.12], [0.34]], [[0.56], [0.78]]]
  >>> b_float32 = tf.convert_to_tensor(b, dtype=tf.float32)
  >>> b_int8 = tf.image.convert_image_dtype(b_float32, dtype=tf.int8)
  >>> tf.image.convert_image_dtype(b_int8, dtype=tf.float32)
  <tf.Tensor: shape=(2, 2, 1), dtype=float32, numpy=
  array([[[0.11811024],
          [0.33858266]],
         [[0.5590551 ],
          [0.77952754]]], dtype=float32)>

  Scaling up from an integer type (input dtype) to another integer type (output
  dtype) will not map input dtype's `MAX` to output dtype's `MAX` but converting
  back and forth should result in no change. For example, as shown below, the
  `MAX` value of int8 (=127) is not mapped to the `MAX` value of int16 (=32,767)
  but, when scaled back, we get the same, original values of `c`.

  >>> c = [[[1], [2]], [[127], [127]]]
  >>> c_int8 = tf.convert_to_tensor(c, dtype=tf.int8)
  >>> c_int16 = tf.image.convert_image_dtype(c_int8, dtype=tf.int16)
  >>> print(c_int16)
  tf.Tensor(
  [[[  256]
    [  512]]
   [[32512]
    [32512]]], shape=(2, 2, 1), dtype=int16)
  >>> c_int8_back = tf.image.convert_image_dtype(c_int16, dtype=tf.int8)
  >>> print(c_int8_back)
  tf.Tensor(
  [[[  1]
    [  2]]
   [[127]
    [127]]], shape=(2, 2, 1), dtype=int8)

  Scaling down from an integer type to another integer type can be a lossy
  conversion. Notice in the example below that converting `int16` to `uint8` and
  back to `int16` has lost precision.

  >>> d = [[[1000], [2000]], [[3000], [4000]]]
  >>> d_int16 = tf.convert_to_tensor(d, dtype=tf.int16)
  >>> d_uint8 = tf.image.convert_image_dtype(d_int16, dtype=tf.uint8)
  >>> d_int16_back = tf.image.convert_image_dtype(d_uint8, dtype=tf.int16)
  >>> print(d_int16_back)
  tf.Tensor(
  [[[ 896]
    [1920]]
   [[2944]
    [3968]]], shape=(2, 2, 1), dtype=int16)

  Note that converting from floating point inputs to integer types may lead to
  over/underflow problems. Set saturate to `True` to avoid such problem in
  problematic conversions. If enabled, saturation will clip the output into the
  allowed range before performing a potentially dangerous cast (and only before
  performing such a cast, i.e., when casting from a floating point to an integer
  type, and when casting from a signed to an unsigned type; `saturate` has no
  effect on casts between floats, or on casts that increase the type's range).

  Args:
    image: An image.
    dtype: A `DType` to convert `image` to.
    saturate: If `True`, clip the input before casting (if necessary).
    name: A name for this operation (optional).

  Returns:
    `image`, converted to `dtype`.

  Raises:
    AttributeError: Raises an attribute error when dtype is neither
    float nor integer
  """
  image = ops.convert_to_tensor(image, name='image')
  dtype = dtypes.as_dtype(dtype)
  if not dtype.is_floating and not dtype.is_integer:
    raise AttributeError('dtype must be either floating point or integer')
  # No conversion needed: still emit an identity op so the optional `name`
  # is honored on the returned tensor.
  if dtype == image.dtype:
    return array_ops.identity(image, name=name)

  with ops.name_scope(name, 'convert_image', [image]) as name:
    # Both integer: use integer multiplication in the larger range
    if image.dtype.is_integer and dtype.is_integer:
      # `DType.max` is a Python int here, so the scale arithmetic below is
      # exact (no float rounding).
      scale_in = image.dtype.max
      scale_out = dtype.max
      if scale_in > scale_out:
        # Scaling down, scale first, then cast. The scaling factor will
        # cause in.max to be mapped to above out.max but below out.max+1,
        # so that the output is safely in the supported range.
        scale = (scale_in + 1) // (scale_out + 1)
        scaled = math_ops.floordiv(image, scale)

        if saturate:
          return math_ops.saturate_cast(scaled, dtype, name=name)
        else:
          return math_ops.cast(scaled, dtype, name=name)
      else:
        # Scaling up, cast first, then scale. The scale will not map in.max to
        # out.max, but converting back and forth should result in no change.
        if saturate:
          cast = math_ops.saturate_cast(image, dtype)
        else:
          cast = math_ops.cast(image, dtype)
        scale = (scale_out + 1) // (scale_in + 1)
        return math_ops.multiply(cast, scale, name=name)
    elif image.dtype.is_floating and dtype.is_floating:
      # Both float: Just cast, no possible overflows in the allowed ranges.
      # Note: We're ignoring float overflows. If your image dynamic range
      # exceeds float range, you're on your own.
      return math_ops.cast(image, dtype, name=name)
    else:
      if image.dtype.is_integer:
        # Converting to float: first cast, then scale. No saturation possible.
        cast = math_ops.cast(image, dtype)
        scale = 1. / image.dtype.max
        return math_ops.multiply(cast, scale, name=name)
      else:
        # Converting from float: first scale, then cast
        scale = dtype.max + 0.5  # avoid rounding problems in the cast
        scaled = math_ops.multiply(image, scale)
        if saturate:
          return math_ops.saturate_cast(scaled, dtype, name=name)
        else:
          return math_ops.cast(scaled, dtype, name=name)
@tf_export('image.rgb_to_grayscale')
@dispatch.add_dispatch_support
def rgb_to_grayscale(images, name=None):
  """Converts one or more images from RGB to Grayscale.

  The output has the same `DType` and rank as `images`, but its last
  dimension has size 1, holding the grayscale value of each pixel.

  >>> original = tf.constant([[[1.0, 2.0, 3.0]]])
  >>> converted = tf.image.rgb_to_grayscale(original)
  >>> print(converted.numpy())
  [[[1.81...]]]

  Args:
    images: The RGB tensor to convert. The last dimension must have size 3 and
      should contain RGB values.
    name: A name for the operation (optional).

  Returns:
    The converted grayscale image(s).
  """
  with ops.name_scope(name, 'rgb_to_grayscale', [images]) as name:
    images = ops.convert_to_tensor(images, name='images')
    # Keep the caller's dtype so the result can be converted back at the end.
    input_dtype = images.dtype
    as_float = convert_image_dtype(images, dtypes.float32)
    # Luma coefficients for the RGB -> grayscale projection; see
    # https://en.wikipedia.org/wiki/Luma_%28video%29
    luma_weights = [0.2989, 0.5870, 0.1140]
    luma = math_ops.tensordot(as_float, luma_weights, [-1, -1])
    luma = array_ops.expand_dims(luma, -1)
    return convert_image_dtype(luma, input_dtype, name=name)
@tf_export('image.grayscale_to_rgb')
@dispatch.add_dispatch_support
def grayscale_to_rgb(images, name=None):
  """Converts one or more images from Grayscale to RGB.

  The output has the same `DType` and rank as `images`, with the last
  dimension expanded from size 1 to size 3 by replicating the gray value
  into each of the R, G and B channels.

  >>> original = tf.constant([[[1.0], [2.0], [3.0]]])
  >>> converted = tf.image.grayscale_to_rgb(original)
  >>> print(converted.numpy())
  [[[1. 1. 1.]
  [2. 2. 2.]
  [3. 3. 3.]]]

  Args:
    images: The Grayscale tensor to convert. The last dimension must be size 1.
    name: A name for the operation (optional).

  Returns:
    The converted grayscale image(s).
  """
  with ops.name_scope(name, 'grayscale_to_rgb', [images]) as name:
    images = _AssertGrayscaleImage(images)
    images = ops.convert_to_tensor(images, name='images')
    # Build a tile multiple of [1, ..., 1, 3]: ones for every dimension
    # except the channel dimension, which is replicated three times.
    leading_rank = array_ops.expand_dims(array_ops.rank(images) - 1, 0)
    multiples = array_ops.concat(
        [array_ops.ones(leading_rank, dtype=dtypes.int32),
         array_ops.expand_dims(3, 0)], 0)
    rgb = array_ops.tile(images, multiples, name=name)
    # Propagate whatever static shape is known, with channels fixed at 3.
    rgb.set_shape(images.get_shape()[:-1].concatenate([3]))
    return rgb
# pylint: disable=invalid-name
@tf_export('image.random_hue')
@dispatch.add_dispatch_support
def random_hue(image, max_delta, seed=None):
  """Adjust the hue of RGB images by a random factor.

  Like `adjust_hue()`, but the `delta` is drawn uniformly at random from
  the interval `[-max_delta, max_delta)`.

  `max_delta` must be in the interval `[0, 0.5]`.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.random_hue(x, 0.2)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...>

  For deterministic results given a `seed` value, use
  `tf.image.stateless_random_hue`. Unlike the `seed` param of
  `tf.image.random_*` ops, `tf.image.stateless_random_*` ops produce the
  same result for the same seed no matter how many times the function is
  called, regardless of global seed settings (e.g. `tf.random.set_seed`).

  Args:
    image: RGB image or images. The size of the last dimension must be 3.
    max_delta: float. The maximum value for the random delta.
    seed: An operation-specific seed. It will be used in conjunction with the
      graph-level seed to determine the real seeds that will be used in this
      operation. Please see the documentation of set_random_seed for its
      interaction with the graph-level random seed.

  Returns:
    Adjusted image(s), same shape and DType as `image`.

  Raises:
    ValueError: if `max_delta` is invalid.
  """
  # Validate the delta range before sampling (the two errors are mutually
  # exclusive, so check order does not matter).
  if max_delta < 0:
    raise ValueError('max_delta must be non-negative.')
  if max_delta > 0.5:
    raise ValueError('max_delta must be <= 0.5.')
  rand_delta = random_ops.random_uniform([], -max_delta, max_delta, seed=seed)
  return adjust_hue(image, rand_delta)
@tf_export('image.stateless_random_hue', v1=[])
@dispatch.add_dispatch_support
def stateless_random_hue(image, max_delta, seed):
  """Adjust the hue of RGB images by a random factor deterministically.

  Equivalent to `adjust_hue()`, with the `delta` drawn uniformly at random
  from the interval `[-max_delta, max_delta)`.

  Given the same `seed`, the result is identical no matter how many times
  the function is called, and is unaffected by global seed settings
  (e.g. `tf.random.set_seed`).

  `max_delta` must be in the interval `[0, 0.5]`.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> seed = (1, 2)
  >>> tf.image.stateless_random_hue(x, 0.2, seed)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 1.6514902,  1.       ,  3.       ],
          [ 4.65149  ,  4.       ,  6.       ]],
         [[ 7.65149  ,  7.       ,  9.       ],
          [10.65149  , 10.       , 12.       ]]], dtype=float32)>

  Args:
    image: RGB image or images. The size of the last dimension must be 3.
    max_delta: float. The maximum value for the random delta.
    seed: A shape [2] Tensor, the seed to the random number generator. Must
      have dtype `int32` or `int64`. (When using XLA, only `int32` is
      allowed.)

  Returns:
    Adjusted image(s), same shape and DType as `image`.

  Raises:
    ValueError: if `max_delta` is invalid.
  """
  # Validate the delta range before drawing the stateless sample.
  if max_delta < 0:
    raise ValueError('max_delta must be non-negative.')
  if max_delta > 0.5:
    raise ValueError('max_delta must be <= 0.5.')
  rand_delta = stateless_random_ops.stateless_random_uniform(
      shape=[], minval=-max_delta, maxval=max_delta, seed=seed)
  return adjust_hue(image, rand_delta)
@tf_export('image.adjust_hue')
@dispatch.add_dispatch_support
def adjust_hue(image, delta, name=None):
  """Adjust hue of RGB images.

  Convenience wrapper that converts an RGB image to float representation,
  rotates the hue channel (H) in HSV space by `delta`, converts back to
  RGB, and finally back to the original data type. When several
  adjustments are chained, minimizing redundant conversions is advisable.

  `delta` must be in the interval `[-1, 1]`.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.adjust_hue(x, 0.2)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 2.3999996,  1.       ,  3.       ],
          [ 5.3999996,  4.       ,  6.       ]],
         [[ 8.4      ,  7.       ,  9.       ],
          [11.4      , 10.       , 12.       ]]], dtype=float32)>

  Integer inputs are supported as well:

  >>> image = [[[1, 2, 3], [4, 5, 6]],
  ...          [[7, 8, 9], [10, 11, 12]],
  ...          [[13, 14, 15], [16, 17, 18]]]
  >>> image = tf.constant(image)
  >>> tf.image.adjust_hue(image, 0.2)
  <tf.Tensor: shape=(3, 2, 3), dtype=int32, numpy=
  array([[[ 2,  1,  3],
          [ 5,  4,  6]],
         [[ 8,  7,  9],
          [11, 10, 12]],
         [[14, 13, 15],
          [17, 16, 18]]], dtype=int32)>

  Args:
    image: RGB image or images. The size of the last dimension must be 3.
    delta: float. How much to add to the hue channel.
    name: A name for this operation (optional).

  Returns:
    Adjusted image(s), same shape and DType as `image`.
  """
  with ops.name_scope(name, 'adjust_hue', [image]) as name:
    image = ops.convert_to_tensor(image, name='image')
    # Keep the caller's dtype so the result can be converted back.
    input_dtype = image.dtype
    # float16/float32 inputs are used as-is; everything else is normalized
    # to float32 first.
    already_float = input_dtype in (dtypes.float16, dtypes.float32)
    working = image if already_float else convert_image_dtype(
        image, dtypes.float32)
    shifted = gen_image_ops.adjust_hue(working, delta)
    return convert_image_dtype(shifted, input_dtype)
# pylint: disable=invalid-name
@tf_export('image.random_jpeg_quality')
@dispatch.add_dispatch_support
def random_jpeg_quality(image, min_jpeg_quality, max_jpeg_quality, seed=None):
  """Randomly changes jpeg encoding quality for inducing jpeg noise.

  `min_jpeg_quality` must be in the interval `[0, 100]` and less than
  `max_jpeg_quality`. `max_jpeg_quality` must be in the interval `[0, 100]`.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.random_jpeg_quality(x, 75, 95)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...>

  For deterministic results given a `seed` value, use
  `tf.image.stateless_random_jpeg_quality`. Unlike the `seed` param of
  `tf.image.random_*` ops, `tf.image.stateless_random_*` ops produce the
  same result for the same seed no matter how many times the function is
  called, regardless of global seed settings (e.g. `tf.random.set_seed`).

  Args:
    image: 3D image. Size of the last dimension must be 1 or 3.
    min_jpeg_quality: Minimum jpeg encoding quality to use.
    max_jpeg_quality: Maximum jpeg encoding quality to use.
    seed: An operation-specific seed. It will be used in conjunction with the
      graph-level seed to determine the real seeds that will be used in this
      operation. Please see the documentation of set_random_seed for its
      interaction with the graph-level random seed.

  Returns:
    Adjusted image(s), same shape and DType as `image`.

  Raises:
    ValueError: if `min_jpeg_quality` or `max_jpeg_quality` is invalid.
  """
  # Both bounds must lie in [0, 100] and form a non-empty interval.
  bounds_ok = (0 <= min_jpeg_quality <= 100) and (0 <= max_jpeg_quality <= 100)
  if not bounds_ok:
    raise ValueError('jpeg encoding range must be between 0 and 100.')
  if min_jpeg_quality >= max_jpeg_quality:
    raise ValueError('`min_jpeg_quality` must be less than `max_jpeg_quality`.')
  jpeg_quality = random_ops.random_uniform(
      [], min_jpeg_quality, max_jpeg_quality, seed=seed, dtype=dtypes.int32)
  return adjust_jpeg_quality(image, jpeg_quality)
@tf_export('image.stateless_random_jpeg_quality', v1=[])
@dispatch.add_dispatch_support
def stateless_random_jpeg_quality(image,
                                  min_jpeg_quality,
                                  max_jpeg_quality,
                                  seed):
  """Deterministically radomize jpeg encoding quality for inducing jpeg noise.

  Given the same `seed`, the result is identical no matter how many times
  the function is called, and is unaffected by global seed settings
  (e.g. `tf.random.set_seed`).

  `min_jpeg_quality` must be in the interval `[0, 100]` and less than
  `max_jpeg_quality`. `max_jpeg_quality` must be in the interval `[0, 100]`.

  Usage Example:

  >>> x = [[[1, 2, 3],
  ...       [4, 5, 6]],
  ...      [[7, 8, 9],
  ...       [10, 11, 12]]]
  >>> x_uint8 = tf.cast(x, tf.uint8)
  >>> seed = (1, 2)
  >>> tf.image.stateless_random_jpeg_quality(x_uint8, 75, 95, seed)
  <tf.Tensor: shape=(2, 2, 3), dtype=uint8, numpy=
  array([[[ 0,  4,  5],
          [ 1,  5,  6]],
         [[ 5,  9, 10],
          [ 5,  9, 10]]], dtype=uint8)>

  Args:
    image: 3D image. Size of the last dimension must be 1 or 3.
    min_jpeg_quality: Minimum jpeg encoding quality to use.
    max_jpeg_quality: Maximum jpeg encoding quality to use.
    seed: A shape [2] Tensor, the seed to the random number generator. Must
      have dtype `int32` or `int64`. (When using XLA, only `int32` is
      allowed.)

  Returns:
    Adjusted image(s), same shape and DType as `image`.

  Raises:
    ValueError: if `min_jpeg_quality` or `max_jpeg_quality` is invalid.
  """
  # Both bounds must lie in [0, 100] and form a non-empty interval.
  bounds_ok = (0 <= min_jpeg_quality <= 100) and (0 <= max_jpeg_quality <= 100)
  if not bounds_ok:
    raise ValueError('jpeg encoding range must be between 0 and 100.')
  if min_jpeg_quality >= max_jpeg_quality:
    raise ValueError('`min_jpeg_quality` must be less than `max_jpeg_quality`.')
  jpeg_quality = stateless_random_ops.stateless_random_uniform(
      shape=[], minval=min_jpeg_quality, maxval=max_jpeg_quality, seed=seed,
      dtype=dtypes.int32)
  return adjust_jpeg_quality(image, jpeg_quality)
@tf_export('image.adjust_jpeg_quality')
@dispatch.add_dispatch_support
def adjust_jpeg_quality(image, jpeg_quality, name=None):
  """Adjust jpeg encoding quality of an image.

  Convenience wrapper that converts an image to uint8 representation,
  JPEG-encodes it with `jpeg_quality`, decodes it again, and converts
  back to the original data type.

  `jpeg_quality` must be in the interval `[0, 100]`.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.adjust_jpeg_quality(x, 75)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[1., 1., 1.],
          [1., 1., 1.]],
         [[1., 1., 1.],
          [1., 1., 1.]]], dtype=float32)>

  Args:
    image: 3D image. The size of the last dimension must be None, 1 or 3.
    jpeg_quality: Python int or Tensor of type int32. jpeg encoding quality.
    name: A name for this operation (optional).

  Returns:
    Adjusted image, same shape and DType as `image`.

  Raises:
    InvalidArgumentError: quality must be in [0,100]
    InvalidArgumentError: image must have 1 or 3 channels
  """
  with ops.name_scope(name, 'adjust_jpeg_quality', [image]):
    image = ops.convert_to_tensor(image, name='image')
    # Static channel count is fed to decode_jpeg below.
    # NOTE(review): assumes `image` has a known rank — confirm callers never
    # pass unknown-rank tensors.
    channels = image.shape.as_list()[-1]
    # Keep the caller's dtype so the result can be converted back.
    input_dtype = image.dtype
    # The JPEG codec works on uint8 data; saturate to avoid overflow.
    image = convert_image_dtype(image, dtypes.uint8, saturate=True)
    if not _is_tensor(jpeg_quality):
      # Wrap a plain Python int as an int32 tensor.
      jpeg_quality = ops.convert_to_tensor(jpeg_quality, dtype=dtypes.int32)
    encoded = gen_image_ops.encode_jpeg_variable_quality(image, jpeg_quality)
    decoded = gen_image_ops.decode_jpeg(encoded, channels=channels)
    return convert_image_dtype(decoded, input_dtype, saturate=True)
@tf_export('image.random_saturation')
@dispatch.add_dispatch_support
def random_saturation(image, lower, upper, seed=None):
  """Adjust the saturation of RGB images by a random factor.

  Like `adjust_saturation()`, but the `saturation_factor` is drawn
  uniformly at random from the interval `[lower, upper)`.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.random_saturation(x, 5, 10)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 0. ,  1.5,  3. ],
          [ 0. ,  3. ,  6. ]],
         [[ 0. ,  4.5,  9. ],
          [ 0. ,  6. , 12. ]]], dtype=float32)>

  For deterministic results given a `seed` value, use
  `tf.image.stateless_random_saturation`. Unlike the `seed` param of
  `tf.image.random_*` ops, `tf.image.stateless_random_*` ops produce the
  same result for the same seed no matter how many times the function is
  called, regardless of global seed settings (e.g. `tf.random.set_seed`).

  Args:
    image: RGB image or images. The size of the last dimension must be 3.
    lower: float. Lower bound for the random saturation factor.
    upper: float. Upper bound for the random saturation factor.
    seed: An operation-specific seed. It will be used in conjunction with the
      graph-level seed to determine the real seeds that will be used in this
      operation. Please see the documentation of set_random_seed for its
      interaction with the graph-level random seed.

  Returns:
    Adjusted image(s), same shape and DType as `image`.

  Raises:
    ValueError: if `upper <= lower` or if `lower < 0`.
  """
  # Check the interval ordering first (both errors can apply at once, and
  # the ordering error takes precedence).
  if upper <= lower:
    raise ValueError('upper must be > lower.')
  if lower < 0:
    raise ValueError('lower must be non-negative.')
  factor = random_ops.random_uniform([], lower, upper, seed=seed)
  return adjust_saturation(image, factor)
@tf_export('image.stateless_random_saturation', v1=[])
@dispatch.add_dispatch_support
def stateless_random_saturation(image, lower, upper, seed=None):
  """Adjust the saturation of RGB images by a random factor deterministically.

  Equivalent to `adjust_saturation()`, with the `saturation_factor` drawn
  uniformly at random from the interval `[lower, upper)`.

  Given the same `seed`, the result is identical no matter how many times
  the function is called, and is unaffected by global seed settings
  (e.g. `tf.random.set_seed`).

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> seed = (1, 2)
  >>> tf.image.stateless_random_saturation(x, 0.5, 1.0, seed)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 1.1559395,  2.0779698,  3.       ],
          [ 4.1559396,  5.07797  ,  6.       ]],
         [[ 7.1559396,  8.07797  ,  9.       ],
          [10.155939 , 11.07797  , 12.       ]]], dtype=float32)>

  Args:
    image: RGB image or images. The size of the last dimension must be 3.
    lower: float. Lower bound for the random saturation factor.
    upper: float. Upper bound for the random saturation factor.
    seed: A shape [2] Tensor, the seed to the random number generator. Must
      have dtype `int32` or `int64`. (When using XLA, only `int32` is
      allowed.)

  Returns:
    Adjusted image(s), same shape and DType as `image`.

  Raises:
    ValueError: if `upper <= lower` or if `lower < 0`.
  """
  # NOTE(review): `seed` defaults to None here, unlike the other stateless
  # ops in this file; calling without a seed would fail inside
  # stateless_random_uniform — confirm the default is intentional.
  # Check the interval ordering first (both errors can apply at once, and
  # the ordering error takes precedence).
  if upper <= lower:
    raise ValueError('upper must be > lower.')
  if lower < 0:
    raise ValueError('lower must be non-negative.')
  factor = stateless_random_ops.stateless_random_uniform(
      shape=[], minval=lower, maxval=upper, seed=seed)
  return adjust_saturation(image, factor)
@tf_export('image.adjust_saturation')
@dispatch.add_dispatch_support
def adjust_saturation(image, saturation_factor, name=None):
  """Adjust saturation of RGB images.

  Convenience wrapper that converts RGB images to float representation,
  multiplies the saturation (S) channel in HSV space by
  `saturation_factor` with clipping, converts back to RGB, and finally
  back to the original data type. When several adjustments are chained,
  minimizing redundant conversions is advisable.

  Usage Example:

  >>> x = [[[1.0, 2.0, 3.0],
  ...       [4.0, 5.0, 6.0]],
  ...      [[7.0, 8.0, 9.0],
  ...       [10.0, 11.0, 12.0]]]
  >>> tf.image.adjust_saturation(x, 0.5)
  <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=
  array([[[ 2. ,  2.5,  3. ],
          [ 5. ,  5.5,  6. ]],
         [[ 8. ,  8.5,  9. ],
          [11. , 11.5, 12. ]]], dtype=float32)>

  Args:
    image: RGB image or images. The size of the last dimension must be 3.
    saturation_factor: float. Factor to multiply the saturation by.
    name: A name for this operation (optional).

  Returns:
    Adjusted image(s), same shape and DType as `image`.

  Raises:
    InvalidArgumentError: input must have 3 channels
  """
  with ops.name_scope(name, 'adjust_saturation', [image]) as name:
    image = ops.convert_to_tensor(image, name='image')
    # Keep the caller's dtype so the result can be converted back.
    input_dtype = image.dtype
    # float16/float32 inputs are used as-is; everything else is normalized
    # to float32 first.
    already_float = input_dtype in (dtypes.float16, dtypes.float32)
    working = image if already_float else convert_image_dtype(
        image, dtypes.float32)
    adjusted = gen_image_ops.adjust_saturation(working, saturation_factor)
    return convert_image_dtype(adjusted, input_dtype)
@tf_export('io.is_jpeg', 'image.is_jpeg', v1=['io.is_jpeg', 'image.is_jpeg'])
def is_jpeg(contents, name=None):
  r"""Convenience function to check if the 'contents' encodes a JPEG image.

  Args:
    contents: 0-D `string`. The encoded image bytes.
    name: A name for the operation (optional)

  Returns:
    A scalar boolean tensor indicating if 'contents' may be a JPEG image.
    is_jpeg is susceptible to false positives.
  """
  # Plain JPEGs begin with \xff\xd8\xff\xe0 and EXIF JPEGs with
  # \xff\xd8\xff\xe1; the shared three-byte prefix \xff\xd8\xff covers both.
  with ops.name_scope(name, 'is_jpeg'):
    header = string_ops.substr(contents, 0, 3)
    return math_ops.equal(header, b'\xff\xd8\xff', name=name)
def _is_png(contents, name=None):
  r"""Convenience function to check if the 'contents' encodes a PNG image.

  Args:
    contents: 0-D `string`. The encoded image bytes.
    name: A name for the operation (optional)

  Returns:
    A scalar boolean tensor indicating if 'contents' may be a PNG image.
    is_png is susceptible to false positives.
  """
  # Only the first three bytes of the eight-byte PNG signature are checked,
  # hence the possibility of false positives.
  with ops.name_scope(name, 'is_png'):
    header = string_ops.substr(contents, 0, 3)
    return math_ops.equal(header, b'\211PN', name=name)
# Re-export the generated image codec ops under both the `tf.io` and
# `tf.image` namespaces (plus their v1 equivalents), wrapping each with
# dispatch support so they participate in TF's operator dispatch.
tf_export(
    'io.decode_and_crop_jpeg',
    'image.decode_and_crop_jpeg',
    v1=['io.decode_and_crop_jpeg', 'image.decode_and_crop_jpeg'])(
        dispatch.add_dispatch_support(gen_image_ops.decode_and_crop_jpeg))

tf_export(
    'io.decode_bmp',
    'image.decode_bmp',
    v1=['io.decode_bmp', 'image.decode_bmp'])(
        dispatch.add_dispatch_support(gen_image_ops.decode_bmp))

tf_export(
    'io.decode_gif',
    'image.decode_gif',
    v1=['io.decode_gif', 'image.decode_gif'])(
        dispatch.add_dispatch_support(gen_image_ops.decode_gif))

tf_export(
    'io.decode_jpeg',
    'image.decode_jpeg',
    v1=['io.decode_jpeg', 'image.decode_jpeg'])(
        dispatch.add_dispatch_support(gen_image_ops.decode_jpeg))

tf_export(
    'io.decode_png',
    'image.decode_png',
    v1=['io.decode_png', 'image.decode_png'])(
        dispatch.add_dispatch_support(gen_image_ops.decode_png))

tf_export(
    'io.encode_jpeg',
    'image.encode_jpeg',
    v1=['io.encode_jpeg', 'image.encode_jpeg'])(
        dispatch.add_dispatch_support(gen_image_ops.encode_jpeg))

tf_export(
    'io.extract_jpeg_shape',
    'image.extract_jpeg_shape',
    v1=['io.extract_jpeg_shape', 'image.extract_jpeg_shape'])(
        dispatch.add_dispatch_support(gen_image_ops.extract_jpeg_shape))
@tf_export('io.encode_png', 'image.encode_png')
@dispatch.add_dispatch_support
def encode_png(image, compression=-1, name=None):
  r"""PNG-encode an image.

  `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`
  where `channels` is:

  *   1: for grayscale.
  *   2: for grayscale + alpha.
  *   3: for RGB.
  *   4: for RGBA.

  The ZLIB compression level, `compression`, can be -1 for the PNG-encoder
  default or a value from 0 to 9. 9 is the highest compression level,
  generating the smallest output, but is slower.

  Args:
    image: A `Tensor`. Must be one of the following types: `uint8`, `uint16`.
      3-D with shape `[height, width, channels]`.
    compression: An optional `int`. Defaults to `-1`. Compression level.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  # Ensure plain arrays/lists are wrapped as tensors before encoding.
  image = ops.convert_to_tensor(image)
  return gen_image_ops.encode_png(image, compression, name)
@tf_export(
    'io.decode_image',
    'image.decode_image',
    v1=['io.decode_image', 'image.decode_image'])
@dispatch.add_dispatch_support
def decode_image(contents,
                 channels=None,
                 dtype=dtypes.uint8,
                 name=None,
                 expand_animations=True):
  """Function for `decode_bmp`, `decode_gif`, `decode_jpeg`, and `decode_png`.

  Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the
  appropriate operation to convert the input bytes `string` into a `Tensor`
  of type `dtype`.

  Note: `decode_gif` returns a 4-D array `[num_frames, height, width, 3]`, as
  opposed to `decode_bmp`, `decode_jpeg` and `decode_png`, which return 3-D
  arrays `[height, width, num_channels]`. Make sure to take this into account
  when constructing your graph if you are intermixing GIF files with BMP, JPEG,
  and/or PNG files. Alternately, set the `expand_animations` argument of this
  function to `False`, in which case the op will return 3-dimensional tensors
  and will truncate animated GIF files to the first frame.

  NOTE: If the first frame of an animated GIF does not occupy the entire
  canvas (maximum frame width x maximum frame height), then it fills the
  unoccupied areas (in the first frame) with zeros (black). For frames after
  the first frame that does not occupy the entire canvas, it uses the previous
  frame to fill the unoccupied areas.

  Args:
    contents: A `Tensor` of type `string`. 0-D. The encoded image bytes.
    channels: An optional `int`. Defaults to `0`. Number of color channels for
      the decoded image.
    dtype: The desired DType of the returned `Tensor`.
    name: A name for the operation (optional)
    expand_animations: An optional `bool`. Defaults to `True`. Controls the
      shape of the returned op's output. If `True`, the returned op will
      produce a 3-D tensor for PNG, JPEG, and BMP files; and a 4-D tensor for
      all GIFs, whether animated or not. If, `False`, the returned op will
      produce a 3-D tensor for all file types and will truncate animated GIFs
      to the first frame.

  Returns:
    `Tensor` with type `dtype` and a 3- or 4-dimensional shape, depending on
    the file type and the value of the `expand_animations` parameter.

  Raises:
    ValueError: On incorrect number of channels.
  """
  with ops.name_scope(name, 'decode_image'):
    # Forward-compatible path: delegate format detection and decoding to the
    # fused DecodeImage kernel instead of the Python-level cond chain below.
    if compat.forward_compatible(2020, 8, 14):
      channels = 0 if channels is None else channels
      # The fused kernel only emits float32/uint8/uint16; for any other
      # requested dtype, decode as uint16 and convert afterwards.
      if dtype not in [dtypes.float32, dtypes.uint8, dtypes.uint16]:
        dest_dtype = dtype
        dtype = dtypes.uint16
        return convert_image_dtype(gen_image_ops.decode_image(
            contents=contents,
            channels=channels,
            expand_animations=expand_animations,
            dtype=dtype), dest_dtype)
      else:
        return gen_image_ops.decode_image(
            contents=contents,
            channels=channels,
            expand_animations=expand_animations,
            dtype=dtype)

    # Legacy path: dispatch on the file's magic bytes via nested tf.cond ops.
    if channels not in (None, 0, 1, 3, 4):
      raise ValueError('channels must be in (None, 0, 1, 3, 4)')
    # First three bytes are enough to distinguish JPEG, PNG and GIF below.
    substr = string_ops.substr(contents, 0, 3)

    def _bmp():
      """Decodes a BMP image."""
      signature = string_ops.substr(contents, 0, 2)
      # Create assert op to check that bytes are BMP decodable
      is_bmp = math_ops.equal(signature, 'BM', name='is_bmp')
      # BMP is the last format tried, so a failure here means none matched.
      decode_msg = 'Unable to decode bytes as JPEG, PNG, GIF, or BMP'
      assert_decode = control_flow_ops.Assert(is_bmp, [decode_msg])
      bmp_channels = 0 if channels is None else channels
      good_channels = math_ops.not_equal(bmp_channels, 1, name='check_channels')
      channels_msg = ('Channels must be in (None, 0, 3, 4) when decoding BMP '
                      'images')
      assert_channels = control_flow_ops.Assert(good_channels, [channels_msg])
      with ops.control_dependencies([assert_decode, assert_channels]):
        return convert_image_dtype(
            gen_image_ops.decode_bmp(contents, channels=bmp_channels), dtype)

    def _gif():
      """Decodes a GIF image."""
      # Create assert to make sure that channels is not set to 1
      # Already checked above that channels is in (None, 0, 1, 3)
      gif_channels = 0 if channels is None else channels
      good_channels = math_ops.logical_and(
          math_ops.not_equal(gif_channels, 1, name='check_gif_channels'),
          math_ops.not_equal(gif_channels, 4, name='check_gif_channels'))
      channels_msg = 'Channels must be in (None, 0, 3) when decoding GIF images'
      assert_channels = control_flow_ops.Assert(good_channels, [channels_msg])
      with ops.control_dependencies([assert_channels]):
        result = convert_image_dtype(gen_image_ops.decode_gif(contents), dtype)
        if not expand_animations:
          # For now we decode animated GIFs fully and toss out all but the
          # first frame when expand_animations is False
          result = array_ops.gather(result, 0)
        return result

    def check_gif():
      # Create assert op to check that bytes are GIF decodable
      is_gif = math_ops.equal(substr, b'\x47\x49\x46', name='is_gif')
      return control_flow_ops.cond(is_gif, _gif, _bmp, name='cond_gif')

    def _png():
      """Decodes a PNG image."""
      # PNG supports 8- and 16-bit channels; decode at 16-bit unless the
      # caller explicitly asked for uint8.
      return convert_image_dtype(
          gen_image_ops.decode_png(
              contents,
              channels,
              dtype=dtypes.uint8 if dtype == dtypes.uint8 else dtypes.uint16),
          dtype)

    def check_png():
      """Checks if an image is PNG."""
      return control_flow_ops.cond(
          _is_png(contents), _png, check_gif, name='cond_png')

    def _jpeg():
      """Decodes a jpeg image."""
      jpeg_channels = 0 if channels is None else channels
      good_channels = math_ops.not_equal(
          jpeg_channels, 4, name='check_jpeg_channels')
      channels_msg = ('Channels must be in (None, 0, 1, 3) when decoding JPEG '
                      'images')
      assert_channels = control_flow_ops.Assert(good_channels, [channels_msg])
      with ops.control_dependencies([assert_channels]):
        return convert_image_dtype(
            gen_image_ops.decode_jpeg(contents, channels), dtype)

    # Decode normal JPEG images (start with \xff\xd8\xff\xe0)
    # as well as JPEG images with EXIF data (start with \xff\xd8\xff\xe1).
    return control_flow_ops.cond(
        is_jpeg(contents), _jpeg, check_png, name='cond_jpeg')
@tf_export('image.total_variation')
@dispatch.add_dispatch_support
def total_variation(images, name=None):
  """Calculate and return the total variation for one or more images.

  The total variation is the sum of the absolute differences between
  neighboring pixel values in the input images, which measures how much
  noise is present. It is commonly used as a regularization loss to
  suppress noise during optimization; for a batch, reduce the result with
  `loss = tf.reduce_sum(tf.image.total_variation(images))`.

  This implements the anisotropic 2-D version of the formula described here:
  https://en.wikipedia.org/wiki/Total_variation_denoising

  Args:
    images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D
      Tensor of shape `[height, width, channels]`.
    name: A name for the operation (optional).

  Raises:
    ValueError: if images.shape is not a 3-D or 4-D vector.

  Returns:
    The total variation of `images`: a 1-D float Tensor of shape `[batch]`
    for 4-D input, or a scalar float for 3-D input.
  """
  with ops.name_scope(name, 'total_variation'):
    rank = images.get_shape().ndims
    if rank == 3:
      # Single image [height, width, channels]: difference of each pixel
      # with its neighbor one step down and one step right.
      diff_rows = images[1:, :, :] - images[:-1, :, :]
      diff_cols = images[:, 1:, :] - images[:, :-1, :]
      # Reduce over every axis (None means all axes) -> scalar.
      reduce_axes = None
    elif rank == 4:
      # Batch [batch, height, width, channels]: same shifted differences,
      # computed per image in the batch.
      diff_rows = images[:, 1:, :, :] - images[:, :-1, :, :]
      diff_cols = images[:, :, 1:, :] - images[:, :, :-1, :]
      # Reduce only the last 3 axes so we keep one value per image.
      reduce_axes = [1, 2, 3]
    else:
      raise ValueError('\'images\' must be either 3 or 4-dimensional.')
    # Total variation = sum of absolute pixel differences in both directions.
    return (math_ops.reduce_sum(math_ops.abs(diff_rows), axis=reduce_axes) +
            math_ops.reduce_sum(math_ops.abs(diff_cols), axis=reduce_axes))
@tf_export('image.sample_distorted_bounding_box', v1=[])
@dispatch.add_dispatch_support
def sample_distorted_bounding_box_v2(image_size,
                                     bounding_boxes,
                                     seed=0,
                                     min_object_covered=0.1,
                                     aspect_ratio_range=None,
                                     area_range=None,
                                     max_attempts=None,
                                     use_image_if_no_bounding_boxes=None,
                                     name=None):
  """Generate a single randomly distorted bounding box for an image.

  Bounding box annotations are often supplied alongside ground-truth labels
  in image recognition or object localization tasks; a common data
  augmentation technique randomly distorts an image while preserving its
  content. Given an `image_size`, `bounding_boxes` and a set of constraints,
  this op returns one randomly distorted object localization (bounding box).

  The result is returned as 3 tensors: `begin`, `size` and `bboxes`. The
  first two can be fed directly into `tf.slice` to crop the image; the last
  may be supplied to `tf.image.draw_bounding_boxes` for visualization.
  Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`,
  with coordinates as floats in `[0.0, 1.0]` relative to the image's width
  and height.

  If no bounding box information is available, setting
  `use_image_if_no_bounding_boxes = true` assumes a single implicit box
  covering the whole image; if it is false and no boxes are supplied, an
  error is raised.

  For deterministic output given a `seed`, use
  `tf.image.stateless_sample_distorted_bounding_box` instead; unlike the
  `seed` parameter here, the stateless variant guarantees identical results
  independent of call count and global seed settings
  (e.g. `tf.random.set_seed`).

  Args:
    image_size: A `Tensor`. Must be one of the following types: `uint8`,
      `int8`, `int16`, `int32`, `int64`. 1-D, containing
      `[height, width, channels]`.
    bounding_boxes: A `Tensor` of type `float32`. 3-D with shape
      `[batch, N, 4]` describing the N bounding boxes associated with the
      image.
    seed: An optional `int`. Defaults to `0`. If non-zero, the random number
      generator is seeded by the given `seed`; otherwise it is seeded by a
      random seed.
    min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The
      cropped area must contain at least this fraction of any supplied
      bounding box. Must be non-negative; with 0 the crop need not overlap
      any box.
    aspect_ratio_range: An optional list of `floats`. Defaults to
      `[0.75, 1.33]`. The cropped area must have an aspect
      `ratio = width / height` within this range.
    area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The
      cropped area must contain a fraction of the supplied image within this
      range.
    max_attempts: An optional `int`. Defaults to `100`. Number of attempts at
      generating a crop satisfying the constraints; after `max_attempts`
      failures, the entire image is returned.
    use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`.
      Controls behavior if no bounding boxes are supplied (see above).
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (begin, size, bboxes).

    begin: Same type as `image_size`. 1-D, containing
      `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`.
    size: Same type as `image_size`. 1-D, containing
      `[target_height, target_width, -1]`. Provide as input to `tf.slice`.
    bboxes: A `float32` 3-D `Tensor` with shape `[1, 1, 4]` containing the
      distorted bounding box. Provide as input to
      `tf.image.draw_bounding_boxes`.
  """
  # A zero (default) seed means "use a random seed"; otherwise derive the
  # op-level seed pair from the given value.
  if seed:
    seed1, seed2 = random_seed.get_seed(seed)
  else:
    seed1 = seed2 = 0
  with ops.name_scope(name, 'sample_distorted_bounding_box'):
    return gen_image_ops.sample_distorted_bounding_box_v2(
        image_size,
        bounding_boxes,
        seed=seed1,
        seed2=seed2,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=max_attempts,
        use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes,
        name=name)
@tf_export('image.stateless_sample_distorted_bounding_box', v1=[])
@dispatch.add_dispatch_support
def stateless_sample_distorted_bounding_box(image_size,
                                            bounding_boxes,
                                            seed,
                                            min_object_covered=0.1,
                                            aspect_ratio_range=None,
                                            area_range=None,
                                            max_attempts=None,
                                            use_image_if_no_bounding_boxes=None,
                                            name=None):
  """Generate a randomly distorted bounding box for an image deterministically.

  Like `tf.image.sample_distorted_bounding_box`, this op produces a single
  randomly distorted object localization (bounding box) given an
  `image_size`, `bounding_boxes` and a set of constraints — but given the
  same `seed` the output is guaranteed to be identical, independent of how
  many times the function is called and of global seed settings
  (e.g. `tf.random.set_seed`).

  The result is returned as 3 tensors: `begin`, `size` and `bboxes`. The
  first two can be fed directly into `tf.slice` to crop the image; the last
  may be supplied to `tf.image.draw_bounding_boxes` for visualization.
  Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`,
  with coordinates as floats in `[0.0, 1.0]` relative to the image's width
  and height.

  If no bounding box information is available, setting
  `use_image_if_no_bounding_boxes = true` assumes a single implicit box
  covering the whole image; if it is false and no boxes are supplied, an
  error is raised.

  Args:
    image_size: A `Tensor`. Must be one of the following types: `uint8`,
      `int8`, `int16`, `int32`, `int64`. 1-D, containing
      `[height, width, channels]`.
    bounding_boxes: A `Tensor` of type `float32`. 3-D with shape
      `[batch, N, 4]` describing the N bounding boxes associated with the
      image.
    seed: A shape [2] Tensor, the seed to the random number generator. Must
      have dtype `int32` or `int64`. (When using XLA, only `int32` is
      allowed.)
    min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The
      cropped area must contain at least this fraction of any supplied
      bounding box. Must be non-negative; with 0 the crop need not overlap
      any box.
    aspect_ratio_range: An optional list of `floats`. Defaults to
      `[0.75, 1.33]`. The cropped area must have an aspect
      `ratio = width / height` within this range.
    area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The
      cropped area must contain a fraction of the supplied image within this
      range.
    max_attempts: An optional `int`. Defaults to `100`. Number of attempts at
      generating a crop satisfying the constraints; after `max_attempts`
      failures, the entire image is returned.
    use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`.
      Controls behavior if no bounding boxes are supplied (see above).
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (begin, size, bboxes).

    begin: Same type as `image_size`. 1-D, containing
      `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`.
    size: Same type as `image_size`. 1-D, containing
      `[target_height, target_width, -1]`. Provide as input to `tf.slice`.
    bboxes: A `float32` 3-D `Tensor` with shape `[1, 1, 4]` containing the
      distorted bounding box. Provide as input to
      `tf.image.draw_bounding_boxes`.
  """
  with ops.name_scope(name, 'stateless_sample_distorted_bounding_box'):
    # Forward everything to the stateless kernel unchanged; all randomness
    # is derived from the explicit `seed` tensor.
    sampler_kwargs = dict(
        image_size=image_size,
        bounding_boxes=bounding_boxes,
        seed=seed,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=max_attempts,
        use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes,
        name=name)
    return gen_image_ops.stateless_sample_distorted_bounding_box(
        **sampler_kwargs)
@tf_export(v1=['image.sample_distorted_bounding_box'])
@dispatch.add_dispatch_support
@deprecation.deprecated(
    date=None,
    instructions='`seed2` arg is deprecated.'
    'Use sample_distorted_bounding_box_v2 instead.')
def sample_distorted_bounding_box(image_size,
                                  bounding_boxes,
                                  seed=None,
                                  seed2=None,
                                  min_object_covered=0.1,
                                  aspect_ratio_range=None,
                                  area_range=None,
                                  max_attempts=None,
                                  use_image_if_no_bounding_boxes=None,
                                  name=None):
  """Generate a single randomly distorted bounding box for an image.

  Deprecated v1 endpoint: prefer `sample_distorted_bounding_box_v2`, which
  drops the `seed2` argument.

  Given an `image_size`, `bounding_boxes` and a set of constraints, this op
  returns one randomly distorted object localization (bounding box), a
  common data augmentation technique that distorts an image while preserving
  its content.

  The result is returned as 3 tensors: `begin`, `size` and `bboxes`. The
  first two can be fed directly into `tf.slice` to crop the image; the last
  may be supplied to `tf.image.draw_bounding_boxes` for visualization.
  Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`,
  with coordinates as floats in `[0.0, 1.0]` relative to the image's width
  and height.

  If no bounding box information is available, setting
  `use_image_if_no_bounding_boxes = True` assumes a single implicit box
  covering the whole image; if it is false and no boxes are supplied, an
  error is raised.

  Args:
    image_size: A `Tensor`. Must be one of the following types: `uint8`,
      `int8`, `int16`, `int32`, `int64`. 1-D, containing
      `[height, width, channels]`.
    bounding_boxes: A `Tensor` of type `float32`. 3-D with shape
      `[batch, N, 4]` describing the N bounding boxes associated with the
      image.
    seed: An optional `int`. Defaults to `0`. If either `seed` or `seed2`
      are set to non-zero, the random number generator is seeded by the
      given `seed`; otherwise it is seeded by a random seed.
    seed2: An optional `int`. Defaults to `0`. A second seed to avoid seed
      collision.
    min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The
      cropped area must contain at least this fraction of any supplied
      bounding box. Must be non-negative; with 0 the crop need not overlap
      any box.
    aspect_ratio_range: An optional list of `floats`. Defaults to
      `[0.75, 1.33]`. The cropped area must have an aspect
      ratio = width / height within this range.
    area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The
      cropped area must contain a fraction of the supplied image within this
      range.
    max_attempts: An optional `int`. Defaults to `100`. Number of attempts at
      generating a crop satisfying the constraints; after `max_attempts`
      failures, the entire image is returned.
    use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`.
      Controls behavior if no bounding boxes are supplied (see above).
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (begin, size, bboxes).

    begin: Same type as `image_size`. 1-D, containing
      `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`.
    size: Same type as `image_size`. 1-D, containing
      `[target_height, target_width, -1]`. Provide as input to `tf.slice`.
    bboxes: A `float32` 3-D `Tensor` with shape `[1, 1, 4]` containing the
      distorted bounding box. Provide as input to
      `tf.image.draw_bounding_boxes`.
  """
  with ops.name_scope(name, 'sample_distorted_bounding_box'):
    # Pass both legacy seeds straight through to the v2 kernel.
    sampler_kwargs = dict(
        seed=seed,
        seed2=seed2,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=max_attempts,
        use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes,
        name=name)
    return gen_image_ops.sample_distorted_bounding_box_v2(
        image_size, bounding_boxes, **sampler_kwargs)
@tf_export('image.non_max_suppression')
@dispatch.add_dispatch_support
def non_max_suppression(boxes,
                        scores,
                        max_output_size,
                        iou_threshold=0.5,
                        score_threshold=float('-inf'),
                        name=None):
  """Greedily selects a subset of bounding boxes in descending order of score.

  Prunes away boxes that have high intersection-over-union (IOU) overlap
  with previously selected boxes. Bounding boxes are supplied as
  `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of
  any diagonal pair of box corners; coordinates may be normalized (lying in
  `[0, 1]`) or absolute. The algorithm is agnostic to the origin of the
  coordinate system and invariant to orthogonal transformations and
  translations, so translating or reflecting the coordinate system yields
  the same selection.

  The output is a set of integers indexing into the input boxes; the
  selected box coordinates can be recovered with `tf.gather`:

  ```python
  selected_indices = tf.image.non_max_suppression(
      boxes, scores, max_output_size, iou_threshold)
  selected_boxes = tf.gather(boxes, selected_indices)
  ```

  Args:
    boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`.
    scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single
      score corresponding to each box (each row of boxes).
    max_output_size: A scalar integer `Tensor` representing the maximum
      number of boxes to be selected by non-max suppression.
    iou_threshold: A 0-D float tensor representing the threshold for deciding
      whether boxes overlap too much with respect to IOU.
    score_threshold: A 0-D float tensor representing the threshold for
      deciding when to remove boxes based on score.
    name: A name for the operation (optional).

  Returns:
    selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the
      selected indices from the boxes tensor, where `M <= max_output_size`.
  """
  with ops.name_scope(name, 'non_max_suppression'):
    # Thresholds must be tensors before being handed to the kernel.
    iou_tensor = ops.convert_to_tensor(iou_threshold, name='iou_threshold')
    score_tensor = ops.convert_to_tensor(
        score_threshold, name='score_threshold')
    return gen_image_ops.non_max_suppression_v3(
        boxes, scores, max_output_size, iou_tensor, score_tensor)
@tf_export('image.non_max_suppression_with_scores')
@dispatch.add_dispatch_support
def non_max_suppression_with_scores(boxes,
                                    scores,
                                    max_output_size,
                                    iou_threshold=0.5,
                                    score_threshold=float('-inf'),
                                    soft_nms_sigma=0.0,
                                    name=None):
  """Greedily selects a subset of bounding boxes in descending order of score.

  Prunes away boxes that have high intersection-over-union (IOU) overlap
  with previously selected boxes. Bounding boxes are supplied as
  `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of
  any diagonal pair of box corners; coordinates may be normalized (lying in
  `[0, 1]`) or absolute. The algorithm is agnostic to the origin of the
  coordinate system and invariant to orthogonal transformations and
  translations, so translating or reflecting the coordinate system yields
  the same selection.

  The output is a set of integers indexing into the input boxes; the
  selected box coordinates can be recovered with `tf.gather`:

  ```python
  selected_indices, selected_scores = tf.image.non_max_suppression_padded(
      boxes, scores, max_output_size, iou_threshold=1.0, score_threshold=0.1,
      soft_nms_sigma=0.5)
  selected_boxes = tf.gather(boxes, selected_indices)
  ```

  This function generalizes the `tf.image.non_max_suppression` op by also
  supporting a Soft-NMS (with Gaussian weighting) mode (c.f.
  Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the
  score of other overlapping boxes instead of directly causing them to be
  pruned. Consequently, in contrast to `tf.image.non_max_suppression`,
  `tf.image.non_max_suppression_padded` returns the new scores of each input
  box in the second output, `selected_scores`.

  To enable Soft-NMS, set `soft_nms_sigma` to a value larger than 0. When
  `soft_nms_sigma` equals 0, the behavior is identical to
  `tf.image.non_max_suppression` (except for the extra output) both in
  function and in running time.

  Args:
    boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`.
    scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single
      score corresponding to each box (each row of boxes).
    max_output_size: A scalar integer `Tensor` representing the maximum
      number of boxes to be selected by non-max suppression.
    iou_threshold: A 0-D float tensor representing the threshold for deciding
      whether boxes overlap too much with respect to IOU.
    score_threshold: A 0-D float tensor representing the threshold for
      deciding when to remove boxes based on score.
    soft_nms_sigma: A 0-D float tensor representing the sigma parameter for
      Soft NMS; see Bodla et al (c.f. https://arxiv.org/abs/1704.04503).
      When `soft_nms_sigma=0.0` (which is default), we fall back to standard
      (hard) NMS.
    name: A name for the operation (optional).

  Returns:
    selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the
      selected indices from the boxes tensor, where `M <= max_output_size`.
    selected_scores: A 1-D float tensor of shape `[M]` representing the
      corresponding scores for each selected box, where
      `M <= max_output_size`. Scores only differ from corresponding input
      scores when using Soft NMS (i.e. when `soft_nms_sigma>0`)
  """
  with ops.name_scope(name, 'non_max_suppression_with_scores'):
    # Normalize all scalar knobs to tensors for the kernel.
    iou_tensor = ops.convert_to_tensor(iou_threshold, name='iou_threshold')
    score_tensor = ops.convert_to_tensor(
        score_threshold, name='score_threshold')
    sigma_tensor = ops.convert_to_tensor(
        soft_nms_sigma, name='soft_nms_sigma')
    # v5 also returns the number of valid outputs; it is irrelevant when
    # pad_to_max_output_size is False, so only the first two outputs matter.
    outputs = gen_image_ops.non_max_suppression_v5(
        boxes,
        scores,
        max_output_size,
        iou_tensor,
        score_tensor,
        sigma_tensor,
        pad_to_max_output_size=False)
    return outputs[0], outputs[1]
@tf_export('image.non_max_suppression_overlaps')
@dispatch.add_dispatch_support
def non_max_suppression_with_overlaps(overlaps,
                                      scores,
                                      max_output_size,
                                      overlap_threshold=0.5,
                                      score_threshold=float('-inf'),
                                      name=None):
  """Greedily selects a subset of bounding boxes in descending order of score.

  Prunes away boxes that have high overlap with previously selected boxes.
  N-by-n overlap values are supplied as square matrix.

  The output is a set of integers indexing into the input boxes; the
  selected box coordinates can be recovered with `tf.gather`:

  ```python
  selected_indices = tf.image.non_max_suppression_overlaps(
      overlaps, scores, max_output_size, iou_threshold)
  selected_boxes = tf.gather(boxes, selected_indices)
  ```

  Args:
    overlaps: A 2-D float `Tensor` of shape `[num_boxes, num_boxes]`
      representing the n-by-n box overlap values.
    scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single
      score corresponding to each box (each row of boxes).
    max_output_size: A scalar integer `Tensor` representing the maximum
      number of boxes to be selected by non-max suppression.
    overlap_threshold: A 0-D float tensor representing the threshold for
      deciding whether boxes overlap too much with respect to the provided
      overlap values.
    score_threshold: A 0-D float tensor representing the threshold for
      deciding when to remove boxes based on score.
    name: A name for the operation (optional).

  Returns:
    selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the
      selected indices from the overlaps tensor, where `M <= max_output_size`.
  """
  with ops.name_scope(name, 'non_max_suppression_overlaps'):
    overlap_tensor = ops.convert_to_tensor(
        overlap_threshold, name='overlap_threshold')
    # pylint: disable=protected-access
    return gen_image_ops.non_max_suppression_with_overlaps(
        overlaps, scores, max_output_size, overlap_tensor, score_threshold)
    # pylint: enable=protected-access
# RGB -> YIQ conversion matrix, contracted against the last (channel) axis
# in `rgb_to_yiq` via tensordot: rows index the input R, G, B channels and
# columns index the output Y, I, Q channels.
_rgb_to_yiq_kernel = [[0.299, 0.59590059, 0.2115],
                      [0.587, -0.27455667, -0.52273617],
                      [0.114, -0.32134392, 0.31119955]]
@tf_export('image.rgb_to_yiq')
@dispatch.add_dispatch_support
def rgb_to_yiq(images):
  """Converts one or more images from RGB to YIQ.

  Outputs a tensor of the same shape as the `images` tensor, containing the
  YIQ value of the pixels. The output is only well defined if the value in
  images are in [0,1].

  Usage Example:

  >>> x = tf.constant([[[1.0, 2.0, 3.0]]])
  >>> tf.image.rgb_to_yiq(x)
  <tf.Tensor: shape=(1, 1, 3), dtype=float32,
  numpy=array([[[ 1.815     , -0.91724455,  0.09962624]]], dtype=float32)>

  Args:
    images: 2-D or higher rank. Image data to convert. Last dimension must be
      size 3.

  Returns:
    images: tensor with the same shape as `images`.
  """
  image_tensor = ops.convert_to_tensor(images, name='images')
  transform = ops.convert_to_tensor(
      _rgb_to_yiq_kernel, dtype=image_tensor.dtype, name='kernel')
  # Contract the channel axis (last) against the kernel's first axis.
  channel_axis = image_tensor.get_shape().ndims - 1
  return math_ops.tensordot(
      image_tensor, transform, axes=[[channel_axis], [0]])
# YIQ -> RGB conversion matrix (inverse direction of `_rgb_to_yiq_kernel`),
# used by `yiq_to_rgb`: rows index the input Y, I, Q channels and columns
# index the output R, G, B channels.
_yiq_to_rgb_kernel = [[1, 1, 1], [0.95598634, -0.27201283, -1.10674021],
                      [0.6208248, -0.64720424, 1.70423049]]
@tf_export('image.yiq_to_rgb')
@dispatch.add_dispatch_support
def yiq_to_rgb(images):
  """Converts one or more images from YIQ to RGB.

  Outputs a tensor of the same shape as the `images` tensor, containing the
  RGB value of the pixels. The output is only well defined if the Y value in
  images are in [0,1], I value are in [-0.5957,0.5957] and Q value are in
  [-0.5226,0.5226].

  Args:
    images: 2-D or higher rank. Image data to convert. Last dimension must be
      size 3.

  Returns:
    images: tensor with the same shape as `images`.
  """
  image_tensor = ops.convert_to_tensor(images, name='images')
  transform = ops.convert_to_tensor(
      _yiq_to_rgb_kernel, dtype=image_tensor.dtype, name='kernel')
  # Contract the channel axis (last) against the kernel's first axis.
  channel_axis = image_tensor.get_shape().ndims - 1
  return math_ops.tensordot(
      image_tensor, transform, axes=[[channel_axis], [0]])
# RGB -> YUV conversion matrix, contracted against the last (channel) axis
# in `rgb_to_yuv` via tensordot: rows index the input R, G, B channels and
# columns index the output Y, U, V channels.
_rgb_to_yuv_kernel = [[0.299, -0.14714119, 0.61497538],
                      [0.587, -0.28886916, -0.51496512],
                      [0.114, 0.43601035, -0.10001026]]
@tf_export('image.rgb_to_yuv')
@dispatch.add_dispatch_support
def rgb_to_yuv(images):
  """Converts one or more images from RGB to YUV.

  Outputs a tensor of the same shape as the `images` tensor, containing the
  YUV value of the pixels. The output is only well defined if the value in
  images are in [0, 1]. There are two ways of representing an image:
  [0, 255] pixel values range or [0, 1] (as float) pixel values range. Users
  need to convert the input image into a float [0, 1] range.

  Args:
    images: 2-D or higher rank. Image data to convert. Last dimension must be
      size 3.

  Returns:
    images: tensor with the same shape as `images`.
  """
  image_tensor = ops.convert_to_tensor(images, name='images')
  transform = ops.convert_to_tensor(
      _rgb_to_yuv_kernel, dtype=image_tensor.dtype, name='kernel')
  # Contract the channel axis (last) against the kernel's first axis.
  channel_axis = image_tensor.get_shape().ndims - 1
  return math_ops.tensordot(
      image_tensor, transform, axes=[[channel_axis], [0]])
# YUV -> RGB conversion matrix (inverse direction of `_rgb_to_yuv_kernel`),
# used by `yuv_to_rgb`: rows index the input Y, U, V channels and columns
# index the output R, G, B channels.
_yuv_to_rgb_kernel = [[1, 1, 1], [0, -0.394642334, 2.03206185],
                      [1.13988303, -0.58062185, 0]]
@tf_export('image.yuv_to_rgb')
@dispatch.add_dispatch_support
def yuv_to_rgb(images):
  """Converts one or more images from YUV to RGB.

  Outputs a tensor of the same shape as the `images` tensor, containing the
  RGB value of the pixels. The output is only well defined if the Y value in
  images are in [0,1], U and V value are in [-0.5,0.5].

  As per the above description, you need to scale your YUV images if their
  pixel values are not in the required range. Below given example illustrates
  preprocessing of each channel of images before feeding them to `yuv_to_rgb`.

  ```python
  yuv_images = tf.random.uniform(shape=[100, 64, 64, 3], maxval=255)
  last_dimension_axis = len(yuv_images.shape) - 1
  yuv_tensor_images = tf.truediv(
      tf.subtract(
          yuv_images,
          tf.reduce_min(yuv_images)
      ),
      tf.subtract(
          tf.reduce_max(yuv_images),
          tf.reduce_min(yuv_images)
      )
  )
  y, u, v = tf.split(yuv_tensor_images, 3, axis=last_dimension_axis)
  target_uv_min, target_uv_max = -0.5, 0.5
  u = u * (target_uv_max - target_uv_min) + target_uv_min
  v = v * (target_uv_max - target_uv_min) + target_uv_min
  preprocessed_yuv_images = tf.concat([y, u, v], axis=last_dimension_axis)
  rgb_tensor_images = tf.image.yuv_to_rgb(preprocessed_yuv_images)
  ```

  Args:
    images: 2-D or higher rank. Image data to convert. Last dimension must be
      size 3.

  Returns:
    images: tensor with the same shape as `images`.
  """
  image_tensor = ops.convert_to_tensor(images, name='images')
  transform = ops.convert_to_tensor(
      _yuv_to_rgb_kernel, dtype=image_tensor.dtype, name='kernel')
  # Contract the channel axis (last) against the kernel's first axis.
  channel_axis = image_tensor.get_shape().ndims - 1
  return math_ops.tensordot(
      image_tensor, transform, axes=[[channel_axis], [0]])
def _verify_compatible_image_shapes(img1, img2):
  """Checks if two image tensors are compatible for applying SSIM or PSNR.

  This function checks if two sets of images have ranks at least 3, and if the
  last three dimensions match.

  Args:
    img1: Tensor containing the first image batch.
    img2: Tensor containing the second image batch.

  Returns:
    A tuple containing: the first tensor shape, the second tensor shape, and a
    list of control_flow_ops.Assert() ops implementing the checks.

  Raises:
    ValueError: When static shape check fails.
  """
  # Static checks first: ranks must be >= 3 and the image dims (last three)
  # must be compatible.
  shape1 = img1.get_shape().with_rank_at_least(3)
  shape2 = img2.get_shape().with_rank_at_least(3)
  shape1[-3:].assert_is_compatible_with(shape2[-3:])
  if shape1.ndims is not None and shape2.ndims is not None:
    # Walk the leading (batch) dims right-to-left; a dim of 1 on either side
    # is allowed (broadcasting), otherwise the dims must be compatible.
    for dim1, dim2 in zip(
        reversed(shape1.dims[:-3]), reversed(shape2.dims[:-3])):
      if not (dim1 == 1 or dim2 == 1 or dim1.is_compatible_with(dim2)):
        raise ValueError('Two images are not compatible: %s and %s' %
                         (shape1, shape2))
  # Now assign shape tensors.
  shape1, shape2 = array_ops.shape_n([img1, img2])
  # TODO(sjhwang): Check if shape1[:-3] and shape2[:-3] are broadcastable.
  # Dynamic (graph-time) checks mirroring the static ones above, for shapes
  # unknown at graph-construction time.
  checks = []
  checks.append(
      control_flow_ops.Assert(
          math_ops.greater_equal(array_ops.size(shape1), 3), [shape1, shape2],
          summarize=10))
  checks.append(
      control_flow_ops.Assert(
          math_ops.reduce_all(math_ops.equal(shape1[-3:], shape2[-3:])),
          [shape1, shape2],
          summarize=10))
  return shape1, shape2, checks
@tf_export('image.psnr')
@dispatch.add_dispatch_support
def psnr(a, b, max_val, name=None):
  """Returns the Peak Signal-to-Noise Ratio between a and b.

  Intended for signals or images; produces one PSNR value for each image in
  the batch. The last three dimensions of the inputs are expected to be
  [height, width, depth].

  Example:

  ```python
  # Read images from file.
  im1 = tf.decode_png('path/to/im1.png')
  im2 = tf.decode_png('path/to/im2.png')
  # Compute PSNR over tf.uint8 Tensors.
  psnr1 = tf.image.psnr(im1, im2, max_val=255)

  # Compute PSNR over tf.float32 Tensors.
  im1 = tf.image.convert_image_dtype(im1, tf.float32)
  im2 = tf.image.convert_image_dtype(im2, tf.float32)
  psnr2 = tf.image.psnr(im1, im2, max_val=1.0)
  # psnr1 and psnr2 both have type tf.float32 and are almost equal.
  ```

  Arguments:
    a: First set of images.
    b: Second set of images.
    max_val: The dynamic range of the images (i.e., the difference between the
      maximum and minimum allowed values).
    name: Namespace to embed the computation in.

  Returns:
    The scalar PSNR between a and b. The returned tensor has type `tf.float32`
    and shape [batch_size, 1].
  """
  with ops.name_scope(name, 'PSNR', [a, b]):
    # Compute in float32, scaling max_val by the same conversion so the ratio
    # is unaffected by the dtype change.
    max_val = math_ops.cast(max_val, a.dtype)
    max_val = convert_image_dtype(max_val, dtypes.float32)
    a = convert_image_dtype(a, dtypes.float32)
    b = convert_image_dtype(b, dtypes.float32)
    mean_sq_error = math_ops.reduce_mean(
        math_ops.squared_difference(a, b), [-3, -2, -1])
    # PSNR = 20*log10(max_val) - 10*log10(MSE), computed in natural logs.
    psnr_val = math_ops.subtract(
        20 * math_ops.log(max_val) / math_ops.log(10.0),
        np.float32(10 / np.log(10)) * math_ops.log(mean_sq_error),
        name='psnr')

    _, _, checks = _verify_compatible_image_shapes(a, b)
    with ops.control_dependencies(checks):
      return array_ops.identity(psnr_val)
def _ssim_helper(x, y, reducer, max_val, compensation=1.0, k1=0.01, k2=0.03):
  r"""Helper function for computing SSIM.

  SSIM estimates covariances with weighted sums; the default parameters give a
  biased estimate of the covariance. With weighted-sum weights w_i
  (\sum_i w_i = 1), the mean estimators are

    \mu_x = \sum_i w_i x_i,
    \mu_y = \sum_i w_i y_i,

  and the covariance estimator is

    cov_{xy} = \sum_i w_i (x_i - \mu_x) (y_i - \mu_y),

  which is biased: E[cov_{xy}] = (1 - \sum_i w_i ^ 2) Cov(X, Y). For an SSIM
  measure with unbiased covariance estimators, pass (1 - \sum_i w_i ^ 2) as
  the `compensation` argument.

  Arguments:
    x: First set of images.
    y: Second set of images.
    reducer: Function that computes 'local' averages from the set of images.
      For the non-convolutional version, this is usually
      tf.reduce_mean(x, [1, 2]); for the convolutional version, this is
      usually tf.nn.avg_pool2d or tf.nn.conv2d with a weighted-sum kernel.
    max_val: The dynamic range (i.e., the difference between the maximum
      possible allowed value and the minimum allowed value).
    compensation: Compensation factor. See above.
    k1: Default value 0.01
    k2: Default value 0.03 (SSIM is less sensitive to k2 for lower values, so
      values in the range 0 < k2 < 0.4 work well).

  Returns:
    A pair containing the luminance measure, and the contrast-structure
    measure.
  """
  stab_lum = (k1 * max_val)**2
  stab_cs = (k2 * max_val)**2

  # Luminance: (2 * mu_x * mu_y + c1) / (mu_x ** 2 + mu_y ** 2 + c1).
  mu_x = reducer(x)
  mu_y = reducer(y)
  cross_mu = mu_x * mu_y * 2.0
  sq_mu = math_ops.square(mu_x) + math_ops.square(mu_y)
  luminance = (cross_mu + stab_lum) / (sq_mu + stab_lum)

  # Contrast-structure: (2 * cov_{xy} + c2) / (cov_{xx} + cov_{yy} + c2).
  # Since `reducer` is a weighted sum with \sum_i w_i = 1,
  #   cov_{xy} = \sum_i w_i x_i y_i - (\sum_i w_i x_i) (\sum_j w_j y_j),
  # so the means computed above can be reused here.
  cross_raw = reducer(x * y) * 2.0
  sq_raw = reducer(math_ops.square(x) + math_ops.square(y))
  stab_cs *= compensation
  cs = (cross_raw - cross_mu + stab_cs) / (sq_raw - sq_mu + stab_cs)

  # SSIM is the product of the luminance and contrast-structure measures.
  return luminance, cs
def _fspecial_gauss(size, sigma):
  """Mimics the 'fspecial' gaussian MATLAB function.

  Builds a normalized 2-D Gaussian kernel of shape [size, size, 1, 1].
  """
  size = ops.convert_to_tensor(size, dtypes.int32)
  sigma = ops.convert_to_tensor(sigma)

  # Centered coordinates: -(size-1)/2 .. (size-1)/2.
  offsets = math_ops.cast(math_ops.range(size), sigma.dtype)
  offsets -= math_ops.cast(size - 1, sigma.dtype) / 2.0

  # Unnormalized log of the Gaussian, as a 2-D outer sum.
  log_kernel = math_ops.square(offsets)
  log_kernel *= -0.5 / math_ops.square(sigma)
  log_kernel = array_ops.reshape(log_kernel, shape=[1, -1]) + array_ops.reshape(
      log_kernel, shape=[-1, 1])

  # Flatten so softmax normalizes over the whole kernel at once.
  log_kernel = array_ops.reshape(log_kernel, shape=[1, -1])
  normalized = nn_ops.softmax(log_kernel)
  return array_ops.reshape(normalized, shape=[size, size, 1, 1])
def _ssim_per_channel(img1,
                      img2,
                      max_val=1.0,
                      filter_size=11,
                      filter_sigma=1.5,
                      k1=0.01,
                      k2=0.03):
  """Computes SSIM index between img1 and img2 per color channel.

  This function matches the standard SSIM implementation from:
  Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image
  quality assessment: from error visibility to structural similarity. IEEE
  transactions on image processing.

  Details:
    - 11x11 Gaussian filter of width 1.5 is used.
    - k1 = 0.01, k2 = 0.03 as in the original paper.

  Args:
    img1: First image batch.
    img2: Second image batch.
    max_val: The dynamic range of the images (i.e., the difference between the
      maximum and minimum allowed values).
    filter_size: Default value 11 (size of gaussian filter).
    filter_sigma: Default value 1.5 (width of gaussian filter).
    k1: Default value 0.01
    k2: Default value 0.03 (SSIM is less sensitivity to K2 for lower values, so
      it would be better if we took the values in the range of 0 < K2 < 0.4).

  Returns:
    A pair of tensors containing and channel-wise SSIM and contrast-structure
    values. The shape is [..., channels].
  """
  filter_size = constant_op.constant(filter_size, dtype=dtypes.int32)
  filter_sigma = constant_op.constant(filter_sigma, dtype=img1.dtype)

  shape1, shape2 = array_ops.shape_n([img1, img2])
  # Height and width must each be at least `filter_size` so that the Gaussian
  # filter can be applied with 'VALID' padding.
  checks = [
      control_flow_ops.Assert(
          math_ops.reduce_all(
              math_ops.greater_equal(shape1[-3:-1], filter_size)),
          [shape1, filter_size],
          summarize=8),
      control_flow_ops.Assert(
          math_ops.reduce_all(
              math_ops.greater_equal(shape2[-3:-1], filter_size)),
          [shape2, filter_size],
          summarize=8)
  ]

  # Enforce the check to run before computation.
  with ops.control_dependencies(checks):
    img1 = array_ops.identity(img1)

  # TODO(sjhwang): Try to cache kernels and compensation factor.
  kernel = _fspecial_gauss(filter_size, filter_sigma)
  # Replicate the single-channel kernel across input channels for the
  # depthwise convolution below.
  kernel = array_ops.tile(kernel, multiples=[1, 1, shape1[-1], 1])

  # The correct compensation factor is `1.0 - tf.reduce_sum(tf.square(kernel))`,
  # but to match MATLAB implementation of MS-SSIM, we use 1.0 instead.
  compensation = 1.0

  # TODO(sjhwang): Try FFT.
  # TODO(sjhwang): Gaussian kernel is separable in space. Consider applying
  #   1-by-n and n-by-1 Gaussian filters instead of an n-by-n filter.
  def reducer(x):
    # Local weighted average: flatten leading batch dims to rank 4, run the
    # depthwise Gaussian filter, then restore the leading dims.
    shape = array_ops.shape(x)
    x = array_ops.reshape(x, shape=array_ops.concat([[-1], shape[-3:]], 0))
    y = nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
    return array_ops.reshape(
        y, array_ops.concat([shape[:-3], array_ops.shape(y)[1:]], 0))

  luminance, cs = _ssim_helper(img1, img2, reducer, max_val, compensation, k1,
                               k2)

  # Average over the second and the third from the last: height, width.
  axes = constant_op.constant([-3, -2], dtype=dtypes.int32)
  ssim_val = math_ops.reduce_mean(luminance * cs, axes)
  cs = math_ops.reduce_mean(cs, axes)
  return ssim_val, cs
@tf_export('image.ssim')
@dispatch.add_dispatch_support
def ssim(img1,
         img2,
         max_val,
         filter_size=11,
         filter_sigma=1.5,
         k1=0.01,
         k2=0.03):
  """Computes SSIM index between img1 and img2.

  Based on the standard SSIM implementation from: Wang, Z., Bovik, A. C.,
  Sheikh, H. R., & Simoncelli, E. P. (2004). Image quality assessment: from
  error visibility to structural similarity. IEEE transactions on image
  processing.

  Note: The true SSIM is only defined on grayscale. This function does not
  perform any colorspace transform. (If the input is already YUV, then it
  will compute YUV SSIM average.)

  Details:
    - 11x11 Gaussian filter of width 1.5 is used.
    - k1 = 0.01, k2 = 0.03 as in the original paper.

  The image sizes must be at least 11x11 because of the filter size.

  Example:

  ```python
  # Read images (of size 255 x 255) from file.
  im1 = tf.image.decode_image(tf.io.read_file('path/to/im1.png'))
  im2 = tf.image.decode_image(tf.io.read_file('path/to/im2.png'))
  tf.shape(im1)  # `img1.png` has 3 channels; shape is `(255, 255, 3)`
  tf.shape(im2)  # `img2.png` has 3 channels; shape is `(255, 255, 3)`
  # Add an outer batch for each image.
  im1 = tf.expand_dims(im1, axis=0)
  im2 = tf.expand_dims(im2, axis=0)
  # Compute SSIM over tf.uint8 Tensors.
  ssim1 = tf.image.ssim(im1, im2, max_val=255, filter_size=11,
                        filter_sigma=1.5, k1=0.01, k2=0.03)

  # Compute SSIM over tf.float32 Tensors.
  im1 = tf.image.convert_image_dtype(im1, tf.float32)
  im2 = tf.image.convert_image_dtype(im2, tf.float32)
  ssim2 = tf.image.ssim(im1, im2, max_val=1.0, filter_size=11,
                        filter_sigma=1.5, k1=0.01, k2=0.03)
  # ssim1 and ssim2 both have type tf.float32 and are almost equal.
  ```

  Args:
    img1: First image batch. 4-D Tensor of shape `[batch, height, width,
      channels]`.
    img2: Second image batch. 4-D Tensor of shape `[batch, height, width,
      channels]`.
    max_val: The dynamic range of the images (i.e., the difference between the
      maximum and minimum allowed values).
    filter_size: Default value 11 (size of gaussian filter).
    filter_sigma: Default value 1.5 (width of gaussian filter).
    k1: Default value 0.01
    k2: Default value 0.03 (SSIM is less sensitive to k2 for lower values, so
      values in the range 0 < k2 < 0.4 work well).

  Returns:
    A tensor containing an SSIM value for each image in batch. Returned SSIM
    values are in range (-1, 1], when pixel values are non-negative. Returns
    a tensor with shape: broadcast(img1.shape[:-3], img2.shape[:-3]).
  """
  with ops.name_scope(None, 'SSIM', [img1, img2]):
    # Convert inputs to tensors and verify that their shapes are compatible.
    img1 = ops.convert_to_tensor(img1, name='img1')
    img2 = ops.convert_to_tensor(img2, name='img2')
    _, _, checks = _verify_compatible_image_shapes(img1, img2)
    with ops.control_dependencies(checks):
      img1 = array_ops.identity(img1)

    # Work in float32, rescaling max_val identically so SSIM is unchanged.
    max_val = math_ops.cast(max_val, img1.dtype)
    max_val = convert_image_dtype(max_val, dtypes.float32)
    img1 = convert_image_dtype(img1, dtypes.float32)
    img2 = convert_image_dtype(img2, dtypes.float32)

    per_channel_ssim, _ = _ssim_per_channel(img1, img2, max_val, filter_size,
                                            filter_sigma, k1, k2)
    # Average over color channels.
    return math_ops.reduce_mean(per_channel_ssim, [-1])
# Default per-scale power factors obtained by Wang et al. in the original
# MS-SSIM paper; index 0 is the weight for the unscaled resolution.
_MSSSIM_WEIGHTS = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333)
@tf_export('image.ssim_multiscale')
@dispatch.add_dispatch_support
def ssim_multiscale(img1,
                    img2,
                    max_val,
                    power_factors=_MSSSIM_WEIGHTS,
                    filter_size=11,
                    filter_sigma=1.5,
                    k1=0.01,
                    k2=0.03):
  """Computes the MS-SSIM between img1 and img2.

  This function assumes that `img1` and `img2` are image batches, i.e. the
  last three dimensions are [height, width, channels].

  Note: The true SSIM is only defined on grayscale. This function does not
  perform any colorspace transform. (If the input is already YUV, then it will
  compute YUV SSIM average.)

  Original paper: Wang, Zhou, Eero P. Simoncelli, and Alan C. Bovik.
  "Multiscale structural similarity for image quality assessment." Signals,
  Systems and Computers, 2004.

  Arguments:
    img1: First image batch.
    img2: Second image batch. Must have the same rank as img1.
    max_val: The dynamic range of the images (i.e., the difference between the
      maximum and minimum allowed values).
    power_factors: Iterable of weights for each of the scales. The number of
      scales used is the length of the list. Index 0 is the unscaled
      resolution's weight and each increasing scale corresponds to the image
      being downsampled by 2. Defaults to (0.0448, 0.2856, 0.3001, 0.2363,
      0.1333), which are the values obtained in the original paper.
    filter_size: Default value 11 (size of gaussian filter).
    filter_sigma: Default value 1.5 (width of gaussian filter).
    k1: Default value 0.01
    k2: Default value 0.03 (SSIM is less sensitivity to K2 for lower values, so
      it would be better if we took the values in the range of 0 < K2 < 0.4).

  Returns:
    A tensor containing an MS-SSIM value for each image in batch. The values
    are in range [0, 1]. Returns a tensor with shape:
    broadcast(img1.shape[:-3], img2.shape[:-3]).
  """
  with ops.name_scope(None, 'MS-SSIM', [img1, img2]):
    # Convert to tensor if needed.
    img1 = ops.convert_to_tensor(img1, name='img1')
    img2 = ops.convert_to_tensor(img2, name='img2')
    # Shape checking.
    shape1, shape2, checks = _verify_compatible_image_shapes(img1, img2)
    with ops.control_dependencies(checks):
      img1 = array_ops.identity(img1)

    # Need to convert the images to float32. Scale max_val accordingly so that
    # SSIM is computed correctly.
    max_val = math_ops.cast(max_val, img1.dtype)
    max_val = convert_image_dtype(max_val, dtypes.float32)
    img1 = convert_image_dtype(img1, dtypes.float32)
    img2 = convert_image_dtype(img2, dtypes.float32)

    imgs = [img1, img2]
    shapes = [shape1, shape2]

    # img1 and img2 are assumed to be a (multi-dimensional) batch of
    # 3-dimensional images (height, width, channels). `heads` contain the batch
    # dimensions, and `tails` contain the image dimensions.
    heads = [s[:-3] for s in shapes]
    tails = [s[-3:] for s in shapes]

    divisor = [1, 2, 2, 1]
    divisor_tensor = constant_op.constant(divisor[1:], dtype=dtypes.int32)

    def do_pad(images, remainder):
      # Symmetrically pad the bottom/right edges so height and width become
      # divisible by 2 before average pooling.
      padding = array_ops.expand_dims(remainder, -1)
      padding = array_ops.pad(padding, [[1, 0], [1, 0]])
      return [array_ops.pad(x, padding, mode='SYMMETRIC') for x in images]

    mcs = []
    for k in range(len(power_factors)):
      with ops.name_scope(None, 'Scale%d' % k, imgs):
        if k > 0:
          # Avg pool takes rank 4 tensors. Flatten leading dimensions.
          flat_imgs = [
              array_ops.reshape(x, array_ops.concat([[-1], t], 0))
              for x, t in zip(imgs, tails)
          ]

          remainder = tails[0] % divisor_tensor
          need_padding = math_ops.reduce_any(math_ops.not_equal(remainder, 0))
          # pylint: disable=cell-var-from-loop
          padded = control_flow_ops.cond(need_padding,
                                         lambda: do_pad(flat_imgs, remainder),
                                         lambda: flat_imgs)
          # pylint: enable=cell-var-from-loop

          # Downsample by 2x2 average pooling, then restore the leading
          # (batch) dimensions. `tails` is updated to the new image dims.
          downscaled = [
              nn_ops.avg_pool(
                  x, ksize=divisor, strides=divisor, padding='VALID')
              for x in padded
          ]
          tails = [x[1:] for x in array_ops.shape_n(downscaled)]
          imgs = [
              array_ops.reshape(x, array_ops.concat([h, t], 0))
              for x, h, t in zip(downscaled, heads, tails)
          ]

        # Overwrite previous ssim value since we only need the last one.
        ssim_per_channel, cs = _ssim_per_channel(
            *imgs,
            max_val=max_val,
            filter_size=filter_size,
            filter_sigma=filter_sigma,
            k1=k1,
            k2=k2)
        mcs.append(nn_ops.relu(cs))

    # Remove the cs score for the last scale. In the MS-SSIM calculation,
    # we use the l(p) at the highest scale. l(p) * cs(p) is ssim(p).
    mcs.pop()  # Remove the cs score for the last scale.
    mcs_and_ssim = array_ops.stack(
        mcs + [nn_ops.relu(ssim_per_channel)], axis=-1)
    # Take weighted geometric mean across the scale axis.
    ms_ssim = math_ops.reduce_prod(
        math_ops.pow(mcs_and_ssim, power_factors), [-1])

    return math_ops.reduce_mean(ms_ssim, [-1])  # Avg over color channels.
@tf_export('image.image_gradients')
@dispatch.add_dispatch_support
def image_gradients(image):
  """Returns image gradients (dy, dx) for each color channel.

  Both output tensors have the same shape as the input: [batch_size, h, w, d].
  The gradient values are organized so that [I(x+1, y) - I(x, y)] is in
  location (x, y). That means that dy will always have zeros in the last row,
  and dx will always have zeros in the last column.

  Usage Example:
    ```python
    BATCH_SIZE = 1
    IMAGE_HEIGHT = 5
    IMAGE_WIDTH = 5
    CHANNELS = 1
    image = tf.reshape(tf.range(IMAGE_HEIGHT * IMAGE_WIDTH * CHANNELS,
      delta=1, dtype=tf.float32),
      shape=(BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, CHANNELS))
    dy, dx = tf.image.image_gradients(image)
    print(image[0, :,:,0])
    tf.Tensor(
      [[ 0.  1.  2.  3.  4.]
      [ 5.  6.  7.  8.  9.]
      [10. 11. 12. 13. 14.]
      [15. 16. 17. 18. 19.]
      [20. 21. 22. 23. 24.]], shape=(5, 5), dtype=float32)
    print(dy[0, :,:,0])
    tf.Tensor(
      [[5. 5. 5. 5. 5.]
      [5. 5. 5. 5. 5.]
      [5. 5. 5. 5. 5.]
      [5. 5. 5. 5. 5.]
      [0. 0. 0. 0. 0.]], shape=(5, 5), dtype=float32)
    print(dx[0, :,:,0])
    tf.Tensor(
      [[1. 1. 1. 1. 0.]
      [1. 1. 1. 1. 0.]
      [1. 1. 1. 1. 0.]
      [1. 1. 1. 1. 0.]
      [1. 1. 1. 1. 0.]], shape=(5, 5), dtype=float32)
    ```

  Arguments:
    image: Tensor with shape [batch_size, h, w, d].

  Returns:
    Pair of tensors (dy, dx) holding the vertical and horizontal image
    gradients (1-step finite difference).

  Raises:
    ValueError: If `image` is not a 4D tensor.
  """
  if image.get_shape().ndims != 4:
    raise ValueError('image_gradients expects a 4D tensor '
                     '[batch_size, h, w, d], not {}.'.format(image.get_shape()))

  image_shape = array_ops.shape(image)
  batch_size, height, width, depth = array_ops.unstack(image_shape)

  # One-step forward differences along rows (dy) and columns (dx).
  dy = image[:, 1:, :, :] - image[:, :-1, :, :]
  dx = image[:, :, 1:, :] - image[:, :, :-1, :]

  # Append a zero row / column so the outputs match the input size; this puts
  # the gradient [I(x+1,y) - I(x,y)] on the base pixel (x, y).
  zero_row = array_ops.zeros(
      array_ops.stack([batch_size, 1, width, depth]), image.dtype)
  dy = array_ops.reshape(array_ops.concat([dy, zero_row], 1), image_shape)

  zero_col = array_ops.zeros(
      array_ops.stack([batch_size, height, 1, depth]), image.dtype)
  dx = array_ops.reshape(array_ops.concat([dx, zero_col], 2), image_shape)

  return dy, dx
@tf_export('image.sobel_edges')
@dispatch.add_dispatch_support
def sobel_edges(image):
  """Returns a tensor holding Sobel edge maps.

  Example usage:

  For general usage, `image` would be loaded from a file as below:

  ```python
  image_bytes = tf.io.read_file(path_to_image_file)
  image = tf.image.decode_image(image_bytes)
  image = tf.cast(image, tf.float32)
  image = tf.expand_dims(image, 0)
  ```

  But for demo purposes, we are using randomly generated values for `image`:

  >>> image = tf.random.uniform(
  ...   maxval=255, shape=[1, 28, 28, 3], dtype=tf.float32)
  >>> sobel = tf.image.sobel_edges(image)
  >>> sobel_y = np.asarray(sobel[0, :, :, :, 0]) # sobel in y-direction
  >>> sobel_x = np.asarray(sobel[0, :, :, :, 1]) # sobel in x-direction

  For displaying the sobel results, PIL's [Image Module](
  https://pillow.readthedocs.io/en/stable/reference/Image.html) can be used:

  ```python
  # Display edge maps for the first channel (at index 0)
  Image.fromarray(sobel_y[..., 0] / 4 + 0.5).show()
  Image.fromarray(sobel_x[..., 0] / 4 + 0.5).show()
  ```

  Arguments:
    image: Image tensor with shape [batch_size, h, w, d] and type float32 or
      float64. The image(s) must be 2x2 or larger.

  Returns:
    Tensor holding edge maps for each channel. Returns a tensor with shape
    [batch_size, h, w, d, 2] where the last two dimensions hold [[dy[0], dx[0]],
    [dy[1], dx[1]], ..., [dy[d-1], dx[d-1]]] calculated using the Sobel filter.
  """
  # Define vertical and horizontal Sobel filters.
  static_image_shape = image.get_shape()
  image_shape = array_ops.shape(image)
  kernels = [[[-1, -2, -1], [0, 0, 0], [1, 2, 1]],
             [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]]
  num_kernels = len(kernels)
  # Stack the two 3x3 kernels along a trailing axis and add a singleton
  # in-channel axis: resulting shape is [3, 3, 1, num_kernels].
  kernels = np.transpose(np.asarray(kernels), (1, 2, 0))
  kernels = np.expand_dims(kernels, -2)
  kernels_tf = constant_op.constant(kernels, dtype=image.dtype)

  # Replicate the kernels across the input channels so each channel gets its
  # own edge maps from the depthwise convolution.
  kernels_tf = array_ops.tile(
      kernels_tf, [1, 1, image_shape[-1], 1], name='sobel_filters')

  # Use depth-wise convolution to calculate edge maps per channel.
  # REFLECT padding keeps the output the same spatial size as the input.
  pad_sizes = [[0, 0], [1, 1], [1, 1], [0, 0]]
  padded = array_ops.pad(image, pad_sizes, mode='REFLECT')

  # Output tensor has shape [batch_size, h, w, d * num_kernels].
  strides = [1, 1, 1, 1]
  output = nn.depthwise_conv2d(padded, kernels_tf, strides, 'VALID')

  # Reshape to [batch_size, h, w, d, num_kernels].
  shape = array_ops.concat([image_shape, [num_kernels]], 0)
  output = array_ops.reshape(output, shape=shape)
  output.set_shape(static_image_shape.concatenate([num_kernels]))
  return output
def resize_bicubic(images,
                   size,
                   align_corners=False,
                   name=None,
                   half_pixel_centers=False):
  """Legacy bicubic resize; forwards directly to the generated kernel op."""
  op_kwargs = {
      'images': images,
      'size': size,
      'align_corners': align_corners,
      'half_pixel_centers': half_pixel_centers,
      'name': name,
  }
  return gen_image_ops.resize_bicubic(**op_kwargs)
def resize_bilinear(images,
                    size,
                    align_corners=False,
                    name=None,
                    half_pixel_centers=False):
  """Legacy bilinear resize; forwards directly to the generated kernel op."""
  op_kwargs = {
      'images': images,
      'size': size,
      'align_corners': align_corners,
      'half_pixel_centers': half_pixel_centers,
      'name': name,
  }
  return gen_image_ops.resize_bilinear(**op_kwargs)
def resize_nearest_neighbor(images,
                            size,
                            align_corners=False,
                            name=None,
                            half_pixel_centers=False):
  """Legacy nearest-neighbor resize; forwards to the generated kernel op."""
  op_kwargs = {
      'images': images,
      'size': size,
      'align_corners': align_corners,
      'half_pixel_centers': half_pixel_centers,
      'name': name,
  }
  return gen_image_ops.resize_nearest_neighbor(**op_kwargs)
# Wire up the deprecated v1 resize aliases. Each legacy symbol is exported
# under `tf.compat.v1.image.*` and wrapped so its use emits a deprecation
# warning pointing at `tf.image.resize` with the matching method.
resize_area_deprecation = deprecation.deprecated(
    date=None,
    instructions=(
        'Use `tf.image.resize(...method=ResizeMethod.AREA...)` instead.'))
tf_export(v1=['image.resize_area'])(
    resize_area_deprecation(
        dispatch.add_dispatch_support(gen_image_ops.resize_area)))

resize_bicubic_deprecation = deprecation.deprecated(
    date=None,
    instructions=(
        'Use `tf.image.resize(...method=ResizeMethod.BICUBIC...)` instead.'))
tf_export(v1=['image.resize_bicubic'])(
    dispatch.add_dispatch_support(resize_bicubic_deprecation(resize_bicubic)))

resize_bilinear_deprecation = deprecation.deprecated(
    date=None,
    instructions=(
        'Use `tf.image.resize(...method=ResizeMethod.BILINEAR...)` instead.'))
tf_export(v1=['image.resize_bilinear'])(
    dispatch.add_dispatch_support(resize_bilinear_deprecation(resize_bilinear)))

resize_nearest_neighbor_deprecation = deprecation.deprecated(
    date=None,
    instructions=(
        'Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` '
        'instead.'))
tf_export(v1=['image.resize_nearest_neighbor'])(
    dispatch.add_dispatch_support(
        resize_nearest_neighbor_deprecation(resize_nearest_neighbor)))
@tf_export('image.crop_and_resize', v1=[])
@dispatch.add_dispatch_support
def crop_and_resize_v2(image,
                       boxes,
                       box_indices,
                       crop_size,
                       method='bilinear',
                       extrapolation_value=0,
                       name=None):
  """Extracts crops from the input image tensor and resizes them.

  Extracts crops from the input image tensor and resizes them using bilinear
  sampling or nearest neighbor sampling (possibly with aspect ratio change) to
  a common output size specified by `crop_size`. This is more general than the
  `crop_to_bounding_box` op, which extracts a fixed-size slice from the input
  image and does not allow resizing or aspect ratio change.

  Returns a tensor with `crops` from the input `image` at positions defined at
  the bounding box locations in `boxes`. The cropped boxes are all resized
  (with bilinear or nearest neighbor interpolation) to a fixed
  `size = [crop_height, crop_width]`. The result is a 4-D tensor
  `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner
  aligned. In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give
  identical results to using `tf.compat.v1.image.resize_bilinear()` or
  `tf.compat.v1.image.resize_nearest_neighbor()` (depending on the `method`
  argument) with `align_corners=True`.

  Args:
    image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
      Both `image_height` and `image_width` need to be positive.
    boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
      specifies the coordinates of a box in the `box_ind[i]` image and is
      specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized
      coordinate value of `y` is mapped to the image coordinate at `y *
      (image_height - 1)`, so the `[0, 1]` interval of normalized image
      height is mapped to `[0, image_height - 1]` in image height coordinates.
      We do allow `y1` > `y2`, in which case the sampled crop is an up-down
      flipped version of the original image. The width dimension is treated
      similarly. Normalized coordinates outside the `[0, 1]` range are allowed,
      in which case we use `extrapolation_value` to extrapolate the input image
      values.
    box_indices: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0,
      batch)`. The value of `box_ind[i]` specifies the image that the `i`-th
      box refers to.
    crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`.
      All cropped image patches are resized to this size. The aspect ratio of
      the image content is not preserved. Both `crop_height` and `crop_width`
      need to be positive.
    method: An optional string specifying the sampling method for resizing. It
      can be either `"bilinear"` or `"nearest"` and defaults to `"bilinear"`.
      Currently two sampling methods are supported: Bilinear and Nearest
      Neighbor.
    extrapolation_value: An optional `float`. Defaults to `0`. Value used for
      extrapolation, when applicable.
    name: A name for the operation (optional).

  Returns:
    A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.

  Example:

  ```python
  import tensorflow as tf
  BATCH_SIZE = 1
  NUM_BOXES = 5
  IMAGE_HEIGHT = 256
  IMAGE_WIDTH = 256
  CHANNELS = 3
  CROP_SIZE = (24, 24)

  image = tf.random.normal(shape=(BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH,
  CHANNELS) )
  boxes = tf.random.uniform(shape=(NUM_BOXES, 4))
  box_indices = tf.random.uniform(shape=(NUM_BOXES,), minval=0,
  maxval=BATCH_SIZE, dtype=tf.int32)
  output = tf.image.crop_and_resize(image, boxes, box_indices, CROP_SIZE)
  output.shape  #=> (5, 24, 24, 3)
  ```
  """
  # Thin wrapper: forward everything to the generated kernel op.
  op_args = (image, boxes, box_indices, crop_size, method, extrapolation_value,
             name)
  return gen_image_ops.crop_and_resize(*op_args)
@tf_export(v1=['image.crop_and_resize'])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             'box_ind is deprecated, use box_indices instead',
                             'box_ind')
def crop_and_resize_v1(  # pylint: disable=missing-docstring
    image,
    boxes,
    box_ind=None,
    crop_size=None,
    method='bilinear',
    extrapolation_value=0,
    name=None,
    box_indices=None):
  # Prefer the new `box_indices` spelling when supplied, falling back to the
  # deprecated `box_ind` argument.
  resolved_indices = deprecation.deprecated_argument_lookup(
      'box_indices', box_indices, 'box_ind', box_ind)
  return gen_image_ops.crop_and_resize(image, boxes, resolved_indices,
                                       crop_size, method, extrapolation_value,
                                       name)


# Reuse the generated op's docstring for the v1 wrapper.
crop_and_resize_v1.__doc__ = gen_image_ops.crop_and_resize.__doc__
@tf_export(v1=['image.extract_glimpse'])
@dispatch.add_dispatch_support
def extract_glimpse(
    input,  # pylint: disable=redefined-builtin
    size,
    offsets,
    centered=True,
    normalized=True,
    uniform_noise=True,
    name=None):
  """Extracts a glimpse from the input tensor.

  Returns a set of windows called glimpses extracted at location `offsets`
  from the input tensor. If the windows only partially overlap the inputs,
  the non-overlapping areas will be filled with random noise.

  The result is a 4-D tensor of shape `[batch_size, glimpse_height,
  glimpse_width, channels]`. The channels and batch dimensions are the
  same as that of the input tensor. The height and width of the output
  windows are specified in the `size` parameter.

  The arguments `normalized` and `centered` control how the windows are built:

  * If the coordinates are normalized but not centered, 0.0 and 1.0
    correspond to the minimum and maximum of each height and width
    dimension.
  * If the coordinates are both normalized and centered, they range from
    -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
    left corner, the lower right corner is located at (1.0, 1.0) and the
    center is at (0, 0).
  * If the coordinates are not normalized they are interpreted as
    numbers of pixels.

  Usage Example:

  >>> x = [[[[0.0],
  ...        [1.0],
  ...        [2.0]],
  ...       [[3.0],
  ...        [4.0],
  ...        [5.0]],
  ...       [[6.0],
  ...        [7.0],
  ...        [8.0]]]]
  >>> tf.compat.v1.image.extract_glimpse(x, size=(2, 2), offsets=[[1, 1]],
  ...                                    centered=False, normalized=False)
  <tf.Tensor: shape=(1, 2, 2, 1), dtype=float32, numpy=
  array([[[[0.],
           [1.]],
          [[3.],
           [4.]]]], dtype=float32)>

  Args:
    input: A `Tensor` of type `float32`. A 4-D float tensor of shape
      `[batch_size, height, width, channels]`.
    size: A `Tensor` of type `int32`. A 1-D tensor of 2 elements containing the
      size of the glimpses to extract. The glimpse height must be specified
      first, following by the glimpse width.
    offsets: A `Tensor` of type `float32`. A 2-D integer tensor of shape
      `[batch_size, 2]` containing the y, x locations of the center of each
      window.
    centered: An optional `bool`. Defaults to `True`. Indicates if the offset
      coordinates are centered relative to the image, in which case the (0, 0)
      offset is relative to the center of the input images. If false, the (0,0)
      offset corresponds to the upper left corner of the input images.
    normalized: An optional `bool`. Defaults to `True`. Indicates if the offset
      coordinates are normalized.
    uniform_noise: An optional `bool`. Defaults to `True`. Indicates if the
      noise should be generated using a uniform distribution or a Gaussian
      distribution.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  # Thin wrapper: forward everything to the generated kernel op.
  op_kwargs = {
      'input': input,
      'size': size,
      'offsets': offsets,
      'centered': centered,
      'normalized': normalized,
      'uniform_noise': uniform_noise,
      'name': name,
  }
  return gen_image_ops.extract_glimpse(**op_kwargs)
@tf_export('image.extract_glimpse', v1=[])
@dispatch.add_dispatch_support
def extract_glimpse_v2(
    input,  # pylint: disable=redefined-builtin
    size,
    offsets,
    centered=True,
    normalized=True,
    noise='uniform',
    name=None):
  """Extracts a glimpse from the input tensor.

  Returns a set of windows called glimpses extracted at location `offsets`
  from the input tensor. If the windows only partially overlap the inputs,
  the non-overlapping areas will be filled with random noise.

  The result is a 4-D tensor of shape `[batch_size, glimpse_height,
  glimpse_width, channels]`. The channels and batch dimensions are the
  same as that of the input tensor. The height and width of the output
  windows are specified in the `size` parameter.

  The arguments `normalized` and `centered` control how the windows are built:

  * If the coordinates are normalized but not centered, 0.0 and 1.0
    correspond to the minimum and maximum of each height and width
    dimension.
  * If the coordinates are both normalized and centered, they range from
    -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
    left corner, the lower right corner is located at (1.0, 1.0) and the
    center is at (0, 0).
  * If the coordinates are not normalized they are interpreted as
    numbers of pixels.

  Usage Example:

  >>> x = [[[[0.0],
  ...        [1.0],
  ...        [2.0]],
  ...       [[3.0],
  ...        [4.0],
  ...        [5.0]],
  ...       [[6.0],
  ...        [7.0],
  ...        [8.0]]]]
  >>> tf.image.extract_glimpse(x, size=(2, 2), offsets=[[1, 1]],
  ...                          centered=False, normalized=False)
  <tf.Tensor: shape=(1, 2, 2, 1), dtype=float32, numpy=
  array([[[[4.],
           [5.]],
          [[7.],
           [8.]]]], dtype=float32)>

  Args:
    input: A `Tensor` of type `float32`. A 4-D float tensor of shape
      `[batch_size, height, width, channels]`.
    size: A `Tensor` of type `int32`. A 1-D tensor of 2 elements containing the
      size of the glimpses to extract. The glimpse height must be specified
      first, following by the glimpse width.
    offsets: A `Tensor` of type `float32`. A 2-D integer tensor of shape
      `[batch_size, 2]` containing the y, x locations of the center of each
      window.
    centered: An optional `bool`. Defaults to `True`. Indicates if the offset
      coordinates are centered relative to the image, in which case the (0, 0)
      offset is relative to the center of the input images. If false, the (0,0)
      offset corresponds to the upper left corner of the input images.
    normalized: An optional `bool`. Defaults to `True`. Indicates if the offset
      coordinates are normalized.
    noise: An optional `string`. Defaults to `uniform`. Indicates if the noise
      should be `uniform` (uniform distribution), `gaussian` (gaussian
      distribution), or `zero` (zero padding).
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  # Thin wrapper: forward to the v2 kernel op. `uniform_noise=False` here so
  # the string `noise` argument controls the fill distribution.
  op_kwargs = {
      'input': input,
      'size': size,
      'offsets': offsets,
      'centered': centered,
      'normalized': normalized,
      'noise': noise,
      'uniform_noise': False,
      'name': name,
  }
  return gen_image_ops.extract_glimpse_v2(**op_kwargs)
@tf_export('image.combined_non_max_suppression')
@dispatch.add_dispatch_support
def combined_non_max_suppression(boxes,
                                 scores,
                                 max_output_size_per_class,
                                 max_total_size,
                                 iou_threshold=0.5,
                                 score_threshold=float('-inf'),
                                 pad_per_class=False,
                                 clip_boxes=True,
                                 name=None):
  """Greedily selects a subset of bounding boxes in descending order of score.

  Batched, multi-class non-max suppression: for each image in the batch and
  each class, boxes whose intersection-over-union (IoU) with a previously
  selected, higher-scoring box exceeds `iou_threshold` are pruned, and the
  per-class results are merged into a single set of detections per image.

  Boxes are supplied as `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)`
  are any diagonal pair of corners; coordinates may be normalized (in
  `[0, 1]`) or absolute. The algorithm is invariant to translations and
  orthogonal transformations of the coordinate system.

  Args:
    boxes: A 4-D float `Tensor` of shape `[batch_size, num_boxes, q, 4]`. If
      `q` is 1, the same boxes are used for all classes; if `q` equals the
      number of classes, class-specific boxes are used.
    scores: A 3-D float `Tensor` of shape
      `[batch_size, num_boxes, num_classes]` with one score per box per class.
    max_output_size_per_class: A scalar integer `Tensor`; the maximum number
      of boxes selected by non-max suppression per class.
    max_total_size: An int32 scalar; the maximum number of boxes retained over
      all classes. Note that a very large value may cause OOM depending on
      the system workload.
    iou_threshold: A float; IoU above this threshold counts as overlapping
      too much.
    score_threshold: A float; boxes scoring at or below this are removed.
    pad_per_class: If false, the output boxes, scores and classes are
      padded/clipped to `max_total_size`. If true, they are padded to
      `max_size_per_class` * `num_classes`, unless that exceeds
      `max_total_size`, in which case they are clipped to `max_total_size`.
      Defaults to false.
    clip_boxes: If true, output box coordinates are clipped to `[0, 1]`;
      otherwise they are emitted as-is. Defaults to true.
    name: A name for the operation (optional).

  Returns:
    'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor with the
      selected boxes.
    'nmsed_scores': A [batch_size, max_detections] float32 tensor with the
      corresponding scores.
    'nmsed_classes': A [batch_size, max_detections] float32 tensor with the
      corresponding classes.
    'valid_detections': A [batch_size] int32 tensor giving the number of
      valid detections per batch item; only the first valid_detections[i]
      entries of each output row are meaningful, the rest are zero padding.
  """
  with ops.name_scope(name, 'combined_non_max_suppression'):
    iou_threshold_tensor = ops.convert_to_tensor(
        iou_threshold, dtype=dtypes.float32, name='iou_threshold')
    score_threshold_tensor = ops.convert_to_tensor(
        score_threshold, dtype=dtypes.float32, name='score_threshold')
    # Convert `max_total_size` WITHOUT pinning a dtype: anything within int32
    # range becomes an int32 tensor (what the op registration expects), while
    # larger values become int64 and trigger a dtype-mismatch error in the op,
    # surfacing int32 overflow instead of silently truncating.
    # TODO(b/173251596): Once there is a more general solution to warn against
    # int overflow conversions, revisit this check.
    max_total_size_tensor = ops.convert_to_tensor(max_total_size)
    return gen_image_ops.combined_non_max_suppression(
        boxes, scores, max_output_size_per_class, max_total_size_tensor,
        iou_threshold_tensor, score_threshold_tensor, pad_per_class,
        clip_boxes)
def _bbox_overlap(boxes_a, boxes_b):
  """Computes pairwise IoU between two batched sets of boxes.

  Args:
    boxes_a: a tensor of shape [batch_size, N, 4]; N is the number of boxes
      per image and the last dimension holds pixel coordinates in
      [ymin, xmin, ymax, xmax] form.
    boxes_b: a tensor of shape [batch_size, M, 4] in the same format.

  Returns:
    A tensor of shape [batch_size, N, M] with the intersection-over-union
    between every box of `boxes_a` and every box of `boxes_b`.
  """
  with ops.name_scope('bbox_overlap'):
    y_min_a, x_min_a, y_max_a, x_max_a = array_ops.split(
        value=boxes_a, num_or_size_splits=4, axis=2)
    y_min_b, x_min_b, y_max_b, x_max_b = array_ops.split(
        value=boxes_b, num_or_size_splits=4, axis=2)
    # Transpose the b coordinates to [batch_size, 1, M] so broadcasting
    # against the [batch_size, N, 1] a coordinates produces all N x M pairs.
    y_min_b_t = array_ops.transpose(y_min_b, [0, 2, 1])
    y_max_b_t = array_ops.transpose(y_max_b, [0, 2, 1])
    x_min_b_t = array_ops.transpose(x_min_b, [0, 2, 1])
    x_max_b_t = array_ops.transpose(x_max_b, [0, 2, 1])
    # Intersection extents, clamped at zero for disjoint boxes.
    intersect_w = math_ops.maximum(
        math_ops.minimum(x_max_a, x_max_b_t) -
        math_ops.maximum(x_min_a, x_min_b_t), 0)
    intersect_h = math_ops.maximum(
        math_ops.minimum(y_max_a, y_max_b_t) -
        math_ops.maximum(y_min_a, y_min_b_t), 0)
    intersect_area = intersect_w * intersect_h
    area_a = (y_max_a - y_min_a) * (x_max_a - x_min_a)
    area_b = (y_max_b - y_min_b) * (x_max_b - x_min_b)
    # A small epsilon guards against divide-by-zero for degenerate boxes.
    union_area = (
        area_a + array_ops.transpose(area_b, [0, 2, 1]) - intersect_area +
        1e-8)
    return intersect_area / union_area
def _self_suppression(iou, _, iou_sum, iou_threshold):
  """Suppress boxes in the same tile.

  Compute boxes that cannot be suppressed by others (i.e.,
  can_suppress_others), and then use them to suppress boxes in the same tile.

  This is the body of a `while_loop`, so the signature mirrors the loop
  variables even though the second one is unused here.

  Args:
    iou: a tensor of shape
      [batch_size, num_boxes_with_padding, num_boxes_with_padding]
      representing pairwise intersection over union.
    _: a boolean tensor, the continue-looping flag produced by the previous
      iteration; unused inside the body.
    iou_sum: a tensor of shape [batch_size], the per-batch IoU sum from the
      previous iteration, used to detect whether anything changed.
    iou_threshold: a scalar tensor.

  Returns:
    iou_suppressed: a tensor with the same shape as `iou` where rows of
      suppressed boxes have been zeroed out.
    iou_diff: a scalar boolean tensor, True if any box was suppressed in
      this step (i.e., another iteration is needed).
    iou_sum_new: a tensor of shape [batch_size], the IoU sum after
      suppression.
    iou_threshold: a scalar tensor, passed through unchanged.
  """
  batch_size = array_ops.shape(iou)[0]
  # A box can suppress others iff it is not itself suppressed by any box
  # (its max IoU against all others stays below the threshold).
  can_suppress_others = math_ops.cast(
      array_ops.reshape(
          math_ops.reduce_max(iou, 1) < iou_threshold, [batch_size, -1, 1]),
      iou.dtype)
  # Keep only the IoU rows of boxes that survive suppression by the
  # `can_suppress_others` set; suppressed boxes have their rows zeroed so
  # they cannot suppress anything in later iterations.
  iou_after_suppression = array_ops.reshape(
      math_ops.cast(
          math_ops.reduce_max(can_suppress_others * iou, 1) < iou_threshold,
          iou.dtype),
      [batch_size, -1, 1]) * iou
  iou_sum_new = math_ops.reduce_sum(iou_after_suppression, [1, 2])
  return [
      iou_after_suppression,
      math_ops.reduce_any(iou_sum - iou_sum_new > iou_threshold), iou_sum_new,
      iou_threshold
  ]
def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx, tile_size):
  """Suppress boxes of one tile using the boxes of another tile.

  Args:
    boxes: a tensor of shape [batch_size, num_boxes_with_padding, 4].
    box_slice: a tensor of shape [batch_size, tile_size, 4], the tile being
      suppressed.
    iou_threshold: a scalar tensor.
    inner_idx: a scalar tensor, the index of the tile whose boxes do the
      suppressing.
    tile_size: an integer, the number of boxes per tile.

  Returns:
    boxes: the input boxes, unchanged.
    box_slice_after_suppression: `box_slice` with suppressed boxes zeroed.
    iou_threshold: unchanged.
    inner_idx + 1: the advanced tile index for the next loop iteration.
  """
  batch_size = array_ops.shape(boxes)[0]
  # Boxes of the tile acting as the suppressor.
  suppressor_slice = array_ops.slice(
      boxes, [0, inner_idx * tile_size, 0], [batch_size, tile_size, 4])
  iou = _bbox_overlap(suppressor_slice, box_slice)
  # A box survives only if it overlaps no suppressor box above the threshold.
  survives = math_ops.reduce_all(iou < iou_threshold, [1])
  keep_mask = array_ops.expand_dims(
      math_ops.cast(survives, box_slice.dtype), 2)
  return boxes, keep_mask * box_slice, iou_threshold, inner_idx + 1
def _suppression_loop_body(boxes, iou_threshold, output_size, idx, tile_size):
  """Process boxes in the range [idx*tile_size, (idx+1)*tile_size).

  One iteration of the tiled NMS while-loop: the current tile is first
  suppressed by all earlier (higher-scoring) tiles, then self-suppressed,
  and the surviving boxes are written back into `boxes`.

  Args:
    boxes: a tensor with a shape of [batch_size, anchors, 4], sorted by
      descending score along the anchors dimension.
    iou_threshold: a float representing the threshold for deciding whether
      boxes overlap too much with respect to IOU.
    output_size: an int32 tensor of size [batch_size] representing the number
      of selected boxes for each batch so far.
    idx: an integer scalar representing the induction variable (the index of
      the tile being processed).
    tile_size: an integer representing the number of boxes in a tile.

  Returns:
    boxes: updated boxes (suppressed boxes of the current tile zeroed out).
    iou_threshold: pass down iou_threshold to the next iteration.
    output_size: the updated output_size.
    idx: the updated induction variable.
  """
  with ops.name_scope('suppression_loop_body'):
    num_tiles = array_ops.shape(boxes)[1] // tile_size
    batch_size = array_ops.shape(boxes)[0]
    def cross_suppression_func(boxes, box_slice, iou_threshold, inner_idx):
      return _cross_suppression(boxes, box_slice, iou_threshold, inner_idx,
                                tile_size)
    # Iterates over tiles that can possibly suppress the current tile, i.e.,
    # all tiles with index < idx (they contain higher-scoring boxes).
    box_slice = array_ops.slice(boxes, [0, idx * tile_size, 0],
                                [batch_size, tile_size, 4])
    _, box_slice, _, _ = control_flow_ops.while_loop(
        lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx,
        cross_suppression_func,
        [boxes, box_slice, iou_threshold, constant_op.constant(0)])
    # Iterates over the current tile to compute self-suppression.
    iou = _bbox_overlap(box_slice, box_slice)
    # Strictly-upper-triangular mask: entry [i, j] is True when j > i, so a
    # box may only be suppressed by earlier (higher-scoring) boxes in the
    # tile; self-overlap and lower-scoring boxes are masked out.
    mask = array_ops.expand_dims(
        array_ops.reshape(
            math_ops.range(tile_size), [1, -1]) > array_ops.reshape(
                math_ops.range(tile_size), [-1, 1]), 0)
    iou *= math_ops.cast(
        math_ops.logical_and(mask, iou >= iou_threshold), iou.dtype)
    # Repeatedly apply _self_suppression until no more boxes change.
    suppressed_iou, _, _, _ = control_flow_ops.while_loop(
        lambda _iou, loop_condition, _iou_sum, _: loop_condition,
        _self_suppression,
        [iou, constant_op.constant(True), math_ops.reduce_sum(iou, [1, 2]),
         iou_threshold])
    # Zero out boxes that were suppressed within the tile.
    suppressed_box = math_ops.reduce_sum(suppressed_iou, 1) > 0
    box_slice *= array_ops.expand_dims(
        1.0 - math_ops.cast(suppressed_box, box_slice.dtype), 2)
    # Uses box_slice to update the input boxes: replace only tile `idx`,
    # keeping all other tiles unchanged via the one-hot `mask`.
    mask = array_ops.reshape(
        math_ops.cast(
            math_ops.equal(math_ops.range(num_tiles), idx), boxes.dtype),
        [1, -1, 1, 1])
    boxes = array_ops.tile(array_ops.expand_dims(
        box_slice, [1]), [1, num_tiles, 1, 1]) * mask + array_ops.reshape(
            boxes, [batch_size, num_tiles, tile_size, 4]) * (1 - mask)
    boxes = array_ops.reshape(boxes, [batch_size, -1, 4])
    # Updates output_size: count boxes of this tile that survived (any
    # nonzero coordinate means the box was kept).
    output_size += math_ops.reduce_sum(
        math_ops.cast(
            math_ops.reduce_any(box_slice > 0, [2]), dtypes.int32), [1])
  return boxes, iou_threshold, output_size, idx + 1
@tf_export('image.non_max_suppression_padded')
@dispatch.add_dispatch_support
def non_max_suppression_padded(boxes,
                               scores,
                               max_output_size,
                               iou_threshold=0.5,
                               score_threshold=float('-inf'),
                               pad_to_max_output_size=False,
                               name=None,
                               sorted_input=False,
                               canonicalized_coordinates=False,
                               tile_size=512):
  """Greedily selects a subset of bounding boxes in descending order of score.

  Algorithmically equivalent to `tf.image.non_max_suppression`, with the
  addition of an optional parameter which zero-pads the output indices to
  `max_output_size`.

  The result is a tuple of the selected indices into the input boxes and the
  number of valid entries in that index set. The selected boxes themselves
  can be recovered with `tf.slice` and `tf.gather`:

  ```python
  selected_indices_padded, num_valid = tf.image.non_max_suppression_padded(
      boxes, scores, max_output_size, iou_threshold,
      score_threshold, pad_to_max_output_size=True)
  selected_indices = tf.slice(
      selected_indices_padded, tf.constant([0]), num_valid)
  selected_boxes = tf.gather(boxes, selected_indices)
  ```

  Args:
    boxes: a tensor of rank 2 or higher with a shape of [..., num_boxes, 4].
      Dimensions except the last two are batch dimensions.
    scores: a tensor of rank 1 or higher with a shape of [..., num_boxes].
    max_output_size: a scalar integer `Tensor` representing the maximum number
      of boxes to be selected by non max suppression.
    iou_threshold: a float; boxes whose IoU (intersection over union) with a
      selected box exceeds this are suppressed.
    score_threshold: a float; boxes whose score is not larger than this are
      suppressed.
    pad_to_max_output_size: whether to pad the output idx to max_output_size.
      Must be set to True when the input is a batch of images.
    name: name of operation.
    sorted_input: a boolean indicating whether the input boxes and scores
      are already sorted in descending order by score.
    canonicalized_coordinates: if box coordinates are given as
      `[y_min, x_min, y_max, x_max]`, setting this to True skips the
      redundant canonicalization step.
    tile_size: an integer, the number of boxes per tile; larger tiles mean
      more parallelism but potentially more redundant work.

  Returns:
    idx: a tensor with a shape of [..., num_boxes] with the indices selected
      by non-max suppression, batch dimensions leading. All values lie in
      [0, num_boxes); for each image only the first num_valid[i] entries of
      idx[i] are valid.
    num_valid: a tensor of rank 0 or higher with a shape of [...] giving the
      number of valid indices in idx, with the batch dimensions of the input
      boxes.

  Raises:
    ValueError: When set pad_to_max_output_size to False for batched input.
  """
  # Dispatch to the legacy kernel when no new argument is used and the
  # forward-compatibility window has not yet passed (no later than
  # 2020/6/23), to give us time to fix TFLite conversion after the TF 2.3
  # release.
  use_legacy_op = (not sorted_input and not canonicalized_coordinates and
                   tile_size == 512 and
                   not compat.forward_compatible(2020, 6, 23))
  if use_legacy_op:
    return non_max_suppression_padded_v1(
        boxes, scores, max_output_size, iou_threshold, score_threshold,
        pad_to_max_output_size, name)
  with ops.name_scope(name, 'non_max_suppression_padded'):
    if not pad_to_max_output_size:
      # Unpadded output is only valid for a single image of shape
      # [num_boxes, 4]. Detect batched input at graph-construction time when
      # the rank is statically known; with an unknown rank, let computation
      # proceed.
      static_rank = boxes.get_shape().rank
      if static_rank is not None and static_rank > 2:
        raise ValueError(
            "'pad_to_max_output_size' (value {}) must be True for "
            'batched input'.format(pad_to_max_output_size))
    if name is None:
      name = ''
    idx, num_valid = non_max_suppression_padded_v2(
        boxes, scores, max_output_size, iou_threshold, score_threshold,
        sorted_input, canonicalized_coordinates, tile_size)
    # def_function.function seems to lose shape information, so set it here.
    if pad_to_max_output_size:
      batch_dims = array_ops.concat([
          array_ops.shape(boxes)[:-2],
          array_ops.expand_dims(max_output_size, 0)
      ], 0)
      idx = array_ops.reshape(idx, batch_dims)
    else:
      idx = idx[0, :num_valid]
    return idx, num_valid
# TODO(b/158709815): Improve performance regression due to
# def_function.function.
@def_function.function(
    experimental_implements='non_max_suppression_padded_v2')
def non_max_suppression_padded_v2(boxes,
                                  scores,
                                  max_output_size,
                                  iou_threshold=0.5,
                                  score_threshold=float('-inf'),
                                  sorted_input=False,
                                  canonicalized_coordinates=False,
                                  tile_size=512):
  """Non-maximum suppression.

  Prunes away boxes that have high intersection-over-union (IOU) overlap
  with previously selected boxes. Bounding boxes are supplied as
  `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of
  any diagonal pair of box corners and the coordinates can be provided as
  normalized (i.e., lying in the interval `[0, 1]`) or absolute. The bounding
  box coordinates are canonicalized to `[y_min, x_min, y_max, x_max]`,
  where `(y_min, x_min)` and `(y_max, x_max)` are the coordinates of the
  lower left and upper right corner. The caller may indicate that the input
  box coordinates are already canonicalized, to eliminate redundant work, by
  setting canonicalized_coordinates to `True`. Note that this algorithm is
  agnostic to where the origin is in the coordinate system. Note that this
  algorithm is invariant to orthogonal transformations and translations of
  the coordinate system; thus translating or reflections of the coordinate
  system result in the same boxes being selected by the algorithm.

  Similar to tf.image.non_max_suppression, non_max_suppression_padded
  implements hard NMS but can operate on a batch of images and improves
  performance by tiling the bounding boxes. Non_max_suppression_padded should
  be preferred over tf.image_non_max_suppression when running on devices with
  abundant parallelism for higher computation speed. For soft NMS, refer to
  tf.image.non_max_suppression_with_scores.

  While a serial NMS algorithm iteratively uses the highest-scored unprocessed
  box to suppress boxes, this algorithm uses many boxes to suppress other
  boxes in parallel. The key idea is to partition boxes into tiles based on
  their score and suppresses boxes tile by tile, thus achieving parallelism
  within a tile. The tile size determines the degree of parallelism.

  In cross suppression (using boxes of tile A to suppress boxes of tile B),
  all boxes in A can independently suppress boxes in B.

  Self suppression (suppressing boxes of the same tile) needs to be
  iteratively applied until there's no more suppression. In each iteration,
  boxes that cannot be suppressed are used to suppress boxes in the same
  tile.

  Pseudo-code of the tiled algorithm:

    boxes = boxes.pad_to_multiple_of(tile_size)
    num_tiles = len(boxes) // tile_size
    output_boxes = []
    for i in range(num_tiles):
      box_tile = boxes[i*tile_size : (i+1)*tile_size]
      for j in range(i - 1):
        # in parallel suppress boxes in box_tile using boxes from
        # suppressing_tile
        suppressing_tile = boxes[j*tile_size : (j+1)*tile_size]
        iou = _bbox_overlap(box_tile, suppressing_tile)
        # if the box is suppressed in iou, clear it to a dot
        box_tile *= _update_boxes(iou)
      # Iteratively handle the diagonal tile.
      iou = _bbox_overlap(box_tile, box_tile)
      iou_changed = True
      while iou_changed:
        # boxes that are not suppressed by anything else
        suppressing_boxes = _get_suppressing_boxes(iou)
        # boxes that are suppressed by suppressing_boxes
        suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes)
        # clear iou to 0 for boxes that are suppressed, as they cannot be
        # used to suppress other boxes any more
        new_iou = _clear_iou(iou, suppressed_boxes)
        iou_changed = (new_iou != iou)
        iou = new_iou
      # remaining boxes that can still suppress others, are selected boxes.
      output_boxes.append(_get_suppressing_boxes(iou))
      if len(output_boxes) >= max_output_size:
        break

  Args:
    boxes: a tensor of rank 2 or higher with a shape of [..., num_boxes, 4].
      Dimensions except the last two are batch dimensions. The last dimension
      represents box coordinates, given as [y_1, x_1, y_2, x_2]. The
      coordinates on each dimension can be given in any order
      (see also `canonicalized_coordinates`) but must describe a box with
      a positive area.
    scores: a tensor of rank 1 or higher with a shape of [..., num_boxes].
    max_output_size: a scalar integer `Tensor` representing the maximum number
      of boxes to be selected by non max suppression.
    iou_threshold: a float representing the threshold for deciding whether
      boxes overlap too much with respect to IoU (intersection over union).
    score_threshold: a float representing the threshold for box scores. Boxes
      with a score that is not larger than this threshold will be suppressed.
    sorted_input: a boolean indicating whether the input boxes and scores
      are sorted in descending order by the score.
    canonicalized_coordinates: if box coordinates are given as
      `[y_min, x_min, y_max, x_max]`, setting to True eliminates redundant
      computation to canonicalize box coordinates.
    tile_size: an integer representing the number of boxes in a tile, i.e.,
      the maximum number of boxes per image that can be used to suppress
      other boxes in parallel; larger tile_size means larger parallelism and
      potentially more redundant work.

  Returns:
    idx: a tensor with a shape of [..., num_boxes] representing the
      indices selected by non-max suppression. The leading dimensions
      are the batch dimensions of the input boxes. All numbers are within
      [0, num_boxes). For each image (i.e., idx[i]), only the first
      num_valid[i] indices (i.e., idx[i][:num_valid[i]]) are valid.
    num_valid: a tensor of rank 0 or higher with a shape of [...]
      representing the number of valid indices in idx. Its dimensions are the
      batch dimensions of the input boxes.

  Raises:
    ValueError: When set pad_to_max_output_size to False for batched input.
  """
  def _sort_scores_and_boxes(scores, boxes):
    """Sort boxes based their score from highest to lowest.

    Args:
      scores: a tensor with a shape of [batch_size, num_boxes] representing
        the scores of boxes.
      boxes: a tensor with a shape of [batch_size, num_boxes, 4] representing
        the boxes.

    Returns:
      sorted_scores: a tensor with a shape of [batch_size, num_boxes]
        representing the sorted scores.
      sorted_boxes: a tensor representing the sorted boxes.
      sorted_scores_indices: a tensor with a shape of [batch_size, num_boxes]
        representing the index of the scores in a sorted descending order.
    """
    with ops.name_scope('sort_scores_and_boxes'):
      batch_size = array_ops.shape(boxes)[0]
      num_boxes = array_ops.shape(boxes)[1]
      sorted_scores_indices = sort_ops.argsort(
          scores, axis=1, direction='DESCENDING')
      # Flatten to 1-D indices so a single gather reorders all batches.
      index_offsets = math_ops.range(batch_size) * num_boxes
      indices = array_ops.reshape(
          sorted_scores_indices + array_ops.expand_dims(index_offsets, 1), [-1])
      sorted_scores = array_ops.reshape(
          array_ops.gather(array_ops.reshape(scores, [-1]), indices),
          [batch_size, -1])
      sorted_boxes = array_ops.reshape(
          array_ops.gather(array_ops.reshape(boxes, [-1, 4]), indices),
          [batch_size, -1, 4])
      return sorted_scores, sorted_boxes, sorted_scores_indices
  # Flatten all leading batch dimensions into a single batch axis.
  batch_dims = array_ops.shape(boxes)[:-2]
  num_boxes = array_ops.shape(boxes)[-2]
  boxes = array_ops.reshape(boxes, [-1, num_boxes, 4])
  scores = array_ops.reshape(scores, [-1, num_boxes])
  batch_size = array_ops.shape(boxes)[0]
  if score_threshold != float('-inf'):
    with ops.name_scope('filter_by_score'):
      # Zero out both the scores and coordinates of low-scoring boxes.
      score_mask = math_ops.cast(scores > score_threshold, scores.dtype)
      scores *= score_mask
      box_mask = array_ops.expand_dims(
          math_ops.cast(score_mask, boxes.dtype), 2)
      boxes *= box_mask
  if not canonicalized_coordinates:
    with ops.name_scope('canonicalize_coordinates'):
      y_1, x_1, y_2, x_2 = array_ops.split(
          value=boxes, num_or_size_splits=4, axis=2)
      # NOTE(review): only the first box of the first batch is inspected to
      # decide the corner ordering — this assumes a consistent ordering
      # across all boxes in the input.
      y_1_is_min = math_ops.reduce_all(
          math_ops.less_equal(y_1[0, 0, 0], y_2[0, 0, 0]))
      y_min, y_max = control_flow_ops.cond(
          y_1_is_min, lambda: (y_1, y_2), lambda: (y_2, y_1))
      x_1_is_min = math_ops.reduce_all(
          math_ops.less_equal(x_1[0, 0, 0], x_2[0, 0, 0]))
      x_min, x_max = control_flow_ops.cond(
          x_1_is_min, lambda: (x_1, x_2), lambda: (x_2, x_1))
      boxes = array_ops.concat([y_min, x_min, y_max, x_max], axis=2)
  if not sorted_input:
    scores, boxes, sorted_indices = _sort_scores_and_boxes(scores, boxes)
  else:
    # Default value required for Autograph.
    sorted_indices = array_ops.zeros_like(scores, dtype=dtypes.int32)
  # Pad the number of boxes up to a multiple of tile_size (and at least
  # max_output_size) so the tiled loop can process whole tiles.
  pad = math_ops.cast(
      math_ops.ceil(
          math_ops.cast(
              math_ops.maximum(num_boxes, max_output_size), dtypes.float32) /
          math_ops.cast(tile_size, dtypes.float32)),
      dtypes.int32) * tile_size - num_boxes
  boxes = array_ops.pad(
      math_ops.cast(boxes, dtypes.float32), [[0, 0], [0, pad], [0, 0]])
  scores = array_ops.pad(
      math_ops.cast(scores, dtypes.float32), [[0, 0], [0, pad]])
  num_boxes_after_padding = num_boxes + pad
  num_iterations = num_boxes_after_padding // tile_size
  def _loop_cond(unused_boxes, unused_threshold, output_size, idx):
    # Continue while some batch still needs boxes and tiles remain.
    return math_ops.logical_and(
        math_ops.reduce_min(output_size) < max_output_size,
        idx < num_iterations)
  def suppression_loop_body(boxes, iou_threshold, output_size, idx):
    return _suppression_loop_body(
        boxes, iou_threshold, output_size, idx, tile_size)
  selected_boxes, _, output_size, _ = control_flow_ops.while_loop(
      _loop_cond,
      suppression_loop_body,
      [
          boxes, iou_threshold,
          array_ops.zeros([batch_size], dtypes.int32),
          constant_op.constant(0)
      ],
      shape_invariants=[
          tensor_shape.TensorShape([None, None, 4]),
          tensor_shape.TensorShape([]),
          tensor_shape.TensorShape([None]),
          tensor_shape.TensorShape([]),
      ],
  )
  num_valid = math_ops.minimum(output_size, max_output_size)
  # Recover the (sorted-order) indices of surviving boxes: top_k over a
  # descending ramp masked by "box survived" places selected indices first.
  idx = num_boxes_after_padding - math_ops.cast(
      nn_ops.top_k(
          math_ops.cast(math_ops.reduce_any(
              selected_boxes > 0, [2]), dtypes.int32) *
          array_ops.expand_dims(
              math_ops.range(num_boxes_after_padding, 0, -1), 0),
          max_output_size)[0], dtypes.int32)
  idx = math_ops.minimum(idx, num_boxes - 1)
  if not sorted_input:
    # Map indices from sorted order back to the original input order.
    index_offsets = math_ops.range(batch_size) * num_boxes
    gather_idx = array_ops.reshape(
        idx + array_ops.expand_dims(index_offsets, 1), [-1])
    idx = array_ops.reshape(
        array_ops.gather(array_ops.reshape(sorted_indices, [-1]),
                         gather_idx),
        [batch_size, -1])
  # Zero-fill the entries beyond each batch item's number of valid boxes.
  invalid_index = array_ops.fill([batch_size, max_output_size], 0)
  idx_index = array_ops.expand_dims(math_ops.range(max_output_size), 0)
  num_valid_expanded = array_ops.expand_dims(num_valid, 1)
  idx = array_ops.where(idx_index < num_valid_expanded,
                        idx, invalid_index)
  num_valid = array_ops.reshape(num_valid, batch_dims)
  return idx, num_valid
def non_max_suppression_padded_v1(boxes,
                                  scores,
                                  max_output_size,
                                  iou_threshold=0.5,
                                  score_threshold=float('-inf'),
                                  pad_to_max_output_size=False,
                                  name=None):
  """Greedily selects a subset of bounding boxes in descending order of score.

  Legacy single-image variant backed by the `NonMaxSuppressionV4` kernel.
  Algorithmically equivalent to `tf.image.non_max_suppression`, plus an
  optional flag that zero-pads the output index vector to `max_output_size`.

  The selected boxes can be recovered from the returned indices:

  ```python
  selected_indices_padded, num_valid = tf.image.non_max_suppression_padded(
      boxes, scores, max_output_size, iou_threshold,
      score_threshold, pad_to_max_output_size=True)
  selected_indices = tf.slice(
      selected_indices_padded, tf.constant([0]), num_valid)
  selected_boxes = tf.gather(boxes, selected_indices)
  ```

  Args:
    boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`.
    scores: A 1-D float `Tensor` of shape `[num_boxes]`, one score per box.
    max_output_size: A scalar integer `Tensor`; the maximum number of boxes
      selected by non-max suppression.
    iou_threshold: A float; boxes overlapping a selected box by more than
      this IoU are suppressed.
    score_threshold: A float; boxes scoring at or below this are removed.
    pad_to_max_output_size: bool. If True, `selected_indices` is zero-padded
      to length `max_output_size`.
    name: A name for the operation (optional).

  Returns:
    selected_indices: A 1-D integer `Tensor` of shape `[M]` with the selected
      indices into `boxes`, where `M <= max_output_size`.
    valid_outputs: A scalar integer `Tensor`, the number of valid entries in
      `selected_indices`; valid entries come first, then padding.
  """
  with ops.name_scope(name, 'non_max_suppression_padded'):
    iou_threshold_t = ops.convert_to_tensor(
        iou_threshold, name='iou_threshold')
    score_threshold_t = ops.convert_to_tensor(
        score_threshold, name='score_threshold')
    return gen_image_ops.non_max_suppression_v4(
        boxes, scores, max_output_size, iou_threshold_t, score_threshold_t,
        pad_to_max_output_size)
@tf_export('image.draw_bounding_boxes', v1=[])
@dispatch.add_dispatch_support
def draw_bounding_boxes_v2(images, boxes, colors, name=None):
  """Draw bounding boxes on a batch of images.

  Returns a copy of `images` with zero or more bounding boxes drawn on top of
  the pixels, at the locations given in `boxes`. Each box is encoded as
  `[y_min, x_min, y_max, x_max]` with coordinates expressed as floats in
  `[0.0, 1.0]` relative to the image height and width.

  For example, for a 100 x 200 pixel image (height x width), the box
  `[0.1, 0.2, 0.5, 0.9]` spans from `(40, 10)` to `(180, 50)` in (x, y)
  pixel coordinates. Parts of a box may fall outside the image.

  Args:
    images: A `Tensor`. Must be one of the following types: `float32`, `half`.
      4-D with shape `[batch, height, width, depth]`. A batch of images.
    boxes: A `Tensor` of type `float32`. 3-D with shape `[batch,
      num_bounding_boxes, 4]` containing bounding boxes.
    colors: A `Tensor` of type `float32`. 2-D. A list of RGBA colors to cycle
      through for the boxes.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `images`.

  Usage Example:

  >>> # create an empty image
  >>> img = tf.zeros([1, 3, 3, 3])
  >>> # draw a box around the image
  >>> box = np.array([0, 0, 1, 1])
  >>> boxes = box.reshape([1, 1, 4])
  >>> # alternate between red and blue
  >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
  >>> tf.image.draw_bounding_boxes(img, boxes, colors)
  <tf.Tensor: shape=(1, 3, 3, 3), dtype=float32, numpy=
  array([[[[1., 0., 0.],
          [1., 0., 0.],
          [1., 0., 0.]],
          [[1., 0., 0.],
          [0., 0., 0.],
          [1., 0., 0.]],
          [[1., 0., 0.],
          [1., 0., 0.],
          [1., 0., 0.]]]], dtype=float32)>
  """
  # Dispatch to the V2 kernel only when explicit colors were supplied; the
  # V1 kernel uses its own built-in color cycle.
  if colors is not None:
    return gen_image_ops.draw_bounding_boxes_v2(images, boxes, colors, name)
  return gen_image_ops.draw_bounding_boxes(images, boxes, name)
@tf_export(v1=['image.draw_bounding_boxes'])
@dispatch.add_dispatch_support
def draw_bounding_boxes(images, boxes, name=None, colors=None):
  """Draw bounding boxes on a batch of images.

  TF1 alias of `tf.image.draw_bounding_boxes` with a different argument
  order (`name` before `colors`); see `draw_bounding_boxes_v2` for details.

  Returns a copy of `images` with zero or more bounding boxes drawn on top of
  the pixels, at the locations given in `boxes`. Each box is encoded as
  `[y_min, x_min, y_max, x_max]` with coordinates expressed as floats in
  `[0.0, 1.0]` relative to the image height and width.

  For example, for a 100 x 200 pixel image (height x width), the box
  `[0.1, 0.2, 0.5, 0.9]` spans from `(40, 10)` to `(180, 50)` in (x, y)
  pixel coordinates. Parts of a box may fall outside the image.

  Args:
    images: A `Tensor`. Must be one of the following types: `float32`, `half`.
      4-D with shape `[batch, height, width, depth]`. A batch of images.
    boxes: A `Tensor` of type `float32`. 3-D with shape `[batch,
      num_bounding_boxes, 4]` containing bounding boxes.
    name: A name for the operation (optional).
    colors: A `Tensor` of type `float32`. 2-D. A list of RGBA colors to cycle
      through for the boxes.

  Returns:
    A `Tensor`. Has the same type as `images`.

  Usage Example:

  >>> # create an empty image
  >>> img = tf.zeros([1, 3, 3, 3])
  >>> # draw a box around the image
  >>> box = np.array([0, 0, 1, 1])
  >>> boxes = box.reshape([1, 1, 4])
  >>> # alternate between red and blue
  >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
  >>> tf.image.draw_bounding_boxes(img, boxes, colors)
  <tf.Tensor: shape=(1, 3, 3, 3), dtype=float32, numpy=
  array([[[[1., 0., 0.],
          [1., 0., 0.],
          [1., 0., 0.]],
          [[1., 0., 0.],
          [0., 0., 0.],
          [1., 0., 0.]],
          [[1., 0., 0.],
          [1., 0., 0.],
          [1., 0., 0.]]]], dtype=float32)>
  """
  return draw_bounding_boxes_v2(
      images=images, boxes=boxes, colors=colors, name=name)
@tf_export('image.generate_bounding_box_proposals')
@dispatch.add_dispatch_support
def generate_bounding_box_proposals(scores,
                                    bbox_deltas,
                                    image_info,
                                    anchors,
                                    nms_threshold=0.7,
                                    pre_nms_topn=6000,
                                    min_size=16,
                                    post_nms_topn=300,
                                    name=None):
  """Generate bounding box proposals from encoded bounding boxes.

  Thin wrapper around the `GenerateBoundingBoxProposals` op: decodes
  anchor-relative deltas into boxes, then filters them by score, size and
  non-maximal suppression.

  Args:
    scores: A 4-D float `Tensor` of shape
      `[num_images, height, width, num_anchors]` containing scores of
      the boxes for given anchors, can be unsorted.
    bbox_deltas: A 4-D float `Tensor` of shape
      `[num_images, height, width, 4 x num_anchors]` encoding boxes
      with respect to each anchor. Coordinates are given
      in the form `[dy, dx, dh, dw]`.
    image_info: A 2-D float `Tensor` of shape `[num_images, 5]`
      containing image information Height, Width, Scale.
    anchors: A 2-D float `Tensor` of shape `[num_anchors, 4]`
      describing the anchor boxes.
      Boxes are formatted in the form `[y1, x1, y2, x2]`.
    nms_threshold: A scalar float `Tensor` for non-maximal-suppression
      threshold. Defaults to 0.7.
    pre_nms_topn: A scalar int `Tensor` for the number of
      top scoring boxes to be used as input. Defaults to 6000.
    min_size: A scalar float `Tensor`. Any box that has a smaller size
      than min_size will be discarded. Defaults to 16.
    post_nms_topn: An integer. Maximum number of rois in the output.
    name: A name for this operation (optional).

  Returns:
    rois: Region of interest boxes sorted by their scores.
    roi_probabilities: scores of the ROI boxes in the ROIs' `Tensor`.
  """
  return gen_image_ops.generate_bounding_box_proposals(
      scores=scores,
      bbox_deltas=bbox_deltas,
      image_info=image_info,
      anchors=anchors,
      nms_threshold=nms_threshold,
      pre_nms_topn=pre_nms_topn,
      min_size=min_size,
      post_nms_topn=post_nms_topn,
      name=name)
| 39.510073 | 88 | 0.665572 |
095f1b4d8ee44f1fa9df45ec8595fa573518b243 | 1,461 | py | Python | gkmerge/data_tools/util.py | ISCOTTYI/gk-merge | d12421c23ea2d28f5f2bf7985c54b1e3f062cdbc | [
"MIT"
] | null | null | null | gkmerge/data_tools/util.py | ISCOTTYI/gk-merge | d12421c23ea2d28f5f2bf7985c54b1e3f062cdbc | [
"MIT"
] | null | null | null | gkmerge/data_tools/util.py | ISCOTTYI/gk-merge | d12421c23ea2d28f5f2bf7985c54b1e3f062cdbc | [
"MIT"
] | null | null | null | import numpy as np
def mean(lst):
    """Drop every ``None`` entry from ``lst`` and return the arithmetic mean.

    Returns 0 when nothing is left after the ``None`` entries are removed.
    """
    values = [item for item in lst if item is not None]
    if not values:
        return 0
    return np.mean(np.array(values))
def std(lst):
    """Return the population standard deviation of ``lst``.

    ``None`` entries are dropped first, mirroring :func:`mean`, so both
    helpers accept the same partially-filled result lists (previously a
    list containing ``None`` crashed inside ``np.std``).  Returns 0 when
    no values remain, matching the empty-list behavior of the original.
    """
    lst = [elem for elem in lst if elem is not None]
    return np.std(np.array(lst)) if len(lst) > 0 else 0
def greater_than(val, cond=0):
    """Return ``True`` when ``val`` is strictly greater than ``cond``.

    The comparison result is coerced to a plain ``bool`` so numpy scalar
    booleans do not leak out to callers.
    """
    result = val > cond
    return bool(result)
def filtered_mean(lst, filter_func):
    """Mean of the elements of ``lst`` selected by ``filter_func``.

    ``filter_func`` receives the data as a numpy array and must return a
    boolean mask; 0 is returned when the mask selects nothing.
    """
    data = np.array(lst)
    selected = data[filter_func(data)]
    if len(selected) == 0:
        return 0
    return np.mean(selected)
def externally_filtered_mean(lst, filter_lst, filter_func):
    """Mean of the entries of ``lst`` whose companion value passes a filter.

    ``lst[i]`` contributes to the mean if and only if
    ``filter_func(filter_lst[i])`` is true.  ``None`` entries of ``lst``
    are dropped as well, and 0 is returned when no values survive.
    (Docstring previously referred to nonexistent ``condition``/
    ``condition_lst`` parameters and kwargs; the error message below is
    fixed to name the actual parameter.)

    Raises:
        ValueError: If ``lst`` and ``filter_lst`` differ in length.
    """
    if len(lst) != len(filter_lst):
        raise ValueError("lst and filter_lst must be of the same length!")
    selected = [
        elem
        for elem, flag in zip(lst, filter_lst)
        if filter_func(flag) and elem is not None
    ]
    return np.mean(np.array(selected)) if len(selected) > 0 else 0
def fraction(lst, frac_func):
    """Fraction of the non-``None`` entries of ``lst`` satisfying ``frac_func``.

    Returns 0 when the list holds no non-``None`` entries at all.
    """
    values = [item for item in lst if item is not None]
    if not values:
        return 0
    hits = sum(1 for item in values if frac_func(item))
    return hits / len(values)
| 29.22 | 93 | 0.652977 |
259f07627a0fa55fa6f511d4d654167da98662ff | 3,513 | py | Python | .scripts/arch-update.py | WesterWest/tomatorice | 9ffca44c985565d6087ac985d587a0e3b3f8bf9a | [
"MIT"
] | 5 | 2019-02-17T20:02:51.000Z | 2021-02-11T20:41:18.000Z | .scripts/arch-update.py | WesterWest/tomatorice | 9ffca44c985565d6087ac985d587a0e3b3f8bf9a | [
"MIT"
] | null | null | null | .scripts/arch-update.py | WesterWest/tomatorice | 9ffca44c985565d6087ac985d587a0e3b3f8bf9a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (C) 2017 Marcel Patzwahl
# Licensed under the terms of the GNU GPL v3 only.
#
# i3blocks blocklet script to see the available updates of pacman and the AUR
import subprocess
from subprocess import check_output
import argparse
import os
import re
def create_argparse():
    """Build the CLI/environment configuration for the i3blocks blocklet.

    Every option falls back to an i3blocks-style environment variable
    (e.g. ``BASE_COLOR``) before its hard-coded default, so the script can
    be configured from the command line or from the blocklet config.

    Returns:
        argparse.Namespace: The parsed arguments.
    """

    def _default(name, default='', arg_type=str):
        # Environment variable wins over the hard-coded default; the value
        # is always passed through ``arg_type`` so types stay consistent.
        val = default
        if name in os.environ:
            val = os.environ[name]
        return arg_type(val)

    # Named helpers instead of lambda assignments (PEP 8 E731).
    def strbool(s):
        # 't'/'true'/'1' (lower-cased) -> True, everything else -> False.
        return s.lower() in ['t', 'true', '1']

    def strlist(s):
        # Whitespace-separated string -> list of tokens ('' -> []).
        return s.split()

    parser = argparse.ArgumentParser(description='Check for pacman updates')
    parser.add_argument(
        '-b',
        '--base_color',
        default=_default('BASE_COLOR', 'green'),
        help='base color of the output(default=green)'
    )
    parser.add_argument(
        '-u',
        '--updates_available_color',
        default=_default('UPDATE_COLOR', 'yellow'),
        help='color of the output, when updates are available(default=yellow)'
    )
    parser.add_argument(
        '-a',
        '--aur',
        action='store_const',
        const=True,
        default=_default('AUR', 'False', strbool),
        help='Include AUR packages. Attn: pikaur must be installed'
    )
    parser.add_argument(
        '-q',
        '--quiet',
        action='store_const',
        const=True,
        default=_default('QUIET', 'False', strbool),
        help='Do not produce output when system is up to date'
    )
    parser.add_argument(
        '-w',
        '--watch',
        nargs='*',
        default=_default('WATCH', arg_type=strlist),
        help='Explicitly watch for specified packages. '
             'Listed elements are treated as regular expressions for matching.'
    )
    return parser.parse_args()
def get_updates():
    """Names of pending official-repo updates, read from ``checkupdates``.

    Returns an empty list when the command prints nothing.
    """
    raw = check_output(['checkupdates']).decode('utf-8')
    if not raw:
        return []
    return [line.split(' ')[0] for line in raw.split('\n') if line]
def get_aur_updates():
    """Names of pending AUR updates, read from ``pikaur -Qua``.

    Only lines prefixed with ``aur/`` are treated as update entries; the
    first whitespace-separated token of each such line is returned.
    """
    output = ''
    try:
        output = check_output(['pikaur', '-Qua']).decode('utf-8')
    except subprocess.CalledProcessError as exc:
        # pikaur exits with 1 and no output if no updates are available.
        # we ignore this case and go on
        if not (exc.returncode == 1 and not exc.output):
            raise exc
    if not output:
        return []
    aur_updates = [line.split(' ')[0]
                   for line in output.split('\n')
                   if line.startswith('aur/')]
    return aur_updates
def matching_updates(updates, watch_list):
    """Subset of ``updates`` whose names match any watched pattern.

    ``re.match`` anchors at the start of the package name, so each entry
    of ``watch_list`` behaves as a prefix-style regular expression.
    """
    return {
        pkg
        for pkg in updates
        for pattern in watch_list
        if re.match(pattern, pkg)
    }
# --- blocklet entry point (runs at import time, as i3blocks invokes it) ---
label = os.environ.get("LABEL","")
# Pango markup template: {label}<span color='{color}'>{text}</span>
message = "{0}<span color='{1}'>{2}</span>"
args = create_argparse()
updates = get_updates()
if args.aur:
    updates += get_aur_updates()
update_count = len(updates)
if update_count > 0:
    info = str(update_count) + ' updates available'
    short_info = str(update_count) + ' updates'
    # Highlight explicitly watched packages in the long text; star the short one.
    matches = matching_updates(updates, args.watch)
    if matches:
        info += ' [{0}]'.format(', '.join(matches))
        short_info += '*'
    # Two output lines: the long variant first, then the short variant.
    print(message.format(label, args.updates_available_color, info))
    print(message.format(label, args.updates_available_color, short_info))
elif not args.quiet:
    print(message.format(label, args.base_color, 'system up to date'))
| 28.104 | 78 | 0.605181 |
aea5ff1bc2b7a21dd8511ad3846921bdf357243d | 515 | py | Python | examples/show_i_q.py | quyensy/pyrf | a133d97239aec53962b506d09e45f60238094582 | [
"BSD-3-Clause"
] | null | null | null | examples/show_i_q.py | quyensy/pyrf | a133d97239aec53962b506d09e45f60238094582 | [
"BSD-3-Clause"
] | null | null | null | examples/show_i_q.py | quyensy/pyrf | a133d97239aec53962b506d09e45f60238094582 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import sys
from pyrf.devices.thinkrf import WSA4000
# connect to wsa
# Python 2 script (note the print statement at the bottom): grab one packet
# of raw I/Q samples from a WSA4000 analyzer and dump it as CSV to stdout.
dut = WSA4000()
dut.connect(sys.argv[1])  # device address is the first CLI argument
# setup test conditions
dut.reset()
dut.request_read_perm()
dut.ifgain(0)
dut.freq(2450e6)  # center frequency 2.45 GHz
dut.gain('low')
dut.fshift(0)
dut.decimation(0)
# capture 1 packet
dut.capture(1024, 1)
# read until I get 1 data packet
while not dut.eof():
    pkt = dut.read()
    if pkt.is_data_packet():
        break
# print I/Q data into i and q
for i, q in pkt.data:
    print "%d,%d" % (i, q)
| 16.09375 | 40 | 0.673786 |
b50b5ed98fba207699fed127915efdf1e8ce1ca1 | 1,953 | py | Python | tools/SeeDot/faceDetection/scale_image.py | Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML | ef9f8a77f096acbdeb941014791f8eda1c1bc35b | [
"MIT"
] | 719 | 2019-05-10T00:31:30.000Z | 2022-03-30T23:04:23.000Z | tools/SeeDot/faceDetection/scale_image.py | Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML | ef9f8a77f096acbdeb941014791f8eda1c1bc35b | [
"MIT"
] | 119 | 2019-05-14T10:50:15.000Z | 2022-03-01T22:01:09.000Z | tools/SeeDot/faceDetection/scale_image.py | Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML | ef9f8a77f096acbdeb941014791f8eda1c1bc35b | [
"MIT"
] | 235 | 2019-05-07T13:55:37.000Z | 2022-03-20T04:07:38.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import torch
import argparse
import cv2
import numpy as np
from PIL import Image
import os
os.environ['IS_QVGA_MONO'] = '1'
from data.choose_config import cfg
cfg = cfg.cfg
# Build one flattened 320x240 grayscale sample per image for the quantized
# face-detection code: X.csv gets one comma-separated row of pixel values per
# image, Y.csv a matching all-zero row.
parser = argparse.ArgumentParser(description='Generating input to quantized face detection code')
parser.add_argument('--image_dir', default="images", type=str, help='Folder containing image(s)')
parser.add_argument('--out_dir', default="input", type=str, help='Folder containing the CSV files')
args = parser.parse_args()
if not os.path.exists(args.out_dir):
    os.makedirs(args.out_dir)
img_list = [os.path.join(args.image_dir, x)
            for x in os.listdir(args.image_dir)]
xoutfile = open(os.path.join(args.out_dir, "X.csv"), "w")
for image_path in sorted(img_list):
    img = Image.open(image_path)
    img = img.convert('RGB')
    img = np.array(img)
    scale = 1
    # Resize every input to the fixed 320x240 (QVGA) target resolution.
    max_im_shrink_x = 320 / (img.shape[1])
    max_im_shrink_y = 240 / (img.shape[0])
    image = cv2.resize(img, None, None, fx=max_im_shrink_x,
                       fy=max_im_shrink_y, interpolation=cv2.INTER_LINEAR)
    if len(image.shape) == 3:
        # HWC -> CHW via two axis swaps.
        image = np.swapaxes(image, 1, 2)
        image = np.swapaxes(image, 1, 0)
    # RBG to BGR
    x = image[[2, 1, 0], :, :]
    x = x.astype('float32')
    x -= cfg.img_mean
    # Undo the channel flip after the mean subtraction.
    x = x[[2, 1, 0], :, :]
    # Luminance-weighted grayscale conversion.
    x = 0.299 * x[0] + 0.587 * x[1] + 0.114 * x[2]
    x /= scale  # scale is fixed to 1 above, so this is currently a no-op
    x = np.rint(x).astype(int)
    # One CSV row per image: 240*320 integers, ', '-separated, newline at end.
    for i in range(240):
        for j in range(320):
            if i == 239 and j == 319:
                xoutfile.write(str(x[i, j]) + "\n")
            else:
                xoutfile.write(str(x[i, j]) + ', ')
youtfile = open(os.path.join(args.out_dir, "Y.csv"), "w")
for _ in range(len(img_list)):
    # 18000 zeros per image; presumably the label layout expected by the
    # downstream consumer of Y.csv -- TODO confirm.
    for i in range(18000):
        if i == 17999:
            youtfile.write("0\n")
        else:
            youtfile.write("0, ")
| 27.9 | 99 | 0.606759 |
3418594c82340531577540ebc29f14b7d9916255 | 376 | py | Python | ch03/step_function.py | Kobe10/Deep-learning-scratch | 4f9172a8ddf4a7424241e8255aafba6f5ed650fd | [
"MIT"
] | null | null | null | ch03/step_function.py | Kobe10/Deep-learning-scratch | 4f9172a8ddf4a7424241e8255aafba6f5ed650fd | [
"MIT"
] | null | null | null | ch03/step_function.py | Kobe10/Deep-learning-scratch | 4f9172a8ddf4a7424241e8255aafba6f5ed650fd | [
"MIT"
] | null | null | null | # coding: utf-8
import numpy as np
import matplotlib.pylab as plt
def step_function(x):
    """Elementwise Heaviside step: 1 where ``x > 0``, else 0.

    The comparison yields a boolean array; casting to int maps True -> 1
    and False -> 0.  ``dtype=int`` replaces the removed ``np.int`` alias
    (deprecated in NumPy 1.20, removed in 1.24) with identical results,
    since ``np.int`` was simply the builtin ``int``.
    """
    return np.array(x > 0, dtype=int)
# Sample the step function on [-5, 5) and plot it.
X = np.arange(-5.0, 5.0, 0.1)
Y = step_function(X)
plt.plot(X, Y)
plt.ylim(-0.1, 1.1)  # set the y-axis range of the plot
plt.show()
| 23.5 | 69 | 0.648936 |
6bf42a822ce0ca0e7e789afeb7050ee769273b4e | 182 | py | Python | RL/TicTacToe/agent/AlphaZero/__init__.py | tsubame-mz/reinforcement_learning | b69cce573deed11676f60653b41e036d8e79aedc | [
"MIT"
] | null | null | null | RL/TicTacToe/agent/AlphaZero/__init__.py | tsubame-mz/reinforcement_learning | b69cce573deed11676f60653b41e036d8e79aedc | [
"MIT"
] | null | null | null | RL/TicTacToe/agent/AlphaZero/__init__.py | tsubame-mz/reinforcement_learning | b69cce573deed11676f60653b41e036d8e79aedc | [
"MIT"
] | null | null | null | from .config import AlphaZeroConfig # NOQA
from .network import AlphaZeroNetwork # NOQA
from .AlphaZeroAgent import AlphaZeroAgent # NOQA
from .alphazero import AlphaZero # NOQA
| 36.4 | 50 | 0.802198 |
ec1a4817764fe6c8c2b881e4ff1823d29643716d | 6,683 | py | Python | UpdatedCarRentalSystem/projectver1/Carrentsystem/rental_car/migrations/0001_initial.py | hklhfong/Car-Rental-System | 3a4844eea8e9dbf85f4ce62b5115772f48277240 | [
"Apache-2.0"
] | null | null | null | UpdatedCarRentalSystem/projectver1/Carrentsystem/rental_car/migrations/0001_initial.py | hklhfong/Car-Rental-System | 3a4844eea8e9dbf85f4ce62b5115772f48277240 | [
"Apache-2.0"
] | null | null | null | UpdatedCarRentalSystem/projectver1/Carrentsystem/rental_car/migrations/0001_initial.py | hklhfong/Car-Rental-System | 3a4844eea8e9dbf85f4ce62b5115772f48277240 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.1.1 on 2018-10-10 06:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``rental_car`` app (generated by Django 2.1.1).

    Creates the Car, Client, Discount, DropOffStore, PickUpStore, Record,
    Rental and Store tables, then wires the store foreign keys up with
    ``AddField`` because ``Store`` is created after the models that
    reference it.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Car',
            fields=[
                ('car_id', models.IntegerField(primary_key=True, serialize=False)),
                ('brand_name', models.CharField(choices=[('LAND ROVER', 'LAND ROVER'), ('BMW', 'BMW'), ('VOLVO', 'VOLVO'), ('VOLKSWAGEN', 'VOLKSWAGEN'), ('TOYOTA', 'TOYOTA'), ('SAAB', 'SAAB'), ('RENAULT', 'RENAULT'), ('NISSAN', 'NISSAN'), ('MITSUBISHI', 'MITSUBISHI'), ('MERCEDES-BENZ', 'MERCEDES-BENZ'), ('MAZDA', 'MAZDA'), ('HONDA', 'HONDA'), ('EUNOS', 'EUNOS'), ('DATSUN', 'DATSUN'), ('CHRYSLER', 'CHRYSLER'), ('AUDI', 'AUDI'), ('ALFA ROMEO', 'ALFA ROMEO'), ('PEUGEOT', 'PEUGEOT'), ('NULL', 'NULL')], max_length=13)),
                ('type', models.CharField(max_length=30)),
                ('transmission', models.CharField(max_length=20)),
                ('price', models.CharField(max_length=10)),
                ('luggage_size', models.CharField(max_length=10)),
                ('seat_number', models.CharField(max_length=10)),
                ('release_year', models.CharField(max_length=10)),
            ],
            options={
                'db_table': 'car',
            },
        ),
        migrations.CreateModel(
            name='Client',
            fields=[
                ('client_id', models.IntegerField(primary_key=True, serialize=False)),
                ('client_name', models.CharField(max_length=30)),
                ('client_phone', models.CharField(max_length=30)),
                ('client_address', models.CharField(max_length=50)),
                # NOTE(review): 'client_brithday' is a typo for "birthday",
                # but it matches the model/DB column and must not be renamed
                # inside this already-applied migration.
                ('client_brithday', models.DateField()),
                ('client_occupation', models.CharField(max_length=20)),
                ('client_gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),
            ],
            options={
                'db_table': 'Client',
            },
        ),
        migrations.CreateModel(
            name='Discount',
            fields=[
                ('discount_id', models.IntegerField(primary_key=True, serialize=False)),
                ('discount_code', models.IntegerField(null=True)),
                ('starting_date', models.DateField()),
                ('ending_date', models.DateField()),
            ],
            options={
                'db_table': 'Discount',
            },
        ),
        migrations.CreateModel(
            name='DropOffStore',
            fields=[
                ('drop_off_store_id', models.IntegerField(primary_key=True, serialize=False)),
            ],
            options={
                'db_table': 'DropOffStore',
            },
        ),
        migrations.CreateModel(
            name='PickUpStore',
            fields=[
                ('pick_up_store_id', models.IntegerField(primary_key=True, serialize=False)),
            ],
            options={
                'db_table': 'PickUpStore',
            },
        ),
        migrations.CreateModel(
            name='Record',
            fields=[
                ('record_id', models.IntegerField(primary_key=True, serialize=False)),
                ('accident_type', models.CharField(max_length=30)),
                ('accident_address', models.CharField(max_length=30)),
                ('client', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='rental_car.Client')),
            ],
            options={
                'db_table': 'Record',
            },
        ),
        migrations.CreateModel(
            name='Rental',
            fields=[
                ('rental_id', models.IntegerField(primary_key=True, serialize=False)),
                ('rent_date', models.DateField()),
                ('end_rent_date', models.DateField()),
                ('rent_price_total', models.IntegerField(null=True)),
                ('pick_up_city', models.CharField(choices=[('Sydney', 'Sydney'), ('Melbourne', 'Melbourne'), ('Brisbane', 'Brisbane')], max_length=9)),
                ('drop_off_city', models.CharField(choices=[('Sydney', 'Sydney'), ('Melbourne', 'Melbourne'), ('Brisbane', 'Brisbane')], max_length=9)),
                ('insurance_number', models.CharField(max_length=20)),
                ('car', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='rental_car.Car')),
                ('client', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='rental_car.Client')),
                ('discount', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='rental_car.Discount')),
                ('drop_off_store', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='rental_car.DropOffStore')),
                ('pick_up_store', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='rental_car.PickUpStore')),
            ],
            options={
                'db_table': 'Rental',
            },
        ),
        migrations.CreateModel(
            name='Store',
            fields=[
                ('store_id', models.IntegerField(primary_key=True, serialize=False)),
                ('store_name', models.CharField(max_length=30)),
                ('store_address', models.CharField(max_length=30)),
                ('store_phone', models.TextField(default='null')),
                ('store_city', models.CharField(max_length=30)),
                ('store_state_name', models.CharField(choices=[('Queensland', 'Queensland'), ('South Australia', 'South Australia'), ('Tasmania', 'Tasmania'), ('Victoria', 'Victoria'), ('New South Wales', 'New South Wales')], max_length=15)),
            ],
            options={
                'db_table': 'Store',
            },
        ),
        # Store is created last, so the FKs pointing at it are added here.
        migrations.AddField(
            model_name='pickupstore',
            name='store',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='rental_car.Store'),
        ),
        migrations.AddField(
            model_name='dropoffstore',
            name='store',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='rental_car.Store'),
        ),
        migrations.AddField(
            model_name='car',
            name='record',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='rental_car.Record'),
        ),
    ]
| 48.427536 | 520 | 0.55095 |
7dcb25ecf475928ed63ce384e37bebbe5c76cf08 | 10,558 | py | Python | deepxde/nn/tensorflow_compat_v1/fnn.py | matth86/deepxde | 457cad03903792d9d107d60f2b639b87361805d0 | [
"Apache-2.0"
] | 955 | 2019-06-21T21:56:02.000Z | 2022-03-31T03:44:45.000Z | deepxde/nn/tensorflow_compat_v1/fnn.py | matth86/deepxde | 457cad03903792d9d107d60f2b639b87361805d0 | [
"Apache-2.0"
] | 517 | 2019-07-25T16:47:44.000Z | 2022-03-31T17:37:58.000Z | deepxde/nn/tensorflow_compat_v1/fnn.py | matth86/deepxde | 457cad03903792d9d107d60f2b639b87361805d0 | [
"Apache-2.0"
] | 374 | 2019-06-24T00:44:16.000Z | 2022-03-30T08:17:36.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from .nn import NN
from .. import activations
from .. import initializers
from .. import regularizers
from ... import config
from ...backend import tf
from ...utils import timing
class FNN(NN):
    """Fully-connected neural network.

    TensorFlow-1.x (graph-mode) implementation: ``build()`` assembles the
    graph with placeholders, so nothing is evaluated until a session runs.
    """

    def __init__(
        self,
        layer_sizes,
        activation,
        kernel_initializer,
        regularization=None,
        dropout_rate=0,
        batch_normalization=None,
        layer_normalization=None,
        kernel_constraint=None,
        use_bias=True,
    ):
        super(FNN, self).__init__()
        # layer_sizes[0] is the input dimension, layer_sizes[-1] the output
        # dimension; everything in between are hidden-layer widths.
        self.layer_size = layer_sizes
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.regularizer = regularizers.get(regularization)
        self.dropout_rate = dropout_rate
        # "before"/"after" place the normalization relative to the activation;
        # None disables it.  Batch and layer normalization are mutually
        # exclusive (enforced in build()).
        self.batch_normalization = batch_normalization
        self.layer_normalization = layer_normalization
        self.kernel_constraint = kernel_constraint
        self.use_bias = use_bias

    @property
    def inputs(self):
        # Input placeholder, created in build().
        return self.x

    @property
    def outputs(self):
        # Network output tensor, created in build().
        return self.y

    @property
    def targets(self):
        # Placeholder for training targets, created in build().
        return self.y_

    @timing
    def build(self):
        """Construct the graph: placeholders, hidden layers, linear output."""
        print("Building feed-forward neural network...")
        self.x = tf.placeholder(config.real(tf), [None, self.layer_size[0]])
        y = self.x
        if self._input_transform is not None:
            y = self._input_transform(y)
        for i in range(len(self.layer_size) - 2):
            if self.batch_normalization is None and self.layer_normalization is None:
                y = self._dense(
                    y,
                    self.layer_size[i + 1],
                    activation=self.activation,
                    use_bias=self.use_bias,
                )
            elif self.batch_normalization and self.layer_normalization:
                raise ValueError(
                    "Can not apply batch_normalization and layer_normalization at the "
                    "same time."
                )
            elif self.batch_normalization == "before":
                y = self._dense_batchnorm_v1(y, self.layer_size[i + 1])
            elif self.batch_normalization == "after":
                y = self._dense_batchnorm_v2(y, self.layer_size[i + 1])
            elif self.layer_normalization == "before":
                y = self._dense_layernorm_v1(y, self.layer_size[i + 1])
            elif self.layer_normalization == "after":
                y = self._dense_layernorm_v2(y, self.layer_size[i + 1])
            else:
                raise ValueError(
                    "batch_normalization: {}, layer_normalization: {}".format(
                        self.batch_normalization, self.layer_normalization
                    )
                )
            # Dropout (if any) follows every hidden layer.
            if self.dropout_rate > 0:
                y = tf.layers.dropout(y, rate=self.dropout_rate, training=self.training)
        # Linear output layer (no activation).
        self.y = self._dense(y, self.layer_size[-1], use_bias=self.use_bias)
        if self._output_transform is not None:
            self.y = self._output_transform(self.x, self.y)
        self.y_ = tf.placeholder(config.real(tf), [None, self.layer_size[-1]])
        self.built = True

    def _dense(self, inputs, units, activation=None, use_bias=True):
        """One dense layer with this network's initializer/regularizer/constraint."""
        # Cannot directly replace tf.layers.dense() with tf.keras.layers.Dense() due to
        # some differences. One difference is that tf.layers.dense() will add
        # regularizer loss to the collection REGULARIZATION_LOSSES, but
        # tf.keras.layers.Dense() will not. Hence, tf.losses.get_regularization_loss()
        # cannot be used for tf.keras.layers.Dense().
        # References:
        # - https://github.com/tensorflow/tensorflow/issues/21587
        # - https://www.tensorflow.org/guide/migrate
        return tf.layers.dense(
            inputs,
            units,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=self.kernel_initializer,
            kernel_regularizer=self.regularizer,
            kernel_constraint=self.kernel_constraint,
        )

    @staticmethod
    def _dense_weightnorm(inputs, units, activation=None, use_bias=True):
        """Dense layer with weight normalization (W = g * V / ||V||).

        NOTE(review): not referenced by build(); presumably kept for
        experimentation -- confirm before removing.
        """
        fan_in = inputs.shape[1]
        # He-style initialization: stddev = sqrt(2 / fan_in).
        W = tf.Variable(tf.random_normal([fan_in, units], stddev=math.sqrt(2 / fan_in)))
        g = tf.Variable(tf.ones(units))
        W = tf.nn.l2_normalize(W, axis=0) * g
        y = tf.matmul(inputs, W)
        if use_bias:
            b = tf.Variable(tf.zeros(units))
            y += b
        if activation is not None:
            return activation(y)
        return y

    def _dense_batchnorm_v1(self, inputs, units):
        # FC - BN - activation
        y = self._dense(inputs, units, use_bias=False)
        y = tf.layers.batch_normalization(y, training=self.training)
        return self.activation(y)

    def _dense_batchnorm_v2(self, inputs, units):
        # FC - activation - BN
        y = self._dense(inputs, units, activation=self.activation)
        return tf.layers.batch_normalization(y, training=self.training)

    @staticmethod
    def _layer_normalization(inputs, elementwise_affine=True):
        """Normalize over axis 1; optionally apply learnable gamma/beta.

        References:
        - https://tensorflow.google.cn/api_docs/python/tf/keras/layers/LayerNormalization?hl=en
        - https://github.com/taki0112/Group_Normalization-Tensorflow
        """
        with tf.variable_scope("layer_norm"):
            mean, var = tf.nn.moments(inputs, axes=[1], keepdims=True)
            if elementwise_affine:
                gamma = tf.Variable(
                    initial_value=tf.constant_initializer(1.0)(shape=[1, 1]),
                    trainable=True,
                    name="gamma",
                    dtype=config.real(tf),
                )
                beta = tf.Variable(
                    initial_value=tf.constant_initializer(0.0)(shape=[1, 1]),
                    trainable=True,
                    name="beta",
                    dtype=config.real(tf),
                )
            else:
                gamma, beta = None, None
            return tf.nn.batch_normalization(
                inputs, mean, var, offset=beta, scale=gamma, variance_epsilon=1e-3
            )

    def _dense_layernorm_v1(self, inputs, units):
        # FC - LN - activation
        y = self._dense(inputs, units, use_bias=False)
        y = self._layer_normalization(y)
        return self.activation(y)

    def _dense_layernorm_v2(self, inputs, units):
        # FC - activation - LN
        y = self._dense(inputs, units, activation=self.activation)
        return self._layer_normalization(y)
class PFNN(FNN):
    """Parallel fully-connected neural network that uses independent sub-networks for
    each network output.

    Args:
        layer_sizes: A nested list to define the architecture of the neural network (how
            the layers are connected). If `layer_sizes[i]` is int, it represent one
            layer shared by all the outputs; if `layer_sizes[i]` is list, it represent
            `len(layer_sizes[i])` sub-layers, each of which exclusively used by one
            output. Note that `len(layer_sizes[i])` should equal to the number of
            outputs. Every number specify the number of neurons of that layer.
    """

    def __init__(
        self,
        layer_sizes,
        activation,
        kernel_initializer,
        regularization=None,
        dropout_rate=0,
        batch_normalization=None,
    ):
        # Only FNN's batch-normalization subset is exposed here (no layer
        # norm / kernel constraint options).
        super(PFNN, self).__init__(
            layer_sizes,
            activation,
            kernel_initializer,
            regularization,
            dropout_rate,
            batch_normalization,
        )

    @timing
    def build(self):
        """Construct the graph, splitting into per-output sub-networks wherever
        the corresponding entry of ``layer_size`` is a list/tuple."""

        def layer_map(_y, layer_size, net):
            # One hidden layer (+ optional batch norm and dropout) applied
            # to tensor ``_y``.
            if net.batch_normalization is None:
                _y = net._dense(_y, layer_size, activation=net.activation)
            elif net.batch_normalization == "before":
                _y = net._dense_batchnorm_v1(_y, layer_size)
            elif net.batch_normalization == "after":
                _y = net._dense_batchnorm_v2(_y, layer_size)
            else:
                raise ValueError("batch_normalization")
            if net.dropout_rate > 0:
                _y = tf.layers.dropout(_y, rate=net.dropout_rate, training=net.training)
            return _y

        print("Building feed-forward neural network...")
        self.x = tf.placeholder(config.real(tf), [None, self.layer_size[0]])
        y = self.x
        if self._input_transform is not None:
            y = self._input_transform(y)
        # hidden layers
        for i_layer in range(len(self.layer_size) - 2):
            if isinstance(self.layer_size[i_layer + 1], (list, tuple)):
                if isinstance(y, (list, tuple)):
                    # e.g. [8, 8, 8] -> [16, 16, 16]
                    if len(self.layer_size[i_layer + 1]) != len(
                        self.layer_size[i_layer]
                    ):
                        raise ValueError(
                            "Number of sub-layers should be the same when feed-forwarding"
                        )
                    y = [
                        layer_map(y[i_net], self.layer_size[i_layer + 1][i_net], self)
                        for i_net in range(len(self.layer_size[i_layer + 1]))
                    ]
                else:
                    # e.g. 64 -> [8, 8, 8]
                    y = [
                        layer_map(y, self.layer_size[i_layer + 1][i_net], self)
                        for i_net in range(len(self.layer_size[i_layer + 1]))
                    ]
            else:
                # e.g. 64 -> 64
                y = layer_map(y, self.layer_size[i_layer + 1], self)
        # output layers
        if isinstance(y, (list, tuple)):
            # e.g. [3, 3, 3] -> 3
            if len(self.layer_size[-2]) != self.layer_size[-1]:
                raise ValueError(
                    "Number of sub-layers should be the same as number of outputs"
                )
            y = [self._dense(y[i_net], 1) for i_net in range(len(y))]
            self.y = tf.concat(y, axis=1)
        else:
            self.y = self._dense(y, self.layer_size[-1])
        if self._output_transform is not None:
            self.y = self._output_transform(self.x, self.y)
        self.y_ = tf.placeholder(config.real(tf), [None, self.layer_size[-1]])
        self.built = True
| 37.842294 | 95 | 0.571415 |
408fbd5fca3ec9fcc6057c732c73d61f347e09eb | 2,428 | py | Python | ghettoq/messaging.py | ask/ghettoq | 22a0fcd865b618cbbbfd102efd88a7983507c24e | [
"BSD-3-Clause"
] | 5 | 2015-01-13T14:36:52.000Z | 2021-05-07T15:45:55.000Z | ghettoq/messaging.py | ask/ghettoq | 22a0fcd865b618cbbbfd102efd88a7983507c24e | [
"BSD-3-Clause"
] | null | null | null | ghettoq/messaging.py | ask/ghettoq | 22a0fcd865b618cbbbfd102efd88a7983507c24e | [
"BSD-3-Clause"
] | null | null | null | from Queue import Empty
from itertools import cycle
class Queue(object):
    """Thin handle for a single named queue on a storage backend.

    Every operation delegates to ``backend`` using ``name`` as the key.
    """

    def __init__(self, backend, name):
        self.name = name
        self.backend = backend

    def put(self, payload, priority=0):
        """Store ``payload`` on this queue, forwarding ``priority``."""
        self.backend.put(self.name, payload, priority=priority)

    def get(self):
        """Pop the next payload; raise ``Empty`` when the backend has none."""
        payload = self.backend.get(self.name)
        if payload is None:
            raise Empty
        return payload

    def purge(self):
        """Remove all messages, returning whatever the backend reports."""
        return self.backend.purge(self.name)

    def __repr__(self):
        return "<Queue: %s>" % repr(self.name)
class QueueSet(object):
    """A set of queues that operates as one."""

    def __init__(self, backend, queues):
        # NOTE: Python 2 era code -- ``map`` below is assumed to return a
        # list, and ``self.cycle.next()`` in _emulated relies on the
        # Python 2 iterator protocol.
        self.backend = backend
        self.queue_names = list(queues)
        # Prefer the backend's native multi-queue get when it provides one.
        self._get_many = getattr(self.backend, "get_many", None)
        self.get = self._emulated
        if self._get_many:
            self.get = self._native
        # attributes below are only used in emulation mode.
        # queues could be a PriorityQueue as well to support
        # priorities.
        self.queues = map(self.backend.Queue, self.queue_names)
        # an infinite cycle through all the queues.
        self.cycle = cycle(self.queues)
        # A set of all the queue names, so we can match when we've
        # tried all of them.
        self.all = frozenset(self.queue_names)

    def _native(self, timeout=None):
        """Delegate directly to the backend's own multi-queue get."""
        return self._get_many(self.queue_names, timeout=timeout)

    def _emulated(self, timeout=None):
        """Get the next message available in the queue.

        ``timeout`` is accepted for signature parity with ``_native`` but
        is not used here.

        :returns: The message and the name of the queue it came from as
            a tuple.
        :raises Empty: If there are no more items in any of the queues.
        """
        # A set of queues we've already tried.
        tried = set()
        while True:
            # Get the next queue in the cycle, and try to get an item off it.
            try:
                queue = self.cycle.next()
            except StopIteration:
                raise Empty("No queues registered")
            try:
                item = queue.get()
            except Empty:
                # raises Empty when we've tried all of them.
                tried.add(queue.name)
                if tried == self.all:
                    raise
            else:
                return item, queue.name

    def __repr__(self):
        return "<QueueSet: %s>" % repr(self.queue_names)
| 28.564706 | 77 | 0.580725 |
c83b4e8d07c7890b2783da5ca922985c575b5890 | 2,723 | py | Python | autokeras/gui/resnet_gui.py | Huiqing666/autokeras | 8c6ff435f82e8a7bf1cdd0fc8fdb499fab88c991 | [
"MIT"
] | null | null | null | autokeras/gui/resnet_gui.py | Huiqing666/autokeras | 8c6ff435f82e8a7bf1cdd0fc8fdb499fab88c991 | [
"MIT"
] | null | null | null | autokeras/gui/resnet_gui.py | Huiqing666/autokeras | 8c6ff435f82e8a7bf1cdd0fc8fdb499fab88c991 | [
"MIT"
] | null | null | null | from tkinter import *
class resnet_gui:
    """Blocking wrapper around the Tk form.

    Constructing an instance opens the window, runs the Tk main loop until
    the window is closed, and then exposes the values collected by ``gui``
    as ``self.var``.
    """

    def __init__(self):
        root = Tk()
        theGui = gui(root)
        root.geometry("600x400")  # fix the app window size to 600x400
        root.resizable(0, 0)  # Don't allow resizing in the x or y direction
        root.title('Gui for resnet')
        root.mainloop()  # blocks here until the window is closed/quit
        self.var = theGui.var
class gui:
    """Parameter form for the resnet runner.

    Builds four entry boxes (repetitions for conv2_x..conv5_x, each
    defaulting to '2'), a Basic/Bottleneck radio pair, and Run/Quit
    buttons.  Clicking "Run network" appends the current form values to
    ``self.var`` in the order conv2_x..conv5_x, then the block-design flag.
    """

    def __init__(self, master):
        self.master = master
        self.var = []
        label = Label(self.master, text='Set your parameters of resnet', font=("Arial", 18))
        label.pack()
        frame1 = Frame(self.master)
        frame1.pack()
        # 0 = Basic block, 1 = Bottleneck block (radio buttons below).
        self.btn2 = IntVar()
        Label(frame1, text='Block Design', font=("", 12)).grid(row=2, column=0)
        subframe2 = Frame(frame1)
        subframe2.grid(row=2, column=1)
        # NOTE(review): .pack() returns None, so radio2_1/radio2_2 hold None;
        # the widgets themselves are still created and displayed.
        radio2_1 = Radiobutton(subframe2, text='Basic', value=0, variable=self.btn2, font=("", 12)).pack(side=LEFT)
        radio2_2 = Radiobutton(subframe2, text='Bottleneck', value=1, variable=self.btn2, font=("", 12)).pack(side=LEFT)
        frame2 = Frame(self.master)
        frame2.pack()
        Label(frame1, text='Repetitions of Conv Block', font=("", 12)).grid(row=3, column=0, rowspan=50)
        subframe1 = Frame(frame2)
        # One entry per conv stage, all defaulting to '2'.
        default_r1 = StringVar()
        default_r1.set('2')
        self.conv2_x = Entry(subframe1, textvariable=default_r1)
        self.conv2_x.grid(row=3, column=1, columnspan=20)
        default_r2 = StringVar()
        default_r2.set('2')
        self.conv3_x = Entry(subframe1, textvariable=default_r2)
        self.conv3_x.grid(row=4, column=1, columnspan=20)
        default_r3 = StringVar()
        default_r3.set('2')
        self.conv4_x = Entry(subframe1, textvariable=default_r3)
        self.conv4_x.grid(row=5, column=1, columnspan=20)
        default_r4 = StringVar()
        default_r4.set('2')
        self.conv5_x = Entry(subframe1, textvariable=default_r4)
        self.conv5_x.grid(row=6, column=1, columnspan=20)
        subframe1.grid(row=3, column=0)
        frame3 = Frame(self.master)
        frame3.pack()
        button = Button(frame3, text='Run network', width=10, font=("", 12))
        button.bind('<Button-1>', self.get_variable)
        button.pack()
        self.quitBtn = Button(frame3, text="Quit", command=self.master.quit, font=("", 12)).pack()

    # get parameters of resnet
    def get_variable(self, event):
        """Click handler: append the current form values to ``self.var``.

        NOTE(review): the list is never cleared, so values accumulate --
        a second click appends five more entries.
        """
        self.var.append(int(self.conv2_x.get()))
        self.var.append(int(self.conv3_x.get()))
        self.var.append(int(self.conv4_x.get()))
        self.var.append(int(self.conv5_x.get()))
        self.var.append(int(self.btn2.get()))
        print(self.var)
| 34.468354 | 120 | 0.607418 |
8154ba91a10e61ef10080cc2daae47b7f07cb644 | 3,240 | py | Python | openpose.py | Muskan-Malhotra/Pose-Detection-for-YOGA | 98045020af9e66fe1167699a1a8f5725b27fe676 | [
"Apache-2.0"
] | null | null | null | openpose.py | Muskan-Malhotra/Pose-Detection-for-YOGA | 98045020af9e66fe1167699a1a8f5725b27fe676 | [
"Apache-2.0"
] | null | null | null | openpose.py | Muskan-Malhotra/Pose-Detection-for-YOGA | 98045020af9e66fe1167699a1a8f5725b27fe676 | [
"Apache-2.0"
] | null | null | null |
# OpenPose human-pose demo: run a MobileNet OpenPose graph through OpenCV's
# DNN module on camera/video frames and draw the detected skeleton live.
import cv2 as cv
import numpy as np
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera')
parser.add_argument('--thr', default=0.2, type=float, help='Threshold value for pose parts heat map')
parser.add_argument('--width', default=368, type=int, help='Resize input to specific width.')
parser.add_argument('--height', default=368, type=int, help='Resize input to specific height.')

args = parser.parse_args()

# Heatmap channel index for each body part (19 channels incl. background).
BODY_PARTS = { "Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
               "LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
               "RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "REye": 14,
               "LEye": 15, "REar": 16, "LEar": 17, "Background": 18 }

# Limb connections drawn between detected key points.
POSE_PAIRS = [ ["Neck", "RShoulder"], ["Neck", "LShoulder"], ["RShoulder", "RElbow"],
               ["RElbow", "RWrist"], ["LShoulder", "LElbow"], ["LElbow", "LWrist"],
               ["Neck", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"], ["Neck", "LHip"],
               ["LHip", "LKnee"], ["LKnee", "LAnkle"], ["Neck", "Nose"], ["Nose", "REye"],
               ["REye", "REar"], ["Nose", "LEye"], ["LEye", "LEar"] ]

inWidth = args.width
inHeight = args.height

net = cv.dnn.readNetFromTensorflow("graph_opt.pb")
cap = cv.VideoCapture(args.input if args.input else 0)

while cv.waitKey(1) < 0:
    hasFrame, frame = cap.read()
    if not hasFrame:
        cv.waitKey()
        break

    frameHeight, frameWidth = frame.shape[:2]
    net.setInput(cv.dnn.blobFromImage(frame, 1.0, (inWidth, inHeight), (127.5, 127.5, 127.5), swapRB=True, crop=False))
    out = net.forward()
    out = out[:, :19, :, :]  # MobileNet output is [1, 57, H, W]; keep only the 19 part heatmaps
    assert len(BODY_PARTS) == out.shape[1]

    # One key point per part: take the global maximum of each heatmap.
    # (Only a single pose can be detected at a time this way; finding all
    # local maxima would be needed for multi-person detection.)
    points = []
    for heatMap in out[0]:
        _, conf, _, point = cv.minMaxLoc(heatMap)
        # Scale heatmap coordinates back to the original frame size.
        x = (frameWidth * point[0]) / out.shape[3]
        y = (frameHeight * point[1]) / out.shape[2]
        # Keep the point only if its confidence clears the threshold.
        points.append((int(x), int(y)) if conf > args.thr else None)

    for partFrom, partTo in POSE_PAIRS:
        assert partFrom in BODY_PARTS
        assert partTo in BODY_PARTS

        idFrom = BODY_PARTS[partFrom]
        idTo = BODY_PARTS[partTo]
        if points[idFrom] and points[idTo]:
            cv.line(frame, points[idFrom], points[idTo], (0, 255, 0), 3)
            cv.ellipse(frame, points[idFrom], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)
            cv.ellipse(frame, points[idTo], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)

    # Overlay the network inference time in milliseconds.
    t, _ = net.getPerfProfile()
    freq = cv.getTickFrequency() / 1000
    cv.putText(frame, '%.2fms' % (t / freq), (10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
cv.imshow('OpenPose using OpenCV', frame) | 41.012658 | 119 | 0.591667 |
5846de10cf4bfa24ca8d8d7bbbfc041e65e605a7 | 3,052 | py | Python | model.py | zhengyangb/NLU2019 | 690ad35e8028d6547ca1d5f71641fb02c377369c | [
"BSD-3-Clause"
] | null | null | null | model.py | zhengyangb/NLU2019 | 690ad35e8028d6547ca1d5f71641fb02c377369c | [
"BSD-3-Clause"
] | null | null | null | model.py | zhengyangb/NLU2019 | 690ad35e8028d6547ca1d5f71641fb02c377369c | [
"BSD-3-Clause"
] | null | null | null | import torch
import torch.nn as nn
from embed_regularize import embedded_dropout
from locked_dropout import LockedDropout
from weight_drop import WeightDrop
from ON_LSTM import ONLSTMStack
class RNNModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder.

    Wraps an ONLSTMStack between an embedding encoder and a linear decoder,
    with several flavours of dropout (embedding-row, input, hidden, output,
    recurrent weight-drop) applied throughout.
    """
    def __init__(self, rnn_type, ntoken, ninp, nhid, chunk_size, nlayers, dropout=0.5, dropouth=0.5, dropouti=0.5,
                 dropoute=0.1, wdrop=0, tie_weights=False, pre_emb=None,):
        # dropout:  applied to the final RNN output
        # dropouth: applied between recurrent layers
        # dropouti: applied to the embedded input
        # dropoute: drops whole embedding rows (embedded_dropout)
        # wdrop:    recurrent weight DropConnect inside the ON-LSTM stack
        # pre_emb:  optional pretrained embedding matrix to seed the encoder
        super(RNNModel, self).__init__()
        self.lockdrop = LockedDropout()
        self.idrop = nn.Dropout(dropouti)
        self.hdrop = nn.Dropout(dropouth)
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(ntoken, ninp)
        assert rnn_type in ['LSTM'], 'RNN type is not supported'
        # Layer sizes run ninp -> nhid ... nhid -> ninp so the last hidden
        # size matches the embedding size (needed for weight tying below).
        self.rnn = ONLSTMStack(
            [ninp] + [nhid] * (nlayers - 1) + [ninp],
            chunk_size=chunk_size,
            dropconnect=wdrop,
            dropout=dropouth
        )
        self.decoder = nn.Linear(ninp, ntoken)
        # Optionally tie weights as in:
        # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
        # https://arxiv.org/abs/1608.05859
        # and
        # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
        # https://arxiv.org/abs/1611.01462
        if tie_weights:
            #if nhid != ninp:
            #    raise ValueError('When using the tied flag, nhid must be equal to emsize')
            self.decoder.weight = self.encoder.weight
        self.init_weights(pre_emb)
        self.rnn_type = rnn_type
        self.ninp = ninp
        self.nhid = nhid
        self.nlayers = nlayers
        self.dropout = dropout
        self.dropouti = dropouti
        self.dropouth = dropouth
        self.dropoute = dropoute
        self.tie_weights = tie_weights
    def reset(self):
        # NOTE(review): 'QRNN' is ruled out by the assert in __init__, so this
        # is effectively a no-op; it also references self.rnns, which is never
        # defined (only self.rnn is) — confirm before enabling QRNN support.
        if self.rnn_type == 'QRNN': [r.reset() for r in self.rnns]
    def init_weights(self, pre_emb):
        """Uniformly initialize encoder/decoder weights in [-0.1, 0.1];
        optionally overwrite the top rows of the embedding with *pre_emb*."""
        initrange = 0.1
        self.encoder.weight.data.uniform_(-initrange, initrange)
        if pre_emb is not None:
            self.encoder.weight.data[:pre_emb.size(0), :pre_emb.size(1)] = torch.FloatTensor(pre_emb)
        self.decoder.bias.data.fill_(0)
        self.decoder.weight.data.uniform_(-initrange, initrange)
    def forward(self, input, hidden, return_h=False):
        """Embed *input*, run the ON-LSTM stack, and return the flattened
        output of shape (seq_len * batch, last_layer_size) plus the new
        hidden state; with return_h=True also the per-layer raw and
        dropped outputs.  NOTE: self.decoder is not applied here —
        presumably done by the caller; confirm against the training loop."""
        emb = embedded_dropout(
            self.encoder, input,
            dropout=self.dropoute if self.training else 0
        )
        emb = self.lockdrop(emb, self.dropouti)
        raw_output, hidden, raw_outputs, outputs, distances = self.rnn(emb, hidden)
        # Stash the distances returned by the ON-LSTM stack for inspection.
        self.distance = distances
        output = self.lockdrop(raw_output, self.dropout)
        result = output.view(output.size(0)*output.size(1), output.size(2))
        if return_h:
            return result, hidden, raw_outputs, outputs
        else:
            return result, hidden
    def init_hidden(self, bsz):
        # Initial hidden-state construction is delegated to the stack.
        return self.rnn.init_hidden(bsz)
| 35.488372 | 115 | 0.629423 |
35350298ef26bce632b955532d13abc3a4c1faff | 19,270 | py | Python | se/vendor/kindleunpack/mobi_split.py | vr8hub/tools | 345a3b34ee29094f938d59ee809e44f17cd3d940 | [
"CC0-1.0"
] | 985 | 2015-03-30T22:44:49.000Z | 2022-03-26T00:43:53.000Z | se/vendor/kindleunpack/mobi_split.py | vr8hub/tools | 345a3b34ee29094f938d59ee809e44f17cd3d940 | [
"CC0-1.0"
] | 483 | 2015-11-27T22:06:36.000Z | 2022-03-23T20:13:44.000Z | se/vendor/kindleunpack/mobi_split.py | vr8hub/tools | 345a3b34ee29094f938d59ee809e44f17cd3d940 | [
"CC0-1.0"
] | 148 | 2017-02-23T16:26:08.000Z | 2022-03-04T12:48:57.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
from __future__ import unicode_literals, division, absolute_import, print_function
import struct
# note: struct pack, unpack, unpack_from all require bytestring format
# data all the way up to at least python 2.7.5, python 3 okay with bytestring
from .unipath import pathof
# Byte offsets of header fields.  The first two groups are offsets into the
# whole PalmDB file; the "rec0" group is relative to the start of record 0.
# important pdb header offsets
unique_id_seed = 68
number_of_pdb_records = 76
# important palmdoc header offsets
book_length = 4
book_record_count = 8
first_pdb_record = 78
# important rec0 offsets
length_of_book = 4
mobi_header_base = 16
mobi_header_length = 20
mobi_type = 24
mobi_version = 36
first_non_text = 80
title_offset = 84
first_resc_record = 108
first_content_index = 192
last_content_index = 194
kf8_fdst_index = 192 # for KF8 mobi headers
fcis_index = 200
flis_index = 208
srcs_index = 224
srcs_count = 228
primary_index = 244
datp_index = 256
huffoff = 112
hufftbloff = 120
def getint(datain, ofs, sz=b'L'):
    """Read one big-endian unsigned integer from *datain* at byte offset
    *ofs*; *sz* is the struct format code (b'L' = 32-bit, b'H' = 16-bit)."""
    fields = struct.unpack_from(b'>' + sz, datain, ofs)
    return fields[0]
def writeint(datain, ofs, n, len=b'L'):
    """Return a copy of *datain* with *n* packed big-endian at offset *ofs*.

    ``len`` selects the width: b'L' writes four bytes, anything else two.
    (The parameter name shadows the builtin but is kept for API parity.)
    """
    fmt, width = (b'>L', 4) if len == b'L' else (b'>H', 2)
    return datain[:ofs] + struct.pack(fmt, n) + datain[ofs + width:]
def getsecaddr(datain,secno):
    """Return the (start, end) byte offsets of PDB section *secno*.

    The last section runs to the end of the file; any other section ends
    where the next one begins.
    """
    nsec = getint(datain,number_of_pdb_records,b'H')
    # BUG FIX: the original "assert secno>=0 & secno<nsec" did not check the
    # bounds — '&' binds tighter than the comparisons, so it parsed as the
    # chained comparison "secno >= (0 & secno) < nsec", which is trivially
    # satisfiable.  Use a proper chained range check instead.
    assert 0 <= secno < nsec, 'secno %d out of range (nsec=%d)'%(secno,nsec)
    secstart = getint(datain,first_pdb_record+secno*8)
    if secno == nsec-1:
        secend = len(datain)
    else:
        secend = getint(datain,first_pdb_record+(secno+1)*8)
    return secstart,secend
def readsection(datain, secno):
    """Return the raw bytes of PDB section *secno* of *datain*."""
    start, end = getsecaddr(datain, secno)
    return datain[start:end]
def writesection(datain,secno,secdata): # overwrite, accounting for different length
    """Return a copy of *datain* with section *secno* replaced by *secdata*.

    The whole PalmDB image is rebuilt: the unique-id seed is bumped, the
    offsets of every section after *secno* are shifted by the size
    difference, and all other bytes are carried over unchanged.
    """
    # dataout = deletesectionrange(datain,secno, secno)
    # return insertsection(dataout, secno, secdata)
    datalst = []
    nsec = getint(datain,number_of_pdb_records,b'H')
    zerosecstart,zerosecend = getsecaddr(datain,0)
    secstart,secend = getsecaddr(datain,secno)
    dif = len(secdata) - (secend - secstart)  # growth (+) or shrink (-) in bytes
    # Rebuild the PDB header: copy up to the unique-id seed, bump the seed,
    # copy the middle, then re-emit the (unchanged) record count.
    datalst.append(datain[:unique_id_seed])
    datalst.append(struct.pack(b'>L',2*nsec+1))
    datalst.append(datain[unique_id_seed+4:number_of_pdb_records])
    datalst.append(struct.pack(b'>H',nsec))
    newstart = zerosecstart
    # Record-offset table: entries before secno are unchanged ...
    for i in range(0,secno):
        ofs, flgval = struct.unpack_from(b'>2L',datain,first_pdb_record+i*8)
        datalst.append(struct.pack(b'>L',ofs) + struct.pack(b'>L', flgval))
    # ... the replaced entry keeps its offset and gets unique id 2*secno ...
    datalst.append(struct.pack(b'>L', secstart) + struct.pack(b'>L', (2*secno)))
    # ... and entries after secno are shifted by the size difference.
    for i in range(secno+1,nsec):
        ofs, flgval = struct.unpack_from(b'>2L',datain,first_pdb_record+i*8)
        ofs = ofs + dif
        datalst.append(struct.pack(b'>L',ofs) + struct.pack(b'>L',flgval))
    # Preserve any padding between the offset table and the first record.
    lpad = newstart - (first_pdb_record + 8*nsec)
    if lpad > 0:
        datalst.append(b'\0' * lpad)
    datalst.append(datain[zerosecstart:secstart])
    datalst.append(secdata)
    datalst.append(datain[secend:])
    dataout = b''.join(datalst)
    return dataout
def nullsection(datain,secno): # make it zero-length without deleting it
    """Return a copy of *datain* with section *secno* emptied (zero bytes)
    while keeping its record-table entry, so section numbering — and thus
    references to other sections — stays valid."""
    datalst = []
    nsec = getint(datain,number_of_pdb_records,b'H')
    secstart, secend = getsecaddr(datain,secno)
    zerosecstart, zerosecend = getsecaddr(datain, 0)
    dif = secend-secstart  # number of payload bytes being dropped
    datalst.append(datain[:first_pdb_record])
    # Entries up to and including secno keep their offsets ...
    for i in range(0,secno+1):
        ofs, flgval = struct.unpack_from(b'>2L',datain,first_pdb_record+i*8)
        datalst.append(struct.pack(b'>L',ofs) + struct.pack(b'>L', flgval))
    # ... later entries shift back by the removed length.
    for i in range(secno+1, nsec):
        ofs, flgval = struct.unpack_from(b'>2L',datain,first_pdb_record+i*8)
        ofs = ofs - dif
        datalst.append(struct.pack(b'>L',ofs) + struct.pack(b'>L',flgval))
    # Preserve padding between the offset table and the first record.
    lpad = zerosecstart - (first_pdb_record + 8*nsec)
    if lpad > 0:
        datalst.append(b'\0' * lpad)
    datalst.append(datain[zerosecstart: secstart])
    datalst.append(datain[secend:])
    dataout = b''.join(datalst)
    return dataout
def deletesectionrange(datain,firstsec,lastsec): # delete a range of sections
    """Return a copy of *datain* with sections firstsec..lastsec (inclusive)
    removed.  The unique-id seed, record count, record offsets and the
    per-record unique ids of everything after the range are rewritten."""
    datalst = []
    firstsecstart,firstsecend = getsecaddr(datain,firstsec)
    lastsecstart,lastsecend = getsecaddr(datain,lastsec)
    zerosecstart, zerosecend = getsecaddr(datain, 0)
    # Total bytes removed: the payload span plus the 8-byte table entries.
    dif = lastsecend - firstsecstart + 8*(lastsec-firstsec+1)
    nsec = getint(datain,number_of_pdb_records,b'H')
    datalst.append(datain[:unique_id_seed])
    datalst.append(struct.pack(b'>L',2*(nsec-(lastsec-firstsec+1))+1))
    datalst.append(datain[unique_id_seed+4:number_of_pdb_records])
    datalst.append(struct.pack(b'>H',nsec-(lastsec-firstsec+1)))
    newstart = zerosecstart - 8*(lastsec-firstsec+1)
    # Entries before the range: the table shrank, so their data moved up.
    for i in range(0,firstsec):
        ofs, flgval = struct.unpack_from(b'>2L',datain,first_pdb_record+i*8)
        ofs = ofs-8*(lastsec-firstsec+1)
        datalst.append(struct.pack(b'>L',ofs) + struct.pack(b'>L', flgval))
    # Entries after the range: shift by the full removed span and renumber
    # the unique id (convention: 2 * section index).
    for i in range(lastsec+1,nsec):
        ofs, flgval = struct.unpack_from(b'>2L',datain,first_pdb_record+i*8)
        ofs = ofs - dif
        flgval = 2*(i-(lastsec-firstsec+1))
        datalst.append(struct.pack(b'>L',ofs) + struct.pack(b'>L',flgval))
    lpad = newstart - (first_pdb_record + 8*(nsec - (lastsec - firstsec + 1)))
    if lpad > 0:
        datalst.append(b'\0' * lpad)
    datalst.append(datain[zerosecstart:firstsecstart])
    datalst.append(datain[lastsecend:])
    dataout = b''.join(datalst)
    return dataout
def insertsection(datain,secno,secdata): # insert a new section
    """Return a copy of *datain* with *secdata* inserted as new section
    *secno*; the existing sections from *secno* onwards move one slot down
    and their offsets/unique ids are rewritten accordingly."""
    datalst = []
    nsec = getint(datain,number_of_pdb_records,b'H')
    # print("inserting secno" , secno, "into" ,nsec, "sections")
    secstart,secend = getsecaddr(datain,secno)
    zerosecstart,zerosecend = getsecaddr(datain,0)
    dif = len(secdata)  # payload bytes being added
    datalst.append(datain[:unique_id_seed])
    datalst.append(struct.pack(b'>L',2*(nsec+1)+1))
    datalst.append(datain[unique_id_seed+4:number_of_pdb_records])
    datalst.append(struct.pack(b'>H',nsec+1))
    newstart = zerosecstart + 8
    # Entries before the insertion point: data shifts down only by the new
    # 8-byte table entry.
    for i in range(0,secno):
        ofs, flgval = struct.unpack_from(b'>2L',datain,first_pdb_record+i*8)
        ofs += 8
        datalst.append(struct.pack(b'>L',ofs) + struct.pack(b'>L', flgval))
    # The new entry itself (unique id convention: 2 * section index).
    datalst.append(struct.pack(b'>L', secstart + 8) + struct.pack(b'>L', (2*secno)))
    # Entries after it: shifted by the table entry plus the new payload,
    # and renumbered.
    for i in range(secno,nsec):
        ofs, flgval = struct.unpack_from(b'>2L',datain,first_pdb_record+i*8)
        ofs = ofs + dif + 8
        flgval = 2*(i+1)
        datalst.append(struct.pack(b'>L',ofs) + struct.pack(b'>L',flgval))
    lpad = newstart - (first_pdb_record + 8*(nsec + 1))
    if lpad > 0:
        datalst.append(b'\0' * lpad)
    datalst.append(datain[zerosecstart:secstart])
    datalst.append(secdata)
    datalst.append(datain[secstart:])
    dataout = b''.join(datalst)
    return dataout
def insertsectionrange(sectionsource,firstsec,lastsec,sectiontarget,targetsec): # insert a range of sections
    """Return *sectiontarget* with sections firstsec..lastsec (inclusive) of
    *sectionsource* inserted starting at index *targetsec*, rebuilding the
    PDB header, offset table and unique ids in a single pass (instead of
    repeatedly calling insertsection, as the commented-out version did)."""
    # dataout = sectiontarget
    # for idx in range(lastsec,firstsec-1,-1):
    #     dataout = insertsection(dataout,targetsec,readsection(sectionsource,idx))
    # return dataout
    datalst = []
    nsec = getint(sectiontarget,number_of_pdb_records,b'H')
    zerosecstart, zerosecend = getsecaddr(sectiontarget,0)
    insstart, nul = getsecaddr(sectiontarget,targetsec)
    nins = lastsec - firstsec + 1
    srcstart, nul = getsecaddr(sectionsource,firstsec)
    nul, srcend = getsecaddr(sectionsource,lastsec)
    newstart = zerosecstart + 8*nins
    datalst.append(sectiontarget[:unique_id_seed])
    datalst.append(struct.pack(b'>L',2*(nsec+nins)+1))
    datalst.append(sectiontarget[unique_id_seed+4:number_of_pdb_records])
    datalst.append(struct.pack(b'>H',nsec+nins))
    # Entries before the insertion point shift down by the enlarged table.
    for i in range(0,targetsec):
        ofs, flgval = struct.unpack_from(b'>2L',sectiontarget,first_pdb_record+i*8)
        ofsnew = ofs + 8*nins
        flgvalnew = flgval
        datalst.append(struct.pack(b'>L',ofsnew) + struct.pack(b'>L', flgvalnew))
        # print(ofsnew, flgvalnew, ofs, flgval)
    # New entries: keep the relative spacing of the source sections.
    srcstart0, nul = getsecaddr(sectionsource,firstsec)
    for i in range(nins):
        isrcstart, nul = getsecaddr(sectionsource,firstsec+i)
        ofsnew = insstart + (isrcstart-srcstart0) + 8*nins
        flgvalnew = 2*(targetsec+i)
        datalst.append(struct.pack(b'>L',ofsnew) + struct.pack(b'>L', flgvalnew))
        # print(ofsnew, flgvalnew)
    dif = srcend - srcstart
    # Remaining target entries shift by table growth plus inserted payload.
    for i in range(targetsec,nsec):
        ofs, flgval = struct.unpack_from(b'>2L',sectiontarget,first_pdb_record+i*8)
        ofsnew = ofs + dif + 8*nins
        flgvalnew = 2*(i+nins)
        datalst.append(struct.pack(b'>L',ofsnew) + struct.pack(b'>L',flgvalnew))
        # print(ofsnew, flgvalnew, ofs, flgval)
    lpad = newstart - (first_pdb_record + 8*(nsec + nins))
    if lpad > 0:
        datalst.append(b'\0' * lpad)
    datalst.append(sectiontarget[zerosecstart:insstart])
    datalst.append(sectionsource[srcstart:srcend])
    datalst.append(sectiontarget[insstart:])
    dataout = b''.join(datalst)
    return dataout
def get_exth_params(rec0):
    """Return (base offset, byte length, record count) of the EXTH block
    that follows the MOBI header inside record 0."""
    base = mobi_header_base + getint(rec0, mobi_header_length)
    return base, getint(rec0, base + 4), getint(rec0, base + 8)
def add_exth(rec0, exth_num, exth_bytes):
    """Return *rec0* with a new EXTH record (id *exth_num*, payload
    *exth_bytes*) prepended to the EXTH block; the block length, record
    count and the title offset are all adjusted."""
    ebase, elen, enum = get_exth_params(rec0)
    recsize = 8 + len(exth_bytes)
    pieces = [
        rec0[0:ebase + 4],
        struct.pack(b'>L', elen + recsize),
        struct.pack(b'>L', enum + 1),
        struct.pack(b'>L', exth_num),
        struct.pack(b'>L', recsize),
        exth_bytes,
        rec0[ebase + 12:],
    ]
    newrec0 = b''.join(pieces)
    # The title text sits after the EXTH block, so shift its offset too.
    return writeint(newrec0, title_offset, getint(newrec0, title_offset) + recsize)
def read_exth(rec0, exth_num):
    """Return the payloads of every EXTH record in *rec0* whose id equals
    *exth_num* (the id may legitimately occur more than once)."""
    values = []
    ebase, elen, enum = get_exth_params(rec0)
    pos = ebase + 12
    for _ in range(enum):
        rec_id = getint(rec0, pos)
        rec_len = getint(rec0, pos + 4)
        if rec_id == exth_num:
            values.append(rec0[pos + 8:pos + rec_len])
        pos += rec_len
    return values
def write_exth(rec0,exth_num,exth_bytes):
    """Return *rec0* with the payload of the first EXTH record whose id is
    *exth_num* replaced by *exth_bytes*.  The EXTH block length and the
    title offset are adjusted when the payload size changes; *rec0* is
    returned unmodified when no matching record exists."""
    ebase,elen,enum = get_exth_params(rec0)
    ebase_idx = ebase+12
    enum_idx = enum
    while enum_idx>0:
        exth_id = getint(rec0,ebase_idx)
        if exth_id == exth_num:
            # Size delta between the new record and the one being replaced.
            dif = len(exth_bytes)+8-getint(rec0,ebase_idx+4)
            newrec0 = rec0
            if dif != 0:
                # The title text sits after the EXTH block; shift its offset.
                newrec0 = writeint(newrec0,title_offset,getint(newrec0,title_offset)+dif)
            return newrec0[:ebase+4]+struct.pack(b'>L',elen+len(exth_bytes)+8-getint(rec0,ebase_idx+4))+\
                   struct.pack(b'>L',enum)+rec0[ebase+12:ebase_idx+4]+\
                   struct.pack(b'>L',len(exth_bytes)+8)+exth_bytes+\
                   rec0[ebase_idx+getint(rec0,ebase_idx+4):]
        enum_idx = enum_idx-1
        ebase_idx = ebase_idx+getint(rec0,ebase_idx+4)
    return rec0
def del_exth(rec0,exth_num):
    """Return *rec0* with the first EXTH record whose id is *exth_num*
    removed (EXTH length, record count and title offset all adjusted);
    *rec0* is returned unchanged when the id is absent."""
    ebase,elen,enum = get_exth_params(rec0)
    ebase_idx = ebase+12
    enum_idx = 0
    while enum_idx < enum:
        exth_id = getint(rec0,ebase_idx)
        exth_size = getint(rec0,ebase_idx+4)
        if exth_id == exth_num:
            newrec0 = rec0
            # Title text follows the EXTH block; pull its offset back.
            newrec0 = writeint(newrec0,title_offset,getint(newrec0,title_offset)-exth_size)
            # Cut the record out, then rewrite block length and count.
            newrec0 = newrec0[:ebase_idx]+newrec0[ebase_idx+exth_size:]
            newrec0 = newrec0[0:ebase+4]+struct.pack(b'>L',elen-exth_size)+struct.pack(b'>L',enum-1)+newrec0[ebase+12:]
            return newrec0
        enum_idx += 1
        ebase_idx = ebase_idx+exth_size
    return rec0
class mobi_split:
    """Split a combined KF8/MOBI ("joint") file into standalone MOBI7 and
    MOBI8 byte images.

    After construction, getResult7()/getResult8() return the two PalmDB
    images; ``self.combo`` is False when the input was not a combined file
    (wrong MOBI version or no/invalid EXTH 121 KF8-boundary record), in
    which case no results are produced.
    """
    def __init__(self, infile):
        datain = b''
        with open(pathof(infile), 'rb') as f:
            datain = f.read()
        datain_rec0 = readsection(datain,0)
        ver = getint(datain_rec0,mobi_version)
        # A pure KF8 (version 8) file is not a combination file.
        self.combo = (ver!=8)
        if not self.combo:
            return
        # EXTH 121 holds the section index of the KF8 boundary record.
        exth121 = read_exth(datain_rec0,121)
        if len(exth121) == 0:
            self.combo = False
            return
        else:
            # only pay attention to first exth121
            # (there should only be one)
            datain_kf8, = struct.unpack_from(b'>L',exth121[0],0)
            if datain_kf8 == 0xffffffff:
                self.combo = False
                return
        datain_kfrec0 =readsection(datain,datain_kf8)
        # create the standalone mobi7
        num_sec = getint(datain,number_of_pdb_records,b'H')
        # remove BOUNDARY up to but not including ELF record
        self.result_file7 = deletesectionrange(datain,datain_kf8-1,num_sec-2)
        # check if there are SRCS records and delete them
        srcs = getint(datain_rec0,srcs_index)
        num_srcs = getint(datain_rec0,srcs_count)
        if srcs != 0xffffffff and num_srcs > 0:
            self.result_file7 = deletesectionrange(self.result_file7,srcs,srcs+num_srcs-1)
            datain_rec0 = writeint(datain_rec0,srcs_index,0xffffffff)
            datain_rec0 = writeint(datain_rec0,srcs_count,0)
        # reset the EXTH 121 KF8 Boundary meta data to 0xffffffff
        datain_rec0 = write_exth(datain_rec0,121, struct.pack(b'>L', 0xffffffff))
        # datain_rec0 = del_exth(datain_rec0,121)
        # datain_rec0 = del_exth(datain_rec0,534)
        # don't remove the EXTH 125 KF8 Count of Resources, seems to be present in mobi6 files as well
        # set the EXTH 129 KF8 Masthead / Cover Image string to the null string
        datain_rec0 = write_exth(datain_rec0,129, b'')
        # don't remove the EXTH 131 KF8 Unidentified Count, seems to be present in mobi6 files as well
        # need to reset flags stored in 0x80-0x83
        # old mobi with exth: 0x50, mobi7 part with exth: 0x1850, mobi8 part with exth: 0x1050
        # Bit Flags
        # 0x1000 = Bit 12 indicates if embedded fonts are used or not
        # 0x0800 = means this Header points to *shared* images/resource/fonts ??
        # 0x0080 = unknown new flag, why is this now being set by Kindlegen 2.8?
        # 0x0040 = exth exists
        # 0x0010 = Not sure but this is always set so far
        fval, = struct.unpack_from(b'>L',datain_rec0, 0x80)
        # need to remove flag 0x0800 for KindlePreviewer 2.8 and unset Bit 12 for embedded fonts
        fval = fval & 0x07FF
        datain_rec0 = datain_rec0[:0x80] + struct.pack(b'>L',fval) + datain_rec0[0x84:]
        self.result_file7 = writesection(self.result_file7,0,datain_rec0)
        # no need to replace kf8 style fcis with mobi 7 one
        # fcis_secnum, = struct.unpack_from(b'>L',datain_rec0, 0xc8)
        # if fcis_secnum != 0xffffffff:
        #     fcis_info = readsection(datain, fcis_secnum)
        #     text_len, = struct.unpack_from(b'>L', fcis_info, 0x14)
        #     new_fcis = 'FCIS\x00\x00\x00\x14\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00'
        #     new_fcis += struct.pack(b'>L',text_len)
        #     new_fcis += '\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x08\x00\x01\x00\x01\x00\x00\x00\x00'
        #     self.result_file7 = writesection(self.result_file7, fcis_secnum, new_fcis)
        firstimage = getint(datain_rec0,first_resc_record)
        lastimage = getint(datain_rec0,last_content_index,b'H')
        # print("Old First Image, last Image", firstimage,lastimage)
        if lastimage == 0xffff:
            # find the lowest of the next sections and copy up to that.
            ofs_list = [(fcis_index,b'L'),(flis_index,b'L'),(datp_index,b'L'),(hufftbloff, b'L')]
            for ofs,sz in ofs_list:
                n = getint(datain_rec0,ofs,sz)
                # print("n",n)
                if n > 0 and n < lastimage:
                    lastimage = n-1
        print("First Image, last Image", firstimage,lastimage)
        # Try to null out FONT and RES, but leave the (empty) PDB record so image refs remain valid
        # NOTE(review): range(firstimage, lastimage) never visits the
        # lastimage section itself — confirm whether lastimage is meant to
        # be inclusive here.
        for i in range(firstimage,lastimage):
            imgsec = readsection(self.result_file7,i)
            if imgsec[0:4] in [b'RESC',b'FONT']:
                self.result_file7 = nullsection(self.result_file7,i)
        # mobi7 finished
        # create standalone mobi8
        self.result_file8 = deletesectionrange(datain,0,datain_kf8-1)
        target = getint(datain_kfrec0,first_resc_record)
        self.result_file8 = insertsectionrange(datain,firstimage,lastimage,self.result_file8,target)
        datain_kfrec0 =readsection(self.result_file8,0)
        # Only keep the correct EXTH 116 StartOffset, KG 2.5 carries over the one from the mobi7 part, which then points at garbage in the mobi8 part, and confuses FW 3.4
        kf8starts = read_exth(datain_kfrec0,116)
        # If we have multiple StartOffset, keep only the last one
        kf8start_count = len(kf8starts)
        while kf8start_count > 1:
            kf8start_count -= 1
            datain_kfrec0 = del_exth(datain_kfrec0,116)
        # update the EXTH 125 KF8 Count of Images/Fonts/Resources
        datain_kfrec0 = write_exth(datain_kfrec0,125,struct.pack(b'>L',lastimage-firstimage+1))
        # need to reset flags stored in 0x80-0x83
        # old mobi with exth: 0x50, mobi7 part with exth: 0x1850, mobi8 part with exth: 0x1050
        # standalone mobi8 with exth: 0x0050
        # Bit Flags
        # 0x1000 = Bit 12 indicates if embedded fonts are used or not
        # 0x0800 = means this Header points to *shared* images/resource/fonts ??
        # 0x0080 = unknown new flag, why is this now being set by Kindlegen 2.8?
        # 0x0040 = exth exists
        # 0x0010 = Not sure but this is always set so far
        fval, = struct.unpack_from('>L',datain_kfrec0, 0x80)
        fval = fval & 0x1FFF
        fval |= 0x0800
        datain_kfrec0 = datain_kfrec0[:0x80] + struct.pack(b'>L',fval) + datain_kfrec0[0x84:]
        # properly update other index pointers that have been shifted by the insertion of images
        ofs_list = [(kf8_fdst_index,b'L'),(fcis_index,b'L'),(flis_index,b'L'),(datp_index,b'L'),(hufftbloff, b'L')]
        for ofs,sz in ofs_list:
            n = getint(datain_kfrec0,ofs,sz)
            if n != 0xffffffff:
                datain_kfrec0 = writeint(datain_kfrec0,ofs,n+lastimage-firstimage+1,sz)
        self.result_file8 = writesection(self.result_file8,0,datain_kfrec0)
        # no need to replace kf8 style fcis with mobi 7 one
        # fcis_secnum, = struct.unpack_from(b'>L',datain_kfrec0, 0xc8)
        # if fcis_secnum != 0xffffffff:
        #     fcis_info = readsection(self.result_file8, fcis_secnum)
        #     text_len, = struct.unpack_from(b'>L', fcis_info, 0x14)
        #     new_fcis = 'FCIS\x00\x00\x00\x14\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00'
        #     new_fcis += struct.pack(b'>L',text_len)
        #     new_fcis += '\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x08\x00\x01\x00\x01\x00\x00\x00\x00'
        #     self.result_file8 = writesection(self.result_file8, fcis_secnum, new_fcis)
        # mobi8 finished
    def getResult8(self):
        """Return the standalone MOBI8 image (only valid when self.combo)."""
        return self.result_file8
    def getResult7(self):
        """Return the standalone MOBI7 image (only valid when self.combo)."""
        return self.result_file7
| 43.895216 | 170 | 0.656253 |
efeb9e4f53a5d0eeba7d5fa4c91f57add1392269 | 9,427 | py | Python | connector.py | haje01/pypbac | 6c1b78293767dd08039f98c7af1d7813c285e03b | [
"MIT"
] | null | null | null | connector.py | haje01/pypbac | 6c1b78293767dd08039f98c7af1d7813c285e03b | [
"MIT"
] | 8 | 2019-10-18T01:48:37.000Z | 2019-12-10T07:46:38.000Z | connector.py | haje01/pypbac | 6c1b78293767dd08039f98c7af1d7813c285e03b | [
"MIT"
] | null | null | null | """AWS에서 데이터 가져오기."""
# Answer Power BI's Python-version probe quickly and without warnings.
import warnings
warnings.filterwarnings('ignore') # disable warning for dist binary.
import sys
import platform
# Power BI launches the interpreter with -V to discover its version;
# reply and exit immediately so the probe stays fast.
if len(sys.argv) > 1 and sys.argv[1] == '-V':
    print("Python {}".format(platform.python_version()))
    sys.exit()
# Actual start of the connector ---------------------------------
import os
import sys
import re
import codecs
import time
from configparser import ConfigParser
from datetime import datetime, timedelta
from shutil import copyfile
from pathlib import Path
import tkinter # pyinstaller 빌드에서 tcl/tk 포함되지 않는 것 방지
import win32api
import pandas as pd
from dateutil.parser import parse
from pyathena import connect
from util import *
# Regex extracting the Power BI data-source temp directory from the wrapper.
PBTDIR_PTRN = r"os.chdir\((?:u')?(.*)(?:')\)"
# Regexes bracketing the user's data script inside PythonScriptWrapper.py.
PRO_BEGIN_PTRN = r"# Original Script[^#]+"
PRO_END_PTRN = r"# Epilog -[^#]+"
cursor = None
# True when Power BI invoked us (argv[1] is its wrapper script).
POWER_BI = len(sys.argv) > 1 and sys.argv[1].lower() == 'pythonscriptwrapper.py'
start_mode = 'Test' if not POWER_BI else 'Power BI'
critical("======== Connector Start ({} Mode) ========".format(start_mode))
info("Start up argv: {}".format(sys.argv))
info("Project Dir: {}".format(proj_dir))
def copy_cache(pbt_dir, cache_dir):
    """Copy every cached CSV from *cache_dir* into the Power BI data-source
    temp directory *pbt_dir*."""
    info("copy_cache from {}".format(cache_dir))
    csv_names = [name for name in os.listdir(cache_dir) if name.endswith('.csv')]
    for name in csv_names:
        spath = os.path.join(cache_dir, name)
        dpath = os.path.join(pbt_dir, name)
        info("  copy from {} to {}".format(spath, dpath))
        copyfile(spath, dpath)
    info("total {} files copied.".format(len(csv_names)))
def get_dscript_cfg(wrapper):
    """Extract the profile-related settings embedded in PythonScriptWrapper.py.

    Args:
        wrapper (str): Full text of PythonScriptWrapper.py.

    Returns:
        tuple: (ConfigParser parsed from the data script, hash of the data
            script text), or None when the embedded script is not valid
            config syntax.
    """
    # Use the module-level boundary patterns (PRO_BEGIN_PTRN / PRO_END_PTRN)
    # instead of repeating the literals, so the data-script delimiters are
    # defined in exactly one place.
    bg_match = re.search(PRO_BEGIN_PTRN, wrapper)
    begin = bg_match.span()[1] + 1
    ed_match = re.search(PRO_END_PTRN, wrapper)
    end = ed_match.span()[0] - 1
    # Prefix a [default] section header so ConfigParser accepts the snippet.
    dscript = "[default]\n{}".format(wrapper[begin:end].strip())
    dscript_hash = get_text_hash(dscript)
    info("=== Data script in PythonScriptWrapper.py ===")
    info(dscript)
    info("======")
    cfg = ConfigParser()
    try:
        cfg.read_string(dscript)
    except Exception as e:
        warning("Invalid config format: {}".format(str(e)))
        return
    return cfg, dscript_hash
def check_import_data(cfg, cfg_hash):
    """Fetch data for Power BI.

    - When a valid local cache exists, reuse it.
    - Otherwise import fresh data and store it in the local cache.
    """
    global pbt_dir
    # Obtain the data-source temp directory and data-script info from
    # PythonScriptWrapper.py.
    pro_name = 'default'
    dscript_hash = ''
    dscfg = None
    if POWER_BI:
        arg = sys.argv[1]
        with codecs.open(arg, 'r', encoding='utf-8') as fp:
            wrapper = fp.read()
        try:
            # Power BI data-source temp path
            pbt_dir = re.search(PBTDIR_PTRN, wrapper).groups()[0]
        except Exception as e:
            error("Can not find pbt_dir in PythonScriptWrapper.py:")
            info(wrapper)
            sys.exit(-1)
        try:
            # Profile information
            res = get_dscript_cfg(wrapper)
            if res is not None:
                dscfg, dscript_hash = res
                ddscfg = dscfg['default']
                if 'profile' in ddscfg:
                    pro_name = ddscfg['profile']
        except Exception as e:
            error("Data script error: {}".format(str(e)))
        if pro_name == 'default':
            info("Using default profile.")
    else:
        # Probably launched from VS Code (no wrapper argument)?
        critical("======= No argument. Exit now =======")
        sys.exit()
    # pbt_dir = os.path.join(mod_dir, 'temp')
    # if not os.path.isdir(pbt_dir):
    #     os.mkdir(pbt_dir)
    # Look up the matching profile in the configuration file.
    pkey = "profile.{}".format(pro_name)
    if pkey in cfg:
        pcfg = cfg[pkey]
    else:
        error("Can not find '{}' profile.".format(pro_name))
        win32api.MessageBox(0, "설정에서 '{}' 프로파일을 찾을 수 없습니다.".format(pro_name))
        sys.exit(-1)
    # Resolve the paths we need.
    cache_dir = check_cache_dir(pro_name)
    meta_path = get_meta_path(pro_name)
    info("Power BI Temp Dir: {}".format(pbt_dir))
    info("pro_name: {}".format(pro_name))
    info("cache_dir: {}".format(cache_dir))
    info("meta_path: {}".format(meta_path))
    # Use the cache when possible.
    if os.path.isdir(cache_dir) and os.path.isfile(meta_path):
        # Read the cache metadata.
        meta = ConfigParser()
        meta.read(meta_path)
        metad = meta['default']
        # Check the cache age against its configured lifetime.
        if 'created' in metad:
            created = parse(metad['created'])
            dif = datetime.now() - created
            days, hours, mins = dif.days, dif.seconds // 3600, dif.seconds // 60
            cache_valid_hour = int(pcfg['cache_valid_hour'])
            valid_life = dif.total_seconds() < cache_valid_hour * 3600
            info("Cache created {} days {} hours {} minutess ago: {}".format(days, hours, mins, valid_life))
        else:
            valid_life = False
        # Check whether the configuration changed.
        meta_cfg_hash = metad['config_hash'] if 'config_hash' in metad else None
        valid_cfg = meta_cfg_hash == cfg_hash
        if not valid_cfg:
            info("Config hash mismatch: {}(old) != {}(new)".format(meta_cfg_hash, cfg_hash))
        else:
            info("Config hash match: {}".format(cfg_hash))
        # Check whether the data script changed.
        meta_dscript_hash = metad['dscript_hash'] if 'dscript_hash' in metad else None
        valid_dscript = meta_dscript_hash == dscript_hash
        if not valid_dscript:
            info("Data script hash mismatch: {}(old) != {}(new)".format(meta_dscript_hash, dscript_hash))
        else:
            info("Data script hash match: {}".format(dscript_hash))
        # When the cache is usable ...
        if valid_life and valid_cfg and valid_dscript:
            # ... copy the valid cache into place ...
            warning("Use cache data.")
            try:
                copy_cache(pbt_dir, cache_dir)
            except Exception as e:
                error("Copy error: {}".format(str(e)))
            # ... and exit.
            # time.sleep(5)  # helped with a preview-not-showing issue?
            sys.exit()
        else:
            # Drop the stale cache.
            del_cache(pro_name)
    else:
        info("No valid cache. Import now.")
    # Otherwise import fresh data.
    _import_profile_data(cfg, pcfg, dscfg, cache_dir, meta_path, cfg_hash, dscript_hash)
def save_metadata(meta_path, cfg_hash, dscript_hash):
    """Persist cache metadata (creation time plus config and data-script
    hashes) to the INI file at *meta_path*."""
    warning("save_metadata")
    meta = ConfigParser()
    meta['default'] = {
        'created': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        'config_hash': cfg_hash,
        'dscript_hash': dscript_hash,
    }
    with open(meta_path, 'w') as fp:
        meta.write(fp)
def _import_profile_data(cfg, pcfg, dscfg, cache_dir, meta_path, cfg_hash, dscript_hash):
    """Import profile data according to the configuration.

    - Called from the Power BI flow.
    - Queries each configured table via Athena, writes it to the local
      CSV cache, and copies it into the Power BI temp directory.

    Args:
        cfg (ConfigParser): Global configuration.
        pcfg: Profile configuration section.
        dscfg: Data-script configuration.
        cache_dir: Cache directory for this profile.
        meta_path: Metadata file path for this profile.
    """
    global cursor
    warning("Import data.")
    # Connect.
    info("Connect to import.")
    conn = connect(aws_access_key_id=cfg['aws']['access_key'],
                   aws_secret_access_key=cfg['aws']['secret_key'],
                   s3_staging_dir=cfg['aws']['s3_stage_dir'],
                   region_name='ap-northeast-2')
    cursor = conn.cursor()
    sect = pcfg
    # 'rel' = relative time range (before/offset); otherwise absolute dates.
    ttype = sect['ttype']
    if ttype == 'rel':
        before = int(sect['before'])
        offset = int(sect['offset'])
    else:
        start = parse(sect['start']).date()
        end = parse(sect['end']).date()
    # For every table of every target DB ('db_<name>' keys) ...
    for key in pcfg.keys():
        if not key.startswith('db_'):
            continue
        db = key[3:]
        # NOTE(review): eval() executes arbitrary code from the config
        # value — acceptable only for trusted local configs.
        tables = eval(pcfg[key])
        for tbl in tables:
            cols = None
            if type(tbl) is not str:
                tbl, cols = tbl
            # Build the query.
            no_part = no_part_table(cursor, db, tbl)
            if ttype == 'rel':
                cnt = get_query_rows_rel(cursor, db, tbl, before, offset, dscfg, no_part)
                query = make_query_rel(db, tbl, before, offset, dscfg, "default", no_part, cols)
            else:
                cnt = get_query_rows_abs(cursor, db, tbl, start, end, dscfg, no_part)
                query = make_query_abs(db, tbl, start, end, dscfg, "default", no_part, cols)
            # Fetch.
            warning("Import '{}' ({:,} rows) from '{}'".format(tbl, cnt, db))
            info("  query: {}".format(query))
            try:
                df = pd.read_sql(query, conn)
            except Exception:
                win32api.MessageBox(0, "다음 쿼리에 에러가 있습니다:\n" + query, "쿼리 에러")
                sys.exit(-1)
            csv_file = "{}.{}.csv".format(db, tbl)
            spath = os.path.join(cache_dir, csv_file)
            dpath = os.path.join(pbt_dir, csv_file)
            # Save to the cache, then copy into the Power BI temp dir.
            info("Write CSV to cache: {}".format(spath))
            df.to_csv(spath, index=False, encoding='utf-8-sig')
            info("Copy from {} to {}\n".format(spath, dpath))
            copyfile(spath, dpath)
    # Record the cache metadata.
    save_metadata(meta_path, cfg_hash, dscript_hash)
    critical("======= Import successful =======")
# Read the configuration.
cfg, cfg_hash = load_config()
# Import the data (or reuse the cache), then exit.
check_import_data(cfg, cfg_hash)
| 31.318937 | 108 | 0.57208 |
d4ca66dc01af1ded6159f94d64b2e38e6d1f2702 | 27,592 | py | Python | sqlite_web/sqlite_web.py | ITMO-lab/sqlite-web | 7a1dc0a292e19dc45a96914e7c0c5c3f7b6a01cd | [
"MIT"
] | null | null | null | sqlite_web/sqlite_web.py | ITMO-lab/sqlite-web | 7a1dc0a292e19dc45a96914e7c0c5c3f7b6a01cd | [
"MIT"
] | null | null | null | sqlite_web/sqlite_web.py | ITMO-lab/sqlite-web | 7a1dc0a292e19dc45a96914e7c0c5c3f7b6a01cd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import datetime
import math
import operator
import optparse
import os
import re
import sys
import threading
import time
import webbrowser
import hashlib
import random
import string
from collections import namedtuple, OrderedDict
from functools import wraps
from getpass import getpass
from io import TextIOWrapper
# Py2k compat: normalize type aliases, the bytes decode error handler and
# StringIO so the rest of the module can be version-agnostic.
if sys.version_info[0] == 2:
    PY2 = True
    binary_types = (buffer, bytes, bytearray)
    decode_handler = 'replace'
    numeric = (int, long, float)
    unicode_type = unicode
    from StringIO import StringIO
else:
    PY2 = False
    binary_types = (bytes, bytearray)
    decode_handler = 'backslashreplace'
    numeric = (int, float)
    unicode_type = str
    from io import StringIO
try:
from flask import (
Flask, abort, escape, flash, jsonify, make_response, Markup, redirect,
render_template, request, session, url_for)
except ImportError:
raise RuntimeError('Unable to import flask module. Install by running '
'pip install flask')
try:
from pygments import formatters, highlight, lexers
except ImportError:
import warnings
warnings.warn('pygments library not found.', ImportWarning)
syntax_highlight = lambda data: '<pre>%s</pre>' % data
else:
def syntax_highlight(data):
if not data:
return ''
lexer = lexers.get_lexer_by_name('sql')
formatter = formatters.HtmlFormatter(linenos=False)
return highlight(data, lexer, formatter)
try:
from peewee import __version__
peewee_version = tuple([int(p) for p in __version__.split('.')])
except ImportError:
raise RuntimeError('Unable to import peewee module. Install by running '
'pip install peewee')
else:
if peewee_version <= (3, 0, 0):
raise RuntimeError('Peewee >= 3.0.0 is required. Found version %s. '
'Please update by running pip install --update '
'peewee' % __version__)
from peewee import *
from peewee import IndexMetadata
from peewee import sqlite3
from playhouse.dataset import DataSet
from playhouse.migrate import migrate
CUR_DIR = os.path.realpath(os.path.dirname(__file__))
DEBUG = False
MAX_RESULT_SIZE = 1000
ROWS_PER_PAGE = 50
SECRET_KEY = 'sqlite-database-browser-0.1.0'
global SESSION_SECRET_SET
SESSION_SECRET_SET = set()
app = Flask(
__name__,
static_folder=os.path.join(CUR_DIR, 'static'),
template_folder=os.path.join(CUR_DIR, 'templates'))
app.config.from_object(__name__)
dataset = None
migrator = None
#
# Database metadata objects.
#
TriggerMetadata = namedtuple('TriggerMetadata', ('name', 'sql'))
ViewMetadata = namedtuple('ViewMetadata', ('name', 'sql'))
#
# Database helpers.
#
class SqliteDataSet(DataSet):
    """Wrapper over playhouse.dataset.DataSet adding SQLite-specific
    introspection helpers (file stats, triggers, views, virtual tables)
    used by the browser views."""
    @property
    def filename(self):
        """Absolute path of the database file, with any 'file:' URI scheme
        and query string (e.g. '?mode=ro') stripped."""
        db_file = dataset._database.database
        if db_file.startswith('file:'):
            db_file = db_file[5:]
        return os.path.realpath(db_file.rsplit('?', 1)[0])
    @property
    def is_readonly(self):
        # Read-only databases are opened via a 'file:...?mode=ro' URI.
        db_file = dataset._database.database
        return db_file.endswith('?mode=ro')
    @property
    def base_name(self):
        return os.path.basename(self.filename)
    @property
    def created(self):
        # Creation time of the database file on disk.
        stat = os.stat(self.filename)
        return datetime.datetime.fromtimestamp(stat.st_ctime)
    @property
    def modified(self):
        # Last-modified time of the database file on disk.
        stat = os.stat(self.filename)
        return datetime.datetime.fromtimestamp(stat.st_mtime)
    @property
    def size_on_disk(self):
        stat = os.stat(self.filename)
        return stat.st_size
    def get_indexes(self, table):
        return dataset._database.get_indexes(table)
    def get_all_indexes(self):
        """All indexes in the database, read straight from sqlite_master."""
        cursor = self.query(
            'SELECT name, sql FROM sqlite_master '
            'WHERE type = ? ORDER BY name',
            ('index',))
        return [IndexMetadata(row[0], row[1], None, None, None)
                for row in cursor.fetchall()]
    def get_columns(self, table):
        return dataset._database.get_columns(table)
    def get_foreign_keys(self, table):
        return dataset._database.get_foreign_keys(table)
    def get_triggers(self, table):
        """Triggers attached to the given table."""
        cursor = self.query(
            'SELECT name, sql FROM sqlite_master '
            'WHERE type = ? AND tbl_name = ?',
            ('trigger', table))
        return [TriggerMetadata(*row) for row in cursor.fetchall()]
    def get_all_triggers(self):
        cursor = self.query(
            'SELECT name, sql FROM sqlite_master '
            'WHERE type = ? ORDER BY name',
            ('trigger',))
        return [TriggerMetadata(*row) for row in cursor.fetchall()]
    def get_all_views(self):
        cursor = self.query(
            'SELECT name, sql FROM sqlite_master '
            'WHERE type = ? ORDER BY name',
            ('view',))
        return [ViewMetadata(*row) for row in cursor.fetchall()]
    def get_virtual_tables(self):
        """Names of virtual tables (FTS etc.), detected via their CREATE sql."""
        cursor = self.query(
            'SELECT name FROM sqlite_master '
            'WHERE type = ? AND sql LIKE ? '
            'ORDER BY name',
            ('table', 'CREATE VIRTUAL TABLE%'))
        return set([row[0] for row in cursor.fetchall()])
    def get_corollary_virtual_tables(self):
        """Shadow tables that SQLite creates alongside each virtual table
        (e.g. <name>_content, <name>_segdir for FTS)."""
        virtual_tables = self.get_virtual_tables()
        suffixes = ['content', 'docsize', 'segdir', 'segments', 'stat']
        return set(
            '%s_%s' % (virtual_table, suffix) for suffix in suffixes
            for virtual_table in virtual_tables)
#
# Flask views.
#
@app.route('/')
def index():
    """Landing page: database overview (templates also use the sqlite3
    module reference for version info)."""
    return render_template('index.html', sqlite=sqlite3)
@app.route('/login/', methods=['GET', 'POST'])
def login():
    """Authenticate the user and install a per-session secret token.

    On success an unguessable token is stored both in the signed Flask
    session cookie and in the server-side SESSION_SECRET_SET whitelist;
    check_password() (installed by install_auth_handler) compares the two
    on every request. Logging out removes the token from the whitelist.
    """
    global SESSION_SECRET_SET
    if request.method == 'POST':
        # hmac.compare_digest provides a constant-time comparison, closing
        # the timing side-channel of a plain '==' on the password. Encode
        # both sides so non-ASCII passwords work on Python 3.
        import hmac
        submitted = (request.form.get('password') or '').encode('utf-8')
        expected = app.config['PASSWORD'].encode('utf-8')
        if hmac.compare_digest(submitted, expected):
            # Draw the session token from the OS CSPRNG (SystemRandom)
            # rather than the default Mersenne-Twister generator, whose
            # output is predictable. The token is a pure capability and
            # deliberately carries no information about the password.
            rng = random.SystemRandom()
            letters = string.ascii_letters
            raw = ''.join(rng.choice(letters) for _ in range(256))
            session_secret = hashlib.sha512(raw.encode()).hexdigest()
            session['secret'] = session_secret
            SESSION_SECRET_SET.add(session_secret)
            return redirect(session.get('next_url') or url_for('index'))
        flash('The password you entered is incorrect.', 'danger')
    return render_template('login.html')
@app.route('/logout/', methods=['GET'])
def logout():
    """Invalidate the current session token and bounce to the login page."""
    global SESSION_SECRET_SET
    # discard() is a no-op when the token is absent (or None), so no
    # try/except KeyError is required.
    SESSION_SECRET_SET.discard(session.get('secret'))
    session.pop('secret', None)
    return redirect(url_for('login'))
def require_table(fn):
    """View decorator: 404 unless the route's <table> argument names an
    existing table in the open database."""
    @wraps(fn)
    def wrapper(table, *args, **kwargs):
        if table in dataset.tables:
            return fn(table, *args, **kwargs)
        abort(404)
    return wrapper
@app.route('/create-table/', methods=['POST'])
def table_create():
    """Create a new (empty) table and jump straight to its import page."""
    name = (request.form.get('table_name') or '').strip()
    if name:
        # Merely accessing a missing key on the DataSet creates the table.
        dataset[name]
        return redirect(url_for('table_import', table=name))
    flash('Table name is required.', 'danger')
    return redirect(request.form.get('redirect') or url_for('index'))
@app.route('/<table>/')
@require_table
def table_structure(table):
    """Schema page for one table: columns, indexes, foreign keys, triggers
    and the raw CREATE TABLE statement."""
    ds_table = dataset[table]
    model_class = ds_table.model_class
    # Raw DDL straight from sqlite_master.
    table_sql = dataset.query(
        'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
        [table, 'table']).fetchone()[0]
    return render_template(
        'table_structure.html',
        columns=dataset.get_columns(table),
        ds_table=ds_table,
        foreign_keys=dataset.get_foreign_keys(table),
        indexes=dataset.get_indexes(table),
        model_class=model_class,
        table=table,
        table_sql=table_sql,
        triggers=dataset.get_triggers(table))
def get_request_data():
    """Return the request's form data on POST, its query string otherwise."""
    return request.form if request.method == 'POST' else request.args
@app.route('/<table>/add-column/', methods=['GET', 'POST'])
@require_table
def add_column(table):
    """Form (GET) and handler (POST) for adding a column to *table*.

    The column type must be one of the keys in column_mapping; the new
    column is created nullable so existing rows remain valid.
    """
    # Maps the UI type names to the corresponding peewee field classes.
    column_mapping = OrderedDict((
        ('VARCHAR', CharField),
        ('TEXT', TextField),
        ('INTEGER', IntegerField),
        ('REAL', FloatField),
        ('BOOL', BooleanField),
        ('BLOB', BlobField),
        ('DATETIME', DateTimeField),
        ('DATE', DateField),
        ('TIME', TimeField),
        ('DECIMAL', DecimalField)))
    request_data = get_request_data()
    col_type = request_data.get('type')
    name = request_data.get('name', '')
    if request.method == 'POST':
        if name and col_type in column_mapping:
            migrate(
                migrator.add_column(
                    table,
                    name,
                    column_mapping[col_type](null=True)))
            flash('Column "%s" was added successfully!' % name, 'success')
            # Refresh the cached model so the new column is visible.
            dataset.update_cache(table)
            return redirect(url_for('table_structure', table=table))
        else:
            flash('Name and column type are required.', 'danger')
    return render_template(
        'add_column.html',
        col_type=col_type,
        column_mapping=column_mapping,
        name=name,
        table=table)
@app.route('/<table>/drop-column/', methods=['GET', 'POST'])
@require_table
def drop_column(table):
    """Form (GET) and handler (POST) for dropping a column from *table*.

    The requested name is validated against the table's actual columns
    before the migration runs.
    """
    request_data = get_request_data()
    name = request_data.get('name', '')
    columns = dataset.get_columns(table)
    column_names = [column.name for column in columns]
    if request.method == 'POST':
        if name in column_names:
            migrate(migrator.drop_column(table, name))
            flash('Column "%s" was dropped successfully!' % name, 'success')
            # Refresh the cached model so the removed column disappears.
            dataset.update_cache(table)
            return redirect(url_for('table_structure', table=table))
        else:
            flash('Name is required.', 'danger')
    return render_template(
        'drop_column.html',
        columns=columns,
        column_names=column_names,
        name=name,
        table=table)
@app.route('/<table>/rename-column/', methods=['GET', 'POST'])
@require_table
def rename_column(table):
    """Form (GET) and handler (POST) for renaming a column of *table*.

    The source name must exist and the target name must not collide with
    an existing column.
    """
    request_data = get_request_data()
    rename = request_data.get('rename', '')
    rename_to = request_data.get('rename_to', '')
    columns = dataset.get_columns(table)
    column_names = [column.name for column in columns]
    if request.method == 'POST':
        if (rename in column_names) and (rename_to not in column_names):
            migrate(migrator.rename_column(table, rename, rename_to))
            flash('Column "%s" was renamed successfully!' % rename, 'success')
            # Refresh the cached model so the new name is picked up.
            dataset.update_cache(table)
            return redirect(url_for('table_structure', table=table))
        else:
            flash('Column name is required and cannot conflict with an '
                  'existing column\'s name.', 'danger')
    return render_template(
        'rename_column.html',
        columns=columns,
        column_names=column_names,
        rename=rename,
        rename_to=rename_to,
        table=table)
@app.route('/<table>/add-index/', methods=['GET', 'POST'])
@require_table
def add_index(table):
    """Form (GET) and handler (POST) for creating a (possibly unique)
    index over one or more columns of *table*."""
    request_data = get_request_data()
    indexed_columns = request_data.getlist('indexed_columns')
    unique = bool(request_data.get('unique'))
    columns = dataset.get_columns(table)
    if request.method == 'POST':
        if indexed_columns:
            migrate(
                migrator.add_index(
                    table,
                    indexed_columns,
                    unique))
            flash('Index created successfully.', 'success')
            return redirect(url_for('table_structure', table=table))
        else:
            flash('One or more columns must be selected.', 'danger')
    return render_template(
        'add_index.html',
        columns=columns,
        indexed_columns=indexed_columns,
        table=table,
        unique=unique)
@app.route('/<table>/drop-index/', methods=['GET', 'POST'])
@require_table
def drop_index(table):
    """Form (GET) and handler (POST) for dropping an index of *table*.

    The requested name is validated against the table's existing indexes.
    """
    request_data = get_request_data()
    name = request_data.get('name', '')
    indexes = dataset.get_indexes(table)
    index_names = [index.name for index in indexes]
    if request.method == 'POST':
        if name in index_names:
            migrate(migrator.drop_index(table, name))
            flash('Index "%s" was dropped successfully!' % name, 'success')
            return redirect(url_for('table_structure', table=table))
        else:
            flash('Index name is required.', 'danger')
    return render_template(
        'drop_index.html',
        indexes=indexes,
        index_names=index_names,
        name=name,
        table=table)
@app.route('/<table>/drop-trigger/', methods=['GET', 'POST'])
@require_table
def drop_trigger(table):
    """Form (GET) and handler (POST) for dropping a trigger of *table*."""
    request_data = get_request_data()
    name = request_data.get('name', '')
    triggers = dataset.get_triggers(table)
    trigger_names = [trigger.name for trigger in triggers]
    if request.method == 'POST':
        if name in trigger_names:
            # The name is interpolated into the DDL, but only after being
            # validated against the trigger names read from sqlite_master.
            # NOTE(review): a trigger whose name itself contains a double
            # quote would still break this statement — confirm whether that
            # is possible in practice.
            dataset.query('DROP TRIGGER "%s";' % name)
            flash('Trigger "%s" was dropped successfully!' % name, 'success')
            return redirect(url_for('table_structure', table=table))
        else:
            flash('Trigger name is required.', 'danger')
    return render_template(
        'drop_trigger.html',
        triggers=triggers,
        trigger_names=trigger_names,
        name=name,
        table=table)
@app.route('/<table>/content/')
@require_table
def table_content(table):
    """Paginated row browser for *table* with optional single-column
    ordering (query args: ?page=N and ?ordering=[-]column)."""
    page_number = request.args.get('page') or ''
    # Non-numeric or missing page values fall back to page 1.
    page_number = int(page_number) if page_number.isdigit() else 1
    dataset.update_cache(table)
    ds_table = dataset[table]
    total_rows = ds_table.all().count()
    rows_per_page = app.config['ROWS_PER_PAGE']
    total_pages = int(math.ceil(total_rows / float(rows_per_page)))
    # Restrict bounds.
    page_number = min(page_number, total_pages)
    page_number = max(page_number, 1)
    previous_page = page_number - 1 if page_number > 1 else None
    next_page = page_number + 1 if page_number < total_pages else None
    query = ds_table.all().paginate(page_number, rows_per_page)
    ordering = request.args.get('ordering')
    if ordering:
        # A leading '-' selects descending order on the named column.
        field = ds_table.model_class._meta.columns[ordering.lstrip('-')]
        if ordering.startswith('-'):
            field = field.desc()
        query = query.order_by(field)
    field_names = ds_table.columns
    columns = [f.column_name for f in ds_table.model_class._meta.sorted_fields]
    table_sql = dataset.query(
        'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
        [table, 'table']).fetchone()[0]
    return render_template(
        'table_content.html',
        columns=columns,
        ds_table=ds_table,
        field_names=field_names,
        next_page=next_page,
        ordering=ordering,
        page=page_number,
        previous_page=previous_page,
        query=query,
        table=table,
        total_pages=total_pages,
        total_rows=total_rows)
@app.route('/<table>/query/', methods=['GET', 'POST'])
@require_table
def table_query(table):
    """Ad-hoc SQL console for *table*.

    POST runs the submitted SQL (or exports the result when one of the
    export buttons was pressed); GET pre-fills a SELECT * template. Result
    sets are truncated to MAX_RESULT_SIZE rows for display.
    """
    data = []
    data_description = error = row_count = sql = None
    if request.method == 'POST':
        sql = request.form['sql']
        # Export buttons short-circuit into a file download instead of
        # rendering the results inline.
        if 'export_json' in request.form:
            return export(table, sql, 'json')
        elif 'export_csv' in request.form:
            return export(table, sql, 'csv')
        try:
            cursor = dataset.query(sql)
        except Exception as exc:
            error = str(exc)
        else:
            data = cursor.fetchall()[:app.config['MAX_RESULT_SIZE']]
            data_description = cursor.description
            row_count = cursor.rowcount
    else:
        if request.args.get('sql'):
            sql = request.args.get('sql')
        else:
            sql = 'SELECT *\nFROM "%s"' % (table)
    table_sql = dataset.query(
        'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
        [table, 'table']).fetchone()[0]
    return render_template(
        'table_query.html',
        data=data,
        data_description=data_description,
        error=error,
        query_images=get_query_images(),
        row_count=row_count,
        sql=sql,
        table=table,
        table_sql=table_sql)
@app.route('/table-definition/', methods=['POST'])
def set_table_definition_preference():
    """Persist the 'show table definition SQL' toggle in the session."""
    key = 'show'
    raw = request.form.get(key)
    # Any truthy value other than the literal string 'false' enables it.
    show = bool(raw) and raw != 'false'
    if show:
        session[key] = True
    else:
        session.pop(key, None)
    return jsonify({key: show})
def export(table, sql, export_format):
    """Run *sql* against *table*'s model and return the result set as a
    downloadable JSON or CSV attachment (anything not 'json' is CSV)."""
    model_class = dataset[table].model_class
    query = model_class.raw(sql).dicts()
    buf = StringIO()
    if export_format == 'json':
        kwargs = {'indent': 2}
        filename = '%s-export.json' % table
        mimetype = 'text/javascript'
    else:
        kwargs = {}
        filename = '%s-export.csv' % table
        mimetype = 'text/csv'
    # dataset.freeze serializes the query into the in-memory buffer.
    dataset.freeze(query, export_format, file_obj=buf, **kwargs)
    response_data = buf.getvalue()
    response = make_response(response_data)
    response.headers['Content-Length'] = len(response_data)
    response.headers['Content-Type'] = mimetype
    response.headers['Content-Disposition'] = 'attachment; filename=%s' % (
        filename)
    response.headers['Expires'] = 0
    response.headers['Pragma'] = 'public'
    return response
@app.route('/<table>/import/', methods=['GET', 'POST'])
@require_table
def table_import(table):
    """Upload form (GET) and handler (POST) for importing a .csv or .json
    file into *table*; the whole import runs inside one transaction."""
    count = None
    request_data = get_request_data()
    strict = bool(request_data.get('strict'))
    if request.method == 'POST':
        file_obj = request.files.get('file')
        if not file_obj:
            flash('Please select an import file.', 'danger')
        elif not file_obj.filename.lower().endswith(('.csv', '.json')):
            flash('Unsupported file-type. Must be a .json or .csv file.',
                  'danger')
        else:
            if file_obj.filename.lower().endswith('.json'):
                format = 'json'
            else:
                format = 'csv'
            # Here we need to translate the file stream. Werkzeug uses a
            # spooled temporary file opened in wb+ mode, which is not
            # compatible with Python's CSV module. We'd need to reach pretty
            # far into Flask's internals to modify this behavior, so instead
            # we'll just translate the stream into utf8-decoded unicode.
            if not PY2:
                try:
                    stream = TextIOWrapper(file_obj, encoding='utf8')
                except AttributeError:
                    # The SpooledTemporaryFile used by werkzeug does not
                    # implement an API that the TextIOWrapper expects, so we'll
                    # just consume the whole damn thing and decode it.
                    # Fixed in werkzeug 0.15.
                    stream = StringIO(file_obj.read().decode('utf8'))
            else:
                stream = file_obj.stream
            try:
                # All rows are inserted atomically: a failure rolls back
                # every row imported so far.
                with dataset.transaction():
                    count = dataset.thaw(
                        table,
                        format=format,
                        file_obj=stream,
                        strict=strict)
            except Exception as exc:
                flash('Error importing file: %s' % exc, 'danger')
            else:
                flash(
                    'Successfully imported %s objects from %s.' % (
                        count, file_obj.filename),
                    'success')
                return redirect(url_for('table_content', table=table))
    return render_template(
        'table_import.html',
        count=count,
        strict=strict,
        table=table)
@app.route('/<table>/drop/', methods=['GET', 'POST'])
@require_table
def drop_table(table):
    """Confirmation page (GET) and execution (POST) for dropping *table*."""
    if request.method != 'POST':
        return render_template('drop_table.html', table=table)
    dataset[table].model_class.drop_table()
    dataset.update_cache()  # Refresh metadata for every table.
    flash('Table "%s" dropped successfully.' % table, 'success')
    return redirect(url_for('index'))
@app.template_filter('format_index')
def format_index(index_sql):
    """Template filter: put a CREATE INDEX statement's ON clause on its
    own line; statements without an ON keyword pass through unchanged."""
    on_keyword = re.compile(r'\bon\b', re.I)
    if on_keyword.search(index_sql):
        head, tail = on_keyword.split(index_sql)
        return '%s\nON %s' % (head.strip(), tail.strip())
    return index_sql
@app.template_filter('value_filter')
def value_filter(value, max_length=50):
    """Template filter rendering a cell value for HTML display: numbers
    pass through, binary data is decoded, strings are escaped and long
    strings get a show/hide truncation widget."""
    if isinstance(value, numeric):
        return value
    if isinstance(value, binary_types):
        if not isinstance(value, (bytes, bytearray)):
            value = bytes(value)  # Handle `buffer` type.
        value = value.decode('utf-8', decode_handler)
    if isinstance(value, unicode_type):
        value = escape(value)
        if len(value) > max_length:
            # Truncated preview plus the hidden full value and a toggle link
            # (the toggle behavior lives in the page's JavaScript).
            return ('<span class="truncated">%s</span> '
                    '<span class="full" style="display:none;">%s</span>'
                    '<a class="toggle-value" href="#">...</a>') % (
                        value[:max_length],
                        value)
    return value
column_re = re.compile('(.+?)\((.+)\)', re.S)
column_split_re = re.compile(r'(?:[^,(]|\([^)]*\))+')
def _format_create_table(sql):
create_table, column_list = column_re.search(sql).groups()
columns = [' %s' % column.strip()
for column in column_split_re.findall(column_list)
if column.strip()]
return '%s (\n%s\n)' % (
create_table,
',\n'.join(columns))
@app.template_filter()
def format_create_table(sql):
    """Template filter: pretty-print CREATE TABLE SQL, falling back to the
    raw statement when it cannot be parsed."""
    try:
        return _format_create_table(sql)
    except Exception:
        # Was a bare ``except:``, which would also swallow
        # KeyboardInterrupt/SystemExit; Exception covers any parse failure.
        return sql
@app.template_filter('highlight')
def highlight_filter(data):
    """Template filter: SQL syntax highlighting via pygments (if available)."""
    # Markup() marks the generated HTML as safe so Jinja won't re-escape it.
    return Markup(syntax_highlight(data))
def get_query_images():
    """Return (name_parts, relative_path) pairs for every file under the
    app's static/img directory, sorted by filename; empty if the
    directory does not exist."""
    image_dir = os.path.join(app.static_folder, 'img')
    if not os.path.exists(image_dir):
        return []
    images = []
    for filename in sorted(os.listdir(image_dir)):
        stem = os.path.splitext(os.path.basename(filename))[0]
        images.append((stem.split('-'), 'img/' + filename))
    return images
#
# Flask application helpers.
#
@app.context_processor
def _general():
    """Expose the dataset and the auth-enabled flag to every template."""
    return {
        'dataset': dataset,
        'login_required': bool(app.config.get('PASSWORD')),
    }
@app.context_processor
def _now():
    """Make the current timestamp available to templates as `now`."""
    return {'now': datetime.datetime.now()}
@app.before_request
def _connect_db():
    # Open one database connection per request.
    dataset.connect()
@app.teardown_request
def _close_db(exc):
    # Close the per-request connection if the handler left it open.
    if not dataset._database.is_closed():
        dataset.close()
class PrefixMiddleware(object):
    """WSGI middleware that mounts the wrapped application under a URL
    prefix (e.g. '/sqlite'), rewriting PATH_INFO/SCRIPT_NAME accordingly
    and answering 404 for paths outside the prefix."""

    def __init__(self, app, prefix):
        self.app = app
        # Normalize to a single leading slash, no trailing slash.
        self.prefix = '/%s' % prefix.strip('/')
        self.prefix_len = len(self.prefix)

    def __call__(self, environ, start_response):
        if environ['PATH_INFO'].startswith(self.prefix):
            environ['PATH_INFO'] = environ['PATH_INFO'][self.prefix_len:]
            environ['SCRIPT_NAME'] = self.prefix
            return self.app(environ, start_response)
        else:
            # PEP 3333 requires the status string to be
            # "<code> <reason-phrase>", not just the bare code.
            start_response('404 Not Found', [('Content-Type', 'text/plain')])
            return ['URL does not match application prefix.'.encode()]
#
# Script options.
#
def get_option_parser():
    """Build the command-line option parser for the console entry point."""
    parser = optparse.OptionParser()
    parser.add_option(
        '-p',
        '--port',
        default=8080,
        help='Port for web interface, default=8080',
        type='int')
    parser.add_option(
        '-H',
        '--host',
        default='127.0.0.1',
        help='Host for web interface, default=127.0.0.1')
    parser.add_option(
        '-d',
        '--debug',
        action='store_true',
        help='Run server in debug mode')
    parser.add_option(
        '-x',
        '--no-browser',
        action='store_false',
        default=True,
        dest='browser',
        help='Do not automatically open browser page.')
    parser.add_option(
        '-P',
        '--password',
        action='store_true',
        dest='prompt_password',
        help='Prompt for password to access database browser.')
    parser.add_option(
        '-r',
        '--read-only',
        action='store_true',
        dest='read_only',
        help='Open database in read-only mode.')
    parser.add_option(
        '-u',
        '--url-prefix',
        dest='url_prefix',
        help='URL prefix for application.')
    return parser
def die(msg, exit_code=1):
    """Write *msg* (plus a newline) to stderr and exit with *exit_code*."""
    sys.stderr.write(msg + '\n')
    sys.stderr.flush()
    sys.exit(exit_code)
def open_browser_tab(host, port):
    """Open the UI in the default browser shortly after the server starts."""
    url = 'http://%s:%s/' % (host, port)
    def _open_tab(url):
        # Small delay so the Flask server is listening before the tab loads.
        time.sleep(1.5)
        webbrowser.open_new_tab(url)
    # Daemon thread: doesn't block interpreter shutdown.
    thread = threading.Thread(target=_open_tab, args=(url,))
    thread.daemon = True
    thread.start()
def install_auth_handler(password):
    """Enable password protection: store the password in the app config and
    register a before_request hook that redirects unauthenticated requests
    (those whose session token is not in SESSION_SECRET_SET) to /login/."""
    app.config['PASSWORD'] = password
    @app.before_request
    def check_password():
        global SESSION_SECRET_SET
        # The login page, static assets and favicon stay accessible.
        if session.get('secret') not in SESSION_SECRET_SET and \
           request.path != '/login/' and \
           not request.path.startswith(('/static/', '/favicon')):
            flash('You must log-in to view the database browser.', 'danger')
            session['next_url'] = request.base_url
            return redirect(url_for('login'))
def initialize_app(filename, read_only=False, password=None, url_prefix=None):
    """Wire up the global dataset/migrator for *filename* and optionally
    enable auth and a URL prefix.

    Read-only mode opens the database via a 'file:...?mode=ro' URI, which
    requires Python >= 3.4 and peewee >= 3.5.1.
    """
    global dataset
    global migrator
    if password:
        install_auth_handler(password)
    if read_only:
        if sys.version_info < (3, 4, 0):
            die('Python 3.4.0 or newer is required for read-only access.')
        if peewee_version < (3, 5, 1):
            die('Peewee 3.5.1 or newer is required for read-only access.')
        db = SqliteDatabase('file:%s?mode=ro' % filename, uri=True)
        try:
            # Probe the connection: in read-only mode the file must already
            # exist, otherwise opening fails.
            db.connect()
        except OperationalError:
            die('Unable to open database file in read-only mode. Ensure that '
                'the database exists in order to use read-only mode.')
        db.close()
        dataset = SqliteDataSet(db, bare_fields=True)
    else:
        dataset = SqliteDataSet('sqlite:///%s' % filename, bare_fields=True)
    if url_prefix:
        app.wsgi_app = PrefixMiddleware(app.wsgi_app, prefix=url_prefix)
    migrator = dataset._migrator
    dataset.close()
def main():
    # This function exists to act as a console script entry-point.
    parser = get_option_parser()
    options, args = parser.parse_args()
    if not args:
        die('Error: missing required path to database file.')
    password = None
    if options.prompt_password:
        # The SQLITE_WEB_PASSWORD environment variable takes precedence
        # over the interactive prompt (useful for scripted deployments).
        if os.environ.get('SQLITE_WEB_PASSWORD'):
            password = os.environ['SQLITE_WEB_PASSWORD']
        else:
            while True:
                password = getpass('Enter password: ')
                password_confirm = getpass('Confirm password: ')
                if password != password_confirm:
                    print('Passwords did not match!')
                else:
                    break
    # Initialize the dataset instance and (optionally) authentication handler.
    initialize_app(args[0], options.read_only, password, options.url_prefix)
    if options.browser:
        open_browser_tab(options.host, options.port)
    app.run(host=options.host, port=options.port, debug=options.debug)
if __name__ == '__main__':
main()
| 31.788018 | 90 | 0.612823 |
b5e304d77fd325764f2cf81f3db727dbe107836e | 5,747 | py | Python | src/calibrate_laser_camera.py | hushunda/CameraLaserCalibrate | 1576a49f2aab1214f58b97ea2cda2f460fd69751 | [
"MIT"
] | 11 | 2021-01-05T06:45:16.000Z | 2021-12-30T13:16:43.000Z | src/calibrate_laser_camera.py | hushunda/CameraLaserCalibrate | 1576a49f2aab1214f58b97ea2cda2f460fd69751 | [
"MIT"
] | 3 | 2021-03-10T03:08:50.000Z | 2021-04-20T08:48:28.000Z | src/calibrate_laser_camera.py | hushunda/CameraLaserCalibrate | 1576a49f2aab1214f58b97ea2cda2f460fd69751 | [
"MIT"
] | 2 | 2021-05-17T09:52:36.000Z | 2022-01-16T09:47:30.000Z | # coding:utf-8
'''
相机和单线激光标定:
上一步已经相机和标定板的坐标变换
相机坐标和激光时间完全统一
当前:
1. 找出标定板对应的激光点
2. 拟合标定板的直线,
3. 求解最优化
方法1 SVD
方法2 http://people.csail.mit.edu/bkph/articles/Nearest_Orthonormal_Matrix.pdf
https://home.deec.uc.pt/~jpbar/Publication_Source/pami2012.pdf
方法3 随机梯度下降法求解
'''
import math
import numpy as np
import cv2
import random
from scipy.optimize import root,leastsq
class Optimize():
    """Solves the laser-plane-to-camera extrinsic calibration.

    config['optimize_method'] selects the solver: 'svd' (implemented) or
    'sgd' (stub only).
    """
    def __init__(self,config):
        self.AllMethod = ['svd','sgd']
        self.method = config['optimize_method']
        assert self.method in self.AllMethod
    def __call__(self,**args):
        # Dispatch to the solver chosen at construction time.
        return getattr(self,self.method)(args)
    def svd(self,args):
        '''Calibrate.
        Inputs: laser point positions, board plane normals, distances from
        the board planes to the origin.
            nHp = -d --> AH = b
            [ h1, h4, h7]
        H =[ h2, h5, h8]
            [ h3, h6, h9]
        Returns: the rotation/translation (R, T) from the laser frame to
        the camera frame.
        '''
        # Data preprocessing: flatten per-frame data, keeping only frames
        # that have a valid plane normal.
        laser_points = args['laser_points']
        Nces = args['Nc']
        Dses = args['Ds']
        # Initial guess for H.
        if 'H0' in args and args['H0'] is not None:
            H0 = args['H0']
        else:
            H0 = np.eye(3)#np.array([[0,0,1],[-1,0,0],[0,-1,0]])
        Nc = []
        Ds = []
        laser_3dpoints = []
        for p,n,d in zip(laser_points, Nces, Dses):
            for pi in p:
                if len(n)>0:
                    # Laser points are lifted to homogeneous [x, y, 1].
                    laser_3dpoints.append([pi[0],pi[1],1])
                    Nc.append(n)
                    Ds.append(d)
        if len(laser_3dpoints)<6:
            # NOTE(review): raising the NotImplemented constant (instead of
            # NotImplementedError) actually raises a TypeError at runtime.
            raise NotImplemented
        # Step 1: least-squares solve for H.
        def func(H,Nc,D,laser_points):
            # Residual of the plane constraint n . (H p) - d for one sample.
            Nc = np.array(Nc)
            D = np.array(D)
            H = H.reshape(3,3)
            laser_points = np.array(laser_points)
            # Optional regularization term kept commented out.
            return (Nc*(H.dot(laser_points))-D).sum()#+abs(0.03*(H).sum())
        def loss(H,Nc,D,laser_points):
            err =[]
            for n,d,p in zip(Nc,D,laser_points):
                err.append(func(H,n,d,p))
            return err
        para = leastsq(loss,H0,args=(Nc,Ds,laser_3dpoints))
        H0 = para[0].reshape(3,3)
        # Step 2: recover rotation and translation from H (third column of
        # the rotation is rebuilt as the cross product of the first two).
        h3 = H0[:,2]
        Rcl = H0.copy()
        Rcl[:,2] = np.cross(Rcl[:,0],Rcl[:,1])
        Tcl = h3
        # Step 3: project Rcl onto SO(3) via SVD, since the least-squares
        # estimate may not be a proper rotation matrix.
        U,s,V =np.linalg.svd(Rcl)
        Rcl = U.dot(V)
        return Rcl,Tcl
    def sgd(self,args):
        # Stub: the gradient-descent solver is not implemented.
        print('sgd')
        print(args)
    def RT2ncd(self, R_cpt,T_cpt):
        '''Convert board poses (R, T) into plane normals and offsets.'''
        Nc = []
        Ds = []
        for R,T in zip(R_cpt,T_cpt):
            # The board's z axis (third column of R) is its plane normal.
            n = R[:,2]
            Nc.append(n)
            Ds.append(n * T)
        return Nc, Ds
def theta_to_rotate_matrix(angle):
    """Build a 3x3 rotation matrix from [x, y, z] Euler angles in degrees.

    Composition order matches the original: R = Rx . Ry . Rz, each
    elementary rotation carrying +sin on the upper off-diagonal term.
    """
    ax, ay, az = np.deg2rad(angle)
    cx, sx = np.cos(ax), np.sin(ax)
    cy, sy = np.cos(ay), np.sin(ay)
    cz, sz = np.cos(az), np.sin(az)
    rot_x = np.array([[1, 0, 0],
                      [0, cx, sx],
                      [0, -sx, cx]], np.float32)
    rot_y = np.array([[cy, 0, sy],
                      [0, 1, 0],
                      [-sy, 0, cy]], np.float32)
    rot_z = np.array([[cz, sz, 0],
                      [-sz, cz, 0],
                      [0, 0, 1]], dtype=np.float32)
    return rot_x.dot(rot_y).dot(rot_z)
def test_optimize():
    ''' Self-test of the optimizer: camera / laser / board planes. '''
    # Ground-truth laser pose (assumed for the synthetic test).
    R_clt = theta_to_rotate_matrix([5,15,89]) # rotation laser -> camera
    T_clt = np.array([1.0,15.3,26.3])[:,None] # translation laser -> camera
    # Generate random calibration-board poses.
    R_cpt = np.array([theta_to_rotate_matrix([random.random()*90,random.random()*90,random.random()*90]) for i in range(10)])
    T_cpt = np.array([[random.random()*100,random.random()*50,random.random()*50] for i in range(10)])
    # Laser hit points on each board.
    opt = Optimize({'optimize_method':'svd'})
    P_l = compute_laser_points(R_cpt, T_cpt, R_clt, T_clt)
    # Sanity-check the generated laser points: each must lie on its board
    # plane (z = 0 in the board frame, and the plane equation holds).
    for i,p in enumerate(P_l):
        R_cpt_inv = np.linalg.inv(R_cpt[i])
        T = T_cpt[i]
        Nc = R_cpt[i][:,2]
        d = - Nc.dot(T)
        for pi in p:
            pp = R_cpt_inv.dot(R_clt.dot(np.array(pi)[:,None])+T_clt)-R_cpt_inv.dot(np.array(T)[:,None])
            err = sum(Nc*(R_clt.dot(np.array(pi)[:,None])+T_clt)[:,0])+d
            if abs(pp[2])>1e-3 or abs(err)>1e-3:
                print('laser point is wrong!')
    # Build an initial guess H0 offset from the ground truth.
    tlc = [3,16,30]
    Ti = np.eye(3)
    Ti[:,2] = -np.array(tlc)
    H0 = theta_to_rotate_matrix([15,0,25]).dot(Ti)
    H_true = R_clt.copy()
    H_true[:,2] = T_clt[:,0]
    Nc, Ds = opt.RT2ncd(R_cpt, T_cpt)
    RT = opt(laser_points=P_l,Nc=Nc, Ds=Ds , H0=H0)
    print('-'*10+'label'+'-'*10)
    # print(np.linalg.inv(R_clt))
    # print(-np.linalg.inv(R_clt).dot(T_clt))
    print(R_clt)
    print(T_clt)
    print('-' * 10 + 'prediction' + '-' * 10)
    print(RT[0])
    print(RT[1].tolist())
def compute_laser_points(R_cpt, T_cpt, R_clt, T_clt):
    '''
    Intersect each board plane with the laser scan plane (z = 0 in the
    laser frame) and return two points on the resulting line per pose.
    Transform chain: RR.dot(p)+RT+T
    :return: list of [start_point, end_point] pairs, one per board pose.
    '''
    P_l = []
    R_clt_inv = np.linalg.inv(R_clt)
    for R,T in zip(R_cpt,T_cpt):
        # Board pose expressed in the laser frame.
        R_new = R_clt_inv.dot(R)
        T_new = R_clt_inv.dot(T)[:,None]-R_clt_inv.dot(T_clt)
        # Direction of the laser line (in the board plane, with z = 0).
        # N1 = np.array([0,0,1])[:,None]
        N2 = R_new[:,2]
        N3 = [1,-N2[0]/N2[1],0]
        # Pick one point on the laser line.
        R_new_inv = np.linalg.inv(R_new)
        T_new_inv = R_new_inv.dot(T_new)
        if R_new_inv[2,0]!=0:
            st_point = [float(T_new_inv[2]/R_new_inv[2,0]),0,0]
        else:
            st_point = [0, float(T_new_inv[1] / R_new_inv[1, 1]), 0]
        # A second point, one unit along the line direction.
        end_point = (np.array(st_point)+np.array(N3)*1).tolist()
        P_l.append([st_point,end_point])
    return P_l
if __name__ == '__main__':
test_optimize()
| 29.321429 | 125 | 0.511049 |
502d8313b1d834cd4f764d561e3fabaeb7c89441 | 1,269 | py | Python | eynnyd/internal/plan_execution/error_handlers.py | cbefus/Eynnyd | 1b25281af98c1360794806db21f52ddbe0bd2cad | [
"MIT"
] | 3 | 2019-08-24T19:01:52.000Z | 2020-01-21T00:39:07.000Z | eynnyd/internal/plan_execution/error_handlers.py | cbefus/Eynnyd | 1b25281af98c1360794806db21f52ddbe0bd2cad | [
"MIT"
] | null | null | null | eynnyd/internal/plan_execution/error_handlers.py | cbefus/Eynnyd | 1b25281af98c1360794806db21f52ddbe0bd2cad | [
"MIT"
] | null | null | null | import logging
from eynnyd.exceptions import NoGenericErrorHandlerException
LOG = logging.getLogger("error_handlers")
class ErrorHandlers:
    """Dispatches raised exceptions to the first matching registered handler.

    Handlers are (exception_class, callable) pairs checked in registration
    order with isinstance, so more specific classes should be registered
    before generic ones. If nothing matches, a
    NoGenericErrorHandlerException is raised.
    """

    def __init__(
            self,
            pre_response_error_handlers,
            post_response_error_handlers):
        self._pre_response_error_handlers = pre_response_error_handlers
        self._post_response_error_handlers = post_response_error_handlers

    def handle_pre_response_error(self, thrown_error, request):
        """Run the matching pre-response handler for *thrown_error*."""
        handler = ErrorHandlers._get_handler_for_error(
            thrown_error, self._pre_response_error_handlers)
        return handler(thrown_error, request)

    def handle_post_response_error(self, thrown_error, request, response):
        """Run the matching post-response handler for *thrown_error*."""
        handler = ErrorHandlers._get_handler_for_error(
            thrown_error, self._post_response_error_handlers)
        return handler(thrown_error, request, response)

    @staticmethod
    def _get_handler_for_error(thrown_error, error_handlers):
        # First isinstance match wins; registration order is significant.
        for error_class, handler in error_handlers:
            if isinstance(thrown_error, error_class):
                return handler
        raise NoGenericErrorHandlerException(
            "No error handler registered for even generic exceptions.",
            thrown_error)
| 32.538462 | 107 | 0.719464 |
8d0abef9b43e9a2dd7a4947bd7230cf36383dd32 | 5,467 | py | Python | tests/test_exceptions.py | parkerclayton/jira | 87e68994bcc980981934908f7cbff0b7ade52934 | [
"BSD-2-Clause"
] | null | null | null | tests/test_exceptions.py | parkerclayton/jira | 87e68994bcc980981934908f7cbff0b7ade52934 | [
"BSD-2-Clause"
] | 54 | 2021-06-02T06:40:21.000Z | 2022-03-28T23:17:24.000Z | tests/test_exceptions.py | parkerclayton/jira | 87e68994bcc980981934908f7cbff0b7ade52934 | [
"BSD-2-Clause"
] | 1 | 2020-04-23T15:18:49.000Z | 2020-04-23T15:18:49.000Z | import unittest
from pathlib import Path
from unittest.mock import mock_open, patch
from requests import Response
from requests.structures import CaseInsensitiveDict
from jira.exceptions import JIRAError
DUMMY_HEADERS = {"h": "nice headers"}
DUMMY_TEXT = "nice text"
DUMMY_URL = "https://nice.jira.tests"
DUMMY_STATUS_CODE = 200
PATCH_BASE = "jira.exceptions"
class ExceptionsTests(unittest.TestCase):
class MockResponse(Response, object):
def __init__(
self,
headers: dict = None,
text: str = "",
status_code: int = DUMMY_STATUS_CODE,
url: str = DUMMY_URL,
):
"""Sub optimal but we create a mock response like this."""
self.headers = CaseInsensitiveDict(headers if headers else {})
self._text = text
self.status_code = status_code
self.url = url
@property
def text(self):
return self._text
@text.setter
def text(self, new_text):
self._text = new_text
class MalformedMockResponse(object):
def __init__(
self,
headers: dict = None,
text: str = "",
status_code: int = DUMMY_STATUS_CODE,
url: str = DUMMY_URL,
):
if headers:
self.headers = headers
if text:
self.text = text
self.url = url
self.status_code = status_code
def test_jira_error_response_added(self):
err = JIRAError(
response=self.MockResponse(headers=DUMMY_HEADERS, text=DUMMY_TEXT)
)
err_str = str(err)
assert f"headers = {DUMMY_HEADERS}" in err_str
assert f"text = {DUMMY_TEXT}" in err_str
def test_jira_error_malformed_response(self):
# GIVEN: a malformed Response object, without headers or text set
bad_repsonse = self.MalformedMockResponse()
# WHEN: The JiraError's __str__ method is called
err = JIRAError(response=bad_repsonse)
err_str = str(err)
# THEN: there are no errors and neither headers nor text are in the result
assert "headers = " not in err_str
assert "text = " not in err_str
def test_jira_error_request_added(self):
err = JIRAError(
request=self.MockResponse(headers=DUMMY_HEADERS, text=DUMMY_TEXT)
)
err_str = str(err)
assert f"headers = {DUMMY_HEADERS}" in err_str
assert f"text = {DUMMY_TEXT}" in err_str
def test_jira_error_malformed_request(self):
# GIVEN: a malformed Response object, without headers or text set
bad_repsonse = self.MalformedMockResponse()
# WHEN: The JiraError's __str__ method is called
err = JIRAError(request=bad_repsonse)
err_str = str(err)
# THEN: there are no errors and neither headers nor text are in the result
assert "headers = " not in err_str
assert "text = " not in err_str
def test_jira_error_url_added(self):
assert f"url: {DUMMY_URL}" in str(JIRAError(url=DUMMY_URL))
def test_jira_error_status_code_added(self):
assert f"JiraError HTTP {DUMMY_STATUS_CODE}" in str(
JIRAError(status_code=DUMMY_STATUS_CODE)
)
def test_jira_error_text_added(self):
dummy_text = "wow\tthis\nis\nso cool"
assert f"text: {dummy_text}" in str(JIRAError(text=dummy_text))
def test_jira_error_log_to_tempfile_if_env_var_set(self):
    # GIVEN: the right env vars are set and the tempfile's filename
    env_vars = {"PYJIRA_LOG_TO_TEMPFILE": "so true"}
    test_jira_error_filename = (
        Path(__file__).parent / "test_jira_error_log_to_tempfile.bak"
    )
    # https://docs.python.org/3/library/unittest.mock.html#mock-open
    mocked_open = mock_open()
    # WHEN: a JIRAError's __str__ method is called and
    # log details are expected to be sent to the tempfile
    with patch.dict("os.environ", env_vars), patch(
        f"{PATCH_BASE}.tempfile.mkstemp", autospec=True
    ) as mock_mkstemp, patch(f"{PATCH_BASE}.open", mocked_open):
        # mkstemp() returns (fd, path); fake fd 0 plus our known filename so
        # the assertion below can match on the exact path.
        mock_mkstemp.return_value = 0, str(test_jira_error_filename)
        str(JIRAError(response=self.MockResponse(text=DUMMY_TEXT)))
    # THEN: the known filename is opened and contains the exception details
    mocked_open.assert_called_once_with(str(test_jira_error_filename), "w")
    mock_file_stream = mocked_open()
    assert f"text = {DUMMY_TEXT}" in mock_file_stream.write.call_args[0][0]
def test_jira_error_log_to_tempfile_not_used_if_env_var_not_set(self):
    # GIVEN: no env vars are set and the tempfile's filename
    env_vars = {}
    test_jira_error_filename = (
        Path(__file__).parent / "test_jira_error_log_to_tempfile.bak"
    )
    # https://docs.python.org/3/library/unittest.mock.html#mock-open
    mocked_open = mock_open()
    # WHEN: a JIRAError's __str__ method is called
    with patch.dict("os.environ", env_vars), patch(
        f"{PATCH_BASE}.tempfile.mkstemp", autospec=True
    ) as mock_mkstemp, patch(f"{PATCH_BASE}.open", mocked_open):
        mock_mkstemp.return_value = 0, str(test_jira_error_filename)
        str(JIRAError(response=self.MockResponse(text=DUMMY_TEXT)))
    # THEN: no files are opened (tempfile logging requires the env var)
    mocked_open.assert_not_called()
| 36.939189 | 82 | 0.644229 |
981ea979649e3a04e00837bba80b52589ed0eccc | 2,445 | py | Python | src/crawl/unicrawl/spiders/hech_programs.py | VictorLeP/Education4Climate | 7d419c96c759da2215504ac9051dd6f61d940b96 | [
"MIT"
] | 4 | 2021-09-24T16:17:57.000Z | 2022-01-31T17:32:16.000Z | src/crawl/unicrawl/spiders/hech_programs.py | VictorLeP/Education4Climate | 7d419c96c759da2215504ac9051dd6f61d940b96 | [
"MIT"
] | 23 | 2021-08-02T07:38:47.000Z | 2022-02-11T14:16:56.000Z | src/crawl/unicrawl/spiders/hech_programs.py | VictorLeP/Education4Climate | 7d419c96c759da2215504ac9051dd6f61d940b96 | [
"MIT"
] | 3 | 2021-11-20T06:06:44.000Z | 2022-03-11T07:44:43.000Z | # -*- coding: utf-8 -*-
from abc import ABC
from pathlib import Path
import scrapy
from settings import YEAR, CRAWLING_OUTPUT_FOLDER
BASE_URL = "http://progcours.hech.be/cocoon/fac/fac{}"
DEPARTMENTS_CODES = {
"A": "Département Agronomique",
"E": "Département Economique",
"M": "Département Paramédicale",
"P": "Département Pédagogique"
}
class HECHProgramSpider(scrapy.Spider, ABC):
    """Programs crawler for Haute Ecole Charlemagne."""

    name = "hech-programs"
    custom_settings = {
        'FEED_URI': Path(__file__).parent.absolute().joinpath(
            f'../../../../{CRAWLING_OUTPUT_FOLDER}hech_programs_{YEAR}.json').as_uri()
    }

    def start_requests(self):
        # One request per department; forward the department name so
        # parse_main can attach it as the program's faculty.
        # (Iterate .items() instead of .keys() + lookup.)
        for code, faculty in DEPARTMENTS_CODES.items():
            yield scrapy.Request(BASE_URL.format(code), self.parse_main,
                                 cb_kwargs={'faculty': faculty})

    def parse_main(self, response, faculty):
        """Parse a department page listing its programs, then follow each.

        The original used f-strings with no placeholders for the XPath
        expressions; plain strings are used instead.
        """
        programs_names = response.xpath("//a[@class='LienProg']/text()").getall()
        programs_links = response.xpath("//a[@class='LienProg']/@href").getall()
        # Program code = first chunk of the last URL segment
        # (e.g. '.../ABCD_foo' -> 'ABCD').
        programs_codes = [link.split("/")[-1].split("_")[0] for link in programs_links]
        # The first word of the program name hints at the cycle.
        programs_cycles = [name.split(" ")[0].lower() for name in programs_names]

        for program_name, code, link, cycle in zip(programs_names, programs_codes,
                                                   programs_links, programs_cycles):
            # Normalize the French cycle word into our canonical codes.
            if 'bachelier' in cycle:
                cycle = 'bac'
            elif 'master' in cycle:
                cycle = 'master'
            elif 'certificat' in cycle:
                cycle = 'certificate'
            else:
                cycle = 'other'

            base_dict = {
                'id': code,
                'name': program_name,
                'cycle': cycle,
                'faculties': [faculty],
                'campuses': []
            }
            yield response.follow(link, self.parse_program,
                                  cb_kwargs={'base_dict': base_dict})

    @staticmethod
    def parse_program(response, base_dict):
        """Parse one program page: collect course ids and ECTS counts."""
        ects = response.xpath("//td[contains(@class, 'ContColG')]/text()").getall()
        # '\xa0' (non-breaking space) marks empty cells; drop them.
        ects = [int(e) for e in ects if e != '\xa0']
        courses_ids = response.xpath("//nobr/text()").getall()
        cur_dict = {
            "url": response.url,
            "courses": courses_ids,
            "ects": ects
        }
        yield {**base_dict, **cur_dict}
| 31.346154 | 116 | 0.574233 |
9a0fc576162f686eafbdd4189c4cf8f7b32cae78 | 40,259 | py | Python | src/olympia/versions/models.py | ankushduacodes/addons-server | 5ff2fd40dbe28852ad53caa42ce804cc64a4bbdc | [
"BSD-3-Clause"
] | null | null | null | src/olympia/versions/models.py | ankushduacodes/addons-server | 5ff2fd40dbe28852ad53caa42ce804cc64a4bbdc | [
"BSD-3-Clause"
] | null | null | null | src/olympia/versions/models.py | ankushduacodes/addons-server | 5ff2fd40dbe28852ad53caa42ce804cc64a4bbdc | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
import os
from base64 import b64encode
import django.dispatch
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.storage import default_storage as storage
from django.db import models, transaction
from django.db.models import Q
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.translation import ugettext
import jinja2
from olympia.constants.applications import APP_IDS
import waffle
from django_jsonfield_backport.models import JSONField
from django_statsd.clients import statsd
import olympia.core.logger
from olympia import activity, amo, core
from olympia.amo.decorators import use_primary_db
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import (
BasePreview,
LongNameIndex,
ManagerBase,
ModelBase,
OnChangeMixin,
)
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import sorted_groupby, utc_millesecs_from_epoch
from olympia.applications.models import AppVersion
from olympia.constants.licenses import LICENSES_BY_BUILTIN
from olympia.constants.promoted import PROMOTED_GROUPS_BY_ID
from olympia.constants.scanners import MAD
from olympia.files import utils
from olympia.files.models import File, cleanup_file
from olympia.translations.fields import (
LinkifiedField,
PurifiedField,
TranslatedField,
save_signal,
)
from olympia.scanners.models import ScannerResult
from olympia.users.utils import get_task_user
from .compare import version_int
from .fields import VersionStringField
log = olympia.core.logger.getLogger('z.versions')
# Valid source extensions. Actual validation lives in
# devhub.forms.WithSourceMixin and is slightly more complex (the file
# contents are checked to see if it matches the extension).
# If changing this, make sure devhub.forms.WithSourceMixin.clean_source() and
# source_upload_path() are updated accordingly if needed, and that source
# submission still works both at add-on and version upload time.
VALID_SOURCE_EXTENSIONS = (
'.zip',
'.tar.gz',
'.tgz',
'.tar.bz2',
)
class VersionManager(ManagerBase):
    """Default manager for Version; excludes soft-deleted rows unless
    constructed with include_deleted=True."""

    def __init__(self, include_deleted=False):
        ManagerBase.__init__(self)
        self.include_deleted = include_deleted

    def get_queryset(self):
        qs = super(VersionManager, self).get_queryset()
        if not self.include_deleted:
            qs = qs.exclude(deleted=True)
        # transform() bulk-attaches files/compat apps (see Version.transformer).
        return qs.transform(Version.transformer)

    def valid(self):
        # Versions that have at least one file in a valid status.
        return self.filter(files__status__in=amo.VALID_FILE_STATUSES).distinct()

    def latest_public_compatible_with(self, application, appversions):
        """Return a queryset filtering the versions so that they are public,
        listed, and compatible with the application and appversions parameters
        passed. The queryset is ordered by creation date descending, allowing
        the caller to get the latest compatible version available.

        application is an application id
        appversions is a dict containing min and max values, as version ints.
        """
        return Version.objects.filter(
            apps__application=application,
            apps__min__version_int__lte=appversions['min'],
            apps__max__version_int__gte=appversions['max'],
            channel=amo.RELEASE_CHANNEL_LISTED,
            files__status=amo.STATUS_APPROVED,
        ).order_by('-created')

    def auto_approvable(self):
        """Returns a queryset filtered with just the versions that should
        attempted for auto-approval by the cron job."""
        qs = self.filter(
            files__status=amo.STATUS_AWAITING_REVIEW, files__is_webextension=True
        ).filter(
            # For listed, add-on can't be incomplete, deleted or disabled.
            # It also cannot be disabled by user ("invisible"), and can not
            # be a theme either.
            Q(
                channel=amo.RELEASE_CHANNEL_LISTED,
                addon__status__in=(amo.STATUS_NOMINATED, amo.STATUS_APPROVED),
                addon__disabled_by_user=False,
                addon__type__in=(amo.ADDON_EXTENSION, amo.ADDON_LPAPP, amo.ADDON_DICT),
            )
            # For unlisted, add-on can't be deleted or disabled.
            | Q(
                channel=amo.RELEASE_CHANNEL_UNLISTED,
                addon__status__in=(
                    amo.STATUS_NULL,
                    amo.STATUS_NOMINATED,
                    amo.STATUS_APPROVED,
                ),
            )
        )
        return qs
class UnfilteredVersionManagerForRelations(VersionManager):
    """Like VersionManager, but defaults to include deleted objects.

    Designed to be used in reverse relations of Versions like this:
    <Addon>.versions(manager='unfiltered_for_relations').all(), for when you
    want to use the related manager but need to include deleted versions.

    unfiltered_for_relations = UnfilteredVersionManagerForRelations() is
    defined in Version for this to work.
    """

    def __init__(self, include_deleted=True):
        # Only the default value of include_deleted differs from the parent.
        super().__init__(include_deleted=include_deleted)
def source_upload_path(instance, filename):
    """Return the storage path for a version's source archive.

    The filename is expected to end with one of VALID_SOURCE_EXTENSIONS;
    actual validation lives in devhub.forms.WithSourceMixin.clean_source().
    """
    # Find the matching extension, defaulting to '' when none matches.
    # (The previous for-loop implementation left `ext` bound to the *last*
    # candidate extension on a miss instead of the intended '' fallback.)
    ext = next(
        (
            candidate
            for candidate in VALID_SOURCE_EXTENSIONS
            if filename.endswith(candidate)
        ),
        '',
    )
    return os.path.join(
        'version_source',
        utils.id_to_path(instance.pk),
        '{0}-{1}-src{2}'.format(instance.addon.slug, instance.version, ext),
    )
class VersionCreateError(ValueError):
    """Raised when a Version cannot be created from an upload."""
    pass
class Version(OnChangeMixin, ModelBase):
    """A single uploaded version of an add-on, with its files and compat."""

    id = PositiveAutoField(primary_key=True)
    addon = models.ForeignKey(
        'addons.Addon', related_name='versions', on_delete=models.CASCADE
    )
    license = models.ForeignKey(
        'License', null=True, blank=True, on_delete=models.SET_NULL
    )
    release_notes = PurifiedField(db_column='releasenotes', short=False)
    approval_notes = models.TextField(
        db_column='approvalnotes', default='', null=True, blank=True
    )
    version = VersionStringField(max_length=255, default='0.1')

    nomination = models.DateTimeField(null=True)
    reviewed = models.DateTimeField(null=True)

    # Soft-delete flag; rows with deleted=True are hidden by the default
    # manager (see VersionManager).
    deleted = models.BooleanField(default=False)

    # Optional source-code archive uploaded by the developer.
    source = models.FileField(
        upload_to=source_upload_path, null=True, blank=True, max_length=255
    )

    channel = models.IntegerField(
        choices=amo.RELEASE_CHANNEL_CHOICES, default=amo.RELEASE_CHANNEL_LISTED
    )

    # Commit hash of the git-extracted copy of this version, if any.
    git_hash = models.CharField(max_length=40, blank=True)
    needs_human_review = models.BooleanField(default=False)

    # The order of those managers is very important: please read the lengthy
    # comment above the Addon managers declaration/instantiation.
    unfiltered = VersionManager(include_deleted=True)
    objects = VersionManager()

    # See UnfilteredVersionManagerForRelations() docstring for usage of this
    # special manager.
    unfiltered_for_relations = UnfilteredVersionManagerForRelations()

    class Meta(ModelBase.Meta):
        db_table = 'versions'
        # This is very important: please read the lengthy comment in Addon.Meta
        # description
        base_manager_name = 'unfiltered'
        ordering = ['-created', '-modified']
        indexes = [
            models.Index(fields=('addon',), name='addon_id'),
            models.Index(fields=('license',), name='license_id'),
        ]

    def __str__(self):
        # HTML-escaped because templates historically render this directly.
        return jinja2.escape(self.version)
@classmethod
def from_upload(cls, upload, addon, selected_apps, channel, parsed_data=None):
    """
    Create a Version instance and corresponding File(s) from a
    FileUpload, an Addon, a list of compatible app ids, a channel id and
    the parsed_data generated by parse_addon().

    Note that it's the caller's responsability to ensure the file is valid.
    We can't check for that here because an admin may have overridden the
    validation results.
    """
    # Local imports to avoid circular imports at module load time.
    from olympia.addons.models import AddonReviewerFlags
    from olympia.git.utils import create_git_extraction_entry

    assert parsed_data is not None

    if addon.status == amo.STATUS_DISABLED:
        raise VersionCreateError(
            'Addon is Mozilla Disabled; no new versions are allowed.'
        )

    # New listed versions inherit the license of the latest listed version.
    license_id = None
    if channel == amo.RELEASE_CHANNEL_LISTED:
        previous_version = addon.find_latest_version(channel=channel, exclude=())
        if previous_version and previous_version.license_id:
            license_id = previous_version.license_id
    approval_notes = None
    if parsed_data.get('is_mozilla_signed_extension'):
        approval_notes = (
            'This version has been signed with Mozilla internal certificate.'
        )
    version = cls.objects.create(
        addon=addon,
        approval_notes=approval_notes,
        version=parsed_data['version'],
        license_id=license_id,
        channel=channel,
    )
    email = upload.user.email if upload.user and upload.user.email else ''
    # Log with the uploader's IP as the remote address, for auditing.
    with core.override_remote_addr(upload.ip_address):
        log.info(
            'New version: %r (%s) from %r' % (version, version.id, upload),
            extra={
                'email': email,
                'guid': addon.guid,
                'upload': upload.uuid.hex,
                'user_id': upload.user_id,
                'from_api': upload.source == amo.UPLOAD_SOURCE_API,
            },
        )
        activity.log_create(
            amo.LOG.ADD_VERSION, version, addon, user=upload.user or get_task_user()
        )

    if addon.type == amo.ADDON_STATICTHEME:
        # We don't let developers select apps for static themes
        selected_apps = [app.id for app in amo.APP_USAGE]

    compatible_apps = {}
    for app in parsed_data.get('apps', []):
        if app.id not in selected_apps:
            # If the user chose to explicitly deselect Firefox for Android
            # we're not creating the respective `ApplicationsVersions`
            # which will have this add-on then be listed only for
            # Firefox specifically.
            continue
        compatible_apps[app.appdata] = ApplicationsVersions(
            version=version, min=app.min, max=app.max, application=app.id
        )
        compatible_apps[app.appdata].save()

    # See #2828: sometimes when we generate the filename(s) below, in
    # File.from_upload(), cache-machine is confused and has trouble
    # fetching the ApplicationsVersions that were just created. To work
    # around this we pre-generate version.compatible_apps and avoid the
    # queries completely.
    version._compatible_apps = compatible_apps

    # For backwards compatibility. We removed specific platform
    # support during submission but we don't handle it any different
    # beyond that yet. That means, we're going to simply set it
    # to `PLATFORM_ALL` and also have the backend create separate
    # files for each platform. Cleaning that up is another step.
    # Given the timing on this, we don't care about updates to legacy
    # add-ons as well.

    # Create relevant file and update the all_files cached property on the
    # Version, because we might need it afterwards.
    version.all_files = [
        File.from_upload(
            upload=upload,
            version=version,
            platform=amo.PLATFORM_ALL.id,
            parsed_data=parsed_data,
        )
    ]

    version.inherit_nomination(from_statuses=[amo.STATUS_AWAITING_REVIEW])
    version.disable_old_files()

    # After the upload has been copied to all platforms, remove the upload.
    storage.delete(upload.path)
    upload.path = ''
    upload.save()

    version_uploaded.send(instance=version, sender=Version)

    if version.is_webextension:
        if (
            waffle.switch_is_active('enable-yara')
            or waffle.switch_is_active('enable-customs')
            or waffle.switch_is_active('enable-wat')
        ):
            # Link pre-existing scanner results for this upload to the
            # freshly created version.
            ScannerResult.objects.filter(upload_id=upload.id).update(
                version=version
            )

    if waffle.switch_is_active('enable-uploads-commit-to-git-storage'):
        # Schedule this version for git extraction.
        transaction.on_commit(lambda: create_git_extraction_entry(version=version))

    # Generate a preview and icon for listed static themes
    if (
        addon.type == amo.ADDON_STATICTHEME
        and channel == amo.RELEASE_CHANNEL_LISTED
    ):
        theme_data = parsed_data.get('theme', {})
        generate_static_theme_preview(theme_data, version.pk)

    # Reset add-on reviewer flags to disable auto-approval and require
    # admin code review if the package has already been signed by mozilla.
    reviewer_flags_defaults = {}
    is_mozilla_signed = parsed_data.get('is_mozilla_signed_extension')
    if upload.validation_timeout:
        reviewer_flags_defaults['needs_admin_code_review'] = True
    if is_mozilla_signed and addon.type != amo.ADDON_LPAPP:
        reviewer_flags_defaults['needs_admin_code_review'] = True
        reviewer_flags_defaults['auto_approval_disabled'] = True

    if reviewer_flags_defaults:
        AddonReviewerFlags.objects.update_or_create(
            addon=addon, defaults=reviewer_flags_defaults
        )

    # Authors need to be notified about auto-approval delay again since
    # they are submitting a new version.
    addon.reset_notified_about_auto_approval_delay()

    # Track the time it took from first upload through validation
    # (and whatever else) until a version was created.
    upload_start = utc_millesecs_from_epoch(upload.created)
    now = datetime.datetime.now()
    now_ts = utc_millesecs_from_epoch(now)
    upload_time = now_ts - upload_start
    log.info(
        'Time for version {version} creation from upload: {delta}; '
        'created={created}; now={now}'.format(
            delta=upload_time, version=version, created=upload.created, now=now
        )
    )
    statsd.timing('devhub.version_created_from_upload', upload_time)

    return version
def license_url(self, impala=False):
    # `impala` is kept for interface compatibility with callers; both
    # variants resolve to the same named URL here.
    return reverse('addons.license', args=[self.addon.slug, self.version])

def get_url_path(self):
    # Unlisted versions have no public detail page.
    if self.channel == amo.RELEASE_CHANNEL_UNLISTED:
        return ''
    return reverse('addons.versions', args=[self.addon.slug])

def delete(self, hard=False):
    """Delete this version. Soft-deletes by default (files kept for
    comparison and record-keeping); hard=True removes the row entirely."""
    # To avoid a circular import
    from .tasks import delete_preview_files

    log.info('Version deleted: %r (%s)' % (self, self.id))
    activity.log_create(amo.LOG.DELETE_VERSION, self.addon, str(self.version))

    if hard:
        super(Version, self).delete()
    else:
        # By default we soft delete so we can keep the files for comparison
        # and a record of the version number.
        self.files.update(status=amo.STATUS_DISABLED)
        self.deleted = True
        self.save()

        # Clear pending rejection flag (we have the activity log for
        # records purposes, the flag serves no purpose anymore if the
        # version is deleted).
        VersionReviewerFlags.objects.filter(version=self).update(
            pending_rejection=None
        )

        previews_pks = list(
            VersionPreview.objects.filter(version__id=self.id).values_list(
                'id', flat=True
            )
        )

        # Preview files on disk are removed asynchronously.
        for preview_pk in previews_pks:
            delete_preview_files.delay(preview_pk)
@property
def is_user_disabled(self):
    # True when a file was disabled by the developer themselves: the
    # pre-disable status was preserved in original_status.
    return (
        self.files.filter(status=amo.STATUS_DISABLED)
        .exclude(original_status=amo.STATUS_NULL)
        .exists()
    )

@is_user_disabled.setter
def is_user_disabled(self, disable):
    # User wants to disable (and the File isn't already).
    if disable:
        activity.log_create(amo.LOG.DISABLE_VERSION, self.addon, self)
        for file in self.files.exclude(status=amo.STATUS_DISABLED).all():
            file.update(original_status=file.status, status=amo.STATUS_DISABLED)
    # User wants to re-enable (and user did the disable, not Mozilla).
    else:
        activity.log_create(amo.LOG.ENABLE_VERSION, self.addon, self)
        for file in self.files.exclude(original_status=amo.STATUS_NULL).all():
            file.update(
                status=file.original_status, original_status=amo.STATUS_NULL
            )

@cached_property
def all_activity(self):
    # prefetch_related() and not select_related() the ActivityLog to make
    # sure its transformer is called.
    return self.versionlog_set.prefetch_related('activity_log').order_by('created')
@property
def compatible_apps(self):
    """Mapping of {APP: ApplicationsVersions} for this version."""
    # Dicts and search providers don't have compatibility info.
    # Fake one for them.
    if self.addon and self.addon.type in amo.NO_COMPAT:
        return {app: None for app in amo.APP_TYPE_SUPPORT[self.addon.type]}
    # Otherwise, return _compatible_apps which is a cached property that
    # is filled by the transformer, or simply calculated from the related
    # compat instances.
    return self._compatible_apps

@cached_property
def _compatible_apps(self):
    """Get a mapping of {APP: ApplicationsVersions}."""
    return self._compat_map(self.apps.all().select_related('min', 'max'))

@cached_property
def compatible_apps_ordered(self):
    # Sorted by app short name for a stable display order.
    apps = self.compatible_apps.items()
    return sorted(apps, key=lambda v: v[0].short)
@cached_property
def is_compatible_by_default(self):
    """Returns whether or not the add-on is considered compatible by
    default."""
    # Use self.all_files directly since that's cached and more potentially
    # prefetched through a transformer already
    return not any(
        [
            file
            for file in self.all_files
            if file.binary_components or file.strict_compatibility
        ]
    )

def is_compatible_app(self, app):
    """Returns True if the provided app passes compatibility conditions."""
    if self.addon.type in amo.NO_COMPAT:
        return True
    appversion = self.compatible_apps.get(app)
    if appversion and app.id in amo.D2C_MIN_VERSIONS:
        # Compatible-by-default only applies from a minimum app version.
        return version_int(appversion.max.version) >= version_int(
            amo.D2C_MIN_VERSIONS.get(app.id, '*')
        )
    return False
def compat_override_app_versions(self):
    """Return the (min, max) app version ranges this version is
    incompatible with.

    Returns an empty list when no compat overrides apply; otherwise all
    app version ranges whose version range covers this version.
    """
    overrides = list(self.addon.compatoverride_set.all())
    if not overrides:
        return []

    incompatible = []
    for override in overrides:
        for version_range in override.collapsed_ranges():
            within_range = (
                version_int(version_range.min)
                <= version_int(self.version)
                <= version_int(version_range.max)
            )
            if within_range:
                incompatible.extend(
                    (app.min, app.max) for app in version_range.apps
                )
    return incompatible
@cached_property
def all_files(self):
    """Shortcut for list(self.files.all()). Cached."""
    return list(self.files.all())

@property
def current_file(self):
    """Shortcut for selecting the first file from self.all_files"""
    return self.all_files[0]

@cached_property
def supported_platforms(self):
    """Get a list of supported platform names."""
    return list(set(amo.PLATFORMS[f.platform] for f in self.all_files))

@property
def status(self):
    # Human-readable status label for each attached file.
    return [
        f.STATUS_CHOICES.get(f.status, ugettext('[status:%s]') % f.status)
        for f in self.all_files
    ]

@property
def statuses(self):
    """Unadulterated statuses, good for an API."""
    return [(f.id, f.status) for f in self.all_files]
def is_public(self):
    # To be public, a version must not be deleted, must belong to a public
    # addon, and all its attached files must have public status.
    try:
        return (
            not self.deleted
            and self.addon.is_public()
            and all(f.status == amo.STATUS_APPROVED for f in self.all_files)
        )
    except ObjectDoesNotExist:
        # The related add-on may be gone; treat as not public.
        return False

@property
def is_restart_required(self):
    # True if any attached file requires an application restart.
    return any(file_.is_restart_required for file_ in self.all_files)

@property
def is_webextension(self):
    return any(file_.is_webextension for file_ in self.all_files)

@property
def is_mozilla_signed(self):
    """Is the file a special "Mozilla Signed Extension"

    See https://wiki.mozilla.org/Add-ons/InternalSigning for more details.
    We use that information to workaround compatibility limits for legacy
    add-ons and to avoid them receiving negative boosts compared to
    WebExtensions.

    See https://github.com/mozilla/addons-server/issues/6424
    """
    return all(file_.is_mozilla_signed_extension for file_ in self.all_files)

@property
def has_files(self):
    return bool(self.all_files)

@property
def is_unreviewed(self):
    # True when at least one file is still awaiting review.
    return bool(
        list(
            filter(
                lambda f: f.status in amo.UNREVIEWED_FILE_STATUSES, self.all_files
            )
        )
    )

@property
def is_all_unreviewed(self):
    # True when no file has moved past an unreviewed status.
    return not bool(
        [f for f in self.all_files if f.status not in amo.UNREVIEWED_FILE_STATUSES]
    )

@property
def sources_provided(self):
    # Whether the developer uploaded a source archive for this version.
    return bool(self.source)
def _compat_map(self, avs):
    """Build {APP: ApplicationsVersions} from compat rows, attaching self
    as each row's version; rows for unknown application ids are skipped."""
    mapping = {}
    for av in avs:
        av.version = self
        app = amo.APP_IDS.get(av.application)
        if app is not None:
            mapping[app] = av
    return mapping
@classmethod
def transformer(cls, versions):
    """Attach all the compatible apps and files to the versions."""
    if not versions:
        return

    ids = set(v.id for v in versions)
    avs = ApplicationsVersions.objects.filter(version__in=ids).select_related(
        'min', 'max'
    )
    files = File.objects.filter(version__in=ids)

    def rollup(xs):
        # Group the rows into {version_id: [rows]}.
        groups = sorted_groupby(xs, 'version_id')
        return dict((k, list(vs)) for k, vs in groups)

    av_dict, file_dict = rollup(avs), rollup(files)

    for version in versions:
        v_id = version.id
        version._compatible_apps = version._compat_map(av_dict.get(v_id, []))
        version.all_files = file_dict.get(v_id, [])
        for f in version.all_files:
            f.version = version

@classmethod
def transformer_promoted(cls, versions):
    """Attach the promoted approvals to the versions."""
    if not versions:
        return

    # Resolve the model through the relation to avoid a circular import.
    PromotedApproval = versions[0].promoted_approvals.model

    ids = set(v.id for v in versions)

    approvals = list(
        PromotedApproval.objects.filter(version_id__in=ids).values_list(
            'version_id', 'group_id', 'application_id', named=True
        )
    )
    approval_dict = {
        version_id: list(groups)
        for version_id, groups in sorted_groupby(approvals, 'version_id')
    }
    for version in versions:
        v_id = version.id
        groups = [
            (
                PROMOTED_GROUPS_BY_ID.get(approval.group_id),
                APP_IDS.get(approval.application_id),
            )
            for approval in approval_dict.get(v_id, [])
            if approval.group_id in PROMOTED_GROUPS_BY_ID
        ]
        version.approved_for_groups = groups

@classmethod
def transformer_activity(cls, versions):
    """Attach all the activity to the versions."""
    from olympia.activity.models import VersionLog

    ids = set(v.id for v in versions)
    if not versions:
        return

    # Ideally, we'd start from the ActivityLog, but because VersionLog
    # to ActivityLog isn't a OneToOneField, we wouldn't be able to find
    # the version easily afterwards - we can't even do a
    # select_related('versionlog') and try to traverse the relation to find
    # the version. So, instead, start from VersionLog, but make sure to use
    # prefetch_related() (and not select_related() - yes, it's one extra
    # query, but it's worth it to benefit from the default transformer) so
    # that the ActivityLog default transformer is called.
    al = (
        VersionLog.objects.prefetch_related('activity_log')
        .filter(version__in=ids)
        .order_by('created')
    )

    def rollup(xs):
        # Group the VersionLog rows into {version_id: [rows]}.
        groups = sorted_groupby(xs, 'version_id')
        return {k: list(vs) for k, vs in groups}

    al_dict = rollup(al)

    for version in versions:
        v_id = version.id
        version.all_activity = al_dict.get(v_id, [])
@classmethod
def transformer_license(cls, versions):
    """Attach license objects to the given versions in one query."""
    if not versions:
        return
    wanted_ids = {version.license_id for version in versions}
    licenses_by_id = {
        lic.id: lic for lic in License.objects.filter(id__in=wanted_ids)
    }
    for version in versions:
        lic = licenses_by_id.get(version.license_id)
        if lic:
            version.license = lic
@classmethod
def transformer_auto_approvable(cls, versions):
    """Attach auto-approvability information to the versions."""
    ids = set(v.id for v in versions)
    if not ids:
        return
    # One query: which of these versions pass the auto-approval filter.
    auto_approvable = (
        Version.objects.auto_approvable()
        .filter(pk__in=ids)
        .values_list('pk', flat=True)
    )
    for version in versions:
        version.is_ready_for_auto_approval = version.pk in auto_approvable
def disable_old_files(self):
    """
    Disable files from versions older than the current one in the same
    channel and awaiting review. Used when uploading a new version.

    Does nothing if the current instance is unlisted.
    """
    if self.channel == amo.RELEASE_CHANNEL_LISTED:
        qs = File.objects.filter(
            version__addon=self.addon_id,
            version__lt=self.id,
            version__deleted=False,
            version__channel=self.channel,
            status=amo.STATUS_AWAITING_REVIEW,
        )
        # Use File.update so signals are triggered.
        for f in qs:
            f.update(status=amo.STATUS_DISABLED)

def reset_nomination_time(self, nomination=None):
    # Only (re)set when there is no nomination yet, or one is forced.
    if not self.nomination or nomination:
        nomination = nomination or datetime.datetime.now()
        # We need signal=False not to call update_status (which calls us).
        self.update(nomination=nomination, _signal=False)

def inherit_nomination(self, from_statuses=None):
    # Inherit the most recent nomination date from other listed versions
    # of the same add-on, optionally restricted to given file statuses.
    last_ver = (
        Version.objects.filter(addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED)
        .exclude(nomination=None)
        .exclude(id=self.pk)
        .order_by('-nomination')
    )
    if from_statuses:
        last_ver = last_ver.filter(files__status__in=from_statuses)
    if last_ver.exists():
        self.reset_nomination_time(nomination=last_ver[0].nomination)
@property
def unreviewed_files(self):
    """A File is unreviewed if its status is amo.STATUS_AWAITING_REVIEW."""
    return self.files.filter(status=amo.STATUS_AWAITING_REVIEW)

@cached_property
def is_ready_for_auto_approval(self):
    """Return whether or not this version could be *considered* for
    auto-approval.

    Does not necessarily mean that it would be auto-approved, just that it
    passes the most basic criteria to be considered a candidate by the
    auto_approve command."""
    return Version.objects.auto_approvable().filter(id=self.id).exists()

@property
def was_auto_approved(self):
    """Return whether or not this version was auto-approved."""
    # Local import to avoid a circular import.
    from olympia.reviewers.models import AutoApprovalSummary

    try:
        return (
            self.is_public()
            and AutoApprovalSummary.objects.filter(version=self).get().verdict
            == amo.AUTO_APPROVED
        )
    except AutoApprovalSummary.DoesNotExist:
        pass
    return False
def get_background_images_encoded(self, header_only=False):
    """Return {image name: base64-encoded bytes} for this version's first
    file, or an empty dict when the version has no files."""
    if not self.has_files:
        return {}
    file_obj = self.all_files[0]
    images = utils.get_background_images(
        file_obj, theme_data=None, header_only=header_only
    )
    return {
        name: force_text(b64encode(raw_bytes))
        for name, raw_bytes in images.items()
    }
def can_be_disabled_and_deleted(self):
    # see https://github.com/mozilla/addons-server/issues/15121#issuecomment-667226959  # noqa
    # "It should apply to the <groups> that require a review to be badged"
    from olympia.promoted.models import PromotedApproval

    # Freely deletable unless this is the current version of an add-on in
    # a badged, pre-reviewed promoted group.
    if self != self.addon.current_version or (
        not (group := self.addon.promoted_group())
        or not (group.badged and group.pre_review)
    ):
        return True

    # Otherwise only deletable when a previous valid version in the same
    # channel was already approved for the same promoted group.
    previous_ver = (
        self.addon.versions.valid()
        .filter(channel=self.channel)
        .exclude(id=self.id)
        .no_transforms()[:1]
    )
    previous_approval = PromotedApproval.objects.filter(
        group_id=group.id, version__in=previous_ver
    )
    return previous_approval.exists()
@property
def is_blocked(self):
    # True when this exact version string is covered by the add-on's Block.
    block = self.addon.block
    return bool(block and block.is_version_blocked(self.version))

@property
def pending_rejection(self):
    # Datetime of the scheduled rejection, or None when no flags row exists.
    try:
        return self.reviewerflags.pending_rejection
    except VersionReviewerFlags.DoesNotExist:
        return None

@property
def needs_human_review_by_mad(self):
    # Defaults to False when no flags row exists for this version.
    try:
        return self.reviewerflags.needs_human_review_by_mad
    except VersionReviewerFlags.DoesNotExist:
        return False
@property
def scanners_score(self):
    """Human-readable MAD scanner score ('NN%'), or 'n/a' when absent.

    We use the score of the MAD scanner because it is the 'ensemble'
    score (i.e. computed using all other scanner scores). We iterate on
    .scannerresults instead of doing .filter() because there shouldn't be
    many results, and the caller (normally the reviewer tools review page)
    will usually have prefetched all scanner results.
    """
    score = next(
        (
            result.score
            for result in self.scannerresults.all()
            if result.scanner == MAD
        ),
        None,
    )
    return '{:0.0f}%'.format(score * 100) if score and score >= 0 else 'n/a'
@cached_property
def approved_for_groups(self):
    # (promoted group, application) pairs this version was approved for;
    # approvals referencing unknown group ids are skipped.
    approvals = list(self.promoted_approvals.all())
    return [
        (PROMOTED_GROUPS_BY_ID.get(approval.group_id), approval.application)
        for approval in approvals
        if approval.group_id in PROMOTED_GROUPS_BY_ID
    ]
class VersionReviewerFlags(ModelBase):
    """Per-version reviewer-tools flags (one-to-one with Version)."""

    version = models.OneToOneField(
        Version,
        primary_key=True,
        on_delete=models.CASCADE,
        related_name='reviewerflags',
    )
    # Set when the MAD (ensemble) scanner flags the version for human review.
    needs_human_review_by_mad = models.BooleanField(default=False, db_index=True)
    # When set, the version is scheduled to be rejected at that datetime.
    pending_rejection = models.DateTimeField(
        default=None, null=True, blank=True, db_index=True
    )
def generate_static_theme_preview(theme_data, version_pk):
    """This redirection is so we can mock generate_static_theme_preview, where
    needed, in tests."""
    # To avoid a circular import
    from . import tasks

    # Fire-and-forget: the preview is rendered asynchronously by celery.
    tasks.generate_static_theme_preview.delay(theme_data, version_pk)
class VersionPreview(BasePreview, ModelBase):
    """Auto-generated preview image(s) attached to a version."""

    version = models.ForeignKey(
        Version, related_name='previews', on_delete=models.CASCADE
    )
    position = models.IntegerField(default=0)
    sizes = JSONField(default=dict)
    colors = JSONField(default=None, null=True)
    # Storage sub-folder used by BasePreview for the image files.
    media_folder = 'version-previews'

    class Meta:
        db_table = 'version_previews'
        ordering = ('position', 'created')
        indexes = [
            LongNameIndex(
                fields=('version',), name='version_previews_version_id_fk_versions_id'
            ),
            models.Index(
                fields=('version', 'position', 'created'),
                name='version_position_created_idx',
            ),
        ]

    @cached_property
    def caption(self):
        """We only don't support defining a caption for previews because
        they're auto-generated. This is for compatibility with Addon Preview
        objects. (it's a cached_property so it can be set transparently)"""
        return None
# Remove the preview image files from storage when a VersionPreview row
# is deleted.
models.signals.post_delete.connect(
    VersionPreview.delete_preview_files,
    sender=VersionPreview,
    dispatch_uid='delete_preview_files',
)
@use_primary_db
def update_status(sender, instance, **kw):
    """Signal handler: refresh the related add-on's status when a version
    is saved or deleted.

    Skipped for raw (fixture-loading) saves. ObjectDoesNotExist is logged
    and swallowed on purpose: the add-on may already be gone (e.g. during
    cascading deletes).
    """
    if not kw.get('raw'):
        try:
            instance.addon.reload()
            instance.addon.update_status()
        except models.ObjectDoesNotExist:
            # Deliberately best-effort: log with traceback and carry on.
            log.info(
                'Got ObjectDoesNotExist processing Version change signal', exc_info=True
            )
def inherit_nomination(sender, instance, **kw):
    """
    For new versions pending review, ensure nomination date
    is inherited from last nominated version.

    Skipped for raw (fixture-loading) saves.
    """
    if kw.get('raw'):
        return
    addon = instance.addon
    # Only inherit when this version has no nomination date of its own and
    # the add-on is still in an unreviewed state.
    if instance.nomination is None and addon.status in amo.UNREVIEWED_ADDON_STATUSES:
        instance.inherit_nomination()
def cleanup_version(sender, instance, **kw):
    """On delete of the version object call the file delete and signals."""
    # Raw (fixture-loading) deletes skip cleanup.
    if kw.get('raw'):
        return
    for file_ in instance.files.all():
        cleanup_file(file_.__class__, file_)
@Version.on_change
def watch_changes(old_attr=None, new_attr=None, instance=None, sender=None, **kwargs):
    """on_change handler: react to attribute changes on a Version.

    Currently only used to sync the parent add-on to basket when an
    unlisted version gets soft-deleted.
    """
    if old_attr is None:
        old_attr = {}
    if new_attr is None:
        new_attr = {}
    # Set of public attribute names whose value actually changed.
    changes = {
        x for x in new_attr if not x.startswith('_') and new_attr[x] != old_attr.get(x)
    }
    if instance.channel == amo.RELEASE_CHANNEL_UNLISTED and 'deleted' in changes:
        # Sync the related add-on to basket when an unlisted version is
        # deleted. (When a listed version is deleted, watch_changes() in
        # olympia.addon.models should take care of it (since _current_version
        # will change).
        from olympia.amo.tasks import trigger_sync_objects_to_basket
        trigger_sync_objects_to_basket(
            'addon', [instance.addon.pk], 'unlisted version deleted'
        )
def watch_new_unlisted_version(sender=None, instance=None, **kwargs):
    """version_uploaded handler: sync the add-on to basket for unlisted
    uploads."""
    # Sync the related add-on to basket when an unlisted version is uploaded.
    # Unlisted version deletion is handled by watch_changes() above, and new
    # version approval changes are handled by watch_changes()
    # in olympia.addon.models (since _current_version will change).
    # What's left here is unlisted version upload.
    if instance and instance.channel == amo.RELEASE_CHANNEL_UNLISTED:
        from olympia.amo.tasks import trigger_sync_objects_to_basket
        trigger_sync_objects_to_basket(
            'addon', [instance.addon.pk], 'new unlisted version'
        )
# Custom signal emitted when a new version has been uploaded; wired to
# watch_new_unlisted_version so unlisted uploads get synced to basket.
version_uploaded = django.dispatch.Signal()
version_uploaded.connect(watch_new_unlisted_version)
# Keep Version's translated fields in sync on save.
models.signals.pre_save.connect(
    save_signal, sender=Version, dispatch_uid='version_translations'
)
# Refresh the parent add-on's status after a version is saved...
models.signals.post_save.connect(
    update_status, sender=Version, dispatch_uid='version_update_status'
)
# ...and inherit the nomination date for new versions pending review.
models.signals.post_save.connect(
    inherit_nomination, sender=Version, dispatch_uid='version_inherit_nomination'
)
# File cleanup is wired pre-delete, while the version row still exists.
models.signals.pre_delete.connect(
    cleanup_version, sender=Version, dispatch_uid='cleanup_version'
)
# Status is refreshed again post-delete, since removing a version can
# change the add-on's status.
models.signals.post_delete.connect(
    update_status, sender=Version, dispatch_uid='version_update_status'
)
class LicenseManager(ManagerBase):
    """Manager with helpers for built-in licenses."""

    def builtins(self, cc=False):
        """Return built-in licenses whose creative_commons flag matches
        ``cc``, ordered by their builtin id."""
        return self.filter(builtin__gt=0, creative_commons=cc).order_by('builtin')
class License(ModelBase):
    """A license attached to a version, either custom or built-in."""

    # builtin value meaning "custom license" (not one of the known ones).
    OTHER = 0
    id = PositiveAutoField(primary_key=True)
    name = TranslatedField()
    url = models.URLField(null=True)
    # Identifier of the built-in license; OTHER (0) for custom licenses.
    builtin = models.PositiveIntegerField(default=OTHER)
    text = LinkifiedField()
    on_form = models.BooleanField(
        default=False, help_text='Is this a license choice in the devhub?'
    )
    some_rights = models.BooleanField(
        default=False,
        help_text='Show "Some Rights Reserved" instead of the license name?',
    )
    icons = models.CharField(
        max_length=255, null=True, help_text='Space-separated list of icon identifiers.'
    )
    creative_commons = models.BooleanField(default=False)
    objects = LicenseManager()
    class Meta:
        db_table = 'licenses'
        indexes = [models.Index(fields=('builtin',), name='builtin_idx')]
    def __str__(self):
        # Prefer the constant definition's name when this is a built-in
        # license; fall back to the stored (translated) name.
        license = self._constant or self
        return str(license.name)
    @property
    def _constant(self):
        # The constant object for built-in licenses, or None for custom ones.
        return LICENSES_BY_BUILTIN.get(self.builtin)
# Keep License's translated fields in sync on save.
models.signals.pre_save.connect(
    save_signal, sender=License, dispatch_uid='license_translations'
)
class ApplicationsVersions(models.Model):
    """Compatibility range (min/max AppVersion) of a Version for a given
    application."""

    id = PositiveAutoField(primary_key=True)
    # Application identifier; choices come from amo.APPS_CHOICES.
    application = models.PositiveIntegerField(
        choices=amo.APPS_CHOICES, db_column='application_id'
    )
    version = models.ForeignKey(Version, related_name='apps', on_delete=models.CASCADE)
    # Lowest supported application version.
    min = models.ForeignKey(
        AppVersion, db_column='min', related_name='min_set', on_delete=models.CASCADE
    )
    # Highest supported application version.
    max = models.ForeignKey(
        AppVersion, db_column='max', related_name='max_set', on_delete=models.CASCADE
    )
    class Meta:
        db_table = 'applications_versions'
        constraints = [
            models.UniqueConstraint(
                fields=('application', 'version'), name='application_id'
            ),
        ]
    def get_application_display(self):
        """Return the human-readable name of the application."""
        return str(amo.APPS_ALL[self.application].pretty)
    def get_latest_application_version(self):
        """Return the most recent known AppVersion for this application,
        ignoring wildcard ('*') versions."""
        return (
            AppVersion.objects.filter(
                ~models.Q(version__contains='*'), application=self.application
            )
            .order_by('-version_int')
            .first()
        )
    def __str__(self):
        # For versions compatible by default, display an open-ended range.
        if self.version.is_compatible_by_default and self.version.is_compatible_app(
            amo.APP_IDS[self.application]
        ):
            return ugettext('{app} {min} and later').format(
                app=self.get_application_display(), min=self.min
            )
        return '%s %s - %s' % (self.get_application_display(), self.min, self.max)
| 35.945536 | 98 | 0.643508 |
fbee5ccca144a025307e8333f1b238dd249c815e | 423 | py | Python | paper_admin/apps.py | dldevinc/paper_admin | ef90605cabdbd2bf966a7b11d5a0a69b72a3dd87 | [
"BSD-3-Clause"
] | 8 | 2021-03-01T00:12:01.000Z | 2022-01-18T10:54:20.000Z | paper_admin/apps.py | dldevinc/paper_admin | ef90605cabdbd2bf966a7b11d5a0a69b72a3dd87 | [
"BSD-3-Clause"
] | 7 | 2020-02-11T21:54:07.000Z | 2022-03-16T15:22:35.000Z | paper_admin/apps.py | dldevinc/paper_admin | ef90605cabdbd2bf966a7b11d5a0a69b72a3dd87 | [
"BSD-3-Clause"
] | null | null | null | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class Config(AppConfig):
name = "paper_admin"
verbose_name = _("Paper Admin")
def ready(self):
from .patches import django
from django.contrib.auth.views import PasswordResetView
PasswordResetView.html_email_template_name = (
"registration/password_reset_email_alt.html"
)
| 24.882353 | 63 | 0.706856 |
c94fe0833d1b092c80c7dd0259bfc40293b8ddb4 | 112,143 | py | Python | test/orm/test_mapper.py | pje1740/sqlalchemy | 28138ee6fbc1273f1bf16699927e09db81ae7870 | [
"MIT"
] | null | null | null | test/orm/test_mapper.py | pje1740/sqlalchemy | 28138ee6fbc1273f1bf16699927e09db81ae7870 | [
"MIT"
] | null | null | null | test/orm/test_mapper.py | pje1740/sqlalchemy | 28138ee6fbc1273f1bf16699927e09db81ae7870 | [
"MIT"
] | null | null | null | """General mapper operations with an emphasis on selecting/loading."""
import logging
import logging.handlers
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import util
from sqlalchemy.engine import default
from sqlalchemy.orm import aliased
from sqlalchemy.orm import attributes
from sqlalchemy.orm import backref
from sqlalchemy.orm import class_mapper
from sqlalchemy.orm import column_property
from sqlalchemy.orm import composite
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import create_session
from sqlalchemy.orm import deferred
from sqlalchemy.orm import dynamic_loader
from sqlalchemy.orm import mapper
from sqlalchemy.orm import reconstructor
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import synonym
from sqlalchemy.orm.persistence import _sort_states
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from test.orm import _fixtures
class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
__dialect__ = "default"
    def test_prop_shadow(self):
        """A backref name may not shadow an existing property name."""
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )
        mapper(Address, addresses)
        # backref="email_address" would create an Address.email_address
        # relationship, clashing with the mapped
        # addresses.email_address column property.
        mapper(
            User,
            users,
            properties={
                "addresses": relationship(Address, backref="email_address")
            },
        )
        # The clash is only detected at mapper configuration time.
        assert_raises(sa.exc.ArgumentError, sa.orm.configure_mappers)
    def test_update_attr_keys(self):
        """test that update()/insert() use the correct key when given
        InstrumentedAttributes."""
        User, users = self.classes.User, self.tables.users
        # 'foobar' maps the users.name column under a different attribute
        # key; the DML paths must resolve the attribute down to the
        # underlying column.
        mapper(User, users, properties={"foobar": users.c.name})
        users.insert().values({User.foobar: "name1"}).execute()
        eq_(
            sa.select(User.foobar)
            .where(User.foobar == "name1")
            .execute()
            .fetchall(),
            [("name1",)],
        )
        users.update().values({User.foobar: User.foobar + "foo"}).execute()
        eq_(
            sa.select(User.foobar)
            .where(User.foobar == "name1foo")
            .execute()
            .fetchall(),
            [("name1foo",)],
        )
    def test_utils(self):
        """Exercise the _is_mapped_class / _is_aliased_class predicates
        against mapped classes, aliases, instances, plain attributes and
        mappers."""
        users = self.tables.users
        addresses = self.tables.addresses
        Address = self.classes.Address
        from sqlalchemy.orm.base import _is_mapped_class, _is_aliased_class
        class Foo(object):
            x = "something"
            @property
            def y(self):
                return "something else"
        m = mapper(Foo, users, properties={"addresses": relationship(Address)})
        mapper(Address, addresses)
        a1 = aliased(Foo)
        f = Foo()
        # Table of (predicate, argument, expected result).
        for fn, arg, ret in [
            (_is_mapped_class, Foo.x, False),
            (_is_mapped_class, Foo.y, False),
            (_is_mapped_class, Foo.name, False),
            (_is_mapped_class, Foo.addresses, False),
            (_is_mapped_class, Foo, True),
            (_is_mapped_class, f, False),
            (_is_mapped_class, a1, True),
            (_is_mapped_class, m, True),
            (_is_aliased_class, a1, True),
            (_is_aliased_class, Foo.x, False),
            (_is_aliased_class, Foo.y, False),
            (_is_aliased_class, Foo, False),
            (_is_aliased_class, f, False),
            (_is_aliased_class, a1, True),
            (_is_aliased_class, m, False),
        ]:
            assert fn(arg) == ret
    def test_entity_descriptor(self):
        """_entity_descriptor() resolves an attribute key against a mapper,
        mapped class, alias or plain Table."""
        users = self.tables.users
        from sqlalchemy.orm.base import _entity_descriptor
        class Foo(object):
            x = "something"
            @property
            def y(self):
                return "something else"
        m = mapper(Foo, users)
        a1 = aliased(Foo)
        # Table of (entity, key, expected descriptor).
        for arg, key, ret in [
            (m, "x", Foo.x),
            (Foo, "x", Foo.x),
            (a1, "x", a1.x),
            (users, "name", users.c.name),
        ]:
            assert _entity_descriptor(arg, key) is ret
    def test_friendly_attribute_str_on_uncompiled_boom(self):
        """str() of a relationship attribute must not trigger mapper
        configuration (which would raise here, since the relationship
        argument is a broken callable)."""
        User, users = self.classes.User, self.tables.users
        def boom():
            raise Exception("it broke")
        mapper(User, users, properties={"addresses": relationship(boom)})
        # test that QueryableAttribute.__str__() doesn't
        # cause a compile.
        eq_(str(User.addresses), "User.addresses")
def test_exceptions_sticky(self):
"""test preservation of mapper compile errors raised during hasattr(),
as well as for redundant mapper compile calls. Test that
repeated calls don't stack up error messages.
"""
Address, addresses, User = (
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(Address, addresses, properties={"user": relationship(User)})
try:
hasattr(Address.user, "property")
except sa.orm.exc.UnmappedClassError:
assert util.compat.py3k
for i in range(3):
assert_raises_message(
sa.exc.InvalidRequestError,
"One or more mappers failed to initialize - can't "
"proceed with initialization of other mappers. "
"Triggering mapper: 'mapped class Address->addresses'. "
"Original exception was: Class 'test.orm._fixtures.User' "
"is not mapped",
configure_mappers,
)
    def test_column_prefix(self):
        """column_prefix='_' maps columns as _id/_name; a synonym can
        still expose a public name usable in filter_by()."""
        users, User = self.tables.users, self.classes.User
        mapper(
            User,
            users,
            column_prefix="_",
            properties={"user_name": synonym("_name")},
        )
        s = create_session()
        u = s.query(User).get(7)
        eq_(u._name, "jack")
        eq_(u._id, 7)
        # filter_by() accepts the synonym key and resolves it to _name.
        u2 = s.query(User).filter_by(user_name="jack").one()
        assert u is u2
def test_no_pks_1(self):
User, users = self.classes.User, self.tables.users
s = sa.select(users.c.name).alias("foo")
assert_raises(sa.exc.ArgumentError, mapper, User, s)
def test_no_pks_2(self):
User, users = self.classes.User, self.tables.users
s = sa.select(users.c.name).alias()
assert_raises(sa.exc.ArgumentError, mapper, User, s)
def test_reconfigure_on_other_mapper(self):
"""A configure trigger on an already-configured mapper
still triggers a check against all mappers."""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
sa.orm.configure_mappers()
assert sa.orm.mapperlib.Mapper._new_mappers is False
m = mapper(
Address,
addresses,
properties={"user": relationship(User, backref="addresses")},
)
assert m.configured is False
assert sa.orm.mapperlib.Mapper._new_mappers is True
User()
assert User.addresses
assert sa.orm.mapperlib.Mapper._new_mappers is False
    def test_configure_on_session(self):
        """session.connection() accepts a mapper argument without error."""
        User, users = self.classes.User, self.tables.users
        m = mapper(User, users)
        session = create_session()
        session.connection(mapper=m)
def test_incomplete_columns(self):
"""Loading from a select which does not contain all columns"""
addresses, Address = self.tables.addresses, self.classes.Address
mapper(Address, addresses)
s = create_session()
a = (
s.query(Address)
.from_statement(
sa.select(addresses.c.id, addresses.c.user_id).order_by(
addresses.c.id
)
)
.first()
)
eq_(a.user_id, 7)
eq_(a.id, 1)
# email address auto-defers
assert "email_address" not in a.__dict__
eq_(a.email_address, "jack@bean.com")
def test_column_not_present(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
assert_raises_message(
sa.exc.ArgumentError,
"not represented in the mapper's table",
mapper,
User,
users,
properties={"foo": addresses.c.user_id},
)
    def test_constructor_exc(self):
        """TypeError is raised for illegal constructor args,
        whether or not explicit __init__ is present [ticket:908]."""
        users, addresses = self.tables.users, self.tables.addresses
        # Foo has an explicit no-arg __init__; Bar relies on the default.
        class Foo(object):
            def __init__(self):
                pass
        class Bar(object):
            pass
        mapper(Foo, users)
        mapper(Bar, addresses)
        # Neither constructor accepts keyword arguments.
        assert_raises(TypeError, Foo, x=5)
        assert_raises(TypeError, Bar, x=5)
    def test_sort_states_comparisons(self):
        """test that _sort_states() doesn't compare
        insert_order to state.key, for set of mixed
        persistent/pending. In particular Python 3 disallows
        this.
        """
        class Foo(object):
            def __init__(self, id_):
                self.id = id_
        m = MetaData()
        foo_t = Table("foo", m, Column("id", String, primary_key=True))
        m = mapper(Foo, foo_t)
        # Subclass of int that would blow up (under py2) if compared to a
        # string, proving insert_order is never compared to state.key.
        class DontCompareMeToString(int):
            if util.py2k:
                def __lt__(self, other):
                    assert not isinstance(other, basestring)  # noqa
                    return int(self) < other
        foos = [Foo(id_="f%d" % i) for i in range(5)]
        states = [attributes.instance_state(f) for f in foos]
        # First three states get identity keys (persistent); the rest stay
        # pending and are ordered by insert_order instead.
        for s in states[0:3]:
            s.key = m._identity_key_from_state(s)
        states[3].insert_order = DontCompareMeToString(5)
        states[4].insert_order = DontCompareMeToString(1)
        states[2].insert_order = DontCompareMeToString(3)
        eq_(
            _sort_states(m, states),
            [states[4], states[3], states[0], states[1], states[2]],
        )
def test_props(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
m = mapper(
User,
users,
properties={"addresses": relationship(mapper(Address, addresses))},
)
assert User.addresses.property is m.get_property("addresses")
def test_unicode_relationship_backref_names(self):
# test [ticket:2901]
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(Address, addresses)
mapper(
User,
users,
properties={
util.u("addresses"): relationship(
Address, backref=util.u("user")
)
},
)
u1 = User()
a1 = Address()
u1.addresses.append(a1)
assert a1.user is u1
def test_configure_on_prop_1(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(
User,
users,
properties={"addresses": relationship(mapper(Address, addresses))},
)
User.addresses.any(Address.email_address == "foo@bar.com")
def test_configure_on_prop_2(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(
User,
users,
properties={"addresses": relationship(mapper(Address, addresses))},
)
eq_(str(User.id == 3), str(users.c.id == 3))
def test_configure_on_prop_3(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
class Foo(User):
pass
mapper(User, users)
mapper(
Foo,
addresses,
inherits=User,
properties={"address_id": addresses.c.id},
)
assert getattr(Foo().__class__, "name").impl is not None
def test_deferred_subclass_attribute_instrument(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
class Foo(User):
pass
mapper(User, users)
configure_mappers()
mapper(
Foo,
addresses,
inherits=User,
properties={"address_id": addresses.c.id},
)
assert getattr(Foo().__class__, "name").impl is not None
def test_class_hier_only_instrument_once_multiple_configure(self):
users, addresses = (self.tables.users, self.tables.addresses)
class A(object):
pass
class ASub(A):
pass
class ASubSub(ASub):
pass
class B(object):
pass
from sqlalchemy.testing import mock
from sqlalchemy.orm.attributes import register_attribute_impl
with mock.patch(
"sqlalchemy.orm.attributes.register_attribute_impl",
side_effect=register_attribute_impl,
) as some_mock:
mapper(A, users, properties={"bs": relationship(B)})
mapper(B, addresses)
configure_mappers()
mapper(ASub, inherits=A)
mapper(ASubSub, inherits=ASub)
configure_mappers()
b_calls = [c for c in some_mock.mock_calls if c[1][1] == "bs"]
eq_(len(b_calls), 3)
def test_check_descriptor_as_method(self):
User, users = self.classes.User, self.tables.users
m = mapper(User, users)
class MyClass(User):
def foo(self):
pass
assert m._is_userland_descriptor("foo", MyClass.foo)
def test_configure_on_get_props_1(self):
User, users = self.classes.User, self.tables.users
m = mapper(User, users)
assert not m.configured
assert list(m.iterate_properties)
assert m.configured
def test_configure_on_get_props_2(self):
User, users = self.classes.User, self.tables.users
m = mapper(User, users)
assert not m.configured
assert m.get_property("name")
assert m.configured
def test_configure_on_get_props_3(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
m = mapper(User, users)
assert not m.configured
configure_mappers()
mapper(
Address,
addresses,
properties={"user": relationship(User, backref="addresses")},
)
assert m.get_property("addresses")
def test_info(self):
users = self.tables.users
Address = self.classes.Address
class MyComposite(object):
pass
for constructor, args in [
(column_property, (users.c.name,)),
(relationship, (Address,)),
(composite, (MyComposite, "id", "name")),
(synonym, "foo"),
]:
obj = constructor(info={"x": "y"}, *args)
eq_(obj.info, {"x": "y"})
obj.info["q"] = "p"
eq_(obj.info, {"x": "y", "q": "p"})
obj = constructor(*args)
eq_(obj.info, {})
obj.info["q"] = "p"
eq_(obj.info, {"q": "p"})
def test_info_via_instrumented(self):
m = MetaData()
# create specific tables here as we don't want
# users.c.id.info to be pre-initialized
users = Table(
"u",
m,
Column("id", Integer, primary_key=True),
Column("name", String),
)
addresses = Table(
"a",
m,
Column("id", Integer, primary_key=True),
Column("name", String),
Column("user_id", Integer, ForeignKey("u.id")),
)
Address = self.classes.Address
User = self.classes.User
mapper(
User,
users,
properties={
"name_lower": column_property(func.lower(users.c.name)),
"addresses": relationship(Address),
},
)
mapper(Address, addresses)
# attr.info goes down to the original Column object
# for the dictionary. The annotated element needs to pass
# this on.
assert "info" not in users.c.id.__dict__
is_(User.id.info, users.c.id.info)
assert "info" in users.c.id.__dict__
# for SQL expressions, ORM-level .info
is_(User.name_lower.info, User.name_lower.property.info)
# same for relationships
is_(User.addresses.info, User.addresses.property.info)
def test_add_property(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
assert_col = []
class User(fixtures.ComparableEntity):
def _get_name(self):
assert_col.append(("get", self._name))
return self._name
def _set_name(self, name):
assert_col.append(("set", name))
self._name = name
name = property(_get_name, _set_name)
m = mapper(User, users)
mapper(Address, addresses)
m.add_property("_name", deferred(users.c.name))
m.add_property("name", synonym("_name"))
m.add_property("addresses", relationship(Address))
sess = create_session(autocommit=False)
assert sess.query(User).get(7)
u = sess.query(User).filter_by(name="jack").one()
def go():
eq_(
len(u.addresses),
len(self.static.user_address_result[0].addresses),
)
eq_(u.name, "jack")
eq_(assert_col, [("get", "jack")], str(assert_col))
self.sql_count_(2, go)
u.name = "ed"
u3 = User()
u3.name = "some user"
sess.add(u3)
sess.flush()
sess.rollback()
def test_add_prop_via_backref_resets_memoizations_reconfigures(self):
users, User = self.tables.users, self.classes.User
addresses, Address = self.tables.addresses, self.classes.Address
m1 = mapper(User, users)
User()
mapper(
Address,
addresses,
properties={"user": relationship(User, backref="addresses")},
)
# configure mappers takes place when User is generated
User()
assert hasattr(User, "addresses")
assert "addresses" in [p.key for p in m1._polymorphic_properties]
def test_replace_col_prop_w_syn(self):
users, User = self.tables.users, self.classes.User
m = mapper(User, users)
m.add_property("_name", users.c.name)
m.add_property("name", synonym("_name"))
sess = create_session()
u = sess.query(User).filter_by(name="jack").one()
eq_(u._name, "jack")
eq_(u.name, "jack")
u.name = "jacko"
assert m._columntoproperty[users.c.name] is m.get_property("_name")
sa.orm.clear_mappers()
m = mapper(User, users)
m.add_property("name", synonym("_name", map_column=True))
sess.expunge_all()
u = sess.query(User).filter_by(name="jack").one()
eq_(u._name, "jack")
eq_(u.name, "jack")
u.name = "jacko"
assert m._columntoproperty[users.c.name] is m.get_property("_name")
def test_replace_rel_prop_with_rel_warns(self):
users, User = self.tables.users, self.classes.User
addresses, Address = self.tables.addresses, self.classes.Address
m = mapper(
User, users, properties={"addresses": relationship(Address)}
)
mapper(Address, addresses)
assert_raises_message(
sa.exc.SAWarning,
"Property User.addresses on Mapper|User|users being replaced "
"with new property User.addresses; the old property will "
"be discarded",
m.add_property,
"addresses",
relationship(Address),
)
def test_add_column_prop_deannotate(self):
User, users = self.classes.User, self.tables.users
Address, addresses = self.classes.Address, self.tables.addresses
class SubUser(User):
pass
m = mapper(User, users)
m2 = mapper(
SubUser,
addresses,
inherits=User,
properties={"address_id": addresses.c.id},
)
mapper(Address, addresses, properties={"foo": relationship(m2)})
# add property using annotated User.name,
# needs to be deannotated
m.add_property("x", column_property(User.name + "name"))
s = create_session()
q = s.query(m2).select_from(Address).join(Address.foo)
self.assert_compile(
q,
"SELECT "
"addresses_1.id AS addresses_1_id, "
"users_1.id AS users_1_id, "
"users_1.name AS users_1_name, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS "
"addresses_1_email_address, "
"users_1.name || :name_1 AS anon_1 "
"FROM addresses JOIN (users AS users_1 JOIN addresses "
"AS addresses_1 ON users_1.id = "
"addresses_1.user_id) ON "
"users_1.id = addresses.user_id",
)
def test_column_prop_deannotate(self):
"""test that column property deannotates,
bringing expressions down to the original mapped columns.
"""
User, users = self.classes.User, self.tables.users
m = mapper(User, users)
assert User.id.property.columns[0] is users.c.id
assert User.name.property.columns[0] is users.c.name
expr = User.name + "name"
expr2 = sa.select(User.name, users.c.id)
m.add_property("x", column_property(expr))
m.add_property("y", column_property(expr2.scalar_subquery()))
assert User.x.property.columns[0] is not expr
assert User.x.property.columns[0].element.left is users.c.name
# a deannotate needs to clone the base, in case
# the original one referenced annotated elements.
assert User.x.property.columns[0].element.right is not expr.right
assert User.y.property.columns[0] is not expr2
assert (
User.y.property.columns[0].element._raw_columns[0] is users.c.name
)
assert User.y.property.columns[0].element._raw_columns[1] is users.c.id
def test_synonym_replaces_backref(self):
addresses, users, User = (
self.tables.addresses,
self.tables.users,
self.classes.User,
)
assert_calls = []
class Address(object):
def _get_user(self):
assert_calls.append("get")
return self._user
def _set_user(self, user):
assert_calls.append("set")
self._user = user
user = property(_get_user, _set_user)
# synonym is created against nonexistent prop
mapper(Address, addresses, properties={"user": synonym("_user")})
sa.orm.configure_mappers()
# later, backref sets up the prop
mapper(
User,
users,
properties={"addresses": relationship(Address, backref="_user")},
)
sess = create_session()
u1 = sess.query(User).get(7)
u2 = sess.query(User).get(8)
# comparaison ops need to work
a1 = sess.query(Address).filter(Address.user == u1).one()
eq_(a1.id, 1)
a1.user = u2
assert a1.user is u2
eq_(assert_calls, ["set", "get"])
def test_self_ref_synonym(self):
t = Table(
"nodes",
MetaData(),
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("parent_id", Integer, ForeignKey("nodes.id")),
)
class Node(object):
pass
mapper(
Node,
t,
properties={
"_children": relationship(
Node, backref=backref("_parent", remote_side=t.c.id)
),
"children": synonym("_children"),
"parent": synonym("_parent"),
},
)
n1 = Node()
n2 = Node()
n1.children.append(n2)
assert n2.parent is n2._parent is n1
assert n1.children[0] is n1._children[0] is n2
eq_(str(Node.parent == n2), ":param_1 = nodes.parent_id")
def test_reassign_polymorphic_identity_warns(self):
User = self.classes.User
users = self.tables.users
class MyUser(User):
pass
mapper(
User,
users,
polymorphic_on=users.c.name,
polymorphic_identity="user",
)
assert_raises_message(
sa.exc.SAWarning,
"Reassigning polymorphic association for identity 'user'",
mapper,
MyUser,
users,
inherits=User,
polymorphic_identity="user",
)
def test_prop_filters(self):
t = Table(
"person",
MetaData(),
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("type", String(128)),
Column("name", String(128)),
Column("employee_number", Integer),
Column("boss_id", Integer, ForeignKey("person.id")),
Column("vendor_id", Integer),
)
class Person(object):
pass
class Vendor(Person):
pass
class Employee(Person):
pass
class Manager(Employee):
pass
class Hoho(object):
pass
class Lala(object):
pass
class Fub(object):
pass
class Frob(object):
pass
class HasDef(object):
def name(self):
pass
class Empty(object):
pass
mapper(
Empty, t, properties={"empty_id": t.c.id}, include_properties=[]
)
p_m = mapper(
Person,
t,
polymorphic_on=t.c.type,
include_properties=("id", "type", "name"),
)
e_m = mapper(
Employee,
inherits=p_m,
polymorphic_identity="employee",
properties={
"boss": relationship(
Manager, backref=backref("peon"), remote_side=t.c.id
)
},
exclude_properties=("vendor_id",),
)
mapper(
Manager,
inherits=e_m,
polymorphic_identity="manager",
include_properties=("id", "type"),
)
mapper(
Vendor,
inherits=p_m,
polymorphic_identity="vendor",
exclude_properties=("boss_id", "employee_number"),
)
mapper(Hoho, t, include_properties=("id", "type", "name"))
mapper(
Lala,
t,
exclude_properties=("vendor_id", "boss_id"),
column_prefix="p_",
)
mapper(HasDef, t, column_prefix="h_")
mapper(Fub, t, include_properties=(t.c.id, t.c.type))
mapper(
Frob,
t,
column_prefix="f_",
exclude_properties=(t.c.boss_id, "employee_number", t.c.vendor_id),
)
configure_mappers()
def assert_props(cls, want):
have = set([n for n in dir(cls) if not n.startswith("_")])
want = set(want)
eq_(have, want)
def assert_instrumented(cls, want):
have = set([p.key for p in class_mapper(cls).iterate_properties])
want = set(want)
eq_(have, want)
assert_props(
HasDef,
[
"h_boss_id",
"h_employee_number",
"h_id",
"name",
"h_name",
"h_vendor_id",
"h_type",
],
)
assert_props(Person, ["id", "name", "type"])
assert_instrumented(Person, ["id", "name", "type"])
assert_props(
Employee,
["boss", "boss_id", "employee_number", "id", "name", "type"],
)
assert_instrumented(
Employee,
["boss", "boss_id", "employee_number", "id", "name", "type"],
)
assert_props(
Manager,
[
"boss",
"boss_id",
"employee_number",
"peon",
"id",
"name",
"type",
],
)
# 'peon' and 'type' are both explicitly stated properties
assert_instrumented(Manager, ["peon", "type", "id"])
assert_props(Vendor, ["vendor_id", "id", "name", "type"])
assert_props(Hoho, ["id", "name", "type"])
assert_props(Lala, ["p_employee_number", "p_id", "p_name", "p_type"])
assert_props(Fub, ["id", "type"])
assert_props(Frob, ["f_id", "f_type", "f_name"])
# putting the discriminator column in exclude_properties,
# very weird. As of 0.7.4 this re-maps it.
class Foo(Person):
pass
assert_props(Empty, ["empty_id"])
mapper(
Foo,
inherits=Person,
polymorphic_identity="foo",
exclude_properties=("type",),
)
assert hasattr(Foo, "type")
assert Foo.type.property.columns[0] is t.c.type
@testing.provide_metadata
def test_prop_filters_defaults(self):
metadata = self.metadata
t = Table(
"t",
metadata,
Column(
"id",
Integer(),
primary_key=True,
test_needs_autoincrement=True,
),
Column("x", Integer(), nullable=False, server_default="0"),
)
t.create()
class A(object):
pass
mapper(A, t, include_properties=["id"])
s = Session()
s.add(A())
s.commit()
def test_we_dont_call_bool(self):
class NoBoolAllowed(object):
def __bool__(self):
raise Exception("nope")
mapper(NoBoolAllowed, self.tables.users)
u1 = NoBoolAllowed()
u1.name = "some name"
s = Session(testing.db)
s.add(u1)
s.commit()
assert s.query(NoBoolAllowed).get(u1.id) is u1
def test_we_dont_call_eq(self):
class NoEqAllowed(object):
def __eq__(self, other):
raise Exception("nope")
addresses, users = self.tables.addresses, self.tables.users
Address = self.classes.Address
mapper(
NoEqAllowed,
users,
properties={"addresses": relationship(Address, backref="user")},
)
mapper(Address, addresses)
u1 = NoEqAllowed()
u1.name = "some name"
u1.addresses = [Address(id=12, email_address="a1")]
s = Session(testing.db)
s.add(u1)
s.commit()
a1 = s.query(Address).filter_by(id=12).one()
assert a1.user is u1
def test_mapping_to_join_raises(self):
"""Test implicit merging of two cols raises."""
addresses, users, User = (
self.tables.addresses,
self.tables.users,
self.classes.User,
)
usersaddresses = sa.join(
users, addresses, users.c.id == addresses.c.user_id
)
assert_raises_message(
sa.exc.InvalidRequestError,
"Implicitly",
mapper,
User,
usersaddresses,
primary_key=[users.c.id],
)
def test_mapping_to_join_explicit_prop(self):
"""Mapping to a join"""
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
usersaddresses = sa.join(
users, addresses, users.c.id == addresses.c.user_id
)
mapper(
User,
usersaddresses,
primary_key=[users.c.id],
properties={"add_id": addresses.c.id},
)
result = create_session().query(User).order_by(users.c.id).all()
eq_(result, self.static.user_result[:3])
def test_mapping_to_join_exclude_prop(self):
"""Mapping to a join"""
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
usersaddresses = sa.join(
users, addresses, users.c.id == addresses.c.user_id
)
mapper(
User,
usersaddresses,
primary_key=[users.c.id],
exclude_properties=[addresses.c.id],
)
result = create_session().query(User).order_by(users.c.id).all()
eq_(result, self.static.user_result[:3])
def test_mapping_to_join_no_pk(self):
    """Map Address to ``addresses JOIN email_bounces``, explicitly
    merging the two ``id`` columns into one attribute, and verify the
    mapper's per-table primary-key bookkeeping and a two-table INSERT.
    """
    email_bounces, addresses, Address = (
        self.tables.email_bounces,
        self.tables.addresses,
        self.classes.Address,
    )
    m = mapper(
        Address,
        addresses.join(email_bounces),
        # one mapped attribute backed by both columns of the join
        properties={"id": [addresses.c.id, email_bounces.c.id]},
    )
    configure_mappers()
    # only the "addresses" side contributes to _pks_by_table;
    # email_bounces has no PK of its own in this mapping
    assert addresses in m._pks_by_table
    assert email_bounces not in m._pks_by_table

    sess = create_session()
    a = Address(id=10, email_address="e1")
    sess.add(a)
    sess.flush()

    # flush inserted a row into each table of the join
    # (counts include the pre-existing fixture rows)
    eq_(select(func.count("*")).select_from(addresses).scalar(), 6)
    eq_(select(func.count("*")).select_from(email_bounces).scalar(), 5)
def test_mapping_to_outerjoin(self):
    """Mapping to an outer join with a nullable composite primary key.

    Rows with no matching address still map (address_id is None);
    allow_partial_pks defaults to True so the partial PK is accepted.
    """
    users, addresses, User = (
        self.tables.users,
        self.tables.addresses,
        self.classes.User,
    )
    mapper(
        User,
        users.outerjoin(addresses),
        # composite PK spans both sides of the outer join
        primary_key=[users.c.id, addresses.c.id],
        properties=dict(address_id=addresses.c.id),
    )

    session = create_session()
    result = session.query(User).order_by(User.id, User.address_id).all()

    eq_(
        result,
        [
            User(id=7, address_id=1),
            User(id=8, address_id=2),
            User(id=8, address_id=3),
            User(id=8, address_id=4),
            User(id=9, address_id=5),
            # user 10 has no address; the outer join yields NULL
            User(id=10, address_id=None),
        ],
    )
def test_mapping_to_outerjoin_no_partial_pks(self):
"""test the allow_partial_pks=False flag."""
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
mapper(
User,
users.outerjoin(addresses),
allow_partial_pks=False,
primary_key=[users.c.id, addresses.c.id],
properties=dict(address_id=addresses.c.id),
)
session = create_session()
result = session.query(User).order_by(User.id, User.address_id).all()
eq_(
result,
[
User(id=7, address_id=1),
User(id=8, address_id=2),
User(id=8, address_id=3),
User(id=8, address_id=4),
User(id=9, address_id=5),
None,
],
)
def test_scalar_pk_arg(self):
    """The ``primary_key`` mapper argument accepts a list, a bare
    Column, or a tuple — all normalize to the same primary_key tuple."""
    users, Keyword, items, Item, User, keywords = (
        self.tables.users,
        self.classes.Keyword,
        self.tables.items,
        self.classes.Item,
        self.classes.User,
        self.tables.keywords,
    )

    m1 = mapper(Item, items, primary_key=[items.c.id])  # list form
    m2 = mapper(Keyword, keywords, primary_key=keywords.c.id)  # scalar form
    m3 = mapper(User, users, primary_key=(users.c.id,))  # tuple form

    # each normalizes to the identical Column object
    assert m1.primary_key[0] is items.c.id
    assert m2.primary_key[0] is keywords.c.id
    assert m3.primary_key[0] is users.c.id
def test_custom_join(self):
"""select_from totally replace the FROM parameters."""
users, items, order_items, orders, Item, User, Order = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Order,
)
mapper(Item, items)
mapper(
Order,
orders,
properties=dict(items=relationship(Item, order_items)),
)
mapper(User, users, properties=dict(orders=relationship(Order)))
session = create_session()
result = (
session.query(User)
.select_from(users.join(orders).join(order_items).join(items))
.filter(items.c.description == "item 4")
).all()
eq_(result, [self.static.user_result[0]])
# 'Raises a "expression evaluation not supported" error at prepare time
@testing.fails_on("firebird", "FIXME: unknown")
def test_function(self):
"""Mapping to a SELECT statement that has functions in it."""
addresses, users, User = (
self.tables.addresses,
self.tables.users,
self.classes.User,
)
s = (
sa.select(
users,
(users.c.id * 2).label("concat"),
sa.func.count(addresses.c.id).label("count"),
)
.where(users.c.id == addresses.c.user_id)
.group_by(*[c for c in users.c])
.alias("myselect")
)
mapper(User, s)
sess = create_session()
result = sess.query(User).order_by(s.c.id).all()
for idx, total in enumerate((14, 16)):
eq_(result[idx].concat, result[idx].id * 2)
eq_(result[idx].concat, total)
def test_count(self):
    """The count function on Query.

    Exercises count() on a full-entity query, with filters expressed
    via both the mapped attribute and the raw table column, and on a
    single-column (User.id) query.
    """
    User, users = self.classes.User, self.tables.users

    mapper(User, users)
    session = create_session()
    q = session.query(User)
    eq_(q.count(), 4)
    eq_(q.filter(User.id.in_([8, 9])).count(), 2)
    # filtering on the Core column works the same as the ORM attribute
    eq_(q.filter(users.c.id.in_([8, 9])).count(), 2)

    eq_(session.query(User.id).count(), 4)
    eq_(session.query(User.id).filter(User.id.in_((8, 9))).count(), 2)
def test_many_to_many_count(self):
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
mapper(Keyword, keywords)
mapper(
Item,
items,
properties=dict(
keywords=relationship(Keyword, item_keywords, lazy="select")
),
)
session = create_session()
q = (
session.query(Item)
.join("keywords")
.distinct()
.filter(Keyword.name == "red")
)
eq_(q.count(), 2)
def test_override_1(self):
"""Overriding a column raises an error."""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
def go():
mapper(
User,
users,
properties=dict(name=relationship(mapper(Address, addresses))),
)
assert_raises(sa.exc.ArgumentError, go)
def test_override_2(self):
"""exclude_properties cancels the error."""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
mapper(
User,
users,
exclude_properties=["name"],
properties=dict(name=relationship(mapper(Address, addresses))),
)
assert bool(User.name)
def test_override_3(self):
"""The column being named elsewhere also cancels the error,"""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
mapper(
User,
users,
properties=dict(
name=relationship(mapper(Address, addresses)), foo=users.c.name
),
)
def test_synonym(self):
    """End-to-end behavior of synonym(): comparator proxying, query
    row labels, filtering, and read/write through a user-defined
    descriptor (``uname`` -> ``name``), plus synonyms of a
    relationship (``adlist``/``adname`` -> ``addresses``)."""
    users, addresses, Address = (
        self.tables.users,
        self.tables.addresses,
        self.classes.Address,
    )

    # records ("get"/"set", value) calls made through the descriptor
    assert_col = []

    class extendedproperty(property):
        # extra class attribute; must remain reachable via User.uname
        attribute = 123

    class User(object):
        def _get_name(self):
            assert_col.append(("get", self.name))
            return self.name

        def _set_name(self, name):
            assert_col.append(("set", name))
            self.name = name

        uname = extendedproperty(_get_name, _set_name)

    mapper(
        User,
        users,
        properties=dict(
            addresses=relationship(
                mapper(Address, addresses), lazy="select"
            ),
            uname=synonym("name"),
            adlist=synonym("addresses"),
            adname=synonym("addresses"),
        ),
    )

    # ensure the synonym can get at the proxied comparators without
    # an explicit compile
    User.name == "ed"
    User.adname.any()

    assert hasattr(User, "adlist")
    # as of 0.4.2, synonyms always create a property
    assert hasattr(User, "adname")

    # test compile: comparison yields a SQL expression, not a bool
    assert not isinstance(User.uname == "jack", bool)
    assert User.uname.property
    assert User.adlist.property

    sess = create_session()

    # test RowTuple names: the synonym's key labels the result column
    row = sess.query(User.id, User.uname).first()
    assert row.uname == row[1]

    u = sess.query(User).filter(User.uname == "jack").one()

    fixture = self.static.user_address_result[0].addresses
    eq_(u.adlist, fixture)

    addr = sess.query(Address).filter_by(id=fixture[0].id).one()
    u = sess.query(User).filter(User.adname.contains(addr)).one()
    u2 = sess.query(User).filter(User.adlist.contains(addr)).one()

    # both relationship synonyms resolve to the same loaded instance
    assert u is u2

    assert u not in sess.dirty
    # writing through the synonym goes via the descriptor's setter...
    u.uname = "some user name"
    assert len(assert_col) > 0
    eq_(assert_col, [("set", "some user name")])
    eq_(u.uname, "some user name")
    eq_(assert_col, [("set", "some user name"), ("get", "some user name")])
    # ...and updates the underlying mapped attribute, dirtying the object
    eq_(u.name, "some user name")
    assert u in sess.dirty

    # the descriptor's own class attributes are still reachable
    eq_(User.uname.attribute, 123)
def test_synonym_of_synonym(self):
    """A synonym may target another synonym; the chain resolves to the
    underlying column for both querying and attribute access."""
    User = self.classes.User
    users = self.tables.users

    props = {"x": synonym("id"), "y": synonym("x")}
    mapper(User, users, properties=props)

    session = Session()
    loaded = session.query(User).filter(User.y == 8).one()
    eq_(loaded.y, 8)
def test_synonym_get_history(self):
users, User = (self.tables.users, self.classes.User)
mapper(User, users, properties={"x": synonym("id"), "y": synonym("x")})
u1 = User()
eq_(attributes.instance_state(u1).attrs.x.history, (None, None, None))
eq_(attributes.instance_state(u1).attrs.y.history, (None, None, None))
u1.y = 5
eq_(attributes.instance_state(u1).attrs.x.history, ([5], (), ()))
eq_(attributes.instance_state(u1).attrs.y.history, ([5], (), ()))
def test_synonym_nonexistent_attr(self):
# test [ticket:4767].
# synonym points to non-existent attrbute that hasn't been mapped yet.
users = self.tables.users
class User(object):
def _x(self):
return self.id
x = property(_x)
m = mapper(
User,
users,
properties={"x": synonym("some_attr", descriptor=User.x)},
)
# object gracefully handles this condition
assert not hasattr(User.x, "__name__")
assert not hasattr(User.x, "comparator")
m.add_property("some_attr", column_property(users.c.name))
assert not hasattr(User.x, "__name__")
assert hasattr(User.x, "comparator")
def test_synonym_of_non_property_raises(self):
from sqlalchemy.ext.associationproxy import association_proxy
class User(object):
pass
users, Address, addresses = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
)
mapper(
User,
users,
properties={"y": synonym("x"), "addresses": relationship(Address)},
)
mapper(Address, addresses)
User.x = association_proxy("addresses", "email_address")
assert_raises_message(
sa.exc.InvalidRequestError,
r'synonym\(\) attribute "User.x" only supports ORM mapped '
"attributes, got .*AssociationProxy",
getattr,
User.y,
"property",
)
def test_synonym_column_location(self):
users, User = self.tables.users, self.classes.User
def go():
mapper(
User,
users,
properties={"not_name": synonym("_name", map_column=True)},
)
assert_raises_message(
sa.exc.ArgumentError,
(
"Can't compile synonym '_name': no column on table "
"'users' named 'not_name'"
),
go,
)
def test_column_synonyms(self):
"""Synonyms which automatically instrument properties,
set up aliased column, etc."""
addresses, users, Address = (
self.tables.addresses,
self.tables.users,
self.classes.Address,
)
assert_col = []
class User(object):
def _get_name(self):
assert_col.append(("get", self._name))
return self._name
def _set_name(self, name):
assert_col.append(("set", name))
self._name = name
name = property(_get_name, _set_name)
mapper(Address, addresses)
mapper(
User,
users,
properties={
"addresses": relationship(Address, lazy="select"),
"name": synonym("_name", map_column=True),
},
)
# test compile
assert not isinstance(User.name == "jack", bool)
assert hasattr(User, "name")
assert hasattr(User, "_name")
sess = create_session()
u = sess.query(User).filter(User.name == "jack").one()
eq_(u.name, "jack")
u.name = "foo"
eq_(u.name, "foo")
eq_(assert_col, [("get", "jack"), ("set", "foo"), ("get", "foo")])
def test_synonym_map_column_conflict(self):
users, User = self.tables.users, self.classes.User
assert_raises(
sa.exc.ArgumentError,
mapper,
User,
users,
properties=util.OrderedDict(
[
("_user_id", users.c.id),
("id", synonym("_user_id", map_column=True)),
]
),
)
assert_raises(
sa.exc.ArgumentError,
mapper,
User,
users,
properties=util.OrderedDict(
[
("id", synonym("_user_id", map_column=True)),
("_user_id", users.c.id),
]
),
)
def test_reentrant_compile(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
class MyFakeProperty(sa.orm.properties.ColumnProperty):
def post_instrument_class(self, mapper):
super(MyFakeProperty, self).post_instrument_class(mapper)
configure_mappers()
mapper(User, users, properties={"name": MyFakeProperty(users.c.name)})
mapper(Address, addresses)
configure_mappers()
sa.orm.clear_mappers()
class MyFakeProperty(sa.orm.properties.ColumnProperty):
def post_instrument_class(self, mapper):
super(MyFakeProperty, self).post_instrument_class(mapper)
configure_mappers()
mapper(User, users, properties={"name": MyFakeProperty(users.c.name)})
mapper(Address, addresses)
configure_mappers()
def test_reconstructor(self):
    """@reconstructor fires when an instance is loaded from the
    database, but NOT when it is constructed directly."""
    users = self.tables.users

    recon = []

    class User(object):
        @reconstructor
        def reconstruct(self):
            recon.append("go")

    mapper(User, users)

    # plain construction does not invoke the reconstructor
    User()
    eq_(recon, [])
    # loading from the DB does
    create_session().query(User).first()
    eq_(recon, ["go"])
def test_reconstructor_inheritance(self):
users = self.tables.users
recon = []
class A(object):
@reconstructor
def reconstruct(self):
assert isinstance(self, A)
recon.append("A")
class B(A):
@reconstructor
def reconstruct(self):
assert isinstance(self, B)
recon.append("B")
class C(A):
@reconstructor
def reconstruct(self):
assert isinstance(self, C)
recon.append("C")
mapper(
A, users, polymorphic_on=users.c.name, polymorphic_identity="jack"
)
mapper(B, inherits=A, polymorphic_identity="ed")
mapper(C, inherits=A, polymorphic_identity="chuck")
A()
B()
C()
eq_(recon, [])
sess = create_session()
sess.query(A).first()
sess.query(B).first()
sess.query(C).first()
eq_(recon, ["A", "B", "C"])
def test_reconstructor_init(self):
users = self.tables.users
recon = []
class User(object):
@reconstructor
def __init__(self):
recon.append("go")
mapper(User, users)
User()
eq_(recon, ["go"])
recon[:] = []
create_session().query(User).first()
eq_(recon, ["go"])
def test_reconstructor_init_inheritance(self):
users = self.tables.users
recon = []
class A(object):
@reconstructor
def __init__(self):
assert isinstance(self, A)
recon.append("A")
class B(A):
@reconstructor
def __init__(self):
assert isinstance(self, B)
recon.append("B")
class C(A):
@reconstructor
def __init__(self):
assert isinstance(self, C)
recon.append("C")
mapper(
A, users, polymorphic_on=users.c.name, polymorphic_identity="jack"
)
mapper(B, inherits=A, polymorphic_identity="ed")
mapper(C, inherits=A, polymorphic_identity="chuck")
A()
B()
C()
eq_(recon, ["A", "B", "C"])
recon[:] = []
sess = create_session()
sess.query(A).first()
sess.query(B).first()
sess.query(C).first()
eq_(recon, ["A", "B", "C"])
def test_unmapped_reconstructor_inheritance(self):
users = self.tables.users
recon = []
class Base(object):
@reconstructor
def reconstruct(self):
recon.append("go")
class User(Base):
pass
mapper(User, users)
User()
eq_(recon, [])
create_session().query(User).first()
eq_(recon, ["go"])
def test_unmapped_error(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
mapper(Address, addresses)
sa.orm.clear_mappers()
mapper(User, users, properties={"addresses": relationship(Address)})
assert_raises_message(
sa.orm.exc.UnmappedClassError,
"Class 'test.orm._fixtures.Address' is not mapped",
sa.orm.configure_mappers,
)
def test_unmapped_not_type_error(self):
assert_raises_message(
sa.exc.ArgumentError,
"Class object expected, got '5'.",
class_mapper,
5,
)
def test_unmapped_not_type_error_iter_ok(self):
    """class_mapper() rejects a non-class argument even when that
    argument happens to be iterable (a tuple here) — the repr of the
    whole tuple appears in the error message."""
    bad_argument = (5, 6)
    assert_raises_message(
        sa.exc.ArgumentError,
        r"Class object expected, got '\(5, 6\)'.",
        class_mapper,
        bad_argument,
    )
def test_attribute_error_raised_class_mapper(self):
users = self.tables.users
addresses = self.tables.addresses
User = self.classes.User
Address = self.classes.Address
mapper(
User,
users,
properties={
"addresses": relationship(
Address,
primaryjoin=lambda: users.c.id == addresses.wrong.user_id,
)
},
)
mapper(Address, addresses)
assert_raises_message(
AttributeError,
"'Table' object has no attribute 'wrong'",
class_mapper,
Address,
)
def test_key_error_raised_class_mapper(self):
users = self.tables.users
addresses = self.tables.addresses
User = self.classes.User
Address = self.classes.Address
mapper(
User,
users,
properties={
"addresses": relationship(
Address,
primaryjoin=lambda: users.c.id
== addresses.__dict__["wrong"].user_id,
)
},
)
mapper(Address, addresses)
assert_raises_message(KeyError, "wrong", class_mapper, Address)
def test_unmapped_subclass_error_postmap(self):
users = self.tables.users
class Base(object):
pass
class Sub(Base):
pass
mapper(Base, users)
sa.orm.configure_mappers()
# we can create new instances, set attributes.
s = Sub()
s.name = "foo"
eq_(s.name, "foo")
eq_(attributes.get_history(s, "name"), (["foo"], (), ()))
# using it with an ORM operation, raises
assert_raises(
sa.orm.exc.UnmappedClassError, create_session().add, Sub()
)
def test_unmapped_subclass_error_premap(self):
users = self.tables.users
class Base(object):
pass
mapper(Base, users)
class Sub(Base):
pass
sa.orm.configure_mappers()
# we can create new instances, set attributes.
s = Sub()
s.name = "foo"
eq_(s.name, "foo")
eq_(attributes.get_history(s, "name"), (["foo"], (), ()))
# using it with an ORM operation, raises
assert_raises(
sa.orm.exc.UnmappedClassError, create_session().add, Sub()
)
def test_oldstyle_mixin(self):
    """Classes mixing old-style and new-style bases are mappable
    regardless of the order of the bases (Python 2 era concern)."""
    users = self.tables.users

    class OldStyle:
        pass

    class NewStyle(object):
        pass

    class A(NewStyle, OldStyle):
        pass

    mapper(A, users)

    # reversed base order must also work
    class B(OldStyle, NewStyle):
        pass

    mapper(B, users)
class DocumentTest(fixtures.TestBase):
    """Tests that Column(doc=...) and the ``doc`` argument of ORM
    constructs propagate to the mapped attributes' ``__doc__``."""

    def test_doc_propagate(self):
        metadata = MetaData()
        t1 = Table(
            "t1",
            metadata,
            Column(
                "col1", Integer, primary_key=True, doc="primary key column"
            ),
            Column("col2", String, doc="data col"),
            Column("col3", String, doc="data col 2"),
            Column("col4", String, doc="data col 3"),
            Column("col5", String),  # deliberately no doc
        )
        t2 = Table(
            "t2",
            metadata,
            Column(
                "col1", Integer, primary_key=True, doc="primary key column"
            ),
            Column("col2", String, doc="data col"),
            Column(
                "col3",
                Integer,
                ForeignKey("t1.col1"),
                doc="foreign key to t1.col1",
            ),
        )

        class Foo(object):
            pass

        class Bar(object):
            pass

        mapper(
            Foo,
            t1,
            properties={
                "bars": relationship(
                    Bar,
                    doc="bar relationship",
                    backref=backref("foo", doc="foo relationship"),
                ),
                "foober": column_property(t1.c.col3, doc="alternate data col"),
                "hoho": synonym("col4", doc="syn of col4"),
            },
        )
        mapper(Bar, t2)
        configure_mappers()

        # plain columns inherit the Column doc; undocumented column -> None
        eq_(Foo.col1.__doc__, "primary key column")
        eq_(Foo.col2.__doc__, "data col")
        eq_(Foo.col5.__doc__, None)
        # explicit doc= on ORM constructs wins
        eq_(Foo.foober.__doc__, "alternate data col")
        eq_(Foo.bars.__doc__, "bar relationship")
        eq_(Foo.hoho.__doc__, "syn of col4")
        eq_(Bar.col1.__doc__, "primary key column")
        # backref-created attribute also carries its doc
        eq_(Bar.foo.__doc__, "foo relationship")
class ORMLoggingTest(_fixtures.FixtureTest):
    """Tests the log messages emitted by the "sqlalchemy.orm" logger,
    capturing them in a BufferingHandler around each test."""

    def setup(self):
        # buffer up to 100 records from the ORM logger
        self.buf = logging.handlers.BufferingHandler(100)
        for log in [logging.getLogger("sqlalchemy.orm")]:
            log.addHandler(self.buf)

    def teardown(self):
        for log in [logging.getLogger("sqlalchemy.orm")]:
            log.removeHandler(self.buf)

    def _current_messages(self):
        # rendered text of every captured record
        return [b.getMessage() for b in self.buf.buffer]

    def test_mapper_info_aliased(self):
        """A mapper against an anonymous alias identifies itself in log
        messages as "(User|%(<id> anon)s)"."""
        User, users = self.classes.User, self.tables.users
        tb = users.select().alias()
        mapper(User, tb)

        s = Session()
        s.add(User(name="ed"))
        s.commit()

        for msg in self._current_messages():
            # "%%" renders as a literal "%" after the % formatting
            assert msg.startswith("(User|%%(%d anon)s) " % id(tb))
class OptionsTest(_fixtures.FixtureTest):
def test_synonym_options(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
mapper(
User,
users,
properties=dict(
addresses=relationship(
mapper(Address, addresses),
lazy="select",
order_by=addresses.c.id,
),
adlist=synonym("addresses"),
),
)
def go():
sess = create_session()
u = (
sess.query(User)
.order_by(User.id)
.options(sa.orm.joinedload("adlist"))
.filter_by(name="jack")
).one()
eq_(u.adlist, [self.static.user_address_result[0].addresses[0]])
self.assert_sql_count(testing.db, go, 1)
def test_eager_options(self):
    """A lazy relationship can be upgraded to an eager relationship.

    The relationship defaults to lazy loading; applying
    joinedload("addresses") per-query loads everything in the initial
    SELECT, so the subsequent assertions fire zero SQL.
    """
    Address, addresses, users, User = (
        self.classes.Address,
        self.tables.addresses,
        self.tables.users,
        self.classes.User,
    )
    mapper(
        User,
        users,
        properties=dict(
            addresses=relationship(
                mapper(Address, addresses), order_by=addresses.c.id
            )
        ),
    )

    sess = create_session()
    result = (
        sess.query(User)
        .order_by(User.id)
        .options(sa.orm.joinedload("addresses"))
    ).all()

    def go():
        eq_(result, self.static.user_address_result)

    # everything already loaded: comparing results emits no SQL
    self.sql_count_(0, go)
def test_eager_options_with_limit(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
mapper(
User,
users,
properties=dict(
addresses=relationship(
mapper(Address, addresses), lazy="select"
)
),
)
sess = create_session()
u = (
sess.query(User)
.options(sa.orm.joinedload("addresses"))
.filter_by(id=8)
).one()
def go():
eq_(u.id, 8)
eq_(len(u.addresses), 3)
self.sql_count_(0, go)
sess.expunge_all()
u = sess.query(User).filter_by(id=8).one()
eq_(u.id, 8)
eq_(len(u.addresses), 3)
def test_lazy_options_with_limit(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
mapper(
User,
users,
properties=dict(
addresses=relationship(
mapper(Address, addresses), lazy="joined"
)
),
)
sess = create_session()
u = (
sess.query(User)
.options(sa.orm.lazyload("addresses"))
.filter_by(id=8)
).one()
def go():
eq_(u.id, 8)
eq_(len(u.addresses), 3)
self.sql_count_(1, go)
def test_eager_degrade(self):
"""An eager relationship automatically degrades to a lazy relationship
if eager columns are not available"""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
mapper(
User,
users,
properties=dict(
addresses=relationship(
mapper(Address, addresses),
lazy="joined",
order_by=addresses.c.id,
)
),
)
sess = create_session()
# first test straight eager load, 1 statement
def go():
result = sess.query(User).order_by(User.id).all()
eq_(result, self.static.user_address_result)
self.sql_count_(1, go)
sess.expunge_all()
# then select just from users. run it into instances.
# then assert the data, which will launch 3 more lazy loads
# (previous users in session fell out of scope and were removed from
# session's identity map)
r = users.select().order_by(users.c.id).execute()
ctx = sess.query(User)._compile_context()
def go():
result = list(sess.query(User).instances(r, ctx))
eq_(result, self.static.user_address_result)
self.sql_count_(4, go)
def test_eager_degrade_deep(self):
(
users,
Keyword,
items,
order_items,
orders,
Item,
User,
Address,
keywords,
item_keywords,
Order,
addresses,
) = (
self.tables.users,
self.classes.Keyword,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.tables.keywords,
self.tables.item_keywords,
self.classes.Order,
self.tables.addresses,
)
# test with a deeper set of eager loads. when we first load the three
# users, they will have no addresses or orders. the number of lazy
# loads when traversing the whole thing will be three for the
# addresses and three for the orders.
mapper(Address, addresses)
mapper(Keyword, keywords)
mapper(
Item,
items,
properties=dict(
keywords=relationship(
Keyword,
secondary=item_keywords,
lazy="joined",
order_by=item_keywords.c.keyword_id,
)
),
)
mapper(
Order,
orders,
properties=dict(
items=relationship(
Item,
secondary=order_items,
lazy="joined",
order_by=order_items.c.item_id,
)
),
)
mapper(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="joined", order_by=addresses.c.id
),
orders=relationship(
Order, lazy="joined", order_by=orders.c.id
),
),
)
sess = create_session()
# first test straight eager load, 1 statement
def go():
result = sess.query(User).order_by(User.id).all()
eq_(result, self.static.user_all_result)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
# then select just from users. run it into instances.
# then assert the data, which will launch 6 more lazy loads
r = users.select().execute()
ctx = sess.query(User)._compile_context()
def go():
result = list(sess.query(User).instances(r, ctx))
eq_(result, self.static.user_all_result)
self.assert_sql_count(testing.db, go, 6)
def test_lazy_options(self):
"""An eager relationship can be upgraded to a lazy relationship."""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
mapper(
User,
users,
properties=dict(
addresses=relationship(
mapper(Address, addresses), lazy="joined"
)
),
)
sess = create_session()
result = (
sess.query(User)
.order_by(User.id)
.options(sa.orm.lazyload("addresses"))
).all()
def go():
eq_(result, self.static.user_address_result)
self.sql_count_(4, go)
def test_option_propagate(self):
users, items, order_items, Order, Item, User, orders = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.classes.Order,
self.classes.Item,
self.classes.User,
self.tables.orders,
)
mapper(User, users, properties=dict(orders=relationship(Order)))
mapper(
Order,
orders,
properties=dict(items=relationship(Item, secondary=order_items)),
)
mapper(Item, items)
sess = create_session()
oalias = aliased(Order)
opt1 = sa.orm.joinedload(User.orders, Order.items)
opt2 = sa.orm.contains_eager(User.orders, Order.items, alias=oalias)
u1 = (
sess.query(User)
.join(oalias, User.orders)
.options(opt1, opt2)
.first()
)
ustate = attributes.instance_state(u1)
assert opt1 in ustate.load_options
assert opt2 not in ustate.load_options
class DeepOptionsTest(_fixtures.FixtureTest):
@classmethod
def setup_mappers(cls):
(
users,
Keyword,
items,
order_items,
Order,
Item,
User,
keywords,
item_keywords,
orders,
) = (
cls.tables.users,
cls.classes.Keyword,
cls.tables.items,
cls.tables.order_items,
cls.classes.Order,
cls.classes.Item,
cls.classes.User,
cls.tables.keywords,
cls.tables.item_keywords,
cls.tables.orders,
)
mapper(Keyword, keywords)
mapper(
Item,
items,
properties=dict(
keywords=relationship(
Keyword, item_keywords, order_by=item_keywords.c.item_id
)
),
)
mapper(
Order,
orders,
properties=dict(
items=relationship(Item, order_items, order_by=items.c.id)
),
)
mapper(
User,
users,
properties=dict(orders=relationship(Order, order_by=orders.c.id)),
)
def test_deep_options_1(self):
    """Baseline: with no loader options, traversing the three-level
    orders -> items -> keywords chain lazy-loads each level (3 SELECTs)."""
    User = self.classes.User

    sess = create_session()

    # joinedload nothing.
    u = sess.query(User).order_by(User.id).all()

    def go():
        u[0].orders[1].items[0].keywords[1]

    self.assert_sql_count(testing.db, go, 3)
def test_deep_options_2(self):
"""test (joined|subquery)load_all() options"""
User = self.classes.User
sess = create_session()
result = (
sess.query(User)
.order_by(User.id)
.options(
sa.orm.joinedload("orders")
.joinedload("items")
.joinedload("keywords")
)
).all()
def go():
result[0].orders[1].items[0].keywords[1]
self.sql_count_(0, go)
sess = create_session()
result = (
sess.query(User).options(
sa.orm.subqueryload("orders")
.subqueryload("items")
.subqueryload("keywords")
)
).all()
def go():
result[0].orders[1].items[0].keywords[1]
self.sql_count_(0, go)
def test_deep_options_3(self):
User = self.classes.User
sess = create_session()
# same thing, with separate options calls
q2 = (
sess.query(User)
.order_by(User.id)
.options(sa.orm.joinedload("orders"))
.options(sa.orm.joinedload("orders.items"))
.options(sa.orm.joinedload("orders.items.keywords"))
)
u = q2.all()
def go():
u[0].orders[1].items[0].keywords[1]
self.sql_count_(0, go)
def test_deep_options_4(self):
Item, User, Order = (
self.classes.Item,
self.classes.User,
self.classes.Order,
)
sess = create_session()
assert_raises_message(
sa.exc.ArgumentError,
'Mapped attribute "Order.items" does not apply to any of the '
"root entities in this query, e.g. mapped class User->users. "
"Please specify the full path from one of the root entities "
"to the target attribute.",
sess.query(User)
.options(sa.orm.joinedload(Order.items))
._compile_context,
)
# joinedload "keywords" on items. it will lazy load "orders", then
# lazy load the "items" on the order, but on "items" it will eager
# load the "keywords"
q3 = (
sess.query(User)
.order_by(User.id)
.options(sa.orm.joinedload("orders.items.keywords"))
)
u = q3.all()
def go():
u[0].orders[1].items[0].keywords[1]
self.sql_count_(2, go)
sess = create_session()
q3 = (
sess.query(User)
.order_by(User.id)
.options(
sa.orm.joinedload(User.orders, Order.items, Item.keywords)
)
)
u = q3.all()
def go():
u[0].orders[1].items[0].keywords[1]
self.sql_count_(2, go)
class ComparatorFactoryTest(_fixtures.FixtureTest, AssertsCompiledSQL):
def test_kwarg_accepted(self):
    """Every property-creating ORM construct accepts the
    ``comparator_factory`` keyword argument without raising."""
    users, Address = self.tables.users, self.classes.Address

    class DummyComposite(object):
        def __init__(self, x, y):
            pass

    from sqlalchemy.orm.interfaces import PropComparator

    class MyFactory(PropComparator):
        pass

    # (constructor, *positional args) for each construct under test
    for args in (
        (column_property, users.c.name),
        (deferred, users.c.name),
        (synonym, "name"),
        (composite, DummyComposite, users.c.id, users.c.name),
        (relationship, Address),
        (backref, "address"),
        (dynamic_loader, Address),
    ):
        fn = args[0]
        args = args[1:]
        # just verifying the kwarg is accepted; result unused
        fn(comparator_factory=MyFactory, *args)
def test_column(self):
User, users = self.classes.User, self.tables.users
from sqlalchemy.orm.properties import ColumnProperty
class MyFactory(ColumnProperty.Comparator):
__hash__ = None
def __eq__(self, other):
return func.foobar(self.__clause_element__()) == func.foobar(
other
)
mapper(
User,
users,
properties={
"name": column_property(
users.c.name, comparator_factory=MyFactory
)
},
)
self.assert_compile(
User.name == "ed",
"foobar(users.name) = foobar(:foobar_1)",
dialect=default.DefaultDialect(),
)
self.assert_compile(
aliased(User).name == "ed",
"foobar(users_1.name) = foobar(:foobar_1)",
dialect=default.DefaultDialect(),
)
def test_synonym(self):
users, User = self.tables.users, self.classes.User
from sqlalchemy.orm.properties import ColumnProperty
class MyFactory(ColumnProperty.Comparator):
__hash__ = None
def __eq__(self, other):
return func.foobar(self.__clause_element__()) == func.foobar(
other
)
mapper(
User,
users,
properties={
"name": synonym(
"_name", map_column=True, comparator_factory=MyFactory
)
},
)
self.assert_compile(
User.name == "ed",
"foobar(users.name) = foobar(:foobar_1)",
dialect=default.DefaultDialect(),
)
self.assert_compile(
aliased(User).name == "ed",
"foobar(users_1.name) = foobar(:foobar_1)",
dialect=default.DefaultDialect(),
)
def test_relationship(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
from sqlalchemy.orm.relationships import RelationshipProperty
# NOTE: this API changed in 0.8, previously __clause_element__()
# gave the parent selecatable, now it gives the
# primaryjoin/secondaryjoin
class MyFactory(RelationshipProperty.Comparator):
__hash__ = None
def __eq__(self, other):
return func.foobar(
self._source_selectable().c.user_id
) == func.foobar(other.id)
class MyFactory2(RelationshipProperty.Comparator):
__hash__ = None
def __eq__(self, other):
return func.foobar(
self._source_selectable().c.id
) == func.foobar(other.user_id)
mapper(User, users)
mapper(
Address,
addresses,
properties={
"user": relationship(
User,
comparator_factory=MyFactory,
backref=backref(
"addresses", comparator_factory=MyFactory2
),
)
},
)
# these are kind of nonsensical tests.
self.assert_compile(
Address.user == User(id=5),
"foobar(addresses.user_id) = foobar(:foobar_1)",
dialect=default.DefaultDialect(),
)
self.assert_compile(
User.addresses == Address(id=5, user_id=7),
"foobar(users.id) = foobar(:foobar_1)",
dialect=default.DefaultDialect(),
)
self.assert_compile(
aliased(Address).user == User(id=5),
"foobar(addresses_1.user_id) = foobar(:foobar_1)",
dialect=default.DefaultDialect(),
)
self.assert_compile(
aliased(User).addresses == Address(id=5, user_id=7),
"foobar(users_1.id) = foobar(:foobar_1)",
dialect=default.DefaultDialect(),
)
class SecondaryOptionsTest(fixtures.MappedTest):
    """test that the contains_eager() option doesn't bleed
    into a secondary load."""
    # fixture data is inserted once for the whole class and never deleted
    run_inserts = "once"
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        # "base" is the polymorphic root; child1/child2 are joined-table
        # subtypes, and "related" is a one-to-one rider keyed to base.id.
        Table(
            "base",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("type", String(50), nullable=False),
        )
        Table(
            "child1",
            metadata,
            Column("id", Integer, ForeignKey("base.id"), primary_key=True),
            Column(
                "child2id", Integer, ForeignKey("child2.id"), nullable=False
            ),
        )
        Table(
            "child2",
            metadata,
            Column("id", Integer, ForeignKey("base.id"), primary_key=True),
        )
        Table(
            "related",
            metadata,
            Column("id", Integer, ForeignKey("base.id"), primary_key=True),
        )
    @classmethod
    def setup_mappers(cls):
        child1, child2, base, related = (
            cls.tables.child1,
            cls.tables.child2,
            cls.tables.base,
            cls.tables.related,
        )
        class Base(cls.Comparable):
            pass
        class Child1(Base):
            pass
        class Child2(Base):
            pass
        class Related(cls.Comparable):
            pass
        mapper(
            Base,
            base,
            polymorphic_on=base.c.type,
            properties={"related": relationship(Related, uselist=False)},
        )
        # Child1.child2 joins on base.id (not child2.id) via an explicit
        # primaryjoin, so its lazy load emits a JOIN against "base".
        mapper(
            Child1,
            child1,
            inherits=Base,
            polymorphic_identity="child1",
            properties={
                "child2": relationship(
                    Child2,
                    primaryjoin=child1.c.child2id == base.c.id,
                    foreign_keys=child1.c.child2id,
                )
            },
        )
        mapper(Child2, child2, inherits=Base, polymorphic_identity="child2")
        mapper(Related, related)
    @classmethod
    def insert_data(cls, connection):
        child1, child2, base, related = (
            cls.tables.child1,
            cls.tables.child2,
            cls.tables.base,
            cls.tables.related,
        )
        # ids 1-3 are Child1 rows, 4-6 are Child2 rows; each Child1 points
        # at one Child2, and every base row has a matching "related" row.
        connection.execute(
            base.insert(),
            [
                {"id": 1, "type": "child1"},
                {"id": 2, "type": "child1"},
                {"id": 3, "type": "child1"},
                {"id": 4, "type": "child2"},
                {"id": 5, "type": "child2"},
                {"id": 6, "type": "child2"},
            ],
        )
        connection.execute(child2.insert(), [{"id": 4}, {"id": 5}, {"id": 6}])
        connection.execute(
            child1.insert(),
            [
                {"id": 1, "child2id": 4},
                {"id": 2, "child2id": 5},
                {"id": 3, "child2id": 6},
            ],
        )
        connection.execute(
            related.insert(),
            [{"id": 1}, {"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}, {"id": 6}],
        )
    def test_contains_eager(self):
        # contains_eager() applies only to the primary query; the later
        # lazy load of .child2 must be a plain secondary SELECT.
        Child1, Related = self.classes.Child1, self.classes.Related
        sess = create_session()
        child1s = (
            sess.query(Child1)
            .join(Child1.related)
            .options(sa.orm.contains_eager(Child1.related))
            .order_by(Child1.id)
        )
        def go():
            eq_(
                child1s.all(),
                [
                    Child1(id=1, related=Related(id=1)),
                    Child1(id=2, related=Related(id=2)),
                    Child1(id=3, related=Related(id=3)),
                ],
            )
        self.assert_sql_count(testing.db, go, 1)
        c1 = child1s[0]
        # secondary load must not carry the eager JOIN to "related"
        self.assert_sql_execution(
            testing.db,
            lambda: c1.child2,
            CompiledSQL(
                "SELECT child2.id AS child2_id, base.id AS base_id, "
                "base.type AS base_type "
                "FROM base JOIN child2 ON base.id = child2.id "
                "WHERE base.id = :param_1",
                {"param_1": 4},
            ),
        )
    def test_joinedload_on_other(self):
        # joinedload() of a *different* relationship must likewise not
        # affect the secondary load of .child2.
        Child1, Related = self.classes.Child1, self.classes.Related
        sess = create_session()
        child1s = (
            sess.query(Child1)
            .join(Child1.related)
            .options(sa.orm.joinedload(Child1.related))
            .order_by(Child1.id)
        )
        def go():
            eq_(
                child1s.all(),
                [
                    Child1(id=1, related=Related(id=1)),
                    Child1(id=2, related=Related(id=2)),
                    Child1(id=3, related=Related(id=3)),
                ],
            )
        self.assert_sql_count(testing.db, go, 1)
        c1 = child1s[0]
        self.assert_sql_execution(
            testing.db,
            lambda: c1.child2,
            CompiledSQL(
                "SELECT child2.id AS child2_id, base.id AS base_id, "
                "base.type AS base_type "
                "FROM base JOIN child2 ON base.id = child2.id "
                "WHERE base.id = :param_1",
                {"param_1": 4},
            ),
        )
    def test_joinedload_on_same(self):
        # joinedload() chained onto child2 itself *should* carry through
        # to the secondary load of .child2 (LEFT OUTER JOIN to related).
        Child1, Child2, Related = (
            self.classes.Child1,
            self.classes.Child2,
            self.classes.Related,
        )
        sess = create_session()
        child1s = (
            sess.query(Child1)
            .join(Child1.related)
            .options(sa.orm.joinedload(Child1.child2, Child2.related))
            .order_by(Child1.id)
        )
        def go():
            eq_(
                child1s.all(),
                [
                    Child1(id=1, related=Related(id=1)),
                    Child1(id=2, related=Related(id=2)),
                    Child1(id=3, related=Related(id=3)),
                ],
            )
        # NOTE(review): 4 statements expected here — presumably the main
        # query plus per-row loads; confirm against the ORM's lazy-load
        # behavior for this mapping.
        self.assert_sql_count(testing.db, go, 4)
        c1 = child1s[0]
        # this *does* joinedload
        self.assert_sql_execution(
            testing.db,
            lambda: c1.child2,
            CompiledSQL(
                "SELECT child2.id AS child2_id, base.id AS base_id, "
                "base.type AS base_type, "
                "related_1.id AS related_1_id FROM base JOIN child2 "
                "ON base.id = child2.id "
                "LEFT OUTER JOIN related AS related_1 "
                "ON base.id = related_1.id WHERE base.id = :param_1",
                {"param_1": 4},
            ),
        )
class DeferredPopulationTest(fixtures.MappedTest):
    """Verify that undefer('name') actually populates the deferred column,
    regardless of which queries or eager loads ran beforehand."""
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "thing",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("name", String(20)),
        )
        Table(
            "human",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("thing_id", Integer, ForeignKey("thing.id")),
            Column("name", String(20)),
        )
    @classmethod
    def setup_mappers(cls):
        thing, human = cls.tables.thing, cls.tables.human
        class Human(cls.Basic):
            pass
        class Thing(cls.Basic):
            pass
        mapper(Human, human, properties={"thing": relationship(Thing)})
        # Thing.name is deferred by default; each test undefers it per-query.
        mapper(Thing, thing, properties={"name": deferred(thing.c.name)})
    @classmethod
    def insert_data(cls, connection):
        thing, human = cls.tables.thing, cls.tables.human
        connection.execute(thing.insert(), [{"id": 1, "name": "Chair"}])
        connection.execute(
            human.insert(), [{"id": 1, "thing_id": 1, "name": "Clark Kent"}]
        )
    def _test(self, thing):
        # The deferred column must already be present in the instance's
        # dict, i.e. no further lazy load is pending on attribute access.
        assert "name" in attributes.instance_state(thing).dict
    def test_no_previous_query(self):
        Thing = self.classes.Thing
        session = create_session()
        thing = session.query(Thing).options(sa.orm.undefer("name")).first()
        self._test(thing)
    def test_query_twice_with_clear(self):
        Thing = self.classes.Thing
        session = create_session()
        result = session.query(Thing).first()  # noqa
        session.expunge_all()
        thing = session.query(Thing).options(sa.orm.undefer("name")).first()
        self._test(thing)
    def test_query_twice_no_clear(self):
        # Same as above but the first result stays in the identity map.
        Thing = self.classes.Thing
        session = create_session()
        result = session.query(Thing).first()  # noqa
        thing = session.query(Thing).options(sa.orm.undefer("name")).first()
        self._test(thing)
    def test_joinedload_with_clear(self):
        Thing, Human = self.classes.Thing, self.classes.Human
        session = create_session()
        human = (  # noqa
            session.query(Human).options(sa.orm.joinedload("thing")).first()
        )
        session.expunge_all()
        thing = session.query(Thing).options(sa.orm.undefer("name")).first()
        self._test(thing)
    def test_joinedload_no_clear(self):
        # The Thing loaded eagerly (deferred name) is the same identity as
        # the one queried afterwards with undefer().
        Thing, Human = self.classes.Thing, self.classes.Human
        session = create_session()
        human = (  # noqa
            session.query(Human).options(sa.orm.joinedload("thing")).first()
        )
        thing = session.query(Thing).options(sa.orm.undefer("name")).first()
        self._test(thing)
    def test_join_with_clear(self):
        Thing, Human = self.classes.Thing, self.classes.Human
        session = create_session()
        result = (  # noqa
            session.query(Human).add_entity(Thing).join("thing").first()
        )
        session.expunge_all()
        thing = session.query(Thing).options(sa.orm.undefer("name")).first()
        self._test(thing)
    def test_join_no_clear(self):
        Thing, Human = self.classes.Thing, self.classes.Human
        session = create_session()
        result = (  # noqa
            session.query(Human).add_entity(Thing).join("thing").first()
        )
        thing = session.query(Thing).options(sa.orm.undefer("name")).first()
        self._test(thing)
class NoLoadTest(_fixtures.FixtureTest):
    """Behavior of lazy='noload' relationships and the noload() option:
    the attribute resolves to an empty collection / None with no SQL."""
    run_inserts = "once"
    run_deletes = None
    def test_o2m_noload(self):
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )
        m = mapper(
            User,
            users,
            properties=dict(
                addresses=relationship(
                    mapper(Address, addresses), lazy="noload"
                )
            ),
        )
        q = create_session().query(m)
        result = [None]
        def go():
            x = q.filter(User.id == 7).all()
            x[0].addresses
            result[0] = x
        # exactly one statement: accessing .addresses emits no SQL
        self.assert_sql_count(testing.db, go, 1)
        # noload yields an empty collection even though rows exist
        self.assert_result(
            result[0], User, {"id": 7, "addresses": (Address, [])}
        )
    def test_upgrade_o2m_noload_lazyload_option(self):
        # lazyload() option overrides the mapper-level "noload", restoring
        # the normal lazy load (hence 2 statements and a populated list).
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )
        m = mapper(
            User,
            users,
            properties=dict(
                addresses=relationship(
                    mapper(Address, addresses), lazy="noload"
                )
            ),
        )
        q = create_session().query(m).options(sa.orm.lazyload("addresses"))
        result = [None]
        def go():
            x = q.filter(User.id == 7).all()
            x[0].addresses
            result[0] = x
        self.sql_count_(2, go)
        self.assert_result(
            result[0], User, {"id": 7, "addresses": (Address, [{"id": 1}])}
        )
    def test_m2o_noload_option(self):
        # noload() option on a many-to-one: attribute reads as None, no SQL.
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )
        mapper(Address, addresses, properties={"user": relationship(User)})
        mapper(User, users)
        s = Session()
        a1 = (
            s.query(Address)
            .filter_by(id=1)
            .options(sa.orm.noload("user"))
            .first()
        )
        def go():
            eq_(a1.user, None)
        self.sql_count_(0, go)
class RaiseLoadTest(_fixtures.FixtureTest):
    """Behavior of lazy='raise' / raiseload(): attribute access raises
    InvalidRequestError instead of lazily emitting SQL; the 'raise_on_sql'
    variant only raises when an actual SELECT would be needed."""
    run_inserts = "once"
    run_deletes = None
    def test_o2m_raiseload_mapper(self):
        # lazy="raise" configured directly on the relationship()
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )
        mapper(Address, addresses)
        mapper(
            User,
            users,
            properties=dict(addresses=relationship(Address, lazy="raise")),
        )
        q = create_session().query(User)
        result = [None]
        def go():
            x = q.filter(User.id == 7).all()
            assert_raises_message(
                sa.exc.InvalidRequestError,
                "'User.addresses' is not available due to lazy='raise'",
                lambda: x[0].addresses,
            )
            result[0] = x
        self.assert_sql_count(testing.db, go, 1)
        self.assert_result(result[0], User, {"id": 7})
    def test_o2m_raiseload_option(self):
        # same behavior via the per-query raiseload() option
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )
        mapper(Address, addresses)
        mapper(User, users, properties=dict(addresses=relationship(Address)))
        q = create_session().query(User)
        result = [None]
        def go():
            x = (
                q.options(sa.orm.raiseload(User.addresses))
                .filter(User.id == 7)
                .all()
            )
            assert_raises_message(
                sa.exc.InvalidRequestError,
                "'User.addresses' is not available due to lazy='raise'",
                lambda: x[0].addresses,
            )
            result[0] = x
        self.assert_sql_count(testing.db, go, 1)
        self.assert_result(result[0], User, {"id": 7})
    def test_o2m_raiseload_lazyload_option(self):
        # lazyload() option overrides mapper-level lazy="raise", restoring
        # the normal lazy load.
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )
        mapper(Address, addresses)
        mapper(
            User,
            users,
            properties=dict(addresses=relationship(Address, lazy="raise")),
        )
        q = create_session().query(User).options(sa.orm.lazyload("addresses"))
        result = [None]
        def go():
            x = q.filter(User.id == 7).all()
            x[0].addresses
            result[0] = x
        self.sql_count_(2, go)
        self.assert_result(
            result[0], User, {"id": 7, "addresses": (Address, [{"id": 1}])}
        )
    def test_m2o_raiseload_option(self):
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )
        mapper(Address, addresses, properties={"user": relationship(User)})
        mapper(User, users)
        s = Session()
        a1 = (
            s.query(Address)
            .filter_by(id=1)
            .options(sa.orm.raiseload("user"))
            .first()
        )
        def go():
            assert_raises_message(
                sa.exc.InvalidRequestError,
                "'Address.user' is not available due to lazy='raise'",
                lambda: a1.user,
            )
        self.sql_count_(0, go)
    def test_m2o_raise_on_sql_option(self):
        # raise_on_sql: raises only while a SELECT would be required;
        # once the target is in the identity map, access succeeds.
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )
        mapper(Address, addresses, properties={"user": relationship(User)})
        mapper(User, users)
        s = Session()
        a1 = (
            s.query(Address)
            .filter_by(id=1)
            .options(sa.orm.raiseload("user", sql_only=True))
            .first()
        )
        def go():
            assert_raises_message(
                sa.exc.InvalidRequestError,
                "'Address.user' is not available due to lazy='raise_on_sql'",
                lambda: a1.user,
            )
        self.sql_count_(0, go)
        s.close()
        # with the User already loaded, the "get" can be satisfied from
        # the identity map — no SQL, no raise
        u1 = s.query(User).first()
        a1 = (
            s.query(Address)
            .filter_by(id=1)
            .options(sa.orm.raiseload("user", sql_only=True))
            .first()
        )
        assert "user" not in a1.__dict__
        is_(a1.user, u1)
    def test_m2o_non_use_get_raise_on_sql_option(self):
        # the custom primaryjoin prevents the identity-map "get"
        # optimization, so raise_on_sql should fire even with u1 loaded
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )
        mapper(
            Address,
            addresses,
            properties={
                "user": relationship(
                    User,
                    primaryjoin=sa.and_(
                        addresses.c.user_id == users.c.id,
                        users.c.name != None,  # noqa
                    ),
                )
            },
        )
        mapper(User, users)
        s = Session()
        u1 = s.query(User).first()  # noqa
        a1 = (
            s.query(Address)
            .filter_by(id=1)
            .options(sa.orm.raiseload("user", sql_only=True))
            .first()
        )
        def go():
            assert_raises_message(
                sa.exc.InvalidRequestError,
                "'Address.user' is not available due to lazy='raise_on_sql'",
                lambda: a1.user,
            )
        # NOTE(review): go() is defined but never invoked here, so the
        # raise is never actually asserted — likely a missing go() call.
    def test_raiseload_wildcard_all_classes_option(self):
        # raiseload("*") applies to every relationship on every entity
        # in the query; plain column attributes are unaffected.
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )
        mapper(Address, addresses)
        mapper(
            User,
            users,
            properties=dict(addresses=relationship(Address, backref="user")),
        )
        q = (
            create_session()
            .query(User, Address)
            .join(Address, User.id == Address.user_id)
        )
        u1, a1 = q.options(sa.orm.raiseload("*")).filter(User.id == 7).first()
        assert_raises_message(
            sa.exc.InvalidRequestError,
            "'User.addresses' is not available due to lazy='raise'",
            lambda: u1.addresses,
        )
        assert_raises_message(
            sa.exc.InvalidRequestError,
            "'Address.user' is not available due to lazy='raise'",
            lambda: a1.user,
        )
        # columns still work
        eq_(u1.id, 7)
        eq_(a1.id, 1)
    def test_raiseload_wildcard_specific_class_option(self):
        # Load(Address).raiseload("*") scopes the wildcard to Address only;
        # User relationships still lazy-load normally.
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )
        mapper(Address, addresses)
        mapper(
            User,
            users,
            properties=dict(addresses=relationship(Address, backref="user")),
        )
        q = (
            create_session()
            .query(User, Address)
            .join(Address, User.id == Address.user_id)
        )
        u1, a1 = (
            q.options(sa.orm.Load(Address).raiseload("*"))
            .filter(User.id == 7)
            .first()
        )
        # User doesn't raise
        def go():
            eq_(u1.addresses, [a1])
        self.assert_sql_count(testing.db, go, 1)
        # Address does
        assert_raises_message(
            sa.exc.InvalidRequestError,
            "'Address.user' is not available due to lazy='raise'",
            lambda: a1.user,
        )
        # columns still work
        eq_(u1.id, 7)
        eq_(a1.id, 1)
class RequirementsTest(fixtures.MappedTest):
    """Tests the contract for user classes.

    Mapped classes may override __eq__/__hash__/__bool__/__len__ and
    similar dunders; the ORM must still flush, query and eagerly load
    them correctly.
    """
    @classmethod
    def define_tables(cls, metadata):
        # ht1 is the hub; ht2 (o2m), ht4 (m2m to ht3), ht5 (self-ref m2m)
        # and ht6 (two FKs back to ht1) exercise different topologies.
        Table(
            "ht1",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("value", String(10)),
        )
        Table(
            "ht2",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("ht1_id", Integer, ForeignKey("ht1.id")),
            Column("value", String(10)),
        )
        Table(
            "ht3",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("value", String(10)),
        )
        Table(
            "ht4",
            metadata,
            Column("ht1_id", Integer, ForeignKey("ht1.id"), primary_key=True),
            Column("ht3_id", Integer, ForeignKey("ht3.id"), primary_key=True),
        )
        Table(
            "ht5",
            metadata,
            Column("ht1_id", Integer, ForeignKey("ht1.id"), primary_key=True),
        )
        Table(
            "ht6",
            metadata,
            Column("ht1a_id", Integer, ForeignKey("ht1.id"), primary_key=True),
            Column("ht1b_id", Integer, ForeignKey("ht1.id"), primary_key=True),
            Column("value", String(10)),
        )
    # Old-style (non-new-style) classes only exist on Python 2, so this
    # rejection test is defined conditionally.
    if util.py2k:
        def test_baseclass(self):
            ht1 = self.tables.ht1
            class OldStyle:
                pass
            assert_raises(sa.exc.ArgumentError, mapper, OldStyle, ht1)
            assert_raises(sa.exc.ArgumentError, mapper, 123)
            class NoWeakrefSupport(str):
                pass
            # TODO: is weakref support detectable without an instance?
            # self.assertRaises(
            #     sa.exc.ArgumentError, mapper, NoWeakrefSupport, t2)
    class _ValueBase(object):
        # Base for test entities that override comparison/truthiness:
        # instances are always falsy and compare/hash by .value.
        def __init__(self, value="abc", id_=None):
            self.id = id_
            self.value = value
        def __bool__(self):
            return False
        def __hash__(self):
            return hash(self.value)
        def __eq__(self, other):
            if isinstance(other, type(self)):
                return self.value == other.value
            return False
    def test_comparison_overrides(self):
        """Simple tests to ensure users can supply comparison __methods__.
        The suite-level test --options are better suited to detect
        problems- they add selected __methods__ across the board on all
        ORM tests. This test simply shoves a variety of operations
        through the ORM to catch basic regressions early in a standard
        test run.
        """
        ht6, ht5, ht4, ht3, ht2, ht1 = (
            self.tables.ht6,
            self.tables.ht5,
            self.tables.ht4,
            self.tables.ht3,
            self.tables.ht2,
            self.tables.ht1,
        )
        class H1(self._ValueBase):
            pass
        class H2(self._ValueBase):
            pass
        class H3(self._ValueBase):
            pass
        class H6(self._ValueBase):
            pass
        mapper(
            H1,
            ht1,
            properties={
                "h2s": relationship(H2, backref="h1"),
                "h3s": relationship(H3, secondary=ht4, backref="h1s"),
                "h1s": relationship(H1, secondary=ht5, backref="parent_h1"),
                "t6a": relationship(
                    H6, backref="h1a", primaryjoin=ht1.c.id == ht6.c.ht1a_id
                ),
                "t6b": relationship(
                    H6, backref="h1b", primaryjoin=ht1.c.id == ht6.c.ht1b_id
                ),
            },
        )
        mapper(H2, ht2)
        mapper(H3, ht3)
        mapper(H6, ht6)
        s = create_session()
        s.add_all([H1("abc"), H1("def")])
        h1 = H1("ghi")
        s.add(h1)
        h1.h2s.append(H2("abc"))
        h1.h3s.extend([H3(), H3()])
        h1.h1s.append(H1())
        s.flush()
        eq_(select(func.count("*")).select_from(ht1).scalar(), 4)
        h6 = H6()
        h6.h1a = h1
        h6.h1b = h1
        h6 = H6()
        h6.h1a = h1
        h6.h1b = x = H1()
        # cascade from the backref assignment must have added x,
        # even though H1 instances are falsy
        assert x in s
        h6.h1b.h2s.append(H2("def"))
        s.flush()
        h1.h2s.extend([H2("abc"), H2("def")])
        s.flush()
        h1s = s.query(H1).options(sa.orm.joinedload("h2s")).all()
        eq_(len(h1s), 5)
        self.assert_unordered_result(
            h1s,
            H1,
            {"h2s": []},
            {"h2s": []},
            {
                "h2s": (
                    H2,
                    [{"value": "abc"}, {"value": "def"}, {"value": "abc"}],
                )
            },
            {"h2s": []},
            {"h2s": (H2, [{"value": "def"}])},
        )
        h1s = s.query(H1).options(sa.orm.joinedload("h3s")).all()
        eq_(len(h1s), 5)
        # chained eager loads across the overridden-comparison entities
        h1s = (
            s.query(H1)
            .options(
                sa.orm.joinedload("t6a").joinedload("h1b"),
                sa.orm.joinedload("h2s"),
                sa.orm.joinedload("h3s").joinedload("h1s"),
            )
            .all()
        )
        eq_(len(h1s), 5)
    def test_composite_results(self):
        # multi-entity query rows must pair parents/children correctly
        # even when entities compare by value rather than identity
        ht2, ht1 = (self.tables.ht2, self.tables.ht1)
        class H1(self._ValueBase):
            def __init__(self, value, id_, h2s):
                self.value = value
                self.id = id_
                self.h2s = h2s
        class H2(self._ValueBase):
            def __init__(self, value, id_):
                self.value = value
                self.id = id_
        mapper(H1, ht1, properties={"h2s": relationship(H2, backref="h1")})
        mapper(H2, ht2)
        s = Session()
        s.add_all(
            [
                H1(
                    "abc",
                    1,
                    h2s=[H2("abc", id_=1), H2("def", id_=2), H2("def", id_=3)],
                ),
                H1(
                    "def",
                    2,
                    h2s=[H2("abc", id_=4), H2("abc", id_=5), H2("def", id_=6)],
                ),
            ]
        )
        s.commit()
        eq_(
            [
                (h1.value, h1.id, h2.value, h2.id)
                for h1, h2 in s.query(H1, H2)
                .join(H1.h2s)
                .order_by(H1.id, H2.id)
            ],
            [
                ("abc", 1, "abc", 1),
                ("abc", 1, "def", 2),
                ("abc", 1, "def", 3),
                ("def", 2, "abc", 4),
                ("def", 2, "abc", 5),
                ("def", 2, "def", 6),
            ],
        )
    def test_nonzero_len_recursion(self):
        # __len__/__bool__ implementations that touch (and create) mapped
        # attributes must not send attribute instrumentation into
        # infinite recursion on attribute set events ("ding" lines).
        ht1 = self.tables.ht1
        class H1(object):
            def __len__(self):
                return len(self.get_value())
            def get_value(self):
                self.value = "foobar"
                return self.value
        class H2(object):
            def __bool__(self):
                return bool(self.get_value())
            def get_value(self):
                self.value = "foobar"
                return self.value
        mapper(H1, ht1)
        mapper(H2, ht1)
        h1 = H1()
        h1.value = "Asdf"
        h1.value = "asdf asdf"  # ding
        h2 = H2()
        h2.value = "Asdf"
        h2.value = "asdf asdf"  # ding
class IsUserlandTest(fixtures.MappedTest):
    """Plain "userland" class attributes (strings, ints, dicts, plain
    descriptors) that shadow a column name must NOT be mapped; actual
    Column / relationship values must be."""
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "foo",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("someprop", Integer),
        )
    def _test(self, value, instancelevel=None):
        # the userland attribute wins: class- and instance-level access
        # return the plain value, and the column is left unmapped
        class Foo(object):
            someprop = value
        m = mapper(Foo, self.tables.foo)
        eq_(Foo.someprop, value)
        f1 = Foo()
        if instancelevel is not None:
            eq_(f1.someprop, instancelevel)
        else:
            eq_(f1.someprop, value)
        assert self.tables.foo.c.someprop not in m._columntoproperty
    def _test_not(self, value):
        # a Column/relationship value IS picked up as a mapped property
        class Foo(object):
            someprop = value
        m = mapper(Foo, self.tables.foo)
        is_(Foo.someprop.property.columns[0], self.tables.foo.c.someprop)
        assert self.tables.foo.c.someprop in m._columntoproperty
    def test_string(self):
        self._test("someprop")
    def test_unicode(self):
        self._test("someprop")
    def test_int(self):
        self._test(5)
    def test_dict(self):
        self._test({"bar": "bat"})
    def test_set(self):
        self._test(set([6]))
    def test_column(self):
        self._test_not(self.tables.foo.c.someprop)
    def test_relationship(self):
        self._test_not(relationship("bar"))
    def test_descriptor(self):
        def somefunc(self):
            return "hi"
        # property descriptor: class access returns the property object,
        # instance access returns its computed value
        self._test(property(somefunc), "hi")
class MagicNamesTest(fixtures.MappedTest):
    """Mapped attributes may shadow "magic" names like 'query' and
    'mapper', but the internal instrumentation attribute names are
    reserved and must be rejected at mapping time."""
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "cartographers",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("name", String(50)),
            Column("alias", String(50)),
            Column("quip", String(100)),
        )
        Table(
            "maps",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("cart_id", Integer, ForeignKey("cartographers.id")),
            Column("state", String(2)),
            Column("data", sa.Text),
        )
    @classmethod
    def setup_classes(cls):
        class Cartographer(cls.Basic):
            pass
        class Map(cls.Basic):
            pass
    def test_mappish(self):
        # 'query' and 'mapper' used as ordinary mapped attribute names
        # must work in both direct and aliased queries
        maps, Cartographer, cartographers, Map = (
            self.tables.maps,
            self.classes.Cartographer,
            self.tables.cartographers,
            self.classes.Map,
        )
        mapper(
            Cartographer,
            cartographers,
            properties=dict(query=cartographers.c.quip),
        )
        mapper(
            Map,
            maps,
            properties=dict(mapper=relationship(Cartographer, backref="maps")),
        )
        c = Cartographer(
            name="Lenny", alias="The Dude", query="Where be dragons?"
        )
        Map(state="AK", mapper=c)
        sess = create_session()
        sess.add(c)
        sess.flush()
        sess.expunge_all()
        for C, M in (
            (Cartographer, Map),
            (sa.orm.aliased(Cartographer), sa.orm.aliased(Map)),
        ):
            c1 = (
                sess.query(C)
                .filter(C.alias == "The Dude")
                .filter(C.query == "Where be dragons?")
            ).one()
            sess.query(M).filter(M.mapper == c1).one()
    def test_direct_stateish(self):
        # a column literally named after an instrumentation attribute
        # must be rejected when mapped directly
        for reserved in (
            sa.orm.instrumentation.ClassManager.STATE_ATTR,
            sa.orm.instrumentation.ClassManager.MANAGER_ATTR,
        ):
            t = Table(
                "t",
                sa.MetaData(),
                Column(
                    "id",
                    Integer,
                    primary_key=True,
                    test_needs_autoincrement=True,
                ),
                Column(reserved, Integer),
            )
            class T(object):
                pass
            assert_raises_message(
                KeyError,
                (
                    "%r: requested attribute name conflicts with "
                    "instrumentation attribute of the same name." % reserved
                ),
                mapper,
                T,
                t,
            )
    def test_indirect_stateish(self):
        # same rejection when the reserved name arrives via the
        # properties dict rather than a column name
        maps = self.tables.maps
        for reserved in (
            sa.orm.instrumentation.ClassManager.STATE_ATTR,
            sa.orm.instrumentation.ClassManager.MANAGER_ATTR,
        ):
            class M(object):
                pass
            assert_raises_message(
                KeyError,
                (
                    "requested attribute name conflicts with "
                    "instrumentation attribute of the same name"
                ),
                mapper,
                M,
                maps,
                properties={reserved: maps.c.state},
            )
| 28.441035 | 79 | 0.517125 |
dde7a612b2a58c31fb1e2c694e39a0cdf992cf90 | 2,798 | py | Python | examples/PointNet/Classification/model_env.py | Obarads/torch_point_cloud | 86e19cc5c1196f22f609f2d98504b913272cbba8 | [
"MIT"
] | 1 | 2021-04-28T13:01:30.000Z | 2021-04-28T13:01:30.000Z | examples/PointNet/Classification/model_env.py | Obarads/torchpcp | 86e19cc5c1196f22f609f2d98504b913272cbba8 | [
"MIT"
] | null | null | null | examples/PointNet/Classification/model_env.py | Obarads/torchpcp | 86e19cc5c1196f22f609f2d98504b913272cbba8 | [
"MIT"
] | null | null | null | import omegaconf
import torch
from torch import optim
from torch import nn
from torch.optim import lr_scheduler
# dataset
from torchpcp.datasets.PointNet.ModelNet import (
ModelNet40, rotation_and_jitter)
# model
from torchpcp.models.PointNet import PointNetClassification
# loss
from torchpcp.losses.feature_transform_regularizer import (
feature_transform_regularizer)
def get_dataset(cfg):
    """Build and return the dataset selected by ``cfg.dataset.name``.

    Bug fix: the original tested ``dataset.name`` — a local read before
    assignment, which raises UnboundLocalError on every call. The dataset
    selector lives on the config, so it must be ``cfg.dataset.name``.

    Raises:
        NotImplementedError: if the configured dataset name is unknown.
    """
    if cfg.dataset.name == "ModelNet40":
        dataset = ModelNet40(cfg.dataset.root, cfg.num_points)
    else:
        raise NotImplementedError('Unknown dataset: ' + cfg.dataset.name)
    return dataset
def get_model(cfg):
    """Instantiate a PointNet classifier for the configured dataset and
    move it onto ``cfg.device``."""
    classifier = PointNetClassification(
        cfg.dataset[cfg.dataset.name].num_classes,
        cfg.num_points,
        cfg.use_input_transform,
        cfg.use_feature_transform,
    )
    classifier.to(cfg.device)
    return classifier
def get_optimizer(cfg, model):
    """Create an Adam optimizer over ``model``'s parameters with learning
    rate ``cfg.lr``."""
    return optim.Adam(model.parameters(), lr=cfg.lr)
def get_scheduler(cfg, optimizer):
    """Step-decay learning-rate schedule: multiply the LR by
    ``cfg.decay_rate`` every ``cfg.epoch_size`` steps."""
    return lr_scheduler.StepLR(
        optimizer,
        step_size=cfg.epoch_size,
        gamma=cfg.decay_rate,
    )
def get_losses():
    """Assemble the loss registry consumed by ``processing``."""
    # NOTE: "reguliarzer" is misspelled, but ``processing`` looks the key
    # up with the exact same spelling, so it must stay as-is.
    return {
        "cross_entropy": nn.CrossEntropyLoss(),
        "feature_transform_reguliarzer": feature_transform_regularizer,
    }
def processing(cfg, model, criterion, data, meters):
    """Run one step: move a batch to the device, forward it through the
    model, accumulate the loss, and update the metric meters.

    Returns the scalar loss tensor; the caller is expected to backprop
    and step the optimizer.
    """
    acc_meter, batch_loss = meters
    point_clouds, labels = data
    # preprocessing of data
    # transpose dims 1 and 2 — presumably (B, N, C) -> (B, C, N) for a
    # channels-first PointNet input; TODO confirm against the dataset.
    point_clouds = torch.transpose(point_clouds, 1, 2)
    point_clouds = point_clouds.to(cfg.device, dtype=torch.float32, non_blocking=True)
    labels = labels.to(cfg.device, dtype=torch.long, non_blocking=True)
    # model forward processing
    pred_labels, _, feat_trans = model(point_clouds)
    # compute losses with criterion
    loss = 0
    loss += criterion["cross_entropy"](pred_labels, labels)
    if cfg.use_feature_transform:
        # key spelling ("reguliarzer") intentionally matches get_losses()
        loss += criterion["feature_transform_reguliarzer"](feat_trans) * 0.001
    # save metrics
    batch_loss.update(loss.item())
    acc_meter.update(pred_labels, labels)
    return loss
def save_params(model_path, epoch, cfg, model, optimizer, scheduler):
    """Serialize a training checkpoint (epoch counter, plain-container
    config, and the model/optimizer/scheduler state dicts) to
    ``model_path``."""
    checkpoint = {
        'epoch': epoch,
        'cfg': omegaconf.OmegaConf.to_container(cfg),
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'scheduler': scheduler.state_dict(),
    }
    torch.save(checkpoint, model_path)
def get_checkpoint(path):
    """Load a checkpoint from ``path`` onto the CPU and rebuild its
    OmegaConf config; returns ``(checkpoint_dict, config)``."""
    print("-> loading trained data '{}'".format(path))
    loaded = torch.load(path, map_location='cpu')
    loaded_cfg = omegaconf.OmegaConf.create(loaded["cfg"])
    return loaded, loaded_cfg
6a858b602172977520ebdce135b45eb817b8f8d8 | 3,744 | py | Python | py/escher/tests/test_plots.py | lukaw3d/escher | d315efa8727f7c7d5179bbe3a7be64a9c4103e8f | [
"MIT"
] | null | null | null | py/escher/tests/test_plots.py | lukaw3d/escher | d315efa8727f7c7d5179bbe3a7be64a9c4103e8f | [
"MIT"
] | null | null | null | py/escher/tests/test_plots.py | lukaw3d/escher | d315efa8727f7c7d5179bbe3a7be64a9c4103e8f | [
"MIT"
] | null | null | null | from escher import __schema_version__, __map_model_version__
from escher import Builder
from escher.plots import (
_load_resource,
server_index,
model_json_for_name,
map_json_for_name,
)
from escher.urls import get_url
import base64
import os
import sys
from os.path import join, basename
import json
from pytest import raises, mark, param
from urllib.error import URLError
import pandas as pd
@mark.web
def test_server_index():
    """Smoke-test the live map/model index served by the Escher server."""
    listing = server_index()
    first_map = listing['maps'][0]
    assert 'organism' in first_map
    assert 'map_name' in first_map
    first_model = listing['models'][0]
    assert 'organism' in first_model
    assert 'model_name' in first_model
# helper functions
def test_load_resource_json(tmpdir):
    """A raw JSON string passes through _load_resource unchanged."""
    payload = '{"r": "val"}'
    assert _load_resource(payload, 'name') == payload
def test_load_resource_long_json(tmpdir):
    """A very long JSON string also round-trips unchanged."""
    # this used to fail on Windows with Python 3
    payload = '{"r": "' + ('val' * 100000) + '"}'
    assert _load_resource(payload, 'name') == payload
def test_load_resource_directory(tmpdir):
    """_load_resource reads a JSON file when given a filesystem path."""
    here = os.path.abspath(os.path.dirname(__file__))
    loaded = _load_resource(join(here, 'example.json'), 'name').strip()
    assert loaded == '{"r": "val"}'
def test_load_resource_invalid_file(tmpdir):
    """A file that is neither JSON nor a recognizable resource raises
    ValueError with a descriptive message.

    Bug fix: the original asserted ``'...' in err.value``; ``err.value``
    is the exception object, and ``in`` on it raises TypeError instead of
    checking the message. The message must be matched against
    ``str(err.value)``.
    """
    with raises(ValueError) as err:
        p = join(str(tmpdir), 'dummy')
        with open(p, 'w') as f:
            f.write('dummy')
        _load_resource(p, 'name')
    assert 'not a valid json file' in str(err.value)
@mark.web
def test_load_resource_web(tmpdir):
    """_load_resource can fetch a map over HTTP and return parseable JSON."""
    map_path = 'Escherichia%20coli/iJO1366.Central%20metabolism.json'
    url = get_url('map_download') + '/' + map_path
    _ = json.loads(_load_resource(url, 'name'))
def look_for_string(st, substring):
    """Assert that *substring* occurs in *st*.

    On failure, raise AssertionError with a message showing both strings —
    this works around py.test's unreadable diff output for huge strings.

    Fixes two defects in the original: the docstring had the relationship
    reversed ("string in the substring"), and the failure path relied on a
    bare ``assert`` inside a try/except, which is stripped under
    ``python -O`` and would silently pass.
    """
    if substring not in st:
        raise AssertionError(f'Could not find\n\n{substring}\n\nin\n\n{st}')
def test_save_html(tmpdir):
    """save_html embeds map/model JSON base64-encoded and, with no
    embedded_css argument, writes no CSS assignment."""
    builder = Builder(map_json='"useless_map"', model_json='"useless_model"')
    out_path = join(str(tmpdir), 'builder.html')
    builder.save_html(out_path)
    with open(out_path, 'r') as f:
        html = f.read()
    expected_snippets = (
        'escher.Builder(data.map_data, data.model_data, ',
        "map_data = JSON.parse(b64DecodeUnicode('InVzZWxlc3NfbWFwIg=='))",
        "model_data = JSON.parse(b64DecodeUnicode('InVzZWxlc3NfbW9kZWwi'))",
    )
    for snippet in expected_snippets:
        look_for_string(html, snippet)
    assert 'embedded_css =' not in html
def test_save_html_embedded_css(tmpdir):
    """save_html writes the embedded_css argument out base64-encoded."""
    # ok with embedded_css arg
    builder = Builder(embedded_css='useless_css')
    out_path = join(str(tmpdir), 'builder.html')
    builder.save_html(out_path)
    with open(out_path, 'r') as f:
        html = f.read()
    look_for_string(html, "embedded_css = b64DecodeUnicode('dXNlbGVzc19jc3M=')")
def test_Builder_options():
    """Builder options are settable at construction and mutable after."""
    builder = Builder(metabolite_no_data_color='blue')
    assert builder.metabolite_no_data_color == 'blue'
    builder.metabolite_no_data_color = 'white'
    assert builder.metabolite_no_data_color == 'white'
@mark.parametrize('data,expected', [
    # pandas Series is normalized to a plain dict
    param(pd.Series({'x': 1}), {'x': 1}),
    # plain dict passes through unchanged
    param({'x': 1}, {'x': 1}),
    # None stays None; empty dict stays empty
    param(None, None),
    param({}, {}),
    # transposed DataFrame (one column per sample) becomes a list of
    # per-column dicts — missing (NaN) entries appear to be dropped
    param(
        pd.DataFrame([{'x': 1, 'y': 3}, {'x': 2}]).T,
        [{'x': 1, 'y': 3}, {'x': 2}]
    )
])
def test_handling_cobra_fluxes(data, expected):
    # Builder must apply the same pandas normalization to all three
    # data attributes
    b = Builder(reaction_data=data,
                gene_data=data,
                metabolite_data=data)
    assert b.reaction_data == expected
    assert b.gene_data == expected
    assert b.metabolite_data == expected
| 27.130435 | 76 | 0.648504 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.