hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71fa83d6989172da3743eb9fd560fc906515688 | 10,607 | py | Python | tests/commandline/stubber_cli_test.py | Josverl/mipystubber | 504814224b38208e9886661b181a57d2b9077be1 | [
"MIT"
] | 1 | 2019-03-26T16:03:04.000Z | 2019-03-26T16:03:04.000Z | tests/commandline/stubber_cli_test.py | Josverl/mipystubber | 504814224b38208e9886661b181a57d2b9077be1 | [
"MIT"
] | null | null | null | tests/commandline/stubber_cli_test.py | Josverl/mipystubber | 504814224b38208e9886661b181a57d2b9077be1 | [
"MIT"
] | null | null | null | from typing import List
import pytest
from pytest_mock import MockerFixture
from mock import MagicMock
from pathlib import Path
from click.testing import CliRunner
# module under test :
import stubber.stubber as stubber
def test_stubber_help():
    """The CLI prints usage information for --help and exits cleanly."""
    cli_runner = CliRunner()
    outcome = cli_runner.invoke(stubber.stubber_cli, ["--help"])
    assert outcome.exit_code == 0
    for expected in ("Usage:", "Commands:"):
        assert expected in outcome.output
##########################################################################################
# clone
##########################################################################################
def test_stubber_clone(mocker: MockerFixture, tmp_path: Path):
    """`stubber clone` processes both micropython repos, via clone or fetch."""
    cli_runner = CliRunner()
    m_clone: MagicMock = mocker.patch("stubber.stubber.git.clone", autospec=True, return_value=0)
    m_fetch: MagicMock = mocker.patch("stubber.stubber.git.fetch", autospec=True, return_value=0)
    outcome = cli_runner.invoke(stubber.stubber_cli, ["clone"])
    assert outcome.exit_code == 0
    # exactly two repos are handled, each either cloned or fetched
    assert m_clone.call_count + m_fetch.call_count == 2
    if m_clone.call_count == 0:
        # repos already present locally: only fetched
        m_fetch.assert_any_call(Path("repos/micropython"))
        m_fetch.assert_any_call(Path("repos/micropython-lib"))
    else:
        m_clone.assert_any_call(remote_repo="https://github.com/micropython/micropython.git", path=Path("repos/micropython"))
        m_clone.assert_any_call(remote_repo="https://github.com/micropython/micropython-lib.git", path=Path("repos/micropython-lib"))
def test_stubber_clone_path(mocker: MockerFixture, tmp_path: Path):
    """`stubber clone --path` clones both repos under the given folder."""
    cli_runner = CliRunner()
    m_clone: MagicMock = mocker.MagicMock(return_value=0)
    mocker.patch("stubber.stubber.git.clone", m_clone)
    m_tag = mocker.patch("stubber.stubber.git.get_tag", autospec=True)
    mocker.patch("stubber.stubber.os.mkdir", autospec=True)  # keep the filesystem untouched
    outcome = cli_runner.invoke(stubber.stubber_cli, ["clone", "--path", "foobar"])
    assert outcome.exit_code == 0
    assert m_clone.call_count >= 2
    for repo in ("micropython", "micropython-lib"):
        m_clone.assert_any_call(
            remote_repo="https://github.com/micropython/{}.git".format(repo),
            path=Path("foobar") / repo,
        )
    assert m_tag.call_count >= 2
##########################################################################################
# switch
##########################################################################################
@pytest.mark.parametrize(
    "params",
    [
        ["switch", "--version", "latest", "--path", "foobar"],
        ["switch", "--version", "v1.10", "--path", "foobar"],
    ],
)
def test_stubber_switch(mocker: MockerFixture, params: List[str]):
    """`stubber switch` fetches both repos, then switches branch (latest) or checks out a tag."""
    cli_runner = CliRunner()
    m_fetch: MagicMock = mocker.patch("stubber.stubber.git.fetch", autospec=True, return_value=0)
    m_switch: MagicMock = mocker.patch("stubber.stubber.git.switch_branch", autospec=True, return_value=0)
    m_checkout: MagicMock = mocker.patch("stubber.stubber.git.checkout_tag", autospec=True, return_value=0)
    mocker.patch("stubber.stubber.git.get_tag", autospec=True, return_value="v1.42")
    m_match = mocker.patch("stubber.stubber.get_mpy.match_lib_with_mpy", autospec=True)
    mocker.patch("stubber.stubber.Path.exists", return_value=True)  # pretend the repos exist
    outcome = cli_runner.invoke(stubber.stubber_cli, params)
    assert outcome.exit_code == 0
    # both repos are fetched from the "foobar" path given in params
    assert m_fetch.call_count == 2
    m_fetch.assert_any_call(Path("foobar/micropython"))
    m_fetch.assert_any_call(Path("foobar/micropython-lib"))
    m_match.assert_called_once()
    if "latest" in params:
        # latest -> follow the development branch
        m_switch.assert_called_once()
        m_checkout.assert_not_called()
    else:
        # pinned version -> check out the matching tag
        m_checkout.assert_called_once()
        m_switch.assert_not_called()
##########################################################################################
# minify
##########################################################################################
def test_stubber_minify(mocker: MockerFixture):
    """`stubber minify` minifies only the default createstubs.py script."""
    cli_runner = CliRunner()
    m_minify: MagicMock = mocker.MagicMock(return_value=0)
    mocker.patch("stubber.stubber.minify", m_minify)
    outcome = cli_runner.invoke(stubber.stubber_cli, ["minify"])
    assert outcome.exit_code == 0
    m_minify.assert_called_once_with("board/createstubs.py", "./minified", True, False, False)
def test_stubber_minify_all(mocker: MockerFixture):
    """`stubber minify --all` minifies every createstubs variant."""
    cli_runner = CliRunner()
    m_minify: MagicMock = mocker.MagicMock(return_value=0)
    mocker.patch("stubber.stubber.minify", m_minify)
    outcome = cli_runner.invoke(stubber.stubber_cli, ["minify", "--all"])
    assert outcome.exit_code == 0
    assert m_minify.call_count == 3
    for script in ("createstubs.py", "createstubs_db.py", "createstubs_mem.py"):
        m_minify.assert_any_call("board/" + script, "./minified", True, False, False)
##########################################################################################
# stub
##########################################################################################
def test_stubber_stub(mocker: MockerFixture):
    """`stubber stub --source .` generates .pyi files for the given folder."""
    cli_runner = CliRunner()
    m_generate: MagicMock = mocker.patch("stubber.stubber.utils.generate_pyi_files", autospec=True, return_value=True)
    # run against the current folder; generation itself is mocked out
    outcome = cli_runner.invoke(stubber.stubber_cli, ["stub", "--source", "."])
    m_generate.assert_called_once_with(Path("."))
    assert outcome.exit_code == 0
##########################################################################################
# get-frozen
##########################################################################################
def test_stubber_get_frozen(mocker: MockerFixture, tmp_path: Path):
    """`stubber get-frozen` collects frozen modules and post-processes the stub folder."""
    cli_runner = CliRunner()
    m_tag: MagicMock = mocker.patch("stubber.stubber.git.get_tag", autospec=True, return_value="v1.42")
    m_frozen: MagicMock = mocker.patch("stubber.stubber.get_mpy.get_frozen", autospec=True)
    m_post: MagicMock = mocker.patch("stubber.stubber.utils.do_post_processing", autospec=True)
    # tmp_path guarantees the destination folder exists
    outcome = cli_runner.invoke(stubber.stubber_cli, ["get-frozen", "--stub-folder", tmp_path.as_posix()])
    assert outcome.exit_code == 0
    # NOTE(review): this test has been reported as flaky in CI
    m_frozen.assert_called_once()
    m_tag.assert_called_once()
    # destination folder name is derived from the mocked tag v1.42
    m_post.assert_called_once_with([tmp_path / "micropython-v1_42-frozen"], True, True)
##########################################################################################
# get-lobo
##########################################################################################
def test_stubber_get_lobo(mocker: MockerFixture, tmp_path: Path):
    """`stubber get-lobo` collects the Loboris frozen modules and post-processes them."""
    runner = CliRunner()
    mock: MagicMock = mocker.patch("stubber.stubber.get_lobo.get_frozen", autospec=True)
    mock_post: MagicMock = mocker.patch("stubber.stubber.utils.do_post_processing", autospec=True)
    # fake run against a temporary stub folder
    result = runner.invoke(stubber.stubber_cli, ["get-lobo", "--stub-folder", tmp_path.as_posix()])
    assert result.exit_code == 0
    mock.assert_called_once()
    # assert_called_once_with subsumes the former redundant bare assert_called_once()
    mock_post.assert_called_once_with([tmp_path / "loboris-v3_2_24-frozen"], True, True)
##########################################################################################
# get-core
##########################################################################################
def test_stubber_get_core(mocker: MockerFixture, tmp_path: Path):
    """`stubber get-core` fetches CPython core stubs for both flavours."""
    cli_runner = CliRunner()
    m_core: MagicMock = mocker.patch("stubber.stubber.get_cpython.get_core", autospec=True)
    m_post: MagicMock = mocker.patch("stubber.stubber.utils.do_post_processing", autospec=True)
    outcome = cli_runner.invoke(stubber.stubber_cli, ["get-core", "--stub-folder", tmp_path.as_posix()])
    assert outcome.exit_code == 0
    # get_core runs twice: once per target flavour
    assert m_core.call_count == 2
    # post-processing is called once, covering both target folders
    m_post.assert_called_with([tmp_path / "cpython_core-pycopy", tmp_path / "cpython_core-micropython"], True, True)
##########################################################################################
# get-docstubs
##########################################################################################
def test_stubber_get_docstubs(mocker: MockerFixture, tmp_path: Path):
    """`stubber get-docstubs` generates stubs from the rst docs and post-processes them."""
    runner = CliRunner()
    mock_version: MagicMock = mocker.patch("stubber.stubber.git.get_tag", autospec=True, return_value="v1.42")
    mock: MagicMock = mocker.patch("stubber.stubber.generate_from_rst", autospec=True)
    mock_post: MagicMock = mocker.patch("stubber.stubber.utils.do_post_processing", autospec=True)
    # fake run
    result = runner.invoke(stubber.stubber_cli, ["get-docstubs", "--stub-folder", tmp_path.as_posix()])
    assert result.exit_code == 0
    # generation runs exactly once (assert_called_once subsumes the former
    # duplicate call_count check; the old "called twice" comment was wrong)
    mock.assert_called_once()
    assert mock_version.call_count >= 1
    # post-processing runs over the folder derived from the mocked tag v1.42
    mock_post.assert_called_with([tmp_path / "micropython-v1_42-docstubs"], False, True)
##########################################################################################
# get-lobo
##########################################################################################
def test_stubber_fallback(mocker: MockerFixture, tmp_path: Path):
    """`stubber update-fallback` refreshes the fallback stubs folder."""
    cli_runner = CliRunner()
    m_update: MagicMock = mocker.patch("stubber.stubber.update_fallback", autospec=True)
    outcome = cli_runner.invoke(stubber.stubber_cli, ["update-fallback", "--stub-folder", tmp_path.as_posix()])
    m_update.assert_called_once()
    assert outcome.exit_code == 0
| 42.258964 | 136 | 0.614217 | from typing import List
import pytest
from pytest_mock import MockerFixture
from mock import MagicMock
from pathlib import Path
from click.testing import CliRunner
import stubber.stubber as stubber
def test_stubber_help():
runner = CliRunner()
result = runner.invoke(stubber.stubber_cli, ["--help"])
assert result.exit_code == 0
assert "Usage:" in result.output
assert "Commands:" in result.output
| true | true |
f71fa8aed93c8b08ae1ae6669c787edf3afcae3d | 12,641 | py | Python | python/ccxt/async_support/base/exchange.py | gabvladov/ccxt | c26ba54afe1617d7314bf6714427a4db6d0c6381 | [
"MIT"
] | null | null | null | python/ccxt/async_support/base/exchange.py | gabvladov/ccxt | c26ba54afe1617d7314bf6714427a4db6d0c6381 | [
"MIT"
] | null | null | null | python/ccxt/async_support/base/exchange.py | gabvladov/ccxt | c26ba54afe1617d7314bf6714427a4db6d0c6381 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
__version__ = '1.20.94'
# -----------------------------------------------------------------------------
import asyncio
import concurrent
import socket
import time
import math
import random
import certifi
import aiohttp
import ssl
import sys
import yarl
# -----------------------------------------------------------------------------
from ccxt.async_support.base.throttle import throttle
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
# -----------------------------------------------------------------------------
from ccxt.base.exchange import Exchange as BaseExchange
# -----------------------------------------------------------------------------
__all__ = [
'BaseExchange',
'Exchange',
]
# -----------------------------------------------------------------------------
class Exchange(BaseExchange):
    def __init__(self, config={}):
        """Create the async exchange.

        :param config: exchange configuration; may carry an ``asyncio_loop``,
            a pre-built aiohttp ``session``, TLS settings (``verify``,
            ``cafile``) and ``aiohttp_trust_env``.
        """
        if 'asyncio_loop' in config:
            self.asyncio_loop = config['asyncio_loop']
        # fall back to the default event loop when none was configured
        self.asyncio_loop = self.asyncio_loop or asyncio.get_event_loop()
        self.aiohttp_trust_env = config.get('aiohttp_trust_env', self.aiohttp_trust_env)
        self.verify = config.get('verify', self.verify)
        # only a session we created ourselves is closed in close()
        self.own_session = 'session' not in config
        self.cafile = config.get('cafile', certifi.where())
        super(Exchange, self).__init__(config)
        self.init_rest_rate_limiter()
    def init_rest_rate_limiter(self):
        """Build the token-bucket throttler bound to this instance's event loop."""
        self.throttle = throttle(self.extend({
            'loop': self.asyncio_loop,
        }, self.tokenBucket))

    def __del__(self):
        # a session still open at garbage-collection time means the caller
        # never awaited .close(); warn instead of silently leaking sockets
        if self.session is not None:
            self.logger.warning(self.id + " requires to release all resources with an explicit call to the .close() coroutine. If you are using the exchange instance with async coroutines, add exchange.close() to your code into a place when you're done with the exchange and don't need the exchange instance anymore (at the end of your async coroutine).")
    # async context manager support ("async with exchange:") requires the
    # async/await syntax introduced in Python 3.5, hence the guard
    if sys.version_info >= (3, 5):
        async def __aenter__(self):
            self.open()
            return self

        async def __aexit__(self, exc_type, exc, tb):
            await self.close()
    def open(self):
        """Lazily create the aiohttp session; no-op for caller-supplied sessions."""
        if self.own_session and self.session is None:
            # Create our SSL context object with our CA cert file;
            # self.verify may be False to disable certificate verification
            context = ssl.create_default_context(cafile=self.cafile) if self.verify else self.verify
            # Pass this SSL context to aiohttp and create a TCPConnector
            connector = aiohttp.TCPConnector(ssl=context, loop=self.asyncio_loop)
            self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector, trust_env=self.aiohttp_trust_env)
async def close(self):
if self.session is not None:
if self.own_session:
await self.session.close()
self.session = None
    async def wait_for_token(self):
        """Sleep until a rate-limit token is available, then consume one."""
        while self.rateLimitTokens <= 1:
            self.add_new_tokens()
            # back off for a short randomized interval instead of busy-waiting
            seconds_delays = [0.001, 0.005, 0.022, 0.106, 0.5]
            delay = random.choice(seconds_delays)
            await asyncio.sleep(delay)
        self.rateLimitTokens -= 1
    def add_new_tokens(self):
        """Refill the token bucket in proportion to the time elapsed since the last refill."""
        now = time.monotonic()
        time_since_update = now - self.rateLimitUpdateTime
        # the 0.8 factor keeps the effective rate slightly below the nominal rateLimit
        new_tokens = math.floor((0.8 * 1000.0 * time_since_update) / self.rateLimit)
        if new_tokens > 1:
            # never exceed the bucket capacity
            self.rateLimitTokens = min(self.rateLimitTokens + new_tokens, self.rateLimitMaxTokens)
            self.rateLimitUpdateTime = now
    async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """A better wrapper over request for deferred signing"""
        # throttle BEFORE signing so the signature timestamp reflects any wait
        if self.enableRateLimit:
            await self.throttle()
        self.lastRestRequestTimestamp = self.milliseconds()
        request = self.sign(path, api, method, params, headers, body)
        return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
    async def fetch(self, url, method='GET', headers=None, body=None):
        """Perform a HTTP request and return decoded JSON data"""
        request_headers = self.prepare_request_headers(headers)
        url = self.proxy + url
        if self.verbose:
            print("\nRequest:", method, url, headers, body)
        self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
        request_body = body
        # aiohttp expects the payload as bytes
        encoded_body = body.encode() if body else None
        self.open()
        session_method = getattr(self.session, method.lower())
        http_response = None
        http_status_code = None
        http_status_text = None
        json_response = None
        try:
            # encoded=True stops yarl from re-encoding an already-signed url
            async with session_method(yarl.URL(url, encoded=True),
                                      data=encoded_body,
                                      headers=request_headers,
                                      timeout=(self.timeout / 1000),
                                      proxy=self.aiohttp_proxy) as response:
                http_response = await response.text()
                http_status_code = response.status
                http_status_text = response.reason
                json_response = self.parse_json(http_response)
                # from here on `headers` refers to the RESPONSE headers
                headers = response.headers
                # optionally keep the raw/parsed response for debugging
                if self.enableLastHttpResponse:
                    self.last_http_response = http_response
                if self.enableLastResponseHeaders:
                    self.last_response_headers = headers
                if self.enableLastJsonResponse:
                    self.last_json_response = json_response
                if self.verbose:
                    print("\nResponse:", method, url, http_status_code, headers, http_response)
                self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
        # map low-level network failures onto ccxt exception types
        except socket.gaierror as e:
            raise ExchangeNotAvailable(method + ' ' + url)
        except concurrent.futures._base.TimeoutError as e:
            raise RequestTimeout(method + ' ' + url)
        except aiohttp.client_exceptions.ClientConnectionError as e:
            raise ExchangeNotAvailable(method + ' ' + url)
        except aiohttp.client_exceptions.ClientError as e:  # base exception class
            raise ExchangeError(method + ' ' + url)
        self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
        self.handle_rest_errors(http_status_code, http_status_text, http_response, url, method)
        self.handle_rest_response(http_response, json_response, url, method)
        if json_response is not None:
            return json_response
        if self.is_text_response(headers):
            return http_response
        # NOTE(review): this returns response.content after the async-with
        # block has exited and the connection was released - verify that
        # callers can still consume this stream at that point
        return response.content
async def load_markets(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = await self.fetch_currencies()
markets = await self.fetch_markets(params)
return self.set_markets(markets, currencies)
async def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = await self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = await self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
async def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, await self.fetch_fees())
return self.loaded_fees
    async def fetch_markets(self, params={}):
        """Return the statically configured markets."""
        # markets are returned as a list
        # currencies are returned as a dict
        # this is for historical reasons
        # and may be changed for consistency later
        return self.to_array(self.markets)

    async def fetch_currencies(self, params={}):
        """Return the statically configured currencies."""
        # returned as a dict (see the list/dict asymmetry note above)
        return self.currencies
async def fetch_status(self, params={}):
if self.has['fetchTime']:
updated = await self.fetch_time(params)
self.status['updated'] = updated
return self.status
async def fetch_order_status(self, id, symbol=None, params={}):
order = await self.fetch_order(id, symbol, params)
return order['status']
async def fetch_partial_balance(self, part, params={}):
balance = await self.fetch_balance(params)
return balance[part]
async def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = await self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
async def perform_order_book_request(self, market, limit=None, params={}):
raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.perform_order_book_request(market, limit, params)
return self.parse_order_book(orderbook, market, limit, params)
    async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Build OHLCV candles from public trades when no native endpoint exists."""
        if not self.has['fetchTrades']:
            raise NotSupported('fetch_ohlcv() not implemented yet')
        await self.load_markets()
        trades = await self.fetch_trades(symbol, since, limit, params)
        return self.build_ohlcv(trades, timeframe, since, limit)

    async def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        # camelCase alias of fetch_ohlcv
        return await self.fetch_ohlcv(symbol, timeframe, since, limit, params)
    async def fetch_full_tickers(self, symbols=None, params={}):
        # backward-compatible alias; delegates to fetch_tickers
        return await self.fetch_tickers(symbols, params)
async def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
raise ExchangeError('updateOrder() requires enableRateLimit = true')
await self.cancel_order(id, symbol)
return await self.create_order(symbol, *args)
    async def fetch_trading_fees(self, params={}):
        """Concrete exchanges override this to report their fee schedule."""
        raise NotSupported('fetch_trading_fees() not supported yet')

    async def fetch_trading_fee(self, symbol, params={}):
        """Fetch fees for a single symbol via the all-fees endpoint."""
        if not self.has['fetchTradingFees']:
            raise NotSupported('fetch_trading_fee() not supported yet')
        # NOTE(review): the symbol argument is not forwarded - the whole fee
        # table is returned; confirm callers expect that
        return await self.fetch_trading_fees(params)
async def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = await self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
async def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = await self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
    async def fetch_ticker(self, symbol, params={}):
        """Concrete exchanges override this to fetch a single ticker."""
        raise NotSupported('fetch_ticker() not supported yet')
| 41.719472 | 355 | 0.610316 |
__version__ = '1.20.94'
import asyncio
import concurrent
import socket
import time
import math
import random
import certifi
import aiohttp
import ssl
import sys
import yarl
from ccxt.async_support.base.throttle import throttle
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
from ccxt.base.exchange import Exchange as BaseExchange
__all__ = [
'BaseExchange',
'Exchange',
]
class Exchange(BaseExchange):
def __init__(self, config={}):
if 'asyncio_loop' in config:
self.asyncio_loop = config['asyncio_loop']
self.asyncio_loop = self.asyncio_loop or asyncio.get_event_loop()
self.aiohttp_trust_env = config.get('aiohttp_trust_env', self.aiohttp_trust_env)
self.verify = config.get('verify', self.verify)
self.own_session = 'session' not in config
self.cafile = config.get('cafile', certifi.where())
super(Exchange, self).__init__(config)
self.init_rest_rate_limiter()
def init_rest_rate_limiter(self):
self.throttle = throttle(self.extend({
'loop': self.asyncio_loop,
}, self.tokenBucket))
def __del__(self):
if self.session is not None:
self.logger.warning(self.id + " requires to release all resources with an explicit call to the .close() coroutine. If you are using the exchange instance with async coroutines, add exchange.close() to your code into a place when you're done with the exchange and don't need the exchange instance anymore (at the end of your async coroutine).")
if sys.version_info >= (3, 5):
async def __aenter__(self):
self.open()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
def open(self):
if self.own_session and self.session is None:
context = ssl.create_default_context(cafile=self.cafile) if self.verify else self.verify
connector = aiohttp.TCPConnector(ssl=context, loop=self.asyncio_loop)
self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector, trust_env=self.aiohttp_trust_env)
async def close(self):
if self.session is not None:
if self.own_session:
await self.session.close()
self.session = None
async def wait_for_token(self):
while self.rateLimitTokens <= 1:
self.add_new_tokens()
seconds_delays = [0.001, 0.005, 0.022, 0.106, 0.5]
delay = random.choice(seconds_delays)
await asyncio.sleep(delay)
self.rateLimitTokens -= 1
def add_new_tokens(self):
now = time.monotonic()
time_since_update = now - self.rateLimitUpdateTime
new_tokens = math.floor((0.8 * 1000.0 * time_since_update) / self.rateLimit)
if new_tokens > 1:
self.rateLimitTokens = min(self.rateLimitTokens + new_tokens, self.rateLimitMaxTokens)
self.rateLimitUpdateTime = now
async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
if self.enableRateLimit:
await self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
async def fetch(self, url, method='GET', headers=None, body=None):
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
print("\nRequest:", method, url, headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
request_body = body
encoded_body = body.encode() if body else None
self.open()
session_method = getattr(self.session, method.lower())
http_response = None
http_status_code = None
http_status_text = None
json_response = None
try:
async with session_method(yarl.URL(url, encoded=True),
data=encoded_body,
headers=request_headers,
timeout=(self.timeout / 1000),
proxy=self.aiohttp_proxy) as response:
http_response = await response.text()
http_status_code = response.status
http_status_text = response.reason
json_response = self.parse_json(http_response)
headers = response.headers
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.verbose:
print("\nResponse:", method, url, http_status_code, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
except socket.gaierror as e:
raise ExchangeNotAvailable(method + ' ' + url)
except concurrent.futures._base.TimeoutError as e:
raise RequestTimeout(method + ' ' + url)
except aiohttp.client_exceptions.ClientConnectionError as e:
raise ExchangeNotAvailable(method + ' ' + url)
except aiohttp.client_exceptions.ClientError as e:
raise ExchangeError(method + ' ' + url)
self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
self.handle_rest_errors(http_status_code, http_status_text, http_response, url, method)
self.handle_rest_response(http_response, json_response, url, method)
if json_response is not None:
return json_response
if self.is_text_response(headers):
return http_response
return response.content
async def load_markets(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = await self.fetch_currencies()
markets = await self.fetch_markets(params)
return self.set_markets(markets, currencies)
async def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = await self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = await self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
async def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, await self.fetch_fees())
return self.loaded_fees
async def fetch_markets(self, params={}):
return self.to_array(self.markets)
async def fetch_currencies(self, params={}):
return self.currencies
async def fetch_status(self, params={}):
if self.has['fetchTime']:
updated = await self.fetch_time(params)
self.status['updated'] = updated
return self.status
async def fetch_order_status(self, id, symbol=None, params={}):
order = await self.fetch_order(id, symbol, params)
return order['status']
async def fetch_partial_balance(self, part, params={}):
balance = await self.fetch_balance(params)
return balance[part]
async def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = await self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
async def perform_order_book_request(self, market, limit=None, params={}):
raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.perform_order_book_request(market, limit, params)
return self.parse_order_book(orderbook, market, limit, params)
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
raise NotSupported('fetch_ohlcv() not implemented yet')
await self.load_markets()
trades = await self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcv(trades, timeframe, since, limit)
async def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return await self.fetch_ohlcv(symbol, timeframe, since, limit, params)
async def fetch_full_tickers(self, symbols=None, params={}):
return await self.fetch_tickers(symbols, params)
async def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
raise ExchangeError('updateOrder() requires enableRateLimit = true')
await self.cancel_order(id, symbol)
return await self.create_order(symbol, *args)
async def fetch_trading_fees(self, params={}):
raise NotSupported('fetch_trading_fees() not supported yet')
async def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
raise NotSupported('fetch_trading_fee() not supported yet')
return await self.fetch_trading_fees(params)
async def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = await self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
async def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = await self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
    async def fetch_ticker(self, symbol, params={}):
        # Abstract in the base class: must be implemented by each concrete exchange.
        raise NotSupported('fetch_ticker() not supported yet')
| true | true |
f71fa96f8684f8b86d25128c15599561e0aa97b2 | 11,711 | py | Python | fs_patches_of_hybrid_cloud/cherry_for_B038/nova_cascaded/nova/virt/vmwareapi/vmware_images.py | Hybrid-Cloud/badam | 390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb | [
"Apache-2.0"
] | 2 | 2015-06-15T02:16:33.000Z | 2022-02-23T07:10:38.000Z | patches_tool/vcloud_patch/code/nova/virt/vmwareapi/vmware_images.py | Hybrid-Cloud/badam | 390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb | [
"Apache-2.0"
] | 7 | 2016-05-13T06:39:45.000Z | 2016-05-20T02:55:31.000Z | fs_patches_of_hybrid_cloud/cherry_for_B038/nova_cascaded/nova/virt/vmwareapi/vmware_images.py | Hybrid-Cloud/badam | 390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb | [
"Apache-2.0"
] | 4 | 2015-11-02T04:02:50.000Z | 2021-05-13T17:06:00.000Z | # Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility functions for Image transfer and manipulation.
"""
import os
from oslo.config import cfg
from nova import exception
from nova import image
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import units
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import io_util
from nova.virt.vmwareapi import read_write_util
# NOTE(mdbooth): We use use_linked_clone below, but don't have to import it
# because nova.virt.vmwareapi.driver is imported first. In fact, it is not
# possible to import it here, as nova.virt.vmwareapi.driver calls
# CONF.register_opts() after the import chain which imports this module. This
# is not a problem as long as the import order doesn't change.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
IMAGE_API = image.API()
QUEUE_BUFFER_SIZE = 10
LINKED_CLONE_PROPERTY = 'vmware_linked_clone'
class VMwareImage(object):
    """Subset of Glance image metadata the VMware driver uses to build VMs."""

    def __init__(self, image_id,
                 file_size=0,
                 os_type=constants.DEFAULT_OS_TYPE,
                 adapter_type=constants.DEFAULT_ADAPTER_TYPE,
                 disk_type=constants.DEFAULT_DISK_TYPE,
                 file_type=constants.DEFAULT_DISK_FORMAT,
                 linked_clone=None,
                 vif_model=constants.DEFAULT_VIF_MODEL):
        """VMwareImage holds values for use in building VMs.

        image_id (str): uuid of the image
        file_size (int): size of file in bytes
        os_type (str): name of guest os (use vSphere names only)
        adapter_type (str): name of the adapter's type
        disk_type (str): type of disk in thin, thick, etc
        file_type (str): vmdk or iso
        linked_clone(bool): use linked clone, or don't
        """
        self.image_id = image_id
        self.file_size = file_size
        self.os_type = os_type
        self.adapter_type = adapter_type
        self.disk_type = disk_type
        self.file_type = file_type

        # NOTE(vui): This should be removed when we restore the
        # descriptor-based validation.
        if (self.file_type is not None and
                self.file_type not in constants.DISK_FORMATS_ALL):
            raise exception.InvalidDiskFormat(disk_format=self.file_type)

        if linked_clone is not None:
            self.linked_clone = linked_clone
        else:
            # Fall back to the global use_linked_clone config option.
            self.linked_clone = CONF.vmware.use_linked_clone
        self.vif_model = vif_model

    @property
    def file_size_in_kb(self):
        # NOTE(review): plain '/' — this module targets Python 2 (see the
        # iteritems() call below), so this is integer division.
        return self.file_size / units.Ki

    @property
    def file_size_in_gb(self):
        return self.file_size / units.Gi

    @property
    def is_sparse(self):
        # True when the disk uses the sparse format.
        return self.disk_type == constants.DISK_TYPE_SPARSE

    @property
    def is_iso(self):
        # True when the image is an ISO rather than a VMDK disk.
        return self.file_type == constants.DISK_FORMAT_ISO

    @classmethod
    def from_image(cls, image_id, image_meta=None):
        """Returns VMwareImage, the subset of properties the driver uses.

        :param image_id - image id of image
        :param image_meta - image metadata we are working with
        :return: vmware image object
        :rtype: nova.virt.vmwareapi.vmware_images.VmwareImage
        """
        if image_meta is None:
            image_meta = {}
        properties = image_meta.get("properties", {})

        # calculate linked_clone flag, allow image properties to override the
        # global property set in the configurations.
        image_linked_clone = properties.get(LINKED_CLONE_PROPERTY,
                                            CONF.vmware.use_linked_clone)

        # catch any string values that need to be interpreted as boolean values
        linked_clone = strutils.bool_from_string(image_linked_clone)

        props = {
            'image_id': image_id,
            'linked_clone': linked_clone
        }

        if 'size' in image_meta:
            props['file_size'] = image_meta['size']
        if 'disk_format' in image_meta:
            props['file_type'] = image_meta['disk_format']

        # Map Glance property names onto constructor keyword names.
        props_map = {
            'vmware_ostype': 'os_type',
            'vmware_adaptertype': 'adapter_type',
            'vmware_disktype': 'disk_type',
            'hw_vif_model': 'vif_model'
        }

        for k, v in props_map.iteritems():
            if k in properties:
                props[v] = properties[k]

        return cls(**props)
def start_transfer(context, read_file_handle, data_size,
                   write_file_handle=None, image_id=None, image_meta=None):
    """Start the data transfer from the reader to the writer.

    Reader writes to the pipe and the writer reads from the pipe. This means
    that the total transfer time boils down to the slower of the read/write
    and not the addition of the two times.

    NOTE(review): exactly one of write_file_handle or image_id must be
    supplied; if neither is, write_thread is never bound and the call fails
    with a NameError — confirm all callers satisfy this.
    """
    if not image_meta:
        image_meta = {}

    # The pipe that acts as an intermediate store of data for reader to write
    # to and writer to grab from.
    thread_safe_pipe = io_util.ThreadSafePipe(QUEUE_BUFFER_SIZE, data_size)
    # The read thread. In case of glance it is the instance of the
    # GlanceFileRead class. The glance client read returns an iterator
    # and this class wraps that iterator to provide datachunks in calls
    # to read.
    read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe)

    # In case of Glance - VMware transfer, we just need a handle to the
    # HTTP Connection that is to send transfer data to the VMware datastore.
    if write_file_handle:
        write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
    # In case of VMware - Glance transfer, we relinquish VMware HTTP file read
    # handle to Glance Client instance, but to be sure of the transfer we need
    # to be sure of the status of the image on glance changing to active.
    # The GlanceWriteThread handles the same for us.
    elif image_id:
        write_thread = io_util.GlanceWriteThread(context, thread_safe_pipe,
                                                 image_id, image_meta)
    # Start the read and write threads.
    read_event = read_thread.start()
    write_event = write_thread.start()
    try:
        # Wait on the read and write events to signal their end
        read_event.wait()
        write_event.wait()
    except Exception as exc:
        # In case of any of the reads or writes raising an exception,
        # stop the threads so that we un-necessarily don't keep the other one
        # waiting.
        read_thread.stop()
        write_thread.stop()

        # Log and raise the exception.
        LOG.exception(exc)
        raise exception.NovaException(exc)
    finally:
        # No matter what, try closing the read and write handles, if it so
        # applies.
        read_file_handle.close()
        if write_file_handle:
            write_file_handle.close()
def upload_iso_to_datastore(iso_path, instance, **kwargs):
    """Stream a local ISO file to the VMware datastore over HTTP."""
    LOG.debug("Uploading iso %s to datastore", iso_path,
              instance=instance)
    with open(iso_path, 'r') as iso_file:
        iso_size = os.fstat(iso_file.fileno()).st_size
        write_file_handle = read_write_util.VMwareHTTPWriteFile(
            kwargs.get("host"),
            kwargs.get("data_center_name"),
            kwargs.get("datastore_name"),
            kwargs.get("cookies"),
            kwargs.get("file_path"),
            iso_size)
        LOG.debug("Uploading iso of size : %s ", iso_size)
        block_size = 0x10000
        # Copy the file in 64 KiB chunks until read() returns an empty string.
        for data in iter(lambda: iso_file.read(block_size), ''):
            write_file_handle.write(data)
        write_file_handle.close()
    LOG.debug("Uploaded iso %s to datastore", iso_path,
              instance=instance)
def fetch_image(context, instance, host, dc_name, ds_name, file_path,
                cookies=None):
    """Download image from the glance image server.

    Streams the Glance image identified by instance['image_ref'] into
    *file_path* on datastore *ds_name* via an HTTP PUT against *host*.
    """
    image_ref = instance['image_ref']
    LOG.debug("Downloading image file data %(image_ref)s to the "
              "data store %(data_store_name)s",
              {'image_ref': image_ref,
               'data_store_name': ds_name},
              instance=instance)

    metadata = IMAGE_API.get(context, image_ref)
    file_size = int(metadata['size'])
    # Reader: Glance download iterator; writer: HTTP handle to the datastore.
    read_iter = IMAGE_API.download(context, image_ref)
    read_file_handle = read_write_util.GlanceFileRead(read_iter)
    write_file_handle = read_write_util.VMwareHTTPWriteFile(
        host, dc_name, ds_name, cookies, file_path, file_size)
    start_transfer(context, read_file_handle, file_size,
                   write_file_handle=write_file_handle)
    LOG.debug("Downloaded image file data %(image_ref)s to "
              "%(upload_name)s on the data store "
              "%(data_store_name)s",
              {'image_ref': image_ref,
               'upload_name': 'n/a' if file_path is None else file_path,
               'data_store_name': 'n/a' if ds_name is None else ds_name},
              instance=instance)
def upload_image(context, image, instance, **kwargs):
    """Upload the snapshotted vm disk file to Glance image server.

    Reads the VMDK from the datastore over HTTP (connection parameters come
    from **kwargs) and streams it into the Glance image *image*.
    """
    LOG.debug("Uploading image %s to the Glance image server", image,
              instance=instance)
    read_file_handle = read_write_util.VMwareHTTPReadFile(
        kwargs.get("host"),
        kwargs.get("data_center_name"),
        kwargs.get("datastore_name"),
        kwargs.get("cookies"),
        kwargs.get("file_path"))
    file_size = read_file_handle.get_size()
    metadata = IMAGE_API.get(context, image)

    # The properties and other fields that we need to set for the image.
    image_metadata = {"disk_format": "vmdk",
                      "is_public": "false",
                      "name": metadata['name'],
                      "status": "active",
                      "container_format": "bare",
                      "size": file_size,
                      "properties": {"vmware_adaptertype":
                                     kwargs.get("adapter_type"),
                                     "vmware_disktype":
                                     kwargs.get("disk_type"),
                                     "vmware_ostype": kwargs.get("os_type"),
                                     "vmware_image_version":
                                     kwargs.get("image_version"),
                                     "owner_id": instance['project_id']}}
    start_transfer(context, read_file_handle, file_size,
                   image_id=metadata['id'], image_meta=image_metadata)
    LOG.debug("Uploaded image %s to the Glance image server", image,
              instance=instance)
| 40.663194 | 80 | 0.61566 |
import os
from oslo.config import cfg
from nova import exception
from nova import image
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import units
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import io_util
from nova.virt.vmwareapi import read_write_util
# because nova.virt.vmwareapi.driver is imported first. In fact, it is not
# possible to import it here, as nova.virt.vmwareapi.driver calls
# CONF.register_opts() after the import chain which imports this module. This
# is not a problem as long as the import order doesn't change.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
IMAGE_API = image.API()
QUEUE_BUFFER_SIZE = 10
LINKED_CLONE_PROPERTY = 'vmware_linked_clone'
class VMwareImage(object):
def __init__(self, image_id,
file_size=0,
os_type=constants.DEFAULT_OS_TYPE,
adapter_type=constants.DEFAULT_ADAPTER_TYPE,
disk_type=constants.DEFAULT_DISK_TYPE,
file_type=constants.DEFAULT_DISK_FORMAT,
linked_clone=None,
vif_model=constants.DEFAULT_VIF_MODEL):
self.image_id = image_id
self.file_size = file_size
self.os_type = os_type
self.adapter_type = adapter_type
self.disk_type = disk_type
self.file_type = file_type
if (self.file_type is not None and
self.file_type not in constants.DISK_FORMATS_ALL):
raise exception.InvalidDiskFormat(disk_format=self.file_type)
if linked_clone is not None:
self.linked_clone = linked_clone
else:
self.linked_clone = CONF.vmware.use_linked_clone
self.vif_model = vif_model
@property
def file_size_in_kb(self):
return self.file_size / units.Ki
@property
def file_size_in_gb(self):
return self.file_size / units.Gi
@property
def is_sparse(self):
return self.disk_type == constants.DISK_TYPE_SPARSE
@property
def is_iso(self):
return self.file_type == constants.DISK_FORMAT_ISO
@classmethod
def from_image(cls, image_id, image_meta=None):
if image_meta is None:
image_meta = {}
properties = image_meta.get("properties", {})
image_linked_clone = properties.get(LINKED_CLONE_PROPERTY,
CONF.vmware.use_linked_clone)
linked_clone = strutils.bool_from_string(image_linked_clone)
props = {
'image_id': image_id,
'linked_clone': linked_clone
}
if 'size' in image_meta:
props['file_size'] = image_meta['size']
if 'disk_format' in image_meta:
props['file_type'] = image_meta['disk_format']
props_map = {
'vmware_ostype': 'os_type',
'vmware_adaptertype': 'adapter_type',
'vmware_disktype': 'disk_type',
'hw_vif_model': 'vif_model'
}
for k, v in props_map.iteritems():
if k in properties:
props[v] = properties[k]
return cls(**props)
def start_transfer(context, read_file_handle, data_size,
write_file_handle=None, image_id=None, image_meta=None):
if not image_meta:
image_meta = {}
thread_safe_pipe = io_util.ThreadSafePipe(QUEUE_BUFFER_SIZE, data_size)
read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe)
if write_file_handle:
write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
elif image_id:
write_thread = io_util.GlanceWriteThread(context, thread_safe_pipe,
image_id, image_meta)
read_event = read_thread.start()
write_event = write_thread.start()
try:
read_event.wait()
write_event.wait()
except Exception as exc:
# waiting.
read_thread.stop()
write_thread.stop()
# Log and raise the exception.
LOG.exception(exc)
raise exception.NovaException(exc)
finally:
# No matter what, try closing the read and write handles, if it so
# applies.
read_file_handle.close()
if write_file_handle:
write_file_handle.close()
def upload_iso_to_datastore(iso_path, instance, **kwargs):
LOG.debug("Uploading iso %s to datastore", iso_path,
instance=instance)
with open(iso_path, 'r') as iso_file:
write_file_handle = read_write_util.VMwareHTTPWriteFile(
kwargs.get("host"),
kwargs.get("data_center_name"),
kwargs.get("datastore_name"),
kwargs.get("cookies"),
kwargs.get("file_path"),
os.fstat(iso_file.fileno()).st_size)
LOG.debug("Uploading iso of size : %s ",
os.fstat(iso_file.fileno()).st_size)
block_size = 0x10000
data = iso_file.read(block_size)
while len(data) > 0:
write_file_handle.write(data)
data = iso_file.read(block_size)
write_file_handle.close()
LOG.debug("Uploaded iso %s to datastore", iso_path,
instance=instance)
def fetch_image(context, instance, host, dc_name, ds_name, file_path,
cookies=None):
image_ref = instance['image_ref']
LOG.debug("Downloading image file data %(image_ref)s to the "
"data store %(data_store_name)s",
{'image_ref': image_ref,
'data_store_name': ds_name},
instance=instance)
metadata = IMAGE_API.get(context, image_ref)
file_size = int(metadata['size'])
read_iter = IMAGE_API.download(context, image_ref)
read_file_handle = read_write_util.GlanceFileRead(read_iter)
write_file_handle = read_write_util.VMwareHTTPWriteFile(
host, dc_name, ds_name, cookies, file_path, file_size)
start_transfer(context, read_file_handle, file_size,
write_file_handle=write_file_handle)
LOG.debug("Downloaded image file data %(image_ref)s to "
"%(upload_name)s on the data store "
"%(data_store_name)s",
{'image_ref': image_ref,
'upload_name': 'n/a' if file_path is None else file_path,
'data_store_name': 'n/a' if ds_name is None else ds_name},
instance=instance)
def upload_image(context, image, instance, **kwargs):
LOG.debug("Uploading image %s to the Glance image server", image,
instance=instance)
read_file_handle = read_write_util.VMwareHTTPReadFile(
kwargs.get("host"),
kwargs.get("data_center_name"),
kwargs.get("datastore_name"),
kwargs.get("cookies"),
kwargs.get("file_path"))
file_size = read_file_handle.get_size()
metadata = IMAGE_API.get(context, image)
# The properties and other fields that we need to set for the image.
image_metadata = {"disk_format": "vmdk",
"is_public": "false",
"name": metadata['name'],
"status": "active",
"container_format": "bare",
"size": file_size,
"properties": {"vmware_adaptertype":
kwargs.get("adapter_type"),
"vmware_disktype":
kwargs.get("disk_type"),
"vmware_ostype": kwargs.get("os_type"),
"vmware_image_version":
kwargs.get("image_version"),
"owner_id": instance['project_id']}}
start_transfer(context, read_file_handle, file_size,
image_id=metadata['id'], image_meta=image_metadata)
LOG.debug("Uploaded image %s to the Glance image server", image,
instance=instance)
| true | true |
f71fa9e010cfc60587fdfbd98f199c76f3a648ad | 30,587 | py | Python | mypyc/irbuild/expression.py | sileht/mypy | 334876a0cdb80d76333e4976238fd7f42fbaabf2 | [
"PSF-2.0"
] | 1 | 2021-09-25T16:12:01.000Z | 2021-09-25T16:12:01.000Z | mypyc/irbuild/expression.py | sileht/mypy | 334876a0cdb80d76333e4976238fd7f42fbaabf2 | [
"PSF-2.0"
] | 1 | 2021-08-21T07:40:45.000Z | 2021-08-21T07:40:45.000Z | mypyc/irbuild/expression.py | sileht/mypy | 334876a0cdb80d76333e4976238fd7f42fbaabf2 | [
"PSF-2.0"
] | 1 | 2021-08-21T07:39:57.000Z | 2021-08-21T07:39:57.000Z | """Transform mypy expression ASTs to mypyc IR (Intermediate Representation).
The top-level AST transformation logic is implemented in mypyc.irbuild.visitor
and mypyc.irbuild.builder.
"""
from typing import List, Optional, Union, Callable, cast
from mypy.nodes import (
Expression, NameExpr, MemberExpr, SuperExpr, CallExpr, UnaryExpr, OpExpr, IndexExpr,
ConditionalExpr, ComparisonExpr, IntExpr, FloatExpr, ComplexExpr, StrExpr,
BytesExpr, EllipsisExpr, ListExpr, TupleExpr, DictExpr, SetExpr, ListComprehension,
SetComprehension, DictionaryComprehension, SliceExpr, GeneratorExpr, CastExpr, StarExpr,
AssignmentExpr,
Var, RefExpr, MypyFile, TypeInfo, TypeApplication, LDEF, ARG_POS
)
from mypy.types import TupleType, Instance, TypeType, ProperType, get_proper_type
from mypyc.common import MAX_SHORT_INT
from mypyc.ir.ops import (
Value, Register, TupleGet, TupleSet, BasicBlock, Assign, LoadAddress, RaiseStandardError
)
from mypyc.ir.rtypes import (
RTuple, object_rprimitive, is_none_rprimitive, int_rprimitive, is_int_rprimitive
)
from mypyc.ir.func_ir import FUNC_CLASSMETHOD, FUNC_STATICMETHOD
from mypyc.irbuild.format_str_tokenizer import (
tokenizer_printf_style, join_formatted_strings, convert_expr
)
from mypyc.primitives.bytes_ops import bytes_slice_op
from mypyc.primitives.registry import CFunctionDescription, builtin_names, binary_ops
from mypyc.primitives.generic_ops import iter_op
from mypyc.primitives.misc_ops import new_slice_op, ellipsis_op, type_op, get_module_dict_op
from mypyc.primitives.list_ops import list_append_op, list_extend_op, list_slice_op
from mypyc.primitives.tuple_ops import list_tuple_op, tuple_slice_op
from mypyc.primitives.dict_ops import dict_new_op, dict_set_item_op, dict_get_item_op
from mypyc.primitives.set_ops import set_add_op, set_update_op
from mypyc.primitives.str_ops import str_slice_op
from mypyc.primitives.int_ops import int_comparison_op_mapping
from mypyc.irbuild.specialize import specializers
from mypyc.irbuild.builder import IRBuilder
from mypyc.irbuild.for_helpers import (
translate_list_comprehension, translate_set_comprehension,
comprehension_helper
)
# Name and attribute references
def transform_name_expr(builder: IRBuilder, expr: NameExpr) -> Value:
    """Translate a plain name reference to an IR value.

    Handles builtin singletons, final constants, imported modules, local
    variables, and finally globals (loaded from the globals dict).
    """
    if expr.node is None:
        # The semantic analyzer should always bind names; reaching here is a bug.
        builder.add(RaiseStandardError(RaiseStandardError.RUNTIME_ERROR,
                                       "mypyc internal error: should be unreachable",
                                       expr.line))
        return builder.none()
    fullname = expr.node.fullname
    if fullname in builtin_names:
        typ, src = builtin_names[fullname]
        return builder.add(LoadAddress(typ, src, expr.line))
    # special cases
    if fullname == 'builtins.None':
        return builder.none()
    if fullname == 'builtins.True':
        return builder.true()
    if fullname == 'builtins.False':
        return builder.false()

    if isinstance(expr.node, Var) and expr.node.is_final:
        value = builder.emit_load_final(
            expr.node,
            fullname,
            expr.name,
            builder.is_native_ref_expr(expr),
            builder.types[expr],
            expr.line,
        )
        if value is not None:
            return value

    if isinstance(expr.node, MypyFile) and expr.node.fullname in builder.imports:
        return builder.load_module(expr.node.fullname)

    # If the expression is locally defined, then read the result from the corresponding
    # assignment target and return it. Otherwise if the expression is a global, load it from
    # the globals dictionary.
    # Except for imports, that currently always happens in the global namespace.
    if expr.kind == LDEF and not (isinstance(expr.node, Var)
                                  and expr.node.is_suppressed_import):
        # Try to detect and error when we hit the irritating mypy bug
        # where a local variable is cast to None. (#5423)
        if (isinstance(expr.node, Var) and is_none_rprimitive(builder.node_type(expr))
                and expr.node.is_inferred):
            builder.error(
                'Local variable "{}" has inferred type None; add an annotation'.format(
                    expr.node.name),
                expr.node.line)

        # TODO: Behavior currently only defined for Var, FuncDef and MypyFile node types.
        if isinstance(expr.node, MypyFile):
            # Load reference to a module imported inside function from
            # the modules dictionary. It would be closer to Python
            # semantics to access modules imported inside functions
            # via local variables, but this is tricky since the mypy
            # AST doesn't include a Var node for the module. We
            # instead load the module separately on each access.
            mod_dict = builder.call_c(get_module_dict_op, [], expr.line)
            obj = builder.call_c(dict_get_item_op,
                                 [mod_dict, builder.load_str(expr.node.fullname)],
                                 expr.line)
            return obj
        else:
            return builder.read(builder.get_assignment_target(expr), expr.line)

    return builder.load_global(expr)
def transform_member_expr(builder: IRBuilder, expr: MemberExpr) -> Value:
    """Translate an attribute access e.x.

    Fast paths: final attributes, module references, and named-tuple fields
    (turned into index reads); otherwise a generic get_attr is emitted.
    """
    # First check if this is maybe a final attribute.
    final = builder.get_final_ref(expr)
    if final is not None:
        fullname, final_var, native = final
        value = builder.emit_load_final(final_var, fullname, final_var.name, native,
                                        builder.types[expr], expr.line)
        if value is not None:
            return value

    # A reference to an imported module is loaded directly rather than via getattr.
    if isinstance(expr.node, MypyFile) and expr.node.fullname in builder.imports:
        return builder.load_module(expr.node.fullname)

    obj = builder.accept(expr.expr)
    rtype = builder.node_type(expr)
    # Special case: for named tuples transform attribute access to faster index access.
    typ = get_proper_type(builder.types.get(expr.expr))
    if isinstance(typ, TupleType) and typ.partial_fallback.type.is_named_tuple:
        fields = typ.partial_fallback.type.metadata['namedtuple']['fields']
        if expr.name in fields:
            index = builder.builder.load_int(fields.index(expr.name))
            return builder.gen_method_call(obj, '__getitem__', [index], rtype, expr.line)

    check_instance_attribute_access_through_class(builder, expr, typ)

    return builder.builder.get_attr(obj, expr.name, rtype, expr.line)
def check_instance_attribute_access_through_class(builder: IRBuilder,
                                                  expr: MemberExpr,
                                                  typ: Optional[ProperType]) -> None:
    """Report error if accessing an instance attribute through class object.

    Only applies to native (extension) classes, where instance attributes do
    not exist on the class object itself.
    """
    if isinstance(expr.expr, RefExpr):
        node = expr.expr.node
        if isinstance(typ, TypeType) and isinstance(typ.item, Instance):
            # TODO: Handle other item types
            node = typ.item.type
        if isinstance(node, TypeInfo):
            class_ir = builder.mapper.type_to_ir.get(node)
            if class_ir is not None and class_ir.is_ext_class:
                sym = node.get(expr.name)
                # Class variables and finals are legitimately accessible via the
                # class; only plain instance Vars are errors here.
                if (sym is not None
                        and isinstance(sym.node, Var)
                        and not sym.node.is_classvar
                        and not sym.node.is_final):
                    builder.error(
                        'Cannot access instance attribute "{}" through class object'.format(
                            expr.name),
                        expr.line
                    )
                    builder.note(
                        '(Hint: Use "x: Final = ..." or "x: ClassVar = ..." to define '
                        'a class attribute)',
                        expr.line
                    )
def transform_super_expr(builder: IRBuilder, o: SuperExpr) -> Value:
    """Translate a super() attribute access by calling builtins.super at runtime.

    This is the generic (unoptimized) path; optimized super method calls are
    handled in translate_super_method_call.
    """
    # warning(builder, 'can not optimize super() expression', o.line)
    sup_val = builder.load_module_attr_by_fullname('builtins.super', o.line)
    if o.call.args:
        args = [builder.accept(arg) for arg in o.call.args]
    else:
        # Zero-argument super(): synthesize the (type, self) argument pair.
        assert o.info is not None
        typ = builder.load_native_type_object(o.info.fullname)
        ir = builder.mapper.type_to_ir[o.info]
        iter_env = iter(builder.builder.args)
        # Grab first argument
        vself: Value = next(iter_env)
        if builder.fn_info.is_generator:
            # grab sixth argument (see comment in translate_super_method_call)
            self_targ = list(builder.symtables[-1].values())[6]
            vself = builder.read(self_targ, builder.fn_info.fitem.line)
        elif not ir.is_ext_class:
            vself = next(iter_env)  # second argument is self if non_extension class
        args = [typ, vself]
    res = builder.py_call(sup_val, args, o.line)
    return builder.py_get_attr(res, o.name, o.line)
# Calls
def transform_call_expr(builder: IRBuilder, expr: CallExpr) -> Value:
    """Dispatch a call expression to the appropriate translator by callee kind."""
    # cast(T, x) is recognized by mypy and stored in expr.analyzed.
    if isinstance(expr.analyzed, CastExpr):
        return translate_cast_expr(builder, expr.analyzed)

    callee = expr.callee
    # Unwrap type application (e.g. f[int](x)) down to the underlying callee.
    if isinstance(callee, IndexExpr) and isinstance(callee.analyzed, TypeApplication):
        callee = callee.analyzed.expr

    if isinstance(callee, SuperExpr):
        return translate_super_method_call(builder, expr, callee)
    if isinstance(callee, MemberExpr):
        return translate_method_call(builder, expr, callee)
    return translate_call(builder, expr, callee)
def translate_call(builder: IRBuilder, expr: CallExpr, callee: Expression) -> Value:
    """Translate a call whose callee is an arbitrary expression."""
    # Name/attribute references take the specialized (potentially native) path.
    if isinstance(callee, RefExpr):
        return translate_refexpr_call(builder, expr, callee)

    # Otherwise evaluate the callee and fall back to a generic Python-level call.
    callable_obj = builder.accept(callee)
    arg_values = [builder.accept(arg) for arg in expr.args]
    return builder.py_call(callable_obj, arg_values, expr.line,
                           arg_kinds=expr.arg_kinds, arg_names=expr.arg_names)
def translate_refexpr_call(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> Value:
    """Translate a non-method call."""

    # TODO: Allow special cases to have default args or named args. Currently they don't since
    # they check that everything in arg_kinds is ARG_POS.

    # If there is a specializer for this function, try calling it.
    # We would return the first successful one.
    if callee.fullname and (callee.fullname, None) in specializers:
        for specializer in specializers[callee.fullname, None]:
            val = specializer(builder, expr, callee)
            if val is not None:
                return val

    # Gen the argument values
    arg_values = [builder.accept(arg) for arg in expr.args]

    return builder.call_refexpr_with_args(expr, callee, arg_values)
def translate_method_call(builder: IRBuilder, expr: CallExpr, callee: MemberExpr) -> Value:
    """Generate IR for an arbitrary call of form e.m(...).

    This can also deal with calls to module-level functions.
    """
    if builder.is_native_ref_expr(callee):
        # Call to module-level native function or such
        return translate_call(builder, expr, callee)
    elif (
        isinstance(callee.expr, RefExpr)
        and isinstance(callee.expr.node, TypeInfo)
        and callee.expr.node in builder.mapper.type_to_ir
        and builder.mapper.type_to_ir[callee.expr.node].has_method(callee.name)
    ):
        # Call a method via the *class*
        assert isinstance(callee.expr.node, TypeInfo)
        ir = builder.mapper.type_to_ir[callee.expr.node]
        decl = ir.method_decl(callee.name)
        args = []
        arg_kinds, arg_names = expr.arg_kinds[:], expr.arg_names[:]
        # Add the class argument for class methods in extension classes
        if decl.kind == FUNC_CLASSMETHOD and ir.is_ext_class:
            args.append(builder.load_native_type_object(callee.expr.node.fullname))
            arg_kinds.insert(0, ARG_POS)
            arg_names.insert(0, None)
        args += [builder.accept(arg) for arg in expr.args]

        if ir.is_ext_class:
            # Direct (native) call to the method declaration.
            return builder.builder.call(decl, args, arg_kinds, arg_names, expr.line)
        else:
            # Non-extension class: emit a generic method call on the class object.
            obj = builder.accept(callee.expr)
            return builder.gen_method_call(obj,
                                           callee.name,
                                           args,
                                           builder.node_type(expr),
                                           expr.line,
                                           expr.arg_kinds,
                                           expr.arg_names)
    elif builder.is_module_member_expr(callee):
        # Fall back to a PyCall for non-native module calls
        function = builder.accept(callee)
        args = [builder.accept(arg) for arg in expr.args]
        return builder.py_call(function, args, expr.line,
                               arg_kinds=expr.arg_kinds, arg_names=expr.arg_names)
    else:
        receiver_typ = builder.node_type(callee.expr)

        # If there is a specializer for this method name/type, try calling it.
        # We would return the first successful one.
        if (callee.name, receiver_typ) in specializers:
            for specializer in specializers[callee.name, receiver_typ]:
                val = specializer(builder, expr, callee)
                if val is not None:
                    return val

        obj = builder.accept(callee.expr)
        args = [builder.accept(arg) for arg in expr.args]
        return builder.gen_method_call(obj,
                                       callee.name,
                                       args,
                                       builder.node_type(expr),
                                       expr.line,
                                       expr.arg_kinds,
                                       expr.arg_names)
def translate_super_method_call(builder: IRBuilder, expr: CallExpr, callee: SuperExpr) -> Value:
    """Translate super().m(...) as a direct call to the base class method.

    Falls back to the generic path when the super() form can't be statically
    resolved (wrong arg count, non-self argument, or no base declares the
    method).
    """
    if callee.info is None or (len(callee.call.args) != 0 and len(callee.call.args) != 2):
        return translate_call(builder, expr, callee)

    # We support two-argument super but only when it is super(CurrentClass, self)
    # TODO: We could support it when it is a parent class in many cases?
    if len(callee.call.args) == 2:
        self_arg = callee.call.args[1]
        if (
            not isinstance(self_arg, NameExpr)
            or not isinstance(self_arg.node, Var)
            or not self_arg.node.is_self
        ):
            return translate_call(builder, expr, callee)

        typ_arg = callee.call.args[0]
        if (
            not isinstance(typ_arg, NameExpr)
            or not isinstance(typ_arg.node, TypeInfo)
            or callee.info is not typ_arg.node
        ):
            return translate_call(builder, expr, callee)

    ir = builder.mapper.type_to_ir[callee.info]
    # Search for the method in the mro, skipping ourselves.
    for base in ir.mro[1:]:
        if callee.name in base.method_decls:
            break
    else:
        return translate_call(builder, expr, callee)

    decl = base.method_decl(callee.name)
    arg_values = [builder.accept(arg) for arg in expr.args]
    arg_kinds, arg_names = expr.arg_kinds[:], expr.arg_names[:]

    if decl.kind != FUNC_STATICMETHOD:
        # Grab first argument
        vself: Value = builder.self()
        if decl.kind == FUNC_CLASSMETHOD:
            vself = builder.call_c(type_op, [vself], expr.line)
        elif builder.fn_info.is_generator:
            # For generator classes, the self target is the 6th value
            # in the symbol table (which is an ordered dict). This is sort
            # of ugly, but we can't search by name since the 'self' parameter
            # could be named anything, and it doesn't get added to the
            # environment indexes.
            self_targ = list(builder.symtables[-1].values())[6]
            vself = builder.read(self_targ, builder.fn_info.fitem.line)
        arg_values.insert(0, vself)
        arg_kinds.insert(0, ARG_POS)
        arg_names.insert(0, None)

    return builder.builder.call(decl, arg_values, arg_kinds, arg_names, expr.line)
def translate_cast_expr(builder: IRBuilder, expr: CastExpr) -> Value:
    """Translate cast(T, x): evaluate x, then coerce it to T's runtime type."""
    operand = builder.accept(expr.expr)
    target_rtype = builder.type_to_rtype(expr.type)
    return builder.coerce(operand, target_rtype, expr.line)
# Operators
def transform_unary_expr(builder: IRBuilder, expr: UnaryExpr) -> Value:
    """Translate a unary operation (e.g. -x, ~x, not x)."""
    operand = builder.accept(expr.expr)
    return builder.unary_op(operand, expr.op, expr.line)
def transform_op_expr(builder: IRBuilder, expr: OpExpr) -> Value:
    """Translate a binary operator expression."""
    # 'and'/'or' need short-circuit control flow, not a plain binary op.
    if expr.op in ('and', 'or'):
        return builder.shortcircuit_expr(expr)

    # '%' with a string-literal LHS compiles to optimized string formatting.
    if expr.op == '%' and isinstance(expr.left, StrExpr):
        return translate_str_format_percent_sign(builder, expr.left, expr.right)

    lhs = builder.accept(expr.left)
    rhs = builder.accept(expr.right)
    return builder.binary_op(lhs, rhs, expr.op, expr.line)
def transform_index_expr(builder: IRBuilder, expr: IndexExpr) -> Value:
    """Translate an indexing expression base[index].

    Fast paths: constant index into an RTuple, and specialized slice ops;
    otherwise a generic __getitem__ call is emitted.
    """
    base = builder.accept(expr.base)
    index = expr.index

    if isinstance(base.type, RTuple) and isinstance(index, IntExpr):
        # Constant index into a native tuple: direct element read.
        return builder.add(TupleGet(base, index.value, expr.line))

    if isinstance(index, SliceExpr):
        value = try_gen_slice_op(builder, base, index)
        if value:
            return value

    index_reg = builder.accept(expr.index)
    return builder.gen_method_call(
        base, '__getitem__', [index_reg], builder.node_type(expr), expr.line)
def try_gen_slice_op(builder: IRBuilder, base: Value, index: SliceExpr) -> Optional[Value]:
    """Emit a specialized slice op for obj[x:y], obj[:x] or obj[x:].

    Only the default stride and int-typed (or omitted) bounds are
    supported, and only for a few built-in sequence types.  Return None
    when no specialized op applies.
    """
    if index.stride:
        # Only the implicit stride of 1 has a fast path.
        return None

    begin_type = (builder.node_type(index.begin_index)
                  if index.begin_index else int_rprimitive)
    end_type = (builder.node_type(index.end_index)
                if index.end_index else int_rprimitive)
    # Both begin and end index must be int (or missing).
    if not (is_int_rprimitive(begin_type) and is_int_rprimitive(end_type)):
        return None

    begin = (builder.accept(index.begin_index)
             if index.begin_index else builder.load_int(0))
    # A missing end bound is encoded as the largest short integer,
    # which no sequence length can exceed.
    end = (builder.accept(index.end_index)
           if index.end_index else builder.load_int(MAX_SHORT_INT))
    ops = [list_slice_op, tuple_slice_op, str_slice_op, bytes_slice_op]
    return builder.builder.matching_call_c(ops, [base, begin, end], index.line)
def transform_conditional_expr(builder: IRBuilder, expr: ConditionalExpr) -> Value:
    """Lower `a if cond else b` into two branches writing one register."""
    true_block, false_block, exit_block = BasicBlock(), BasicBlock(), BasicBlock()
    builder.process_conditional(expr.cond, true_block, false_block)
    result_type = builder.node_type(expr)
    # The register acts as a stand-in for a phi node joining the branches.
    result = Register(result_type)

    builder.activate_block(true_block)
    value = builder.accept(expr.if_expr)
    builder.add(Assign(result, builder.coerce(value, result_type, expr.line)))
    builder.goto(exit_block)

    builder.activate_block(false_block)
    value = builder.accept(expr.else_expr)
    builder.add(Assign(result, builder.coerce(value, result_type, expr.line)))
    builder.goto(exit_block)

    builder.activate_block(exit_block)
    return result
def transform_comparison_expr(builder: IRBuilder, e: ComparisonExpr) -> Value:
    """Lower a (possibly chained) comparison expression.

    Membership tests against small tuple/list literals are first rewritten
    at the AST level into equality chains (or constant True/False); note
    the single-item case mutates ``e`` in place before falling through to
    the generic path.  Chained comparisons are short-circuited so that
    each operand is evaluated at most once.
    """
    # x in (...)/[...]
    # x not in (...)/[...]
    if (e.operators[0] in ['in', 'not in']
            and len(e.operators) == 1
            and isinstance(e.operands[1], (TupleExpr, ListExpr))):
        items = e.operands[1].items
        n_items = len(items)
        # x in y -> x == y[0] or ... or x == y[n]
        # x not in y -> x != y[0] and ... and x != y[n]
        # 16 is arbitrarily chosen to limit code size
        if 1 < n_items < 16:
            if e.operators[0] == 'in':
                bin_op = 'or'
                cmp_op = '=='
            else:
                bin_op = 'and'
                cmp_op = '!='
            lhs = e.operands[0]
            mypy_file = builder.graph['builtins'].tree
            assert mypy_file is not None
            bool_type = Instance(cast(TypeInfo, mypy_file.names['bool'].node), [])
            exprs = []
            for item in items:
                # Each synthesized comparison needs a type entry so that
                # lowering it later can look up its result type.
                expr = ComparisonExpr([cmp_op], [lhs, item])
                builder.types[expr] = bool_type
                exprs.append(expr)

            or_expr: Expression = exprs.pop(0)
            for expr in exprs:
                or_expr = OpExpr(bin_op, or_expr, expr)
                builder.types[or_expr] = bool_type
            return builder.accept(or_expr)
        # x in [y]/(y) -> x == y
        # x not in [y]/(y) -> x != y
        elif n_items == 1:
            if e.operators[0] == 'in':
                cmp_op = '=='
            else:
                cmp_op = '!='
            e.operators = [cmp_op]
            e.operands[1] = items[0]
        # x in []/() -> False
        # x not in []/() -> True
        elif n_items == 0:
            if e.operators[0] == 'in':
                return builder.false()
            else:
                return builder.true()

    # TODO: Don't produce an expression when used in conditional context

    # All of the trickiness here is due to support for chained conditionals
    # (`e1 < e2 > e3`, etc). `e1 < e2 > e3` is approximately equivalent to
    # `e1 < e2 and e2 > e3` except that `e2` is only evaluated once.
    expr_type = builder.node_type(e)

    # go(i, prev) generates code for `ei opi e{i+1} op{i+1} ... en`,
    # assuming that prev contains the value of `ei`.
    def go(i: int, prev: Value) -> Value:
        if i == len(e.operators) - 1:
            return transform_basic_comparison(builder,
                e.operators[i], prev, builder.accept(e.operands[i + 1]), e.line)

        next = builder.accept(e.operands[i + 1])
        return builder.builder.shortcircuit_helper(
            'and', expr_type,
            lambda: transform_basic_comparison(builder,
                e.operators[i], prev, next, e.line),
            lambda: go(i + 1, next),
            e.line)

    return go(0, builder.accept(e.operands[0]))
def transform_basic_comparison(builder: IRBuilder,
                               op: str,
                               left: Value,
                               right: Value,
                               line: int) -> Value:
    """Lower a single (non-chained) comparison between two values."""
    # Tagged-int comparisons have a dedicated fast path.
    if (op in int_comparison_op_mapping.keys()
            and is_int_rprimitive(left.type)
            and is_int_rprimitive(right.type)):
        return builder.compare_tagged(left, right, op, line)

    # 'is not' / 'not in' lower as the positive op plus a negation.
    if op == 'is not':
        positive = builder.binary_op(left, right, 'is', line)
        return builder.unary_op(positive, 'not', line)
    if op == 'not in':
        positive = builder.binary_op(left, right, 'in', line)
        return builder.unary_op(positive, 'not', line)

    return builder.binary_op(left, right, op, line)
def translate_str_format_percent_sign(builder: IRBuilder,
                                      format_expr: StrExpr,
                                      rhs: Expression) -> Value:
    """Lower '...' % rhs, preferring a specialized formatting sequence.

    Falls back to the generic runtime '%' operator when the format string
    cannot be tokenized or the substitutions cannot be converted.
    """
    tokens = tokenizer_printf_style(format_expr.value)
    if tokens is not None:
        literals, format_ops = tokens
        if isinstance(rhs, TupleExpr):
            exprs = rhs.items
        elif isinstance(rhs, Expression):
            exprs = [rhs]
        else:
            exprs = []
        substitutions = convert_expr(builder, format_ops, exprs, format_expr.line)
        if substitutions is not None:
            return join_formatted_strings(builder, literals, substitutions, format_expr.line)

    # Generic fallback: call the runtime '%' binary operator.
    candidates = binary_ops.get('%', [])
    result = builder.builder.matching_call_c(
        candidates,
        [builder.accept(format_expr), builder.accept(rhs)],
        format_expr.line)
    assert result is not None, 'Cannot use binary op % at line {}'.format(format_expr.line)
    return result
# Literals
def transform_int_expr(builder: IRBuilder, expr: IntExpr) -> Value:
    """Load an integer literal."""
    literal = expr.value
    return builder.builder.load_int(literal)
def transform_float_expr(builder: IRBuilder, expr: FloatExpr) -> Value:
    """Load a float literal."""
    literal = expr.value
    return builder.builder.load_float(literal)
def transform_complex_expr(builder: IRBuilder, expr: ComplexExpr) -> Value:
    """Load a complex literal."""
    literal = expr.value
    return builder.builder.load_complex(literal)
def transform_str_expr(builder: IRBuilder, expr: StrExpr) -> Value:
    """Load a string literal."""
    literal = expr.value
    return builder.load_str(literal)
def transform_bytes_expr(builder: IRBuilder, expr: BytesExpr) -> Value:
    """Load a bytes literal.

    expr.value stores the escaped source text as a str, so round-trip it
    through escape decoding to recover the raw byte contents.
    """
    escaped = bytes(expr.value, 'utf8')
    raw = escaped.decode('unicode-escape').encode('raw-unicode-escape')
    return builder.builder.load_bytes(raw)
def transform_ellipsis(builder: IRBuilder, o: EllipsisExpr) -> Value:
    """Load the singleton Ellipsis object."""
    load = LoadAddress(ellipsis_op.type, ellipsis_op.src, o.line)
    return builder.add(load)
# Display expressions
def transform_list_expr(builder: IRBuilder, expr: ListExpr) -> Value:
    """Lower a list display such as [a, b, *rest]."""
    elements = expr.items
    return _visit_list_display(builder, elements, expr.line)
def _visit_list_display(builder: IRBuilder, items: List[Expression], line: int) -> Value:
    """Build a list value from display items (starred items allowed)."""
    return _visit_display(
        builder, items, builder.new_list_op, list_append_op, list_extend_op, line, True,
    )
def transform_tuple_expr(builder: IRBuilder, expr: TupleExpr) -> Value:
    """Lower a tuple display.

    Starred items force a variable-length tuple; otherwise build a
    fixed-length RTuple directly.
    """
    if any(isinstance(item, StarExpr) for item in expr.items):
        return _visit_tuple_display(builder, expr)

    tuple_type = builder.node_type(expr)
    # NamedTuple et al. may not give us proper RTuple type info here,
    # so fall back to treating every element as a plain object.
    if isinstance(tuple_type, RTuple):
        item_types = tuple_type.types
    else:
        item_types = [object_rprimitive] * len(expr.items)

    values = []
    for item_expr, item_type in zip(expr.items, item_types):
        value = builder.accept(item_expr)
        values.append(builder.coerce(value, item_type, item_expr.line))
    return builder.add(TupleSet(values, expr.line))
def _visit_tuple_display(builder: IRBuilder, expr: TupleExpr) -> Value:
    """Build a variable-length tuple by materializing a list first."""
    as_list = _visit_list_display(builder, expr.items, expr.line)
    return builder.call_c(list_tuple_op, [as_list], expr.line)
def transform_dict_expr(builder: IRBuilder, expr: DictExpr) -> Value:
    """Lower a dict display; a missing key marks a **-spread entry."""
    pairs = []
    for key_expr, value_expr in expr.items:
        # Keys and values are all evaluated (in order) before the dict
        # itself is constructed.
        key = None if key_expr is None else builder.accept(key_expr)
        pairs.append((key, builder.accept(value_expr)))
    return builder.builder.make_dict(pairs, expr.line)
def transform_set_expr(builder: IRBuilder, expr: SetExpr) -> Value:
    """Lower a set display such as {a, b, *rest}."""
    return _visit_display(
        builder, expr.items, builder.new_set_op, set_add_op, set_update_op,
        expr.line, False,
    )
def _visit_display(builder: IRBuilder,
                   items: List[Expression],
                   constructor_op: Callable[[List[Value], int], Value],
                   append_op: CFunctionDescription,
                   extend_op: CFunctionDescription,
                   line: int,
                   is_list: bool
                   ) -> Value:
    """Shared lowering for list/set displays with optional *-unpacking.

    All item expressions are evaluated first, in source order.  For lists,
    a leading run of non-starred values is handed straight to the
    constructor; the remaining items are added one call at a time, using
    the extend op for starred items and the append op for plain ones.
    """
    evaluated = []
    for item in items:
        if isinstance(item, StarExpr):
            evaluated.append((True, builder.accept(item.expr)))
        else:
            evaluated.append((False, builder.accept(item)))

    collection: Union[Value, None] = None
    prefix: List[Value] = []
    for starred, value in evaluated:
        if collection is None and not starred and is_list:
            prefix.append(value)
            continue
        if collection is None:
            collection = constructor_op(prefix, line)
        op = extend_op if starred else append_op
        builder.call_c(op, [collection, value], line)

    if collection is None:
        collection = constructor_op(prefix, line)
    return collection
# Comprehensions
def transform_list_comprehension(builder: IRBuilder, o: ListComprehension) -> Value:
    """Lower a list comprehension (async variants are unsupported)."""
    gen = o.generator
    if any(gen.is_async):
        builder.error('async comprehensions are unimplemented', o.line)
    return translate_list_comprehension(builder, gen)
def transform_set_comprehension(builder: IRBuilder, o: SetComprehension) -> Value:
    """Lower a set comprehension (async variants are unsupported)."""
    gen = o.generator
    if any(gen.is_async):
        builder.error('async comprehensions are unimplemented', o.line)
    return translate_set_comprehension(builder, gen)
def transform_dictionary_comprehension(builder: IRBuilder, o: DictionaryComprehension) -> Value:
    """Lower a dict comprehension by filling a fresh dict in a loop nest."""
    if any(o.is_async):
        builder.error('async comprehensions are unimplemented', o.line)
    result = builder.call_c(dict_new_op, [], o.line)
    loop_params = list(zip(o.indices, o.sequences, o.condlists))

    def set_entry() -> None:
        # Innermost loop body: evaluate the key/value pair and store it.
        key = builder.accept(o.key)
        value = builder.accept(o.value)
        builder.call_c(dict_set_item_op, [result, key, value], o.line)

    comprehension_helper(builder, loop_params, set_entry, o.line)
    return result
# Misc
def transform_slice_expr(builder: IRBuilder, expr: SliceExpr) -> Value:
    """Lower a bare slice expression into a runtime slice object."""
    def as_value(bound: Optional[Expression]) -> Value:
        # Omitted bounds become None at runtime.
        return builder.none_object() if bound is None else builder.accept(bound)

    args = [as_value(expr.begin_index),
            as_value(expr.end_index),
            as_value(expr.stride)]
    return builder.call_c(new_slice_op, args, expr.line)
def transform_generator_expr(builder: IRBuilder, o: GeneratorExpr) -> Value:
    """Lower a generator expression (eagerly, as an iterator over a list)."""
    if any(o.is_async):
        builder.error('async comprehensions are unimplemented', o.line)
    builder.warning('Treating generator comprehension as list', o.line)
    materialized = translate_list_comprehension(builder, o)
    return builder.call_c(iter_op, [materialized], o.line)
def transform_assignment_expr(builder: IRBuilder, o: AssignmentExpr) -> Value:
    """Lower a walrus assignment; the assigned value is also the result."""
    rvalue = builder.accept(o.value)
    builder.assign(builder.get_assignment_target(o.target), rvalue, o.line)
    return rvalue
| 38.914758 | 96 | 0.639193 |
from typing import List, Optional, Union, Callable, cast
from mypy.nodes import (
Expression, NameExpr, MemberExpr, SuperExpr, CallExpr, UnaryExpr, OpExpr, IndexExpr,
ConditionalExpr, ComparisonExpr, IntExpr, FloatExpr, ComplexExpr, StrExpr,
BytesExpr, EllipsisExpr, ListExpr, TupleExpr, DictExpr, SetExpr, ListComprehension,
SetComprehension, DictionaryComprehension, SliceExpr, GeneratorExpr, CastExpr, StarExpr,
AssignmentExpr,
Var, RefExpr, MypyFile, TypeInfo, TypeApplication, LDEF, ARG_POS
)
from mypy.types import TupleType, Instance, TypeType, ProperType, get_proper_type
from mypyc.common import MAX_SHORT_INT
from mypyc.ir.ops import (
Value, Register, TupleGet, TupleSet, BasicBlock, Assign, LoadAddress, RaiseStandardError
)
from mypyc.ir.rtypes import (
RTuple, object_rprimitive, is_none_rprimitive, int_rprimitive, is_int_rprimitive
)
from mypyc.ir.func_ir import FUNC_CLASSMETHOD, FUNC_STATICMETHOD
from mypyc.irbuild.format_str_tokenizer import (
tokenizer_printf_style, join_formatted_strings, convert_expr
)
from mypyc.primitives.bytes_ops import bytes_slice_op
from mypyc.primitives.registry import CFunctionDescription, builtin_names, binary_ops
from mypyc.primitives.generic_ops import iter_op
from mypyc.primitives.misc_ops import new_slice_op, ellipsis_op, type_op, get_module_dict_op
from mypyc.primitives.list_ops import list_append_op, list_extend_op, list_slice_op
from mypyc.primitives.tuple_ops import list_tuple_op, tuple_slice_op
from mypyc.primitives.dict_ops import dict_new_op, dict_set_item_op, dict_get_item_op
from mypyc.primitives.set_ops import set_add_op, set_update_op
from mypyc.primitives.str_ops import str_slice_op
from mypyc.primitives.int_ops import int_comparison_op_mapping
from mypyc.irbuild.specialize import specializers
from mypyc.irbuild.builder import IRBuilder
from mypyc.irbuild.for_helpers import (
translate_list_comprehension, translate_set_comprehension,
comprehension_helper
)
def transform_name_expr(builder: IRBuilder, expr: NameExpr) -> Value:
if expr.node is None:
builder.add(RaiseStandardError(RaiseStandardError.RUNTIME_ERROR,
"mypyc internal error: should be unreachable",
expr.line))
return builder.none()
fullname = expr.node.fullname
if fullname in builtin_names:
typ, src = builtin_names[fullname]
return builder.add(LoadAddress(typ, src, expr.line))
if fullname == 'builtins.None':
return builder.none()
if fullname == 'builtins.True':
return builder.true()
if fullname == 'builtins.False':
return builder.false()
if isinstance(expr.node, Var) and expr.node.is_final:
value = builder.emit_load_final(
expr.node,
fullname,
expr.name,
builder.is_native_ref_expr(expr),
builder.types[expr],
expr.line,
)
if value is not None:
return value
if isinstance(expr.node, MypyFile) and expr.node.fullname in builder.imports:
return builder.load_module(expr.node.fullname)
if expr.kind == LDEF and not (isinstance(expr.node, Var)
and expr.node.is_suppressed_import):
if (isinstance(expr.node, Var) and is_none_rprimitive(builder.node_type(expr))
and expr.node.is_inferred):
builder.error(
'Local variable "{}" has inferred type None; add an annotation'.format(
expr.node.name),
expr.node.line)
if isinstance(expr.node, MypyFile):
# instead load the module separately on each access.
mod_dict = builder.call_c(get_module_dict_op, [], expr.line)
obj = builder.call_c(dict_get_item_op,
[mod_dict, builder.load_str(expr.node.fullname)],
expr.line)
return obj
else:
return builder.read(builder.get_assignment_target(expr), expr.line)
return builder.load_global(expr)
def transform_member_expr(builder: IRBuilder, expr: MemberExpr) -> Value:
# First check if this is maybe a final attribute.
final = builder.get_final_ref(expr)
if final is not None:
fullname, final_var, native = final
value = builder.emit_load_final(final_var, fullname, final_var.name, native,
builder.types[expr], expr.line)
if value is not None:
return value
if isinstance(expr.node, MypyFile) and expr.node.fullname in builder.imports:
return builder.load_module(expr.node.fullname)
obj = builder.accept(expr.expr)
rtype = builder.node_type(expr)
# Special case: for named tuples transform attribute access to faster index access.
typ = get_proper_type(builder.types.get(expr.expr))
if isinstance(typ, TupleType) and typ.partial_fallback.type.is_named_tuple:
fields = typ.partial_fallback.type.metadata['namedtuple']['fields']
if expr.name in fields:
index = builder.builder.load_int(fields.index(expr.name))
return builder.gen_method_call(obj, '__getitem__', [index], rtype, expr.line)
check_instance_attribute_access_through_class(builder, expr, typ)
return builder.builder.get_attr(obj, expr.name, rtype, expr.line)
def check_instance_attribute_access_through_class(builder: IRBuilder,
expr: MemberExpr,
typ: Optional[ProperType]) -> None:
if isinstance(expr.expr, RefExpr):
node = expr.expr.node
if isinstance(typ, TypeType) and isinstance(typ.item, Instance):
# TODO: Handle other item types
node = typ.item.type
if isinstance(node, TypeInfo):
class_ir = builder.mapper.type_to_ir.get(node)
if class_ir is not None and class_ir.is_ext_class:
sym = node.get(expr.name)
if (sym is not None
and isinstance(sym.node, Var)
and not sym.node.is_classvar
and not sym.node.is_final):
builder.error(
'Cannot access instance attribute "{}" through class object'.format(
expr.name),
expr.line
)
builder.note(
'(Hint: Use "x: Final = ..." or "x: ClassVar = ..." to define '
'a class attribute)',
expr.line
)
def transform_super_expr(builder: IRBuilder, o: SuperExpr) -> Value:
# warning(builder, 'can not optimize super() expression', o.line)
sup_val = builder.load_module_attr_by_fullname('builtins.super', o.line)
if o.call.args:
args = [builder.accept(arg) for arg in o.call.args]
else:
assert o.info is not None
typ = builder.load_native_type_object(o.info.fullname)
ir = builder.mapper.type_to_ir[o.info]
iter_env = iter(builder.builder.args)
# Grab first argument
vself: Value = next(iter_env)
if builder.fn_info.is_generator:
# grab sixth argument (see comment in translate_super_method_call)
self_targ = list(builder.symtables[-1].values())[6]
vself = builder.read(self_targ, builder.fn_info.fitem.line)
elif not ir.is_ext_class:
vself = next(iter_env) # second argument is self if non_extension class
args = [typ, vself]
res = builder.py_call(sup_val, args, o.line)
return builder.py_get_attr(res, o.name, o.line)
# Calls
def transform_call_expr(builder: IRBuilder, expr: CallExpr) -> Value:
if isinstance(expr.analyzed, CastExpr):
return translate_cast_expr(builder, expr.analyzed)
callee = expr.callee
if isinstance(callee, IndexExpr) and isinstance(callee.analyzed, TypeApplication):
callee = callee.analyzed.expr # Unwrap type application
if isinstance(callee, MemberExpr):
return translate_method_call(builder, expr, callee)
elif isinstance(callee, SuperExpr):
return translate_super_method_call(builder, expr, callee)
else:
return translate_call(builder, expr, callee)
def translate_call(builder: IRBuilder, expr: CallExpr, callee: Expression) -> Value:
# The common case of calls is refexprs
if isinstance(callee, RefExpr):
return translate_refexpr_call(builder, expr, callee)
function = builder.accept(callee)
args = [builder.accept(arg) for arg in expr.args]
return builder.py_call(function, args, expr.line,
arg_kinds=expr.arg_kinds, arg_names=expr.arg_names)
def translate_refexpr_call(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> Value:
# TODO: Allow special cases to have default args or named args. Currently they don't since
if callee.fullname and (callee.fullname, None) in specializers:
for specializer in specializers[callee.fullname, None]:
val = specializer(builder, expr, callee)
if val is not None:
return val
arg_values = [builder.accept(arg) for arg in expr.args]
return builder.call_refexpr_with_args(expr, callee, arg_values)
def translate_method_call(builder: IRBuilder, expr: CallExpr, callee: MemberExpr) -> Value:
if builder.is_native_ref_expr(callee):
return translate_call(builder, expr, callee)
elif (
isinstance(callee.expr, RefExpr)
and isinstance(callee.expr.node, TypeInfo)
and callee.expr.node in builder.mapper.type_to_ir
and builder.mapper.type_to_ir[callee.expr.node].has_method(callee.name)
):
assert isinstance(callee.expr.node, TypeInfo)
ir = builder.mapper.type_to_ir[callee.expr.node]
decl = ir.method_decl(callee.name)
args = []
arg_kinds, arg_names = expr.arg_kinds[:], expr.arg_names[:]
if decl.kind == FUNC_CLASSMETHOD and ir.is_ext_class:
args.append(builder.load_native_type_object(callee.expr.node.fullname))
arg_kinds.insert(0, ARG_POS)
arg_names.insert(0, None)
args += [builder.accept(arg) for arg in expr.args]
if ir.is_ext_class:
return builder.builder.call(decl, args, arg_kinds, arg_names, expr.line)
else:
obj = builder.accept(callee.expr)
return builder.gen_method_call(obj,
callee.name,
args,
builder.node_type(expr),
expr.line,
expr.arg_kinds,
expr.arg_names)
elif builder.is_module_member_expr(callee):
function = builder.accept(callee)
args = [builder.accept(arg) for arg in expr.args]
return builder.py_call(function, args, expr.line,
arg_kinds=expr.arg_kinds, arg_names=expr.arg_names)
else:
receiver_typ = builder.node_type(callee.expr)
if (callee.name, receiver_typ) in specializers:
for specializer in specializers[callee.name, receiver_typ]:
val = specializer(builder, expr, callee)
if val is not None:
return val
obj = builder.accept(callee.expr)
args = [builder.accept(arg) for arg in expr.args]
return builder.gen_method_call(obj,
callee.name,
args,
builder.node_type(expr),
expr.line,
expr.arg_kinds,
expr.arg_names)
def translate_super_method_call(builder: IRBuilder, expr: CallExpr, callee: SuperExpr) -> Value:
if callee.info is None or (len(callee.call.args) != 0 and len(callee.call.args) != 2):
return translate_call(builder, expr, callee)
if len(callee.call.args) == 2:
self_arg = callee.call.args[1]
if (
not isinstance(self_arg, NameExpr)
or not isinstance(self_arg.node, Var)
or not self_arg.node.is_self
):
return translate_call(builder, expr, callee)
typ_arg = callee.call.args[0]
if (
not isinstance(typ_arg, NameExpr)
or not isinstance(typ_arg.node, TypeInfo)
or callee.info is not typ_arg.node
):
return translate_call(builder, expr, callee)
ir = builder.mapper.type_to_ir[callee.info]
for base in ir.mro[1:]:
if callee.name in base.method_decls:
break
else:
return translate_call(builder, expr, callee)
decl = base.method_decl(callee.name)
arg_values = [builder.accept(arg) for arg in expr.args]
arg_kinds, arg_names = expr.arg_kinds[:], expr.arg_names[:]
if decl.kind != FUNC_STATICMETHOD:
vself: Value = builder.self()
if decl.kind == FUNC_CLASSMETHOD:
vself = builder.call_c(type_op, [vself], expr.line)
elif builder.fn_info.is_generator:
# could be named anything, and it doesn't get added to the
self_targ = list(builder.symtables[-1].values())[6]
vself = builder.read(self_targ, builder.fn_info.fitem.line)
arg_values.insert(0, vself)
arg_kinds.insert(0, ARG_POS)
arg_names.insert(0, None)
return builder.builder.call(decl, arg_values, arg_kinds, arg_names, expr.line)
def translate_cast_expr(builder: IRBuilder, expr: CastExpr) -> Value:
src = builder.accept(expr.expr)
target_type = builder.type_to_rtype(expr.type)
return builder.coerce(src, target_type, expr.line)
def transform_unary_expr(builder: IRBuilder, expr: UnaryExpr) -> Value:
return builder.unary_op(builder.accept(expr.expr), expr.op, expr.line)
def transform_op_expr(builder: IRBuilder, expr: OpExpr) -> Value:
if expr.op in ('and', 'or'):
return builder.shortcircuit_expr(expr)
if expr.op == '%' and isinstance(expr.left, StrExpr):
return translate_str_format_percent_sign(builder, expr.left, expr.right)
return builder.binary_op(
builder.accept(expr.left), builder.accept(expr.right), expr.op, expr.line
)
def transform_index_expr(builder: IRBuilder, expr: IndexExpr) -> Value:
base = builder.accept(expr.base)
index = expr.index
if isinstance(base.type, RTuple) and isinstance(index, IntExpr):
return builder.add(TupleGet(base, index.value, expr.line))
if isinstance(index, SliceExpr):
value = try_gen_slice_op(builder, base, index)
if value:
return value
index_reg = builder.accept(expr.index)
return builder.gen_method_call(
base, '__getitem__', [index_reg], builder.node_type(expr), expr.line)
def try_gen_slice_op(builder: IRBuilder, base: Value, index: SliceExpr) -> Optional[Value]:
if index.stride:
return None
if index.begin_index:
begin_type = builder.node_type(index.begin_index)
else:
begin_type = int_rprimitive
if index.end_index:
end_type = builder.node_type(index.end_index)
else:
end_type = int_rprimitive
if is_int_rprimitive(begin_type) and is_int_rprimitive(end_type):
if index.begin_index:
begin = builder.accept(index.begin_index)
else:
begin = builder.load_int(0)
if index.end_index:
end = builder.accept(index.end_index)
else:
end = builder.load_int(MAX_SHORT_INT)
candidates = [list_slice_op, tuple_slice_op, str_slice_op, bytes_slice_op]
return builder.builder.matching_call_c(candidates, [base, begin, end], index.line)
return None
def transform_conditional_expr(builder: IRBuilder, expr: ConditionalExpr) -> Value:
if_body, else_body, next = BasicBlock(), BasicBlock(), BasicBlock()
builder.process_conditional(expr.cond, if_body, else_body)
expr_type = builder.node_type(expr)
# Having actual Phi nodes would be really nice here!
target = Register(expr_type)
builder.activate_block(if_body)
true_value = builder.accept(expr.if_expr)
true_value = builder.coerce(true_value, expr_type, expr.line)
builder.add(Assign(target, true_value))
builder.goto(next)
builder.activate_block(else_body)
false_value = builder.accept(expr.else_expr)
false_value = builder.coerce(false_value, expr_type, expr.line)
builder.add(Assign(target, false_value))
builder.goto(next)
builder.activate_block(next)
return target
def transform_comparison_expr(builder: IRBuilder, e: ComparisonExpr) -> Value:
# x in (...)/[...]
# x not in (...)/[...]
if (e.operators[0] in ['in', 'not in']
and len(e.operators) == 1
and isinstance(e.operands[1], (TupleExpr, ListExpr))):
items = e.operands[1].items
n_items = len(items)
# x in y -> x == y[0] or ... or x == y[n]
# x not in y -> x != y[0] and ... and x != y[n]
# 16 is arbitrarily chosen to limit code size
if 1 < n_items < 16:
if e.operators[0] == 'in':
bin_op = 'or'
cmp_op = '=='
else:
bin_op = 'and'
cmp_op = '!='
lhs = e.operands[0]
mypy_file = builder.graph['builtins'].tree
assert mypy_file is not None
bool_type = Instance(cast(TypeInfo, mypy_file.names['bool'].node), [])
exprs = []
for item in items:
expr = ComparisonExpr([cmp_op], [lhs, item])
builder.types[expr] = bool_type
exprs.append(expr)
or_expr: Expression = exprs.pop(0)
for expr in exprs:
or_expr = OpExpr(bin_op, or_expr, expr)
builder.types[or_expr] = bool_type
return builder.accept(or_expr)
# x in [y]/(y) -> x == y
# x not in [y]/(y) -> x != y
elif n_items == 1:
if e.operators[0] == 'in':
cmp_op = '=='
else:
cmp_op = '!='
e.operators = [cmp_op]
e.operands[1] = items[0]
# x in []/() -> False
# x not in []/() -> True
elif n_items == 0:
if e.operators[0] == 'in':
return builder.false()
else:
return builder.true()
# TODO: Don't produce an expression when used in conditional context
expr_type = builder.node_type(e)
def go(i: int, prev: Value) -> Value:
if i == len(e.operators) - 1:
return transform_basic_comparison(builder,
e.operators[i], prev, builder.accept(e.operands[i + 1]), e.line)
next = builder.accept(e.operands[i + 1])
return builder.builder.shortcircuit_helper(
'and', expr_type,
lambda: transform_basic_comparison(builder,
e.operators[i], prev, next, e.line),
lambda: go(i + 1, next),
e.line)
return go(0, builder.accept(e.operands[0]))
def transform_basic_comparison(builder: IRBuilder,
op: str,
left: Value,
right: Value,
line: int) -> Value:
if (is_int_rprimitive(left.type) and is_int_rprimitive(right.type)
and op in int_comparison_op_mapping.keys()):
return builder.compare_tagged(left, right, op, line)
negate = False
if op == 'is not':
op, negate = 'is', True
elif op == 'not in':
op, negate = 'in', True
target = builder.binary_op(left, right, op, line)
if negate:
target = builder.unary_op(target, 'not', line)
return target
def translate_str_format_percent_sign(builder: IRBuilder,
format_expr: StrExpr,
rhs: Expression) -> Value:
tokens = tokenizer_printf_style(format_expr.value)
if tokens is not None:
literals, format_ops = tokens
exprs = []
if isinstance(rhs, TupleExpr):
exprs = rhs.items
elif isinstance(rhs, Expression):
exprs.append(rhs)
substitutions = convert_expr(builder, format_ops, exprs, format_expr.line)
if substitutions is not None:
return join_formatted_strings(builder, literals, substitutions, format_expr.line)
call_c_ops_candidates = binary_ops.get('%', [])
ret = builder.builder.matching_call_c(call_c_ops_candidates,
[builder.accept(format_expr), builder.accept(rhs)],
format_expr.line)
assert ret is not None, 'Cannot use binary op % at line {}'.format(format_expr.line)
return ret
def transform_int_expr(builder: IRBuilder, expr: IntExpr) -> Value:
return builder.builder.load_int(expr.value)
def transform_float_expr(builder: IRBuilder, expr: FloatExpr) -> Value:
return builder.builder.load_float(expr.value)
def transform_complex_expr(builder: IRBuilder, expr: ComplexExpr) -> Value:
return builder.builder.load_complex(expr.value)
def transform_str_expr(builder: IRBuilder, expr: StrExpr) -> Value:
return builder.load_str(expr.value)
def transform_bytes_expr(builder: IRBuilder, expr: BytesExpr) -> Value:
value = bytes(expr.value, 'utf8').decode('unicode-escape').encode('raw-unicode-escape')
return builder.builder.load_bytes(value)
def transform_ellipsis(builder: IRBuilder, o: EllipsisExpr) -> Value:
return builder.add(LoadAddress(ellipsis_op.type, ellipsis_op.src, o.line))
def transform_list_expr(builder: IRBuilder, expr: ListExpr) -> Value:
return _visit_list_display(builder, expr.items, expr.line)
def _visit_list_display(builder: IRBuilder, items: List[Expression], line: int) -> Value:
return _visit_display(
builder,
items,
builder.new_list_op,
list_append_op,
list_extend_op,
line,
True
)
def transform_tuple_expr(builder: IRBuilder, expr: TupleExpr) -> Value:
if any(isinstance(item, StarExpr) for item in expr.items):
return _visit_tuple_display(builder, expr)
tuple_type = builder.node_type(expr)
types = (tuple_type.types if isinstance(tuple_type, RTuple)
else [object_rprimitive] * len(expr.items))
items = []
for item_expr, item_type in zip(expr.items, types):
reg = builder.accept(item_expr)
items.append(builder.coerce(reg, item_type, item_expr.line))
return builder.add(TupleSet(items, expr.line))
def _visit_tuple_display(builder: IRBuilder, expr: TupleExpr) -> Value:
val_as_list = _visit_list_display(builder, expr.items, expr.line)
return builder.call_c(list_tuple_op, [val_as_list], expr.line)
def transform_dict_expr(builder: IRBuilder, expr: DictExpr) -> Value:
key_value_pairs = []
for key_expr, value_expr in expr.items:
key = builder.accept(key_expr) if key_expr is not None else None
value = builder.accept(value_expr)
key_value_pairs.append((key, value))
return builder.builder.make_dict(key_value_pairs, expr.line)
def transform_set_expr(builder: IRBuilder, expr: SetExpr) -> Value:
return _visit_display(
builder,
expr.items,
builder.new_set_op,
set_add_op,
set_update_op,
expr.line,
False
)
def _visit_display(builder: IRBuilder,
items: List[Expression],
constructor_op: Callable[[List[Value], int], Value],
append_op: CFunctionDescription,
extend_op: CFunctionDescription,
line: int,
is_list: bool
) -> Value:
accepted_items = []
for item in items:
if isinstance(item, StarExpr):
accepted_items.append((True, builder.accept(item.expr)))
else:
accepted_items.append((False, builder.accept(item)))
result: Union[Value, None] = None
initial_items = []
for starred, value in accepted_items:
if result is None and not starred and is_list:
initial_items.append(value)
continue
if result is None:
result = constructor_op(initial_items, line)
builder.call_c(extend_op if starred else append_op, [result, value], line)
if result is None:
result = constructor_op(initial_items, line)
return result
def transform_list_comprehension(builder: IRBuilder, o: ListComprehension) -> Value:
if any(o.generator.is_async):
builder.error('async comprehensions are unimplemented', o.line)
return translate_list_comprehension(builder, o.generator)
def transform_set_comprehension(builder: IRBuilder, o: SetComprehension) -> Value:
if any(o.generator.is_async):
builder.error('async comprehensions are unimplemented', o.line)
return translate_set_comprehension(builder, o.generator)
def transform_dictionary_comprehension(builder: IRBuilder, o: DictionaryComprehension) -> Value:
if any(o.is_async):
builder.error('async comprehensions are unimplemented', o.line)
d = builder.call_c(dict_new_op, [], o.line)
loop_params = list(zip(o.indices, o.sequences, o.condlists))
def gen_inner_stmts() -> None:
k = builder.accept(o.key)
v = builder.accept(o.value)
builder.call_c(dict_set_item_op, [d, k, v], o.line)
comprehension_helper(builder, loop_params, gen_inner_stmts, o.line)
return d
def transform_slice_expr(builder: IRBuilder, expr: SliceExpr) -> Value:
def get_arg(arg: Optional[Expression]) -> Value:
if arg is None:
return builder.none_object()
else:
return builder.accept(arg)
args = [get_arg(expr.begin_index),
get_arg(expr.end_index),
get_arg(expr.stride)]
return builder.call_c(new_slice_op, args, expr.line)
def transform_generator_expr(builder: IRBuilder, o: GeneratorExpr) -> Value:
    """Lower a generator expression by materializing it eagerly and wrapping in iter()."""
    if any(o.is_async):
        builder.error('async comprehensions are unimplemented', o.line)
    # True lazy generators are not supported here; warn and build a list instead.
    builder.warning('Treating generator comprehension as list', o.line)
    materialized = translate_list_comprehension(builder, o)
    return builder.call_c(iter_op, [materialized], o.line)
def transform_assignment_expr(builder: IRBuilder, o: AssignmentExpr) -> Value:
    """Lower a walrus (:=) expression: assign to the target, then yield the value."""
    rhs = builder.accept(o.value)
    builder.assign(builder.get_assignment_target(o.target), rhs, o.line)
    return rhs
| true | true |
f71fac3e2c7d6447e6fb71445d88074908c05f79 | 507 | py | Python | neighborapp/migrations/0005_auto_20220104_1254.py | Kips-alih/neighborhood | 216d81b352c0f7f61812280f3aa816f8450a61bc | [
"MIT"
] | null | null | null | neighborapp/migrations/0005_auto_20220104_1254.py | Kips-alih/neighborhood | 216d81b352c0f7f61812280f3aa816f8450a61bc | [
"MIT"
] | null | null | null | neighborapp/migrations/0005_auto_20220104_1254.py | Kips-alih/neighborhood | 216d81b352c0f7f61812280f3aa816f8450a61bc | [
"MIT"
] | null | null | null | # Generated by Django 2.2.24 on 2022-01-04 09:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter Neighborhood.location to a nullable ForeignKey with CASCADE on delete."""

    dependencies = [
        ('neighborapp', '0004_neighborhood_description'),
    ]
    operations = [
        migrations.AlterField(
            model_name='neighborhood',
            name='location',
            # null=True lets a neighborhood exist without a location row.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='neighborapp.Location'),
        ),
    ]
| 25.35 | 119 | 0.658777 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('neighborapp', '0004_neighborhood_description'),
]
operations = [
migrations.AlterField(
model_name='neighborhood',
name='location',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='neighborapp.Location'),
),
]
| true | true |
f71fada5cf3ee5e227b5f46f44161960eda0e4a2 | 4,783 | py | Python | main.py | PiotrBosowski/prl-browser | eabfcc9307e0ff27d490841b80f9d1cdc06f022f | [
"MIT"
] | null | null | null | main.py | PiotrBosowski/prl-browser | eabfcc9307e0ff27d490841b80f9d1cdc06f022f | [
"MIT"
] | null | null | null | main.py | PiotrBosowski/prl-browser | eabfcc9307e0ff27d490841b80f9d1cdc06f022f | [
"MIT"
] | null | null | null | from bottle import *
import os
import settings
from domain.training_model import Training
from training_utils.combined_outputs import overview_csv
@get(f'/<session_name>/<model_name>/refresh')
def invalidate_cache(session_name, model_name):
    """Reload every training from disk, then bounce back to the requested model page."""
    global global_models
    global_models = Training.load_all()
    return redirect(f"/{session_name}/{model_name}")
@post(f'/<session_name>/<model_name>/delete')
def delete_model(session_name, model_name):
    """Delete a model and redirect to the follow-up page supplied in the POST form.

    Falls back to the index page when the deletion did not happen.
    """
    if not Training.delete_model(global_models, session_name, model_name):
        return redirect('/')
    return redirect(
        f"/{request.forms.next_session}/{request.forms.next_model}")
@route(f'/<session>/<model>/<filename>')
def send_image(session, model, filename):
    """Serve one PNG stored directly under a model's directory.

    :param session: training-session directory name
    :param model: model directory name
    :param filename: image file name
    :return: bottle static_file response for the PNG
    """
    root = os.path.join(settings.models_dir, session, model)
    return static_file(filename, root=root, mimetype='image/png')
@route(f'/<session>/<model>/<report>/<filename>')
def send_report_image(session, model, report, filename):
    """Serve one PNG stored under a specific report of a model.

    :param session: training-session directory name
    :param model: model directory name
    :param report: report sub-directory name
    :param filename: image file name
    :return: bottle static_file response for the PNG
    """
    root = os.path.join(settings.models_dir, session, model, report)
    return static_file(filename, root=root, mimetype='image/png')
@get(f'/<session_name>/<model_name>')
@view('model_template')
def model_page(session_name, model_name):
    """
    Renders the detail page for one model.

    Reads the module-level view settings (``recent``, ``filter``, ``sortby``) and
    the optional ``?test=<index>`` query parameter that selects a report to show.

    :param session_name: training-session directory name
    :param model_name: model name
    :return: model view generated from model_template, or a redirect to / when
             the model is filtered out of the current selection
    """
    # Pick up models added/removed on disk since the last request.
    Training.refresh_models(global_models)
    current_model = global_models[session_name][model_name]
    test_id = request.query.test
    # Choose which test report to display: explicit ?test=, else the latest, else none.
    if test_id and current_model.reports:
        current_test = current_model.reports[int(test_id)]
        current_test_url = os.path.basename(current_test.path)
    elif current_model.reports:
        current_test = current_model.get_last_report()
        current_test_url = os.path.basename(current_test.path)
    else:
        current_test = None
        current_test_url = ""
    models = Training.models_flat(global_models)
    models = Training.models_select(models, recent, filter, sortby)
    # datasets_structures = {name: json.dumps(dataset['sources'], indent=2)
    #                        for name, dataset in current_model.datasets.items()}
    datasets = current_model.datasets
    if current_model in models:
        index = models.index(current_model)
        return template('browser/model_template',
                        models=models,
                        model=current_model,
                        datasets=datasets,
                        # datasets_structures=datasets_structures,
                        validation=current_model.history[-1].report.confusion,
                        # NOTE(review): at index 0 this wraps to the last model —
                        # presumably intentional circular prev-navigation; confirm.
                        previous=models[index - 1],
                        following=Training.get_next_model(index, models),
                        current_test=current_test,
                        current_test_url=current_test_url,
                        settings=settings)
    else:
        return redirect('/')
@route('/favicon.ico', method='GET')
def get_favicon():
    """
    Browsers request /favicon.ico automatically; serve the bundled icon.

    :return: static favicon file
    """
    return static_file('favicon.ico', root='browser')
@route('/style.css')
def send_style():
    """
    Serve the site-wide stylesheet.

    :return: style.css as a static file
    """
    return static_file('style.css', root='browser')
@route('/navigation.js')
def send_js():
    """Serve the navigation.js client script."""
    return static_file('navigation.js', root='browser')
@route('/jquery-3.5.1.min.js')
def send_jquery():
    """Serve the bundled jQuery library.

    Renamed from ``send_js``: the previous definition reused that name and
    shadowed the navigation.js handler at module level (routing still worked
    only because bottle registers handlers at decoration time).
    """
    return static_file('jquery-3.5.1.min.js', root='browser')
@get('/overview.csv')
def generate_csv():
    """Build the cross-model overview CSV and stream it back to the client."""
    csv_name = overview_csv(global_models)
    return static_file(csv_name, root=settings.models_dir)
@route('/')
@view('report_template')
def index():
    """Landing page: redirect to the first model that passes the active filters.

    :return: redirect to the top model's page, or a plain message when empty
    """
    Training.refresh_models(global_models)
    selected = Training.models_select(
        Training.models_flat(global_models), recent, filter, sortby)
    if not selected:
        return "no models to show"
    return redirect(selected[0].url())
def browse_results():
    """
    Launches the server at localhost:8080.
    """
    # Blocking call; serves all routes registered above.
    run(host='localhost', port=8080)
if __name__ == "__main__":
    # Module-level view settings; the route handlers above read these as globals.
    recent = 0  # presumably "limit to N most recent" — verify in Training.models_select
    filter = ""  # NOTE(review): shadows the builtin `filter`; handlers pass it to models_select
    sortby = "accuracy"
    reverse_order = True  # NOTE(review): not referenced by any handler in this file — confirm use
    global_models = Training.load_all(skip_raw_outputs=True,
                                      skip_wrong_preds=True)
    browse_results()
| 29.708075 | 81 | 0.655237 | from bottle import *
import os
import settings
from domain.training_model import Training
from training_utils.combined_outputs import overview_csv
@get(f'/<session_name>/<model_name>/refresh')
def invalidate_cache(session_name, model_name):
global global_models
global_models = Training.load_all()
return redirect(f"/{session_name}/{model_name}")
@post(f'/<session_name>/<model_name>/delete')
def delete_model(session_name, model_name):
deleted = Training.delete_model(global_models, session_name, model_name)
if deleted:
return redirect(
f"/{request.forms.next_session}/{request.forms.next_model}")
else:
return redirect('/')
@route(f'/<session>/<model>/<filename>')
def send_image(session, model, filename):
model_path = os.path.join(settings.models_dir, session, model)
return static_file(filename, root=model_path, mimetype='image/png')
@route(f'/<session>/<model>/<report>/<filename>')
def send_report_image(session, model, report, filename):
model_path = os.path.join(settings.models_dir, session, model, report)
return static_file(filename, root=model_path, mimetype='image/png')
@get(f'/<session_name>/<model_name>')
@view('model_template')
def model_page(session_name, model_name):
Training.refresh_models(global_models)
current_model = global_models[session_name][model_name]
test_id = request.query.test
if test_id and current_model.reports:
current_test = current_model.reports[int(test_id)]
current_test_url = os.path.basename(current_test.path)
elif current_model.reports:
current_test = current_model.get_last_report()
current_test_url = os.path.basename(current_test.path)
else:
current_test = None
current_test_url = ""
models = Training.models_flat(global_models)
models = Training.models_select(models, recent, filter, sortby)
datasets = current_model.datasets
if current_model in models:
index = models.index(current_model)
return template('browser/model_template',
models=models,
model=current_model,
datasets=datasets,
validation=current_model.history[-1].report.confusion,
previous=models[index - 1],
following=Training.get_next_model(index, models),
current_test=current_test,
current_test_url=current_test_url,
settings=settings)
else:
return redirect('/')
@route('/favicon.ico', method='GET')
def get_favicon():
return static_file('favicon.ico', root='browser')
@route('/style.css')
def send_style():
return static_file('style.css', root='browser')
@route('/navigation.js')
def send_js():
return static_file('navigation.js', root='browser')
@route('/jquery-3.5.1.min.js')
def send_js():
return static_file('jquery-3.5.1.min.js', root='browser')
@get('/overview.csv')
def generate_csv():
filename = overview_csv(global_models)
return static_file(filename, root=settings.models_dir)
@route('/')
@view('report_template')
def index():
Training.refresh_models(global_models)
models = Training.models_flat(global_models)
models = Training.models_select(models, recent, filter, sortby)
if models:
return redirect(models[0].url())
else:
return "no models to show"
def browse_results():
run(host='localhost', port=8080)
if __name__ == "__main__":
recent = 0
filter = ""
sortby = "accuracy"
reverse_order = True
global_models = Training.load_all(skip_raw_outputs=True,
skip_wrong_preds=True)
browse_results()
| true | true |
f71fada6cd8a268a05f3827d0723a58a4d32aa28 | 9,366 | py | Python | MDSimsEval/pca_analysis.py | MikeXydas/MDSimsEval | 6c32bd8b74e421120beca18d18c3e58fc8f85247 | [
"MIT"
] | 1 | 2020-06-30T12:56:41.000Z | 2020-06-30T12:56:41.000Z | MDSimsEval/pca_analysis.py | MikeXydas/MDSimsEval | 6c32bd8b74e421120beca18d18c3e58fc8f85247 | [
"MIT"
] | 2 | 2021-06-08T21:53:33.000Z | 2021-12-13T20:43:42.000Z | MDSimsEval/pca_analysis.py | MikeXydas/MDSimsEval | 6c32bd8b74e421120beca18d18c3e58fc8f85247 | [
"MIT"
] | null | null | null | import math
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from tqdm import tqdm
def scree_plot(analysis_actors_dict, dir_path, pcs_on_scree_plot=50, variance_ratio_line=0.75):
    """
    Creates a plot with the scree plots for each ligand and saves it on the specified ``dir_path``. With blue color is
    class 1 and with orange color class 2.

    Args:
        analysis_actors_dict: ``{ "Agonists": List[AnalysisActor.class], "Antagonists": List[AnalysisActor.class] }``
        dir_path (str): The path of the directory the plot will be saved (must end with a ``/``)
        pcs_on_scree_plot(int): The number of the first PCs that will be used on the scree plots
        variance_ratio_line(float): Float from 0.0 to 1.0 which specifies the variance ratio that a vertical line will
                                    be plotted
    """
    plot_cols = 3
    total_ligands = len(analysis_actors_dict['Agonists']) + len(analysis_actors_dict['Antagonists'])
    # Bug fix: the ligand total must be divided by plot_cols as a whole; previously
    # only the antagonist count was divided (missing parentheses), so the grid and
    # the figure height were grossly over-allocated.
    plot_rows = math.ceil(total_ligands / plot_cols)
    fig = plt.figure(figsize=(18, 6 * plot_rows))

    def _add_scree_subplot(ligand, plot_index, color):
        # One scree subplot: per-PC variance plus a dashed marker where the
        # cumulative explained-variance ratio first exceeds variance_ratio_line.
        fig.add_subplot(plot_rows, plot_cols, plot_index)
        cutoff = np.where(np.cumsum(ligand.pca_res.explained_variance_ratio_) > variance_ratio_line)[0][0]
        plt.axvline(x=cutoff, ls='--', c='grey',
                    label=f"Reached {int(variance_ratio_line * 100)}% variance")
        variances = ligand.pca_res.explained_variance_[:pcs_on_scree_plot]
        # Label normalized to "Variance" for both classes: the plotted quantity is
        # explained_variance_, not the ratio (the old agonist label said "Variance Ratio").
        plt.plot(np.arange(len(variances)), variances, label="Variance", color=color)
        plt.ylabel("Variance")
        plt.xlabel("#PC")
        plt.title(ligand.drug_name)
        plt.legend()

    plot_index = 1
    for ligand in analysis_actors_dict['Agonists']:
        _add_scree_subplot(ligand, plot_index, 'C0')  # 'C0' = default matplotlib blue
        plot_index += 1
    for ligand in analysis_actors_dict['Antagonists']:
        _add_scree_subplot(ligand, plot_index, 'orange')
        plot_index += 1

    fig.suptitle('PCA Scree Plots\nAgonists: Blue\nAntagonists: Orange', fontsize=26, y=0.93)
    plt.savefig(f'{dir_path}pca_scree_plots.png', format='png')
def populate_variance_showcase_df(analysis_actors_dict, drug_type):
    """
    Creates a DataFrame having for each drug the number of PCs needed in order to have 50%, 75% and 95% variance

    Args:
        analysis_actors_dict: ``{ "Agonists": List[AnalysisActor.class], "Antagonists": List[AnalysisActor.class] }``
        drug_type (str): The class name ('Agonists' or 'Antagonists')

    Returns:
        pd.DataFrame: A DataFrame with columns ``['Drug Name', 'Type', '50% Variance', '75% Variance', '95% Variance']``
    """
    columns = ['Drug Name', 'Type', '50% Variance', '75% Variance', '95% Variance']

    def pcs_needed(ligand, threshold):
        # +1 converts the 0-based index of the first PC crossing the threshold
        # into a PC count.
        cumulative = np.cumsum(ligand.pca_res.explained_variance_ratio_)
        return np.where(cumulative > threshold)[0][0] + 1

    rows = [[ligand.drug_name,
             drug_type,
             pcs_needed(ligand, 0.5),
             pcs_needed(ligand, 0.75),
             pcs_needed(ligand, 0.95)]
            for ligand in analysis_actors_dict[drug_type]]
    # Build the frame in one shot: DataFrame.append was deprecated (removed in
    # pandas 2.0) and row-wise appending is quadratic anyway.
    return pd.DataFrame(rows, columns=columns)
def project_pca_on_2d(analysis_actors_dict, drug_type, dir_path):
    """
    Plots the 2d projection on the first two PCs of the atom space. The colorbar expresses the progression
    of the frames (color0 -> frame0, color1 -> last_frame).

    The figure is saved as ``pca_<drug_type>_2d_projection.png`` under ``dir_path``.

    Args:
        analysis_actors_dict: ``{ "Agonists": List[AnalysisActor.class], "Antagonists": List[AnalysisActor.class] }``
        drug_type (str): 'Agonists' or 'Antagonists'
        dir_path (str): The path of the directory the plot will be saved (must end with a ``/``)
    """
    cols = 3
    rows = math.ceil(len(analysis_actors_dict[drug_type]) / cols)
    fig = plt.figure(figsize=(18, 25))
    plot_index = 1
    for which_ligand in tqdm(analysis_actors_dict[drug_type], desc="Projecting " + drug_type):
        pca_space_2D = which_ligand.pca_res.transform(
            which_ligand.pca_xyz)  # Transform on the atom selection that PCA was fitted
        step = 1  # Frames we are skipping for computational reasons (if step == 1 then no frame is skipped)
        # Scatter plotting: frame order is encoded as a 0..1 color ramp.
        ax = fig.add_subplot(rows, cols, plot_index)
        plt.scatter(pca_space_2D[::step, 0], pca_space_2D[::step, 1],
                    c=np.arange(len(pca_space_2D) / step) / (len(pca_space_2D) / step), marker='o')
        plt.xlabel('PC1')
        plt.ylabel('PC2')
        # Fraction of total variance captured by the first two PCs (shown in the title).
        explained_variance_2PC = which_ligand.pca_res.explained_variance_ratio_[0] + \
                                 which_ligand.pca_res.explained_variance_ratio_[1]
        plt.title(f'{which_ligand.drug_name} | Structural Motion Variance: {explained_variance_2PC}')
        plt.colorbar()  # Add the colorbar which goes from color0 to color1 as frames progress
        plot_index += 1
    fig.suptitle(f'PCA 2D Projection of {drug_type} as frames progress', fontsize=26, y=1.03)
    plt.tight_layout()
    plt.savefig(f'{dir_path}pca_{drug_type}_2d_projection.png', format='png')
    return None
def sort_residues_by_loadings(ligand, variance_explained=0.5):
    """Compute per-residue PCA loadings for one ligand.

    Combines the first k PCs, where k is the smallest number of components whose
    cumulative explained-variance ratio exceeds ``variance_explained``.

    Args:
        ligand(AnalysisActor.class): An AnalysisActor object in which PCA is calculated
        variance_explained (float): Defines which PCs will be combined to calculate the final loadings

    Returns:
        pd.DataFrame indexed by ResidueId whose single column (named after the drug)
        holds the per-residue loadings
    """
    pca = ligand.get_pca()
    cumulative = np.cumsum(pca.explained_variance_ratio_)
    n_components = int(np.where(cumulative > variance_explained)[0][0]) + 1
    # loadings = |eigenvectors| @ sqrt(eigenvalues), restricted to the first k PCs
    per_axis = np.abs(pca.components_[:n_components, :]).T @ np.sqrt(pca.explained_variance_[:n_components])
    # Collapse the x/y/z components of each residue into a single value.
    per_residue = np.add.reduceat(per_axis, range(0, len(per_axis), 3))
    frame = pd.DataFrame(enumerate(per_residue), columns=['ResidueId', ligand.drug_name])
    return frame.set_index('ResidueId')
def loadings_heatmap(analysis_actors_dict, dir_path, explained_variance=0.75):
    """
    | Creates a heatmap of the loadings of the residues for all the ligands. The blue line separates Class 1 from Class 2
    |

    .. figure:: ../_static/pca_loadings_heatmap.png
        :width: 550
        :align: center
        :height: 500px
        :alt: pca loadings heatmap missing

        PCA Loadings Heatmap, click for higher resolution.

    Args:
        analysis_actors_dict: ``{ "Agonists": List[AnalysisActor.class], "Antagonists": List[AnalysisActor.class] }``
        dir_path (str): The path of the directory the plot will be saved (must end with a ``/``)
        explained_variance(float 0.0 - 1.0): Defines the number of PCs that will be used for the loadings calculation
    """
    # Seed the frame with the first agonist, then join the loadings of everyone else.
    loadings_df = sort_residues_by_loadings(analysis_actors_dict['Agonists'][0], explained_variance)
    for which_ligand in analysis_actors_dict['Agonists'][1:]:
        loadings_df = loadings_df.join(sort_residues_by_loadings(which_ligand, explained_variance))
    # Bug fix: iterate over *all* antagonists. The previous ``[1:]`` slice (copied
    # from the agonist loop, where index 0 seeds the frame) silently dropped the
    # first antagonist from the heatmap.
    for which_ligand in analysis_actors_dict['Antagonists']:
        loadings_df = loadings_df.join(sort_residues_by_loadings(which_ligand, explained_variance))
    fig, ax = plt.subplots(figsize=(20, 15))
    sns.heatmap(loadings_df)  # Seaborn heatmap of the loadings
    # Vertical line separating agonists (left) from antagonists (right)
    plt.axvline(len(analysis_actors_dict['Agonists']))
    ax.axis('tight')
    ax.set(xticks=np.arange(len(loadings_df.columns)), xticklabels=loadings_df.columns,
           yticks=np.arange(0, len(loadings_df.index), 10), yticklabels=np.arange(0, len(loadings_df.index), 10))
    plt.xticks(rotation=45)
    plt.xlabel('Ligand', fontsize=18)
    plt.ylabel('Residue Id', fontsize=18)
    plt.title(f"Heatmap of Loadings of each ligand | Explained Variance: {int(explained_variance * 100)}%", fontsize=18)
    plt.tight_layout()
    plt.savefig(f'{dir_path}pca_loadings_heatmap.png', format='png')
    return None
| 46.366337 | 120 | 0.69069 | import math
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from tqdm import tqdm
def scree_plot(analysis_actors_dict, dir_path, pcs_on_scree_plot=50, variance_ratio_line=0.75):
plot_cols = 3
plot_rows = math.ceil(len(analysis_actors_dict['Agonists']) + len(analysis_actors_dict['Antagonists']) / plot_cols)
fig = plt.figure(figsize=(18, 6 * plot_rows))
plot_index = 1
for which_ligand in analysis_actors_dict['Agonists']:
ax = fig.add_subplot(plot_rows, plot_cols, plot_index)
plt.axvline(x=np.where(np.cumsum(which_ligand.pca_res.explained_variance_ratio_) > variance_ratio_line)[0][0],
ls='--', c='grey', label=f"Reached {int(variance_ratio_line * 100)}% variance")
plt.plot(np.arange(len(which_ligand.pca_res.explained_variance_[:pcs_on_scree_plot])),
which_ligand.pca_res.explained_variance_[:pcs_on_scree_plot], label="Variance Ratio")
plt.ylabel("Variance")
plt.xlabel("#PC")
plt.title(which_ligand.drug_name)
plt.legend()
plot_index += 1
for which_ligand in analysis_actors_dict['Antagonists']:
ax = fig.add_subplot(plot_rows, plot_cols, plot_index)
plt.axvline(x=np.where(np.cumsum(which_ligand.pca_res.explained_variance_ratio_) > variance_ratio_line)[0][0],
ls='--', c='grey', label=f"Reached {int(variance_ratio_line * 100)}% variance")
plt.plot(np.arange(len(which_ligand.pca_res.explained_variance_[:pcs_on_scree_plot])),
which_ligand.pca_res.explained_variance_[:pcs_on_scree_plot], label="Variance", color='orange')
plt.ylabel("Variance")
plt.xlabel("#PC")
plt.title(which_ligand.drug_name)
plt.legend()
plot_index += 1
fig.suptitle('PCA Scree Plots\nAgonists: Blue\nAntagonists: Orange', fontsize=26, y=0.93)
plt.savefig(f'{dir_path}pca_scree_plots.png', format='png')
def populate_variance_showcase_df(analysis_actors_dict, drug_type):
inp_df = pd.DataFrame(columns=['Drug Name', 'Type', '50% Variance', '75% Variance', '95% Variance'])
for which_ligand in analysis_actors_dict[drug_type]:
pca_var_row = pd.DataFrame([[
which_ligand.drug_name,
drug_type,
np.where(np.cumsum(which_ligand.pca_res.explained_variance_ratio_) > 0.5)[0][0] + 1,
np.where(np.cumsum(which_ligand.pca_res.explained_variance_ratio_) > 0.75)[0][0] + 1,
np.where(np.cumsum(which_ligand.pca_res.explained_variance_ratio_) > 0.95)[0][0] + 1]
], columns=['Drug Name', 'Type', '50% Variance', '75% Variance', '95% Variance'])
inp_df = inp_df.append(pca_var_row, ignore_index=True)
return inp_df
def project_pca_on_2d(analysis_actors_dict, drug_type, dir_path):
cols = 3
rows = math.ceil(len(analysis_actors_dict[drug_type]) / cols)
fig = plt.figure(figsize=(18, 25))
plot_index = 1
for which_ligand in tqdm(analysis_actors_dict[drug_type], desc="Projecting " + drug_type):
pca_space_2D = which_ligand.pca_res.transform(
which_ligand.pca_xyz)
step = 1
ax = fig.add_subplot(rows, cols, plot_index)
plt.scatter(pca_space_2D[::step, 0], pca_space_2D[::step, 1],
c=np.arange(len(pca_space_2D) / step) / (len(pca_space_2D) / step), marker='o')
plt.xlabel('PC1')
plt.ylabel('PC2')
explained_variance_2PC = which_ligand.pca_res.explained_variance_ratio_[0] + \
which_ligand.pca_res.explained_variance_ratio_[1]
plt.title(f'{which_ligand.drug_name} | Structural Motion Variance: {explained_variance_2PC}')
plt.colorbar()
plot_index += 1
fig.suptitle(f'PCA 2D Projection of {drug_type} as frames progress', fontsize=26, y=1.03)
plt.tight_layout()
plt.savefig(f'{dir_path}pca_{drug_type}_2d_projection.png', format='png')
return None
def sort_residues_by_loadings(ligand, variance_explained=0.5):
pca_res = ligand.get_pca()
pcs_numb = np.where(np.cumsum(pca_res.explained_variance_ratio_) > variance_explained)[0][0] + 1
loadings = np.abs(pca_res.components_[:pcs_numb, :]).T @ np.sqrt(pca_res.explained_variance_[:pcs_numb])
me(enumerate(residue_loading), columns=['ResidueId', ligand.drug_name]).set_index('ResidueId')
def loadings_heatmap(analysis_actors_dict, dir_path, explained_variance=0.75):
loadings_df = sort_residues_by_loadings(analysis_actors_dict['Agonists'][0], explained_variance)
for which_ligand in analysis_actors_dict['Agonists'][1:]:
loadings_df = loadings_df.join(sort_residues_by_loadings(which_ligand, explained_variance))
for which_ligand in analysis_actors_dict['Antagonists'][1:]:
loadings_df = loadings_df.join(sort_residues_by_loadings(which_ligand, explained_variance))
fig, ax = plt.subplots(figsize=(20, 15))
sns.heatmap(loadings_df)
plt.axvline(len(analysis_actors_dict['Agonists']))
ax.axis('tight')
ax.set(xticks=np.arange(len(loadings_df.columns)), xticklabels=loadings_df.columns,
yticks=np.arange(0, len(loadings_df.index), 10), yticklabels=np.arange(0, len(loadings_df.index), 10))
plt.xticks(rotation=45)
plt.xlabel('Ligand', fontsize=18)
plt.ylabel('Residue Id', fontsize=18)
plt.title(f"Heatmap of Loadings of each ligand | Explained Variance: {int(explained_variance * 100)}%", fontsize=18)
plt.tight_layout()
plt.savefig(f'{dir_path}pca_loadings_heatmap.png', format='png')
return None
| true | true |
f71faed40fe9843e23daeb4a3ae28c21eb2bec96 | 1,395 | py | Python | tests/test_rand_spatial_crop_samples.py | sudohainguyen/MONAI | 89f8a39a1c0bc6f480522c443ee7813cea21df47 | [
"Apache-2.0"
] | 2 | 2020-06-23T16:03:45.000Z | 2020-06-25T05:30:45.000Z | tests/test_rand_spatial_crop_samples.py | Scitator/MONAI | a42b563acf0c7504cee18ee84c8af2eff6e948a7 | [
"Apache-2.0"
] | null | null | null | tests/test_rand_spatial_crop_samples.py | Scitator/MONAI | a42b563acf0c7504cee18ee84c8af2eff6e948a7 | [
"Apache-2.0"
] | 1 | 2020-09-14T13:16:01.000Z | 2020-09-14T13:16:01.000Z | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from parameterized import parameterized
from monai.transforms import RandSpatialCropSamples
# Each case: [RandSpatialCropSamples kwargs, input array, expected per-sample shape].
# Input is a random 0/1 array of shape (3, 3, 3, 3) — presumably (channels, H, W, D)
# per MONAI convention; verify against RandSpatialCropSamples docs.
TEST_CASE_1 = [
    {"roi_size": [3, 3, 3], "num_samples": 4, "random_center": True},
    np.random.randint(0, 2, size=[3, 3, 3, 3]),
    (3, 3, 3, 3),
]

TEST_CASE_2 = [
    {"roi_size": [3, 3, 3], "num_samples": 8, "random_center": False},
    np.random.randint(0, 2, size=[3, 3, 3, 3]),
    (3, 3, 3, 3),
]
class TestRandSpatialCropSamples(unittest.TestCase):
    """Shape checks for the RandSpatialCropSamples transform."""

    @parameterized.expand([TEST_CASE_1, TEST_CASE_2])
    def test_shape(self, input_param, input_data, expected_shape):
        # Every sample produced by the transform must have the expected shape.
        cropper = RandSpatialCropSamples(**input_param)
        for sample in cropper(input_data):
            self.assertTupleEqual(sample.shape, expected_shape)
if __name__ == "__main__":
    unittest.main()  # Run the tests when this module is executed directly.
| 34.875 | 74 | 0.712545 |
import unittest
import numpy as np
from parameterized import parameterized
from monai.transforms import RandSpatialCropSamples
TEST_CASE_1 = [
{"roi_size": [3, 3, 3], "num_samples": 4, "random_center": True},
np.random.randint(0, 2, size=[3, 3, 3, 3]),
(3, 3, 3, 3),
]
TEST_CASE_2 = [
{"roi_size": [3, 3, 3], "num_samples": 8, "random_center": False},
np.random.randint(0, 2, size=[3, 3, 3, 3]),
(3, 3, 3, 3),
]
class TestRandSpatialCropSamples(unittest.TestCase):
@parameterized.expand([TEST_CASE_1, TEST_CASE_2])
def test_shape(self, input_param, input_data, expected_shape):
result = RandSpatialCropSamples(**input_param)(input_data)
for item in result:
self.assertTupleEqual(item.shape, expected_shape)
if __name__ == "__main__":
unittest.main()
| true | true |
f71fafdcdc00789924a0f65e4fdb20825b916e5f | 52,274 | py | Python | pymilvus/orm/collection.py | jingkl/pymilvus | f74b4741b7480d4e1740e1ea2d120c96f01bb56a | [
"Apache-2.0"
] | null | null | null | pymilvus/orm/collection.py | jingkl/pymilvus | f74b4741b7480d4e1740e1ea2d120c96f01bb56a | [
"Apache-2.0"
] | null | null | null | pymilvus/orm/collection.py | jingkl/pymilvus | f74b4741b7480d4e1740e1ea2d120c96f01bb56a | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2019-2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
import copy
import json
import pandas
from .connections import get_connection
from .schema import (
CollectionSchema,
FieldSchema,
parse_fields_from_data,
infer_dtype_bydata,
)
from .prepare import Prepare
from .partition import Partition
from .index import Index
from .search import SearchResult
from .mutation import MutationResult
from .types import DataType
from .exceptions import (
SchemaNotReadyException,
DataTypeNotMatchException,
DataNotMatchException,
ConnectionNotExistException,
PartitionAlreadyExistException,
PartitionNotExistException,
IndexNotExistException,
AutoIDException,
ExceptionsMessage,
)
from .future import SearchFuture, MutationFuture
def _check_schema(schema):
    """Validate a CollectionSchema: it must exist, be non-empty, and contain a vector field.

    :raises SchemaNotReadyException: on any violation
    """
    if schema is None:
        raise SchemaNotReadyException(0, ExceptionsMessage.NoSchema)
    if len(schema.fields) < 1:
        raise SchemaNotReadyException(0, ExceptionsMessage.EmptySchema)
    vector_fields = [f.name for f in schema.fields
                     if f.dtype in (DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR)]
    if len(vector_fields) < 1:
        raise SchemaNotReadyException(0, ExceptionsMessage.NoVector)
def _check_data_schema(fields, data):
    """Verify that every value's inferred dtype matches its field's declared dtype.

    ``data`` is either a pandas DataFrame (columns keyed by field name) or a
    list of columns positionally aligned with ``fields``.

    :raises DataNotMatchException: on the first dtype mismatch
    """
    frame_input = isinstance(data, pandas.DataFrame)
    for pos, field in enumerate(fields):
        column = data[field.name] if frame_input else data[pos]
        for row in range(len(column)):
            value = column.iloc[row] if frame_input else column[row]
            if infer_dtype_bydata(value) != field.dtype:
                raise DataNotMatchException(0, ExceptionsMessage.DataTypeInconsistent)
class Collection:
"""
This is a class corresponding to collection in milvus.
"""
    def __init__(self, name, schema=None, using="default", shards_num=2, **kwargs):
        """
        Constructs a collection by name, schema and other parameters.
        Connection information is contained in kwargs.

        :param name: the name of collection
        :type name: str

        :param schema: the schema of collection
        :type schema: class `schema.CollectionSchema`

        :param using: Milvus link of create collection
        :type using: str

        :param shards_num: How wide to scale collection. Corresponds to how many active datanodes
                        can be used on insert.
        :type shards_num: int

        :example:
        >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
        >>> connections.connect()
        <pymilvus.client.stub.Milvus object at 0x7f9a190ca898>
        >>> fields = [
        ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
        ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=128)
        ... ]
        >>> description="This is a new collection description."
        >>> schema = CollectionSchema(fields=fields, description=description)
        >>> collection = Collection(name="test_collection_init", schema=schema)
        >>> collection.name
        'test_collection_init'
        >>> collection.description
        'This is a new collection description.'
        >>> collection.is_empty
        True
        >>> collection.num_entities
        0
        """
        self._name = name
        self._using = using
        self._shards_num = shards_num
        self._kwargs = kwargs
        conn = self._get_connection()
        has = conn.has_collection(self._name)
        if has:
            # Collection already exists server-side: adopt its schema, or — if the
            # caller supplied one — verify it matches the server's exactly.
            resp = conn.describe_collection(self._name)
            server_schema = CollectionSchema.construct_from_dict(resp)
            if schema is None:
                self._schema = server_schema
            else:
                if not isinstance(schema, CollectionSchema):
                    raise SchemaNotReadyException(0, ExceptionsMessage.SchemaType)
                if server_schema != schema:
                    raise SchemaNotReadyException(0, ExceptionsMessage.SchemaInconsistent)
                self._schema = schema
        else:
            # Collection does not exist yet: a valid schema is mandatory to create it.
            if schema is None:
                raise SchemaNotReadyException(0, ExceptionsMessage.CollectionNotExistNoSchema % name)
            if isinstance(schema, CollectionSchema):
                _check_schema(schema)
                conn.create_collection(self._name, fields=schema.to_dict(), shards_num=self._shards_num)
                self._schema = schema
            else:
                raise SchemaNotReadyException(0, ExceptionsMessage.SchemaType)
def __repr__(self):
return json.dumps({
'name': self.name,
'schema': self._schema.to_dict(),
'partitions': [json.loads(p.__repr__()) for p in self.partitions],
'description': self.description,
})
def _get_connection(self):
conn = get_connection(self._using)
if conn is None:
raise ConnectionNotExistException(0, ExceptionsMessage.ConnectFirst)
return conn
    def _check_insert_data_schema(self, data):
        """
        Checks whether the data type matches the schema.

        Returns False on a dtype (or, for DataFrames, name) mismatch; raises on
        structural problems (auto-id column carrying data, wrong field count).
        """
        if self._schema is None:
            return False
        if self._schema.auto_id:
            # With auto-generated ids the primary-key column must be absent or all-null;
            # if present and empty it is dropped before inferring the fields.
            if isinstance(data, pandas.DataFrame):
                if self._schema.primary_field.name in data:
                    if not data[self._schema.primary_field.name].isnull().all():
                        raise DataNotMatchException(0, ExceptionsMessage.AutoIDWithData)
                    data = data.drop(self._schema.primary_field.name, axis=1)

        infer_fields = parse_fields_from_data(data)
        tmp_fields = copy.deepcopy(self._schema.fields)
        # Remove the auto-id primary field from the expected list — the caller
        # never supplies it. NOTE(review): popping by the enumeration index assumes
        # at most one auto-id primary field; indexes would drift otherwise — confirm.
        for i, field in enumerate(self._schema.fields):
            if field.is_primary and field.auto_id:
                tmp_fields.pop(i)

        if len(infer_fields) != len(tmp_fields):
            raise DataTypeNotMatchException(0, ExceptionsMessage.FieldsNumInconsistent)
        _check_data_schema(infer_fields, data)

        for x, y in zip(infer_fields, tmp_fields):
            if x.dtype != y.dtype:
                return False
            # Only DataFrames carry column names to compare; list input is positional.
            if isinstance(data, pandas.DataFrame):
                if x.name != y.name:
                    return False
        # todo check dim
        return True
    def _check_schema(self):
        """Raise SchemaNotReadyException if this collection has no schema set."""
        if self._schema is None:
            raise SchemaNotReadyException(0, ExceptionsMessage.NoSchema)
def _get_vector_field(self) -> str:
for field in self._schema.fields:
if field.dtype == DataType.FLOAT_VECTOR or field.dtype == DataType.BINARY_VECTOR:
return field.name
raise SchemaNotReadyException(0, ExceptionsMessage.NoVector)
    @classmethod
    def construct_from_dataframe(cls, name, dataframe, **kwargs):
        """
        Create a collection named *name* whose schema is inferred from
        *dataframe*, then insert the DataFrame's rows into it.

        :param name: Name of the collection to create.
        :param dataframe: pandas.DataFrame whose columns define the fields.
        :param kwargs: Must contain ``primary_field`` (name of the column used
            as primary key); may contain ``auto_id`` (bool). Remaining kwargs
            are forwarded to the FieldSchema / Collection constructors.
        :return: Tuple of (new Collection, insert result).
        :raises SchemaNotReadyException: On a missing/invalid DataFrame,
            missing primary field, or auto-id conflict with supplied keys.
        :raises AutoIDException: If ``auto_id`` is present but not a bool.
        """
        if dataframe is None:
            raise SchemaNotReadyException(0, ExceptionsMessage.NoneDataFrame)
        if not isinstance(dataframe, pandas.DataFrame):
            raise SchemaNotReadyException(0, ExceptionsMessage.DataFrameType)
        primary_field = kwargs.pop("primary_field", None)
        if primary_field is None:
            raise SchemaNotReadyException(0, ExceptionsMessage.NoPrimaryKey)
        # Locate the primary-key column's position in the DataFrame.
        pk_index = -1
        for i, field in enumerate(dataframe):
            if field == primary_field:
                pk_index = i
        if pk_index == -1:
            raise SchemaNotReadyException(0, ExceptionsMessage.PrimaryKeyNotExist)
        if "auto_id" in kwargs:
            if not isinstance(kwargs.get("auto_id", None), bool):
                raise AutoIDException(0, ExceptionsMessage.AutoIDType)
        auto_id = kwargs.pop("auto_id", False)
        if auto_id:
            # With auto-id the pk column may only be present if entirely null;
            # drop it so it is not inferred as a data field.
            if dataframe[primary_field].isnull().all():
                dataframe = dataframe.drop(primary_field, axis=1)
            else:
                raise SchemaNotReadyException(0, ExceptionsMessage.AutoIDWithData)
        fields = parse_fields_from_data(dataframe)
        _check_data_schema(fields, dataframe)
        if auto_id:
            # Re-insert the primary field at its original position as an
            # auto-generated INT64 key.
            fields.insert(pk_index, FieldSchema(name=primary_field, dtype=DataType.INT64, is_primary=True, auto_id=True,
                                                **kwargs))
        else:
            # Mark the user-supplied column as the primary key.
            for field in fields:
                if field.name == primary_field:
                    field.is_primary = True
                    field.auto_id = False
        schema = CollectionSchema(fields=fields)
        _check_schema(schema)
        collection = cls(name, schema, **kwargs)
        res = collection.insert(data=dataframe)
        return collection, res
@property
def schema(self) -> CollectionSchema:
"""
Returns the schema of the collection.
:return schema.CollectionSchema:
Schema of the collection.
"""
return self._schema
@property
def description(self) -> str:
"""
Returns a text description of the collection.
:return str:
Collection description text, returned when the operation succeeds.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> fields = [
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=128)
... ]
>>> description="This is an example text description."
>>> schema = CollectionSchema(fields=fields, description=description)
>>> collection = Collection(name="test_collection_description", schema=schema)
>>> collection.description
'This is an example text description.'
"""
return self._schema.description
@property
def name(self) -> str:
"""
Returns the collection name.
:return str:
The collection name, returned when the operation succeeds.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> fields = [
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=128)
... ]
>>> schema = CollectionSchema(fields)
>>> collection = Collection("test_collection_name", schema)
>>> collection.name
'test_collection_name'
"""
return self._name
@property
def is_empty(self) -> bool:
"""
Whether the collection is empty.
This method need to call `num_entities <#pymilvus.Collection.num_entities>`_.
:return bool:
* True: The collection is empty.
* False: The collection is gfghnot empty.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_is_empty", schema)
>>> collection.is_empty
True
>>> collection.insert([[1], [[1.0, 2.0]]])
<pymilvus.search.MutationResult object at 0x7fabaf3e5d50>
>>> collection.is_empty
False
"""
return self.num_entities == 0
# read-only
@property
def num_entities(self) -> int:
"""
Returns the number of entities in the collection.
:return int:
Number of entities in the collection.
:raises CollectionNotExistException: If the collection does not exist.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_num_entities", schema)
>>> collection.num_entities
0
>>> collection.insert([[1, 2], [[1.0, 2.0], [3.0, 4.0]]])
>>> collection.num_entities
2
"""
conn = self._get_connection()
conn.flush([self._name])
status = conn.get_collection_stats(db_name="", collection_name=self._name)
return status["row_count"]
@property
def primary_field(self) -> FieldSchema:
"""
Returns the primary field of the collection.
:return schema.FieldSchema:
The primary field of the collection.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("film_length", DataType.INT64, description="length in miniute"),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_primary_field", schema)
>>> collection.primary_field.name
'film_id'
"""
return self._schema.primary_field
def drop(self, timeout=None, **kwargs):
"""
Drops the collection together with its index files.
:param timeout:
* *timeout* (``float``) --
An optional duration of time in seconds to allow for the RPC.
If timeout is set to None,
the client keeps waiting until the server responds or an error occurs.
:raises CollectionNotExistException: If the collection does not exist.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_drop", schema)
>>> utility.has_collection("test_collection_drop")
True
>>> collection.drop()
>>> utility.has_collection("test_collection_drop")
False
"""
conn = self._get_connection()
indexes = self.indexes
for index in indexes:
index.drop(timeout=timeout, **kwargs)
conn.drop_collection(self._name, timeout=timeout, **kwargs)
def load(self, partition_names=None, timeout=None, **kwargs):
"""
Loads the collection from disk to memory.
:param partition_names: The specified partitions to load.
:type partition_names: list[str]
:param timeout:An optional duration of time in seconds to allow for the RPC. If timeout
is set to None, the client keeps waiting until the server responds or error occurs.
:type timeout: float
:param kwargs:
* *_async* (``bool``) --
Indicate if invoke asynchronously.
:raises CollectionNotExistException: If the collection does not exist.
:raises ParamError: If the parameters are invalid.
:raises BaseException: If the specified field, index or partition does not exist.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_load", schema)
>>> collection.insert([[1, 2], [[1.0, 2.0], [3.0, 4.0]]])
<pymilvus.search.MutationResult object at 0x7fabaf3e5d50>
>>> collection.load()
>>> collection.num_entities
2
"""
conn = self._get_connection()
if partition_names is not None:
conn.load_partitions(self._name, partition_names, timeout=timeout, **kwargs)
else:
conn.load_collection(self._name, timeout=timeout, **kwargs)
def release(self, timeout=None, **kwargs):
"""
Releases the collection from memory.
:param timeout:
* *timeout* (``float``) --
An optional duration of time in seconds to allow for the RPC. If timeout
is set to None, the client keeps waiting until the server responds or an error occurs.
:raises CollectionNotExistException: If collection does not exist.
:raises BaseException: If collection has not been loaded to memory.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_release", schema)
>>> collection.insert([[1, 2], [[1.0, 2.0], [3.0, 4.0]]])
<pymilvus.search.MutationResult object at 0x7fabaf3e5d50>
>>> collection.load()
>>> collection.num_entities
2
>>> collection.release() # release the collection from memory
"""
conn = self._get_connection()
conn.release_collection(self._name, timeout=timeout, **kwargs)
def insert(self, data, partition_name=None, timeout=None, **kwargs):
"""
Insert data into the collection.
:param data: The specified data to insert, the dimension of data needs to align with column
number
:type data: list-like(list, tuple) object or pandas.DataFrame
:param partition_name: The partition name which the data will be inserted to, if partition
name is not passed, then the data will be inserted to "_default"
partition
:type partition_name: str
:param timeout:
* *timeout* (``float``) --
An optional duration of time in seconds to allow for the RPC. If timeout
is set to None, the client keeps waiting until the server responds or an error occurs.
:raises CollectionNotExistException: If the specified collection does not exist.
:raises ParamError: If input parameters are invalid.
:raises BaseException: If the specified partition does not exist.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> import random
>>> connections.connect()
<pymilvus.client.stub.Milvus object at 0x7f8579002dc0>
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_insert", schema)
>>> data = [
... [random.randint(1, 100) for _ in range(10)],
... [[random.random() for _ in range(2)] for _ in range(10)],
... ]
>>> collection.insert(data)
>>> collection.num_entities
10
"""
if data is None:
return MutationResult(data)
if not self._check_insert_data_schema(data):
raise SchemaNotReadyException(0, ExceptionsMessage.TypeOfDataAndSchemaInconsistent)
conn = self._get_connection()
entities = Prepare.prepare_insert_data(data, self._schema)
res = conn.insert(collection_name=self._name, entities=entities, ids=None,
partition_name=partition_name, timeout=timeout, **kwargs)
if kwargs.get("_async", False):
return MutationFuture(res)
return MutationResult(res)
def delete(self, expr, partition_name=None, timeout=None, **kwargs):
"""
Delete entities with an expression condition.
And return results to show which primary key is deleted successfully
:param expr: The expression to specify entities to be deleted
:type expr: str
:param partition_name: Name of partitions that contain entities
:type partition_name: str
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur
:type timeout: float
:return: list of ids of the deleted vectors.
:rtype: list
:raises:
RpcError: If gRPC encounter an error
ParamError: If parameters are invalid
BaseException: If the return result from server is not ok
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> import random
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("film_date", DataType.INT64),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_query", schema)
>>> # insert
>>> data = [
... [i for i in range(10)],
... [i + 2000 for i in range(10)],
... [[random.random() for _ in range(2)] for _ in range(10)],
... ]
>>> collection.insert(data)
>>> collection.num_entities
>>> expr = "film_id in [ 0, 1 ]"
>>> res = collection.delete(expr)
>>> assert len(res) == 2
>>> print(f"- Deleted entities: {res}")
- Delete results: [0, 1]
"""
conn = self._get_connection()
res = conn.delete(collection_name=self._name, expr=expr,
partition_name=partition_name, timeout=timeout, **kwargs)
if kwargs.get("_async", False):
return MutationFuture(res)
return MutationResult(res)
def search(self, data, anns_field, param, limit, expr=None, partition_names=None,
output_fields=None, timeout=None, round_decimal=-1, **kwargs):
"""
Conducts a vector similarity search with an optional boolean expression as filter.
:param data: The vectors of search data, the length of data is number of query (nq), the
dim of every vector in data must be equal to vector field's of collection.
:type data: list[list[float]]
:param anns_field: The vector field used to search of collection.
:type anns_field: str
:param param: The parameters of search, such as ``nprobe``.
:type param: dict
:param limit: The max number of returned record, also known as ``topk``.
:type limit: int
:param expr: The boolean expression used to filter attribute.
:type expr: str
:param partition_names: The names of partitions to search.
:type partition_names: list[str]
:param output_fields: The fields to return in the search result, not supported now.
:type output_fields: list[str]
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur.
:type timeout: float
:param round_decimal: The specified number of decimal places of returned distance
:type round_decimal: int
:param kwargs:
* *_async* (``bool``) --
Indicate if invoke asynchronously. When value is true, method returns a
SearchFuture object; otherwise, method returns results from server directly.
* *_callback* (``function``) --
The callback function which is invoked after server response successfully.
It functions only if _async is set to True.
* *guarantee_timestamp* (``int``) --
This function instructs Milvus to see all operations performed before a provided timestamp. If no
such timestamp is provided, then Milvus will search all operations performed to date.
:return: SearchResult:
SearchResult is iterable and is a 2d-array-like class, the first dimension is
the number of vectors to query (nq), the second dimension is the number of limit(topk).
:rtype: SearchResult
:raises RpcError: If gRPC encounter an error.
:raises ParamError: If parameters are invalid.
:raises DataTypeNotMatchException: If wrong type of param is passed.
:raises BaseException: If the return result from server is not ok.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> import random
>>> connections.connect()
<pymilvus.client.stub.Milvus object at 0x7f8579002dc0>
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_search", schema)
>>> # insert
>>> data = [
... [i for i in range(10)],
... [[random.random() for _ in range(2)] for _ in range(10)],
... ]
>>> collection.insert(data)
>>> collection.num_entities
10
>>> collection.load()
>>> # search
>>> search_param = {
... "data": [[1.0, 1.0]],
... "anns_field": "films",
... "param": {"metric_type": "L2"},
... "limit": 2,
... "expr": "film_id > 0",
... }
>>> res = collection.search(**search_param)
>>> assert len(res) == 1
>>> hits = res[0]
>>> assert len(hits) == 2
>>> print(f"- Total hits: {len(hits)}, hits ids: {hits.ids} ")
- Total hits: 2, hits ids: [8, 5]
>>> print(f"- Top1 hit id: {hits[0].id}, distance: {hits[0].distance}, score: {hits[0].score} ")
- Top1 hit id: 8, distance: 0.10143111646175385, score: 0.10143111646175385
"""
if expr is not None and not isinstance(expr, str):
raise DataTypeNotMatchException(0, ExceptionsMessage.ExprType % type(expr))
conn = self._get_connection()
res = conn.search(self._name, data, anns_field, param, limit, expr,
partition_names, output_fields, timeout, round_decimal, **kwargs)
if kwargs.get("_async", False):
return SearchFuture(res)
return SearchResult(res)
def query(self, expr, output_fields=None, partition_names=None, timeout=None):
"""
Query with a set of criteria, and results in a list of records that match the query exactly.
:param expr: The query expression
:type expr: str
:param output_fields: A list of fields to return
:type output_fields: list[str]
:param partition_names: Name of partitions that contain entities
:type partition_names: list[str]
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur
:type timeout: float
:return: A list that contains all results
:rtype: list
:raises:
RpcError: If gRPC encounter an error
ParamError: If parameters are invalid
DataTypeNotMatchException: If wrong type of param is passed
BaseException: If the return result from server is not ok
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> import random
>>> connections.connect()
<pymilvus.client.stub.Milvus object at 0x7f8579002dc0>
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("film_date", DataType.INT64),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_query", schema)
>>> # insert
>>> data = [
... [i for i in range(10)],
... [i + 2000 for i in range(10)],
... [[random.random() for _ in range(2)] for _ in range(10)],
... ]
>>> collection.insert(data)
>>> collection.num_entities
10
>>> collection.load()
>>> # query
>>> expr = "film_id in [ 0, 1 ]"
>>> res = collection.query(expr, output_fields=["film_date"])
>>> assert len(res) == 2
>>> print(f"- Query results: {res}")
- Query results: [{'film_id': 0, 'film_date': 2000}, {'film_id': 1, 'film_date': 2001}]
"""
if not isinstance(expr, str):
raise DataTypeNotMatchException(0, ExceptionsMessage.ExprType % type(expr))
conn = self._get_connection()
res = conn.query(self._name, expr, output_fields, partition_names, timeout)
return res
@property
def partitions(self) -> list:
"""
Return all partitions of the collection.
:return list[Partition]:
List of Partition object, return when operation is successful.
:raises CollectionNotExistException: If collection doesn't exist.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
<pymilvus.client.stub.Milvus object at 0x7f8579002dc0>
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_partitions", schema)
>>> collection.partitions
[{"name": "_default", "description": "", "num_entities": 0}]
"""
conn = self._get_connection()
partition_strs = conn.list_partitions(self._name)
partitions = []
for partition in partition_strs:
partitions.append(Partition(self, partition, construct_only=True))
return partitions
def partition(self, partition_name) -> Partition:
"""
Return the partition corresponding to name. Return None if not existed.
:param partition_name: The name of the partition to get.
:type partition_name: str
:return Partition:
Partition object corresponding to partition_name.
:raises CollectionNotExistException: If collection doesn't exist.
:raises BaseException: If partition doesn't exist.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
<pymilvus.client.stub.Milvus object at 0x7f8579002dc0>
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_partition", schema)
>>> collection.partition("_default")
{"name": "_default", "description": "", "num_entities": 0}
>>> collection.partition("partition")
"""
if self.has_partition(partition_name) is False:
return None
return Partition(self, partition_name, construct_only=True)
def create_partition(self, partition_name, description=""):
"""
Create the partition corresponding to name if not existed.
:param partition_name: The name of the partition to create.
:type partition_name: str
:param description: The description of the partition corresponding to name.
:type description: str
:return Partition:
Partition object corresponding to partition_name.
:raises CollectionNotExistException: If collection doesn't exist.
:raises BaseException: If partition doesn't exist.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_create_partition", schema)
>>> collection.create_partition("comedy", description="comedy films")
{"name": "comedy", "description": "comedy films", "num_entities": 0}
>>> collection.partition("comedy")
{"name": "partition", "description": "comedy films", "num_entities": 0}
"""
if self.has_partition(partition_name) is True:
raise PartitionAlreadyExistException(0, ExceptionsMessage.PartitionAlreadyExist)
return Partition(self, partition_name, description=description)
def has_partition(self, partition_name, timeout=None) -> bool:
"""
Checks if a specified partition exists.
:param partition_name: The name of the partition to check
:type partition_name: str
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur
:type timeout: float
:return bool:
Whether a specified partition exists.
:raises CollectionNotExistException: If collection doesn't exist.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_has_partition", schema)
>>> collection.create_partition("comedy", description="comedy films")
{"name": "comedy", "description": "comedy films", "num_entities": 0}
>>> collection.has_partition("comedy")
True
>>> collection.has_partition("science_fiction")
False
"""
conn = self._get_connection()
return conn.has_partition(self._name, partition_name, timeout=timeout)
def drop_partition(self, partition_name, timeout=None, **kwargs):
"""
Drop the partition and its corresponding index files.
:param partition_name: The name of the partition to drop.
:type partition_name: str
:param timeout:
* *timeout* (``float``) --
An optional duration of time in seconds to allow for the RPC. If timeout
is set to None, the client keeps waiting until the server responds or an error occurs.
:raises CollectionNotExistException: If collection doesn't exist.
:raises BaseException: If partition doesn't exist.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_drop_partition", schema)
>>> collection.create_partition("comedy", description="comedy films")
{"name": "comedy", "description": "comedy films", "num_entities": 0}
>>> collection.has_partition("comedy")
True
>>> collection.drop_partition("comedy")
>>> collection.has_partition("comedy")
False
"""
if self.has_partition(partition_name) is False:
raise PartitionNotExistException(0, ExceptionsMessage.PartitionNotExist)
conn = self._get_connection()
return conn.drop_partition(self._name, partition_name, timeout=timeout, **kwargs)
# The server side not yet finished to return aliases by the describe_collection api.
# Disable this property until the work is done.
# @property
# def aliases(self) -> list:
# """
# Returns alias list of the collection.
#
# :return list of str:
# The collection aliases, returned when the operation succeeds.
#
# :example:
# >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
# >>> connections.connect()
# >>> fields = [
# ... FieldSchema("film_id", DataType.INT64, is_primary=True),
# ... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=128)
# ... ]
# >>> schema = CollectionSchema(fields)
# >>> collection = Collection("test_collection_name", schema)
# >>> collection.create_alias("tom")
# >>> collection.alias
# ['tom']
# """
# conn = self._get_connection()
# has = conn.has_collection(self._name)
# aliases = []
# if has:
# resp = conn.describe_collection(self._name)
# aliases = resp['aliases']
# return aliases
@property
def indexes(self) -> list:
"""
Returns all indexes of the collection.
:return list[Index]:
List of Index objects, returned when this operation is successful.
:raises CollectionNotExistException: If the collection does not exist.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_indexes", schema)
>>> collection.indexes
[]
"""
conn = self._get_connection()
indexes = []
tmp_index = conn.describe_index(self._name)
if tmp_index is not None:
field_name = tmp_index.pop("field_name", None)
indexes.append(Index(self, field_name, tmp_index, construct_only=True))
return indexes
def index(self) -> Index:
"""
Fetches the index object of the of the specified name.
:return Index:
Index object corresponding to index_name.
:raises CollectionNotExistException: If the collection does not exist.
:raises BaseException: If the specified index does not exist.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_index", schema)
>>> index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
>>> collection.create_index("films", index)
Status(code=0, message='')
>>> collection.indexes
[<pymilvus.index.Index object at 0x7f4435587e20>]
>>> collection.index()
<pymilvus.index.Index object at 0x7f44355a1460>
"""
conn = self._get_connection()
tmp_index = conn.describe_index(self._name)
if tmp_index is not None:
field_name = tmp_index.pop("field_name", None)
return Index(self, field_name, tmp_index, construct_only=True)
raise IndexNotExistException(0, ExceptionsMessage.IndexNotExist)
def create_index(self, field_name, index_params, timeout=None, **kwargs) -> Index:
"""
Creates index for a specified field. Return Index Object.
:param field_name: The name of the field to create an index for.
:type field_name: str
:param index_params: The indexing parameters.
:type index_params: dict
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur
:type timeout: float
:raises CollectionNotExistException: If the collection does not exist.
:raises ParamError: If the index parameters are invalid.
:raises BaseException: If field does not exist.
:raises BaseException: If the index has been created.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_create_index", schema)
>>> index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
>>> collection.create_index("films", index)
Status(code=0, message='')
>>> collection.index()
<pymilvus.index.Index object at 0x7f44355a1460>
"""
conn = self._get_connection()
return conn.create_index(self._name, field_name, index_params,
timeout=timeout, **kwargs)
def has_index(self, timeout=None) -> bool:
"""
Checks whether a specified index exists.
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur
:type timeout: float
:return bool:
Whether the specified index exists.
:raises CollectionNotExistException: If the collection does not exist.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_has_index", schema)
>>> index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
>>> collection.create_index("films", index)
>>> collection.has_index()
True
"""
conn = self._get_connection()
# TODO(yukun): Need field name, but provide index name
if conn.describe_index(self._name, "", timeout=timeout) is None:
return False
return True
def drop_index(self, timeout=None, **kwargs):
"""
Drop index and its corresponding index files.
:param timeout:
* *timeout* (``float``) --
An optional duration of time in seconds to allow for the RPC. If timeout
is set to None, the client keeps waiting until the server responds or an error occurs.
Optional. A duration of time in seconds.
:raises CollectionNotExistException: If the collection does not exist.
:raises BaseException: If the index does not exist or has been dropped.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_has_index", schema)
>>> index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
>>> collection.create_index("films", index)
>>> collection.has_index()
True
>>> collection.drop_index()
>>> collection.has_index()
False
"""
if self.has_index() is False:
raise IndexNotExistException(0, ExceptionsMessage.IndexNotExist)
conn = self._get_connection()
tmp_index = conn.describe_index(self._name, "")
if tmp_index is not None:
index = Index(self, tmp_index['field_name'], tmp_index, construct_only=True)
index.drop(timeout=timeout, **kwargs)
def create_alias(self, alias, timeout=None, **kwargs):
    """Attach an additional alias to this collection.

    An alias may point at only one collection, but a single collection may
    carry several aliases at once.  For example, after
    ``create_alias("collection_1", "bob")`` a collection whose aliases were
    ``["tom"]`` holds ``["tom", "bob"]``.

    :param alias: The alias to bind to this collection.
    :type alias: str
    :param timeout: Seconds to wait for the RPC; ``None`` waits until the
        server responds or an error occurs.
    :type timeout: float
    :raises CollectionNotExistException: If the collection does not exist.
    :raises BaseException: If the alias could not be created.

    :example:
        >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
        >>> connections.connect()
        >>> schema = CollectionSchema([
        ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
        ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
        ... ])
        >>> collection = Collection("test_collection_create_index", schema)
        >>> collection.create_alias("alias")
        Status(code=0, message='')
    """
    self._get_connection().create_alias(self._name, alias, timeout=timeout, **kwargs)
def drop_alias(self, alias, timeout=None, **kwargs):
    """Delete *alias*.

    No collection name is needed: the Milvus server already knows which
    collection owns the alias.  For example, dropping ``"bob"`` from a
    collection whose aliases are ``["tom", "bob"]`` leaves ``["tom"]``.

    :param alias: The alias to delete.
    :type alias: str
    :param timeout: Seconds to wait for the RPC; ``None`` waits until the
        server responds or an error occurs.
    :type timeout: float
    :raises CollectionNotExistException: If the collection does not exist.
    :raises BaseException: If the alias does not exist.

    :example:
        >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
        >>> connections.connect()
        >>> schema = CollectionSchema([
        ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
        ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
        ... ])
        >>> collection = Collection("test_collection_create_index", schema)
        >>> collection.create_alias("alias")
        >>> collection.drop_alias("alias")
        Status(code=0, message='')
    """
    self._get_connection().drop_alias(alias, timeout=timeout, **kwargs)
def alter_alias(self, alias, timeout=None, **kwargs):
    """Re-point an existing *alias* at this collection.

    The alias must already exist somewhere, and an alias can only ever name
    one collection.  After ``alter_alias("collection_2", "bob")`` the alias
    ``"bob"`` moves from its previous owner to ``collection_2``.

    :param alias: The alias to transfer to this collection.
    :type alias: str
    :param timeout: Seconds to wait for the RPC; ``None`` waits until the
        server responds or an error occurs.
    :type timeout: float
    :raises CollectionNotExistException: If the collection does not exist.
    :raises BaseException: If the alias failed to alter (e.g. it does not
        exist yet).

    :example:
        >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
        >>> connections.connect()
        >>> schema = CollectionSchema([
        ...     FieldSchema("film_id", DataType.INT64, is_primary=True),
        ...     FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
        ... ])
        >>> collection = Collection("test_collection_create_index", schema)
        >>> collection.alter_alias("alias")
    """
    self._get_connection().alter_alias(self._name, alias, timeout=timeout, **kwargs)
| 43.309031 | 120 | 0.600318 |
import copy
import json
import pandas
from .connections import get_connection
from .schema import (
CollectionSchema,
FieldSchema,
parse_fields_from_data,
infer_dtype_bydata,
)
from .prepare import Prepare
from .partition import Partition
from .index import Index
from .search import SearchResult
from .mutation import MutationResult
from .types import DataType
from .exceptions import (
SchemaNotReadyException,
DataTypeNotMatchException,
DataNotMatchException,
ConnectionNotExistException,
PartitionAlreadyExistException,
PartitionNotExistException,
IndexNotExistException,
AutoIDException,
ExceptionsMessage,
)
from .future import SearchFuture, MutationFuture
def _check_schema(schema):
    """Reject schemas that cannot back a collection.

    Raises SchemaNotReadyException when *schema* is missing, has no fields,
    or declares no vector-typed field.
    """
    if schema is None:
        raise SchemaNotReadyException(0, ExceptionsMessage.NoSchema)
    if not schema.fields:
        raise SchemaNotReadyException(0, ExceptionsMessage.EmptySchema)
    vector_types = (DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR)
    if not any(field.dtype in vector_types for field in schema.fields):
        raise SchemaNotReadyException(0, ExceptionsMessage.NoVector)
def _check_data_schema(fields, data):
    """Verify each column of *data* against the declared dtype of its field.

    :param fields: ordered FieldSchema objects, parallel to the columns.
    :param data: a pandas.DataFrame (columns addressed by field name) or a
        list of columns (addressed positionally).
    :raises DataNotMatchException: if any value's inferred dtype differs
        from the field's declared dtype.
    """
    if isinstance(data, pandas.DataFrame):
        # DataFrame columns are looked up by name; the positional index kept
        # by the original enumerate() was never used, so iterate values
        # directly instead of indexing with .iloc[j].
        for field in fields:
            for value in data[field.name]:
                if infer_dtype_bydata(value) != field.dtype:
                    raise DataNotMatchException(0, ExceptionsMessage.DataTypeInconsistent)
    else:
        for i, field in enumerate(fields):
            for value in data[i]:
                if infer_dtype_bydata(value) != field.dtype:
                    raise DataNotMatchException(0, ExceptionsMessage.DataTypeInconsistent)
class Collection:
    """Client-side handle for a Milvus collection.

    On construction the handle either binds to an existing server-side
    collection (validating any caller-supplied schema against the one the
    server reports) or creates a new collection from *schema*.
    """

    def __init__(self, name, schema=None, using="default", shards_num=2, **kwargs):
        """Bind to (or create) the collection called *name*.

        :param name: collection name.
        :param schema: optional CollectionSchema; required when the
            collection does not yet exist on the server.
        :param using: connection alias to route RPCs through.
        :param shards_num: shard count, used only when creating.
        :raises SchemaNotReadyException: on a missing, mistyped, or
            server-inconsistent schema.
        """
        self._name = name
        self._using = using
        self._shards_num = shards_num
        self._kwargs = kwargs
        conn = self._get_connection()
        has = conn.has_collection(self._name)
        if has:
            # Collection already exists: adopt (and cross-check) its schema.
            resp = conn.describe_collection(self._name)
            server_schema = CollectionSchema.construct_from_dict(resp)
            if schema is None:
                self._schema = server_schema
            else:
                if not isinstance(schema, CollectionSchema):
                    raise SchemaNotReadyException(0, ExceptionsMessage.SchemaType)
                if server_schema != schema:
                    raise SchemaNotReadyException(0, ExceptionsMessage.SchemaInconsistent)
                self._schema = schema
        else:
            # Collection absent: a valid schema is mandatory to create it.
            if schema is None:
                raise SchemaNotReadyException(0, ExceptionsMessage.CollectionNotExistNoSchema % name)
            if isinstance(schema, CollectionSchema):
                _check_schema(schema)
                conn.create_collection(self._name, fields=schema.to_dict(), shards_num=self._shards_num)
                self._schema = schema
            else:
                raise SchemaNotReadyException(0, ExceptionsMessage.SchemaType)

    def __repr__(self):
        """Return a JSON summary: name, schema, partitions, description."""
        return json.dumps({
            'name': self.name,
            'schema': self._schema.to_dict(),
            'partitions': [json.loads(p.__repr__()) for p in self.partitions],
            'description': self.description,
        })

    def _get_connection(self):
        """Return the connection registered under ``self._using``."""
        conn = get_connection(self._using)
        if conn is None:
            raise ConnectionNotExistException(0, ExceptionsMessage.ConnectFirst)
        return conn

    def _check_insert_data_schema(self, data):
        """Return True if *data* (DataFrame or list-of-columns) matches the
        collection schema; auto-id primary keys are excluded from the check.
        """
        if self._schema is None:
            return False
        if self._schema.auto_id:
            if isinstance(data, pandas.DataFrame):
                if self._schema.primary_field.name in data:
                    # An auto-id primary-key column may be present only when
                    # it carries no values at all.
                    if not data[self._schema.primary_field.name].isnull().all():
                        raise DataNotMatchException(0, ExceptionsMessage.AutoIDWithData)
                    data = data.drop(self._schema.primary_field.name, axis=1)

        infer_fields = parse_fields_from_data(data)
        tmp_fields = copy.deepcopy(self._schema.fields)
        for i, field in enumerate(self._schema.fields):
            # The server generates auto-id primary keys, so callers are not
            # expected to supply that column.
            if field.is_primary and field.auto_id:
                tmp_fields.pop(i)

        if len(infer_fields) != len(tmp_fields):
            raise DataTypeNotMatchException(0, ExceptionsMessage.FieldsNumInconsistent)
        _check_data_schema(infer_fields, data)

        for x, y in zip(infer_fields, tmp_fields):
            if x.dtype != y.dtype:
                return False
            if isinstance(data, pandas.DataFrame):
                # Only DataFrames carry column names to compare.
                if x.name != y.name:
                    return False
        return True

    def _check_schema(self):
        """Raise unless a schema is attached to this handle."""
        if self._schema is None:
            raise SchemaNotReadyException(0, ExceptionsMessage.NoSchema)

    def _get_vector_field(self) -> str:
        """Return the name of the first vector-typed field."""
        for field in self._schema.fields:
            if field.dtype == DataType.FLOAT_VECTOR or field.dtype == DataType.BINARY_VECTOR:
                return field.name
        raise SchemaNotReadyException(0, ExceptionsMessage.NoVector)

    @classmethod
    def construct_from_dataframe(cls, name, dataframe, **kwargs):
        """Create a collection named *name* from *dataframe* and insert it.

        The schema is inferred from the DataFrame columns.  ``primary_field``
        (required, in *kwargs*) names the primary-key column; ``auto_id``
        optionally makes the key server-generated (the column must then be
        entirely null).

        :returns: tuple of (Collection, insert MutationResult).
        """
        if dataframe is None:
            raise SchemaNotReadyException(0, ExceptionsMessage.NoneDataFrame)
        if not isinstance(dataframe, pandas.DataFrame):
            raise SchemaNotReadyException(0, ExceptionsMessage.DataFrameType)
        primary_field = kwargs.pop("primary_field", None)
        if primary_field is None:
            raise SchemaNotReadyException(0, ExceptionsMessage.NoPrimaryKey)
        pk_index = -1
        for i, field in enumerate(dataframe):
            if field == primary_field:
                pk_index = i
        if pk_index == -1:
            raise SchemaNotReadyException(0, ExceptionsMessage.PrimaryKeyNotExist)
        if "auto_id" in kwargs:
            if not isinstance(kwargs.get("auto_id", None), bool):
                raise AutoIDException(0, ExceptionsMessage.AutoIDType)
        auto_id = kwargs.pop("auto_id", False)
        if auto_id:
            # A server-generated key must arrive empty; drop the placeholder
            # column before inferring the schema from the remaining data.
            if dataframe[primary_field].isnull().all():
                dataframe = dataframe.drop(primary_field, axis=1)
            else:
                raise SchemaNotReadyException(0, ExceptionsMessage.AutoIDWithData)

        fields = parse_fields_from_data(dataframe)
        _check_data_schema(fields, dataframe)

        if auto_id:
            fields.insert(pk_index, FieldSchema(name=primary_field, dtype=DataType.INT64, is_primary=True, auto_id=True,
                                                **kwargs))
        else:
            for field in fields:
                if field.name == primary_field:
                    field.is_primary = True
                    field.auto_id = False

        schema = CollectionSchema(fields=fields)
        _check_schema(schema)
        collection = cls(name, schema, **kwargs)
        res = collection.insert(data=dataframe)
        return collection, res

    @property
    def schema(self) -> CollectionSchema:
        """Schema of this collection."""
        return self._schema

    @property
    def description(self) -> str:
        """Free-text description stored in the schema."""
        return self._schema.description

    @property
    def name(self) -> str:
        """Collection name."""
        return self._name

    @property
    def is_empty(self) -> bool:
        """True when the collection holds no entities."""
        return self.num_entities == 0

    @property
    def num_entities(self) -> int:
        """Number of entities, counted after flushing pending inserts."""
        conn = self._get_connection()
        conn.flush([self._name])
        status = conn.get_collection_stats(db_name="", collection_name=self._name)
        return status["row_count"]

    @property
    def primary_field(self) -> FieldSchema:
        """The primary-key field of the schema."""
        return self._schema.primary_field

    def drop(self, timeout=None, **kwargs):
        """Drop the collection, removing its indexes first."""
        conn = self._get_connection()
        indexes = self.indexes
        for index in indexes:
            index.drop(timeout=timeout, **kwargs)
        conn.drop_collection(self._name, timeout=timeout, **kwargs)

    def load(self, partition_names=None, timeout=None, **kwargs):
        """Load the collection (or only *partition_names*) into memory."""
        conn = self._get_connection()
        if partition_names is not None:
            conn.load_partitions(self._name, partition_names, timeout=timeout, **kwargs)
        else:
            conn.load_collection(self._name, timeout=timeout, **kwargs)

    def release(self, timeout=None, **kwargs):
        """Release the loaded collection from memory."""
        conn = self._get_connection()
        conn.release_collection(self._name, timeout=timeout, **kwargs)

    def insert(self, data, partition_name=None, timeout=None, **kwargs):
        """Insert *data* (DataFrame or list-of-columns).

        Returns a MutationResult, or a MutationFuture when ``_async=True``
        is passed through *kwargs*.
        """
        if data is None:
            return MutationResult(data)
        if not self._check_insert_data_schema(data):
            raise SchemaNotReadyException(0, ExceptionsMessage.TypeOfDataAndSchemaInconsistent)
        conn = self._get_connection()
        entities = Prepare.prepare_insert_data(data, self._schema)
        res = conn.insert(collection_name=self._name, entities=entities, ids=None,
                          partition_name=partition_name, timeout=timeout, **kwargs)
        if kwargs.get("_async", False):
            return MutationFuture(res)
        return MutationResult(res)

    def delete(self, expr, partition_name=None, timeout=None, **kwargs):
        """Delete entities matching the boolean expression *expr*."""
        conn = self._get_connection()
        res = conn.delete(collection_name=self._name, expr=expr,
                          partition_name=partition_name, timeout=timeout, **kwargs)
        if kwargs.get("_async", False):
            return MutationFuture(res)
        return MutationResult(res)

    def search(self, data, anns_field, param, limit, expr=None, partition_names=None,
               output_fields=None, timeout=None, round_decimal=-1, **kwargs):
        """Vector similarity search over *anns_field*.

        Returns a SearchResult, or a SearchFuture when ``_async=True``.
        """
        if expr is not None and not isinstance(expr, str):
            raise DataTypeNotMatchException(0, ExceptionsMessage.ExprType % type(expr))
        conn = self._get_connection()
        res = conn.search(self._name, data, anns_field, param, limit, expr,
                          partition_names, output_fields, timeout, round_decimal, **kwargs)
        if kwargs.get("_async", False):
            return SearchFuture(res)
        return SearchResult(res)

    def query(self, expr, output_fields=None, partition_names=None, timeout=None):
        """Scalar query: return *output_fields* of entities matching *expr*."""
        if not isinstance(expr, str):
            raise DataTypeNotMatchException(0, ExceptionsMessage.ExprType % type(expr))
        conn = self._get_connection()
        res = conn.query(self._name, expr, output_fields, partition_names, timeout)
        return res

    @property
    def partitions(self) -> list:
        """All partitions of this collection, as Partition handles."""
        conn = self._get_connection()
        partition_strs = conn.list_partitions(self._name)
        partitions = []
        for partition in partition_strs:
            partitions.append(Partition(self, partition, construct_only=True))
        return partitions

    def partition(self, partition_name) -> Partition:
        """Return the named partition, or None when it does not exist."""
        if self.has_partition(partition_name) is False:
            return None
        return Partition(self, partition_name, construct_only=True)

    def create_partition(self, partition_name, description=""):
        """Create (and return) a new partition; it must not already exist."""
        if self.has_partition(partition_name) is True:
            raise PartitionAlreadyExistException(0, ExceptionsMessage.PartitionAlreadyExist)
        return Partition(self, partition_name, description=description)

    def has_partition(self, partition_name, timeout=None) -> bool:
        """True when the named partition exists."""
        conn = self._get_connection()
        return conn.has_partition(self._name, partition_name, timeout=timeout)

    def drop_partition(self, partition_name, timeout=None, **kwargs):
        """Drop an existing partition and the data inside it."""
        if self.has_partition(partition_name) is False:
            raise PartitionNotExistException(0, ExceptionsMessage.PartitionNotExist)
        conn = self._get_connection()
        return conn.drop_partition(self._name, partition_name, timeout=timeout, **kwargs)

    # NOTE(review): the docstring of an `alias` property survives in this
    # copy only as comments; the property body itself is absent.  Confirm
    # against the upstream source before relying on `Collection.alias`.

    @property
    def indexes(self) -> list:
        """Indexes on this collection (the server reports at most one)."""
        conn = self._get_connection()
        indexes = []
        tmp_index = conn.describe_index(self._name)
        if tmp_index is not None:
            field_name = tmp_index.pop("field_name", None)
            indexes.append(Index(self, field_name, tmp_index, construct_only=True))
        return indexes

    def index(self) -> Index:
        """Return the collection's index or raise IndexNotExistException."""
        conn = self._get_connection()
        tmp_index = conn.describe_index(self._name)
        if tmp_index is not None:
            field_name = tmp_index.pop("field_name", None)
            return Index(self, field_name, tmp_index, construct_only=True)
        raise IndexNotExistException(0, ExceptionsMessage.IndexNotExist)

    def create_index(self, field_name, index_params, timeout=None, **kwargs) -> Index:
        """Create an index described by *index_params* on *field_name*."""
        conn = self._get_connection()
        return conn.create_index(self._name, field_name, index_params,
                                 timeout=timeout, **kwargs)

    def has_index(self, timeout=None) -> bool:
        """True when the collection has an index."""
        conn = self._get_connection()
        if conn.describe_index(self._name, "", timeout=timeout) is None:
            return False
        return True

    def drop_index(self, timeout=None, **kwargs):
        """Drop the collection's index; raises when none exists."""
        if self.has_index() is False:
            raise IndexNotExistException(0, ExceptionsMessage.IndexNotExist)
        conn = self._get_connection()
        tmp_index = conn.describe_index(self._name, "")
        if tmp_index is not None:
            index = Index(self, tmp_index['field_name'], tmp_index, construct_only=True)
            index.drop(timeout=timeout, **kwargs)

    def create_alias(self, alias, timeout=None, **kwargs):
        """Attach *alias* to this collection."""
        conn = self._get_connection()
        conn.create_alias(self._name, alias, timeout=timeout, **kwargs)

    def drop_alias(self, alias, timeout=None, **kwargs):
        """Delete *alias* (the server resolves which collection owns it)."""
        conn = self._get_connection()
        conn.drop_alias(alias, timeout=timeout, **kwargs)

    def alter_alias(self, alias, timeout=None, **kwargs):
        """Re-point the existing *alias* at this collection."""
        conn = self._get_connection()
        conn.alter_alias(self._name, alias, timeout=timeout, **kwargs)
| true | true |
f71fb01cbdb1f124478ac2b092b7ac4885231833 | 119 | py | Python | examples/test_error.py | ak1ra24/pytest-md-report | 9d861a9237176e9dd1e6872c197f5bb5985ee049 | [
"MIT"
] | 9 | 2020-05-06T20:54:29.000Z | 2022-03-27T04:11:38.000Z | examples/test_error.py | solisa986/pytest-md-report | a6cdeda92ef8f1ab64c346a86a085ce9e1585880 | [
"MIT"
] | null | null | null | examples/test_error.py | solisa986/pytest-md-report | a6cdeda92ef8f1ab64c346a86a085ce9e1585880 | [
"MIT"
] | 3 | 2021-05-05T19:58:33.000Z | 2021-08-12T07:14:52.000Z | def test_error(invalid_fixture):
pass
class Test:
    # Same deliberate fixture-lookup error as the module-level test, but
    # raised from inside a test class.
    def test_error(self, invalid_fixture):
        assert True
| 14.875 | 42 | 0.697479 | def test_error(invalid_fixture):
pass
class Test:
def test_error(self, invalid_fixture):
assert True
| true | true |
f71fb03c0051a7dd823c621bfe4bd61238f148c4 | 2,121 | py | Python | ciphers/rabin_miller.py | joeyzhou85/python | 9c0cbe33076a570a3c02825b7c6d9866a760e777 | [
"MIT"
] | 1,568 | 2019-04-25T11:54:45.000Z | 2022-03-31T23:35:23.000Z | ciphers/rabin_miller.py | joeyzhou85/python | 9c0cbe33076a570a3c02825b7c6d9866a760e777 | [
"MIT"
] | 58 | 2019-02-20T10:45:50.000Z | 2020-09-30T12:18:45.000Z | ciphers/rabin_miller.py | joeyzhou85/python | 9c0cbe33076a570a3c02825b7c6d9866a760e777 | [
"MIT"
] | 464 | 2019-04-17T04:57:16.000Z | 2022-03-31T04:12:57.000Z | from __future__ import print_function
# Primality Testing with the Rabin-Miller Algorithm
import random
def rabinMiller(num):
s = num - 1
t = 0
while s % 2 == 0:
s = s // 2
t += 1
for trials in range(5):
a = random.randrange(2, num - 1)
v = pow(a, s, num)
if v != 1:
i = 0
while v != (num - 1):
if i == t - 1:
return False
else:
i = i + 1
v = (v ** 2) % num
return True
def isPrime(num):
if (num < 2):
return False
lowPrimes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,
131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191,
193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257,
263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331,
337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401,
409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467,
479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709,
719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797,
809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877,
881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967,
971, 977, 983, 991, 997]
if num in lowPrimes:
return True
for prime in lowPrimes:
if (num % prime) == 0:
return False
return rabinMiller(num)
def generateLargePrime(keysize = 1024):
while True:
num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
if isPrime(num):
return num
if __name__ == '__main__':
num = generateLargePrime()
print(('Prime number:', num))
print(('isPrime:', isPrime(num)))
| 32.630769 | 80 | 0.474305 | from __future__ import print_function
import random
def rabinMiller(num):
s = num - 1
t = 0
while s % 2 == 0:
s = s // 2
t += 1
for trials in range(5):
a = random.randrange(2, num - 1)
v = pow(a, s, num)
if v != 1:
i = 0
while v != (num - 1):
if i == t - 1:
return False
else:
i = i + 1
v = (v ** 2) % num
return True
def isPrime(num):
if (num < 2):
return False
lowPrimes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,
131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191,
193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257,
263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331,
337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401,
409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467,
479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709,
719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797,
809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877,
881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967,
971, 977, 983, 991, 997]
if num in lowPrimes:
return True
for prime in lowPrimes:
if (num % prime) == 0:
return False
return rabinMiller(num)
def generateLargePrime(keysize = 1024):
while True:
num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
if isPrime(num):
return num
if __name__ == '__main__':
num = generateLargePrime()
print(('Prime number:', num))
print(('isPrime:', isPrime(num)))
| true | true |
f71fb110925949ff47a2cad996420b5abb79125c | 4,468 | py | Python | dimod/generators/chimera.py | pau557/dimod | d3c6d3abf23182b035e1100c46f7c947202edefb | [
"Apache-2.0"
] | null | null | null | dimod/generators/chimera.py | pau557/dimod | d3c6d3abf23182b035e1100c46f7c947202edefb | [
"Apache-2.0"
] | 24 | 2021-07-09T08:19:47.000Z | 2022-03-08T08:15:48.000Z | dimod/generators/chimera.py | pau557/dimod | d3c6d3abf23182b035e1100c46f7c947202edefb | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
from __future__ import absolute_import
import numpy as np
import numpy.random
from dimod.binary_quadratic_model import BinaryQuadraticModel
from dimod.decorators import graph_argument
from dimod.vartypes import SPIN
__all__ = ['chimera_anticluster']
@graph_argument('subgraph', allow_None=True)
def chimera_anticluster(m, n=None, t=4, multiplier=3.0,
                        cls=BinaryQuadraticModel, subgraph=None, seed=None):
    """Generate an anticluster problem on a Chimera lattice.

    An anticluster problem has weak interactions within a tile and strong
    interactions between tiles.

    Args:
        m (int):
            Number of rows in the Chimera lattice.

        n (int, optional, default=m):
            Number of columns in the Chimera lattice.

        t (int, optional, default=t):
            Size of the shore within each Chimera tile.

        multiplier (number, optional, default=3.0):
            Strength of the intertile edges.

        cls (type, optional):
            Binary quadratic model class to build from. Default is
            :class:`.BinaryQuadraticModel`.

        subgraph (int/tuple[nodes, edges]/list[edge]/:obj:`~networkx.Graph`):
            A subgraph of a Chimera(m, n, t) graph to build the anticluster
            problem on.

        seed (int, optional, default=None):
            Random seed.

    Returns:
        :obj:`.BinaryQuadraticModel`: spin-valued binary quadratic model.

    """
    if seed is None:
        seed = numpy.random.randint(2**32, dtype=np.uint32)
    r = numpy.random.RandomState(seed)

    m = int(m)
    if n is None:
        n = m
    else:
        n = int(n)
    t = int(t)

    ldata = np.zeros(m*n*t*2)  # number of nodes; all linear biases are zero

    if m and n and t:
        # Intra-tile edges get +/-1 couplings; inter-tile edges get the same
        # random sign scaled by `multiplier` (applied to the qdata tail).
        inrow, incol = zip(*_iter_chimera_tile_edges(m, n, t))
        if m > 1 or n > 1:
            outrow, outcol = zip(*_iter_chimera_intertile_edges(m, n, t))
        else:
            # A single tile has no inter-tile couplers.
            outrow = outcol = tuple()

        qdata = r.choice((-1., 1.), size=len(inrow)+len(outrow))

        qdata[len(inrow):] *= multiplier

        irow = inrow + outrow
        icol = incol + outcol

    else:
        # Degenerate lattice (zero rows, columns, or shore): no edges at all.
        irow = icol = qdata = tuple()

    bqm = cls.from_numpy_vectors(ldata, (irow, icol, qdata), 0.0, SPIN)

    if subgraph is not None:
        # Restrict the model to the requested subgraph; unknown nodes/edges
        # surface as KeyError from the lookups and are re-raised as
        # ValueError with a descriptive message.
        nodes, edges = subgraph

        subbqm = cls.empty(SPIN)

        try:
            subbqm.add_variables_from((v, bqm.linear[v]) for v in nodes)
        except KeyError:
            msg = "given 'subgraph' contains nodes not in Chimera({}, {}, {})".format(m, n, t)
            raise ValueError(msg)

        try:
            subbqm.add_interactions_from((u, v, bqm.adj[u][v]) for u, v in edges)
        except KeyError:
            msg = "given 'subgraph' contains edges not in Chimera({}, {}, {})".format(m, n, t)
            raise ValueError(msg)

        bqm = subbqm

    return bqm
def _iter_chimera_tile_edges(m, n, t):
hoff = 2 * t
voff = n * hoff
mi = m * voff
ni = n * hoff
# tile edges
for edge in ((k0, k1)
for i in range(0, ni, hoff)
for j in range(i, mi, voff)
for k0 in range(j, j + t)
for k1 in range(j + t, j + 2 * t)):
yield edge
def _iter_chimera_intertile_edges(m, n, t):
hoff = 2 * t
voff = n * hoff
mi = m * voff
ni = n * hoff
# horizontal edges
for edge in ((k, k + hoff)
for i in range(t, 2 * t)
for j in range(i, ni - hoff, hoff)
for k in range(j, mi, voff)):
yield edge
# vertical edges
for edge in ((k, k + voff)
for i in range(t)
for j in range(i, ni, hoff)
for k in range(j, mi - voff, voff)):
yield edge
| 29.012987 | 94 | 0.574754 |
from __future__ import absolute_import
import numpy as np
import numpy.random
from dimod.binary_quadratic_model import BinaryQuadraticModel
from dimod.decorators import graph_argument
from dimod.vartypes import SPIN
__all__ = ['chimera_anticluster']
@graph_argument('subgraph', allow_None=True)
def chimera_anticluster(m, n=None, t=4, multiplier=3.0,
cls=BinaryQuadraticModel, subgraph=None, seed=None):
if seed is None:
seed = numpy.random.randint(2**32, dtype=np.uint32)
r = numpy.random.RandomState(seed)
m = int(m)
if n is None:
n = m
else:
n = int(n)
t = int(t)
ldata = np.zeros(m*n*t*2)
if m and n and t:
inrow, incol = zip(*_iter_chimera_tile_edges(m, n, t))
if m > 1 or n > 1:
outrow, outcol = zip(*_iter_chimera_intertile_edges(m, n, t))
else:
outrow = outcol = tuple()
qdata = r.choice((-1., 1.), size=len(inrow)+len(outrow))
qdata[len(inrow):] *= multiplier
irow = inrow + outrow
icol = incol + outcol
else:
irow = icol = qdata = tuple()
bqm = cls.from_numpy_vectors(ldata, (irow, icol, qdata), 0.0, SPIN)
if subgraph is not None:
nodes, edges = subgraph
subbqm = cls.empty(SPIN)
try:
subbqm.add_variables_from((v, bqm.linear[v]) for v in nodes)
except KeyError:
msg = "given 'subgraph' contains nodes not in Chimera({}, {}, {})".format(m, n, t)
raise ValueError(msg)
try:
subbqm.add_interactions_from((u, v, bqm.adj[u][v]) for u, v in edges)
except KeyError:
msg = "given 'subgraph' contains edges not in Chimera({}, {}, {})".format(m, n, t)
raise ValueError(msg)
bqm = subbqm
return bqm
def _iter_chimera_tile_edges(m, n, t):
hoff = 2 * t
voff = n * hoff
mi = m * voff
ni = n * hoff
for edge in ((k0, k1)
for i in range(0, ni, hoff)
for j in range(i, mi, voff)
for k0 in range(j, j + t)
for k1 in range(j + t, j + 2 * t)):
yield edge
def _iter_chimera_intertile_edges(m, n, t):
hoff = 2 * t
voff = n * hoff
mi = m * voff
ni = n * hoff
for edge in ((k, k + hoff)
for i in range(t, 2 * t)
for j in range(i, ni - hoff, hoff)
for k in range(j, mi, voff)):
yield edge
for edge in ((k, k + voff)
for i in range(t)
for j in range(i, ni, hoff)
for k in range(j, mi - voff, voff)):
yield edge
| true | true |
f71fb1734f8db11d01bd46d0696b2f6a7c2a050c | 1,845 | py | Python | mediagoblin/plugins/subtitles/models.py | mtlynch/mediagoblin | b5ee42aed44052de114c6e45edb56856d2868858 | [
"CC0-1.0"
] | 7 | 2020-05-27T03:57:21.000Z | 2021-04-21T02:17:39.000Z | mediagoblin/plugins/subtitles/models.py | jgarte/mediagoblin-mirror | c4599508b02f2e61df3a97ff314766a62a3e5934 | [
"CC0-1.0"
] | null | null | null | mediagoblin/plugins/subtitles/models.py | jgarte/mediagoblin-mirror | c4599508b02f2e61df3a97ff314766a62a3e5934 | [
"CC0-1.0"
] | 2 | 2019-05-13T14:42:34.000Z | 2021-08-28T10:36:46.000Z | # GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2016 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from sqlalchemy import Column, Integer, Unicode, ForeignKey
from sqlalchemy.orm import relationship
from mediagoblin.db.models import User
from mediagoblin.db.base import Base,MediaEntry
class MediaSubtitleFile(Base):
    """One subtitle file attached to a MediaEntry."""
    __tablename__ = "core__subtitle_files"

    id = Column(Integer, primary_key=True)
    # Owning media entry (foreign key into MediaEntry's table).
    media_entry = Column(
        Integer, ForeignKey(MediaEntry.id),
        nullable=False)
    # Human-readable label shown for this subtitle track.
    name = Column(Unicode, nullable=False)
    # NOTE(review): PathTupleWithSlashes, DateTime and datetime are not
    # imported in this module as shown -- confirm the imports against the
    # upstream source (they presumably come from mediagoblin.db/extratypes,
    # sqlalchemy and the stdlib respectively).
    filepath = Column(PathTupleWithSlashes)
    created = Column(DateTime, nullable=False, default=datetime.datetime.utcnow)

    @property
    def dict_view(self):
        """A dict like view on this object"""
        # NOTE(review): DictReadAttrProxy is also unresolved in this copy.
        return DictReadAttrProxy(self)
# NOTE(review): as written these two names are bound at module level, but
# they look like relationship/association-proxy attributes meant to augment
# MediaEntry (and `association_proxy` is not imported in this copy) --
# confirm against the upstream plugin before relying on them here.
subtitle_files_helper = relationship("MediaSubtitleFile",
    cascade="all, delete-orphan",
    order_by="MediaSubtitleFile.created"
    )
# Exposes each MediaSubtitleFile through its dict_view; assigning a dict
# with "name" and "filepath" creates the underlying row.
subtitle_files = association_proxy("subtitle_files_helper", "dict_view",
    creator=lambda v: MediaSubtitleFile(
        name=v["name"], filepath=v["filepath"])
    )

# Models this plugin registers with the MediaGoblin migration machinery.
MODELS = [
    MediaSubtitleFile
]
| 36.9 | 80 | 0.732249 |
from sqlalchemy import Column, Integer, Unicode, ForeignKey
from sqlalchemy.orm import relationship
from mediagoblin.db.models import User
from mediagoblin.db.base import Base,MediaEntry
class MediaSubtitleFile(Base):
__tablename__ = "core__subtitle_files"
id = Column(Integer, primary_key=True)
media_entry = Column(
Integer, ForeignKey(MediaEntry.id),
nullable=False)
name = Column(Unicode, nullable=False)
filepath = Column(PathTupleWithSlashes)
created = Column(DateTime, nullable=False, default=datetime.datetime.utcnow)
@property
def dict_view(self):
return DictReadAttrProxy(self)
subtitle_files_helper = relationship("MediaSubtitleFile",
cascade="all, delete-orphan",
order_by="MediaSubtitleFile.created"
)
subtitle_files = association_proxy("subtitle_files_helper", "dict_view",
creator=lambda v: MediaSubtitleFile(
name=v["name"], filepath=v["filepath"])
)
MODELS = [
MediaSubtitleFile
]
| true | true |
f71fb1cc1129767d19d13b370609bf72cca258f1 | 1,830 | py | Python | scripts/agregar_empresas_gentor.py | edgarvalli/gentor_backend | 115cadfc802cb5130b62aba5c9b6050cb5f0a466 | [
"MIT"
] | null | null | null | scripts/agregar_empresas_gentor.py | edgarvalli/gentor_backend | 115cadfc802cb5130b62aba5c9b6050cb5f0a466 | [
"MIT"
] | null | null | null | scripts/agregar_empresas_gentor.py | edgarvalli/gentor_backend | 115cadfc802cb5130b62aba5c9b6050cb5f0a466 | [
"MIT"
] | null | null | null | from set_root_path import set_root_path
set_root_path()
import db.sql_server as db
empresas = [
{
"RazonSocial": "GENTOR, S.A. DE C.V.",
"RFC": "GEN760712EM0"
},
{
"RazonSocial": "SERVICIOS CORPORATIVOS GENTOR, S.A.",
"RFC": "SCG931026LW1"
},
{
"RazonSocial": "GENTOR SERVICIOS, S.A. DE C.V.",
"RFC": "GSE9212163I9"
},
{
"RazonSocial": "SISTEMAS DE ENERGIA INTERNACIONAL, S.A. DE C.V.",
"RFC": "SEI920618TC5"
},
{
"RazonSocial": "BIOENERGIA DE NUEVO LEON, S.A. DE C.V.",
"RFC": "BNL020412HB8"
},
{
"RazonSocial": "SEISA SERVICIOS Y TECNOLOGIA, S.A. DE C.V.",
"RFC": "SST951003FL8"
},
{
"RazonSocial": "ASTRA LYRA S.A. DE C.V.",
"RFC": "ALY200814LZ4"
},
{
"RazonSocial": "ENVIRONMENT & SOCIETY S.A. DE C.V.",
"RFC": "EAS200814BU1"
},
{
"RazonSocial": "LAND OPERATION S.A. DE C.V.",
"RFC": "LOP191204CD6"
},
{
"RazonSocial": "CHP SOLUTIONS, S.A. DE C.V.",
"RFC": "CSO200716G27"
},
{
"RazonSocial": "DOMOS TELECOMUNICACIONES, S.A. DE C.V.",
"RFC": "DTE940421L42"
},
{
"RazonSocial": "RECOLECCION Y DISPOSICION DE DESECHOS AMBIENTALES SA DE CV",
"RFC": "RDD101105Q91"
},
{
"RazonSocial": "SERVICIOS CORPORATIVOS DMS, S.A DE C.V.",
"RFC": "SCD9609068X7"
},
{
"RazonSocial": "INGENIERIA Y MEJORAMIENTO AMBIENTAL, S.A. DE C.V.",
"RFC": "IMA960906DW2"
},
{
"RazonSocial": "PROTECCION INTEGRAL ESPECIALIZADA, S.A. DE C.V.",
"RFC": "PIE950316LU6"
},
{
"RazonSocial": "ODALTA S.A. DE C.V.",
"RFC": "ODA200122KH6"
}
]
db.insertmany("Empresas", empresas)
| 24.4 | 84 | 0.519672 | from set_root_path import set_root_path
set_root_path()
import db.sql_server as db
empresas = [
{
"RazonSocial": "GENTOR, S.A. DE C.V.",
"RFC": "GEN760712EM0"
},
{
"RazonSocial": "SERVICIOS CORPORATIVOS GENTOR, S.A.",
"RFC": "SCG931026LW1"
},
{
"RazonSocial": "GENTOR SERVICIOS, S.A. DE C.V.",
"RFC": "GSE9212163I9"
},
{
"RazonSocial": "SISTEMAS DE ENERGIA INTERNACIONAL, S.A. DE C.V.",
"RFC": "SEI920618TC5"
},
{
"RazonSocial": "BIOENERGIA DE NUEVO LEON, S.A. DE C.V.",
"RFC": "BNL020412HB8"
},
{
"RazonSocial": "SEISA SERVICIOS Y TECNOLOGIA, S.A. DE C.V.",
"RFC": "SST951003FL8"
},
{
"RazonSocial": "ASTRA LYRA S.A. DE C.V.",
"RFC": "ALY200814LZ4"
},
{
"RazonSocial": "ENVIRONMENT & SOCIETY S.A. DE C.V.",
"RFC": "EAS200814BU1"
},
{
"RazonSocial": "LAND OPERATION S.A. DE C.V.",
"RFC": "LOP191204CD6"
},
{
"RazonSocial": "CHP SOLUTIONS, S.A. DE C.V.",
"RFC": "CSO200716G27"
},
{
"RazonSocial": "DOMOS TELECOMUNICACIONES, S.A. DE C.V.",
"RFC": "DTE940421L42"
},
{
"RazonSocial": "RECOLECCION Y DISPOSICION DE DESECHOS AMBIENTALES SA DE CV",
"RFC": "RDD101105Q91"
},
{
"RazonSocial": "SERVICIOS CORPORATIVOS DMS, S.A DE C.V.",
"RFC": "SCD9609068X7"
},
{
"RazonSocial": "INGENIERIA Y MEJORAMIENTO AMBIENTAL, S.A. DE C.V.",
"RFC": "IMA960906DW2"
},
{
"RazonSocial": "PROTECCION INTEGRAL ESPECIALIZADA, S.A. DE C.V.",
"RFC": "PIE950316LU6"
},
{
"RazonSocial": "ODALTA S.A. DE C.V.",
"RFC": "ODA200122KH6"
}
]
db.insertmany("Empresas", empresas)
| true | true |
f71fb21ddb1fedaa154d0f69fe5a945c4b945b9c | 13,241 | py | Python | scrapy/core/engine.py | lizhaoxing1/scrapy-comment-zh | 17c6279c63d9733598539589091c5a9551f341f6 | [
"BSD-3-Clause"
] | null | null | null | scrapy/core/engine.py | lizhaoxing1/scrapy-comment-zh | 17c6279c63d9733598539589091c5a9551f341f6 | [
"BSD-3-Clause"
] | null | null | null | scrapy/core/engine.py | lizhaoxing1/scrapy-comment-zh | 17c6279c63d9733598539589091c5a9551f341f6 | [
"BSD-3-Clause"
] | null | null | null | """
This is the Scrapy engine which controls the Scheduler, Downloader and Spiders.
For more information see docs/topics/architecture.rst
"""
import logging
from time import time
from twisted.internet import defer, task
from twisted.python.failure import Failure
from scrapy import signals
from scrapy.core.scraper import Scraper
from scrapy.exceptions import DontCloseSpider
from scrapy.http import Response, Request
from scrapy.utils.misc import load_object
from scrapy.utils.reactor import CallLaterOnce
from scrapy.utils.log import logformatter_adapter, failure_to_exc_info
logger = logging.getLogger(__name__)
class Slot(object):
    """Per-spider state holder for the engine: the pending start requests,
    the set of requests currently in flight, the scheduler, and the
    heartbeat LoopingCall that keeps the engine's next-request loop ticking.
    """
    def __init__(self, start_requests, close_if_idle, nextcall, scheduler):
        self.closing = False
        self.inprogress = set() # requests in progress
        self.start_requests = iter(start_requests)
        self.close_if_idle = close_if_idle
        self.nextcall = nextcall
        self.scheduler = scheduler
        # periodically re-schedules nextcall so the engine loop never stalls
        self.heartbeat = task.LoopingCall(nextcall.schedule)
    def add_request(self, request):
        self.inprogress.add(request)
    def remove_request(self, request):
        self.inprogress.remove(request)
        self._maybe_fire_closing()
    def close(self):
        # Mark the slot as closing; the returned Deferred fires once all
        # in-progress requests have completed.
        self.closing = defer.Deferred()
        self._maybe_fire_closing()
        return self.closing
    def _maybe_fire_closing(self):
        # Fire the closing Deferred only when a close was requested AND no
        # requests remain in flight; also cancels the pending nextcall and
        # stops the heartbeat.
        if self.closing and not self.inprogress:
            if self.nextcall:
                self.nextcall.cancel()
            if self.heartbeat.running:
                self.heartbeat.stop()
            self.closing.callback(None)
class ExecutionEngine(object):
    """The Scrapy execution engine: coordinates the scheduler, downloader and
    scraper for a single spider, driving the crawl through Twisted Deferreds.
    """
    # __init__ initializes the crawler's core components (scheduler class,
    # downloader instance, scraper); the per-spider slot is created later in
    # open_spider.
    def __init__(self, crawler, spider_closed_callback):
        self.crawler = crawler
        self.settings = crawler.settings
        self.signals = crawler.signals
        self.logformatter = crawler.logformatter
        self.slot = None
        self.spider = None
        self.running = False
        self.paused = False
        self.scheduler_cls = load_object(self.settings['SCHEDULER'])
        downloader_cls = load_object(self.settings['DOWNLOADER'])
        self.downloader = downloader_cls(crawler)
        self.scraper = Scraper(crawler)
        self._spider_closed_callback = spider_closed_callback
    @defer.inlineCallbacks
    def start(self):
        """Start the execution engine"""
        assert not self.running, "Engine already running"
        self.start_time = time()
        yield self.signals.send_catch_log_deferred(signal=signals.engine_started)
        self.running = True
        # resolved by _finish_stopping_engine once shutdown completes
        self._closewait = defer.Deferred()
        yield self._closewait
    def stop(self):
        """Stop the execution engine gracefully"""
        assert self.running, "Engine not running"
        self.running = False
        dfd = self._close_all_spiders()
        return dfd.addBoth(lambda _: self._finish_stopping_engine())
    def close(self):
        """Close the execution engine gracefully.
        If it has already been started, stop it. In all cases, close all spiders
        and the downloader.
        """
        if self.running:
            # Will also close spiders and downloader
            return self.stop()
        elif self.open_spiders:
            # Will also close downloader
            return self._close_all_spiders()
        else:
            return defer.succeed(self.downloader.close())
    def pause(self):
        """Pause the execution engine"""
        self.paused = True
    def unpause(self):
        """Resume the execution engine"""
        self.paused = False
    def _next_request(self, spider):
        """Main crawl tick: drain the scheduler and the spider's start
        requests until a backout condition is hit, then check for idleness."""
        slot = self.slot
        if not slot:
            return
        if self.paused:
            return
        while not self._needs_backout(spider):
            if not self._next_request_from_scheduler(spider):
                break
        if slot.start_requests and not self._needs_backout(spider):
            try:
                request = next(slot.start_requests)
            except StopIteration:
                # start requests exhausted
                slot.start_requests = None
            except Exception:
                slot.start_requests = None
                logger.error('Error while obtaining start requests',
                             exc_info=True, extra={'spider': spider})
            else:
                self.crawl(request, spider)
        if self.spider_is_idle(spider) and slot.close_if_idle:
            self._spider_idle(spider)
    def _needs_backout(self, spider):
        # Back off when the engine is stopping, the slot is closing, or the
        # downloader/scraper report saturation.
        slot = self.slot
        return not self.running \
            or slot.closing \
            or self.downloader.needs_backout() \
            or self.scraper.slot.needs_backout()
    def _next_request_from_scheduler(self, spider):
        """Pop one request from the scheduler and start downloading it;
        returns None (falsy) when the scheduler is empty."""
        slot = self.slot
        request = slot.scheduler.next_request()
        if not request:
            return
        d = self._download(request, spider)
        d.addBoth(self._handle_downloader_output, request, spider)
        d.addErrback(lambda f: logger.info('Error while handling downloader output',
                                           exc_info=failure_to_exc_info(f),
                                           extra={'spider': spider}))
        d.addBoth(lambda _: slot.remove_request(request))
        d.addErrback(lambda f: logger.info('Error while removing request from slot',
                                           exc_info=failure_to_exc_info(f),
                                           extra={'spider': spider}))
        d.addBoth(lambda _: slot.nextcall.schedule())
        d.addErrback(lambda f: logger.info('Error while scheduling new request',
                                           exc_info=failure_to_exc_info(f),
                                           extra={'spider': spider}))
        return d
    def _handle_downloader_output(self, response, request, spider):
        """Route the downloader result: re-crawl Requests, scrape everything
        else (Response or Failure)."""
        assert isinstance(response, (Request, Response, Failure)), response
        # downloader middleware can return requests (for example, redirects)
        if isinstance(response, Request):
            self.crawl(response, spider)
            return
        # response is a Response or Failure
        d = self.scraper.enqueue_scrape(response, request, spider)
        d.addErrback(lambda f: logger.error('Error while enqueuing downloader output',
                                            exc_info=failure_to_exc_info(f),
                                            extra={'spider': spider}))
        return d
    def spider_is_idle(self, spider):
        """Return True when no work remains anywhere in the pipeline."""
        if not self.scraper.slot.is_idle():
            # scraper is not idle
            return False
        if self.downloader.active:
            # downloader has pending requests
            return False
        if self.slot.start_requests is not None:
            # not all start requests are handled
            return False
        if self.slot.scheduler.has_pending_requests():
            # scheduler has pending requests
            return False
        return True
    @property
    def open_spiders(self):
        # this engine runs at most one spider at a time
        return [self.spider] if self.spider else []
    def has_capacity(self):
        """Does the engine have capacity to handle more spiders"""
        return not bool(self.slot)
    def crawl(self, request, spider):
        """Schedule *request* for download and wake up the engine loop."""
        assert spider in self.open_spiders, \
            "Spider %r not opened when crawling: %s" % (spider.name, request)
        self.schedule(request, spider)
        self.slot.nextcall.schedule()
    def schedule(self, request, spider):
        """Enqueue *request* on the scheduler, signalling scheduled/dropped."""
        self.signals.send_catch_log(signal=signals.request_scheduled,
                request=request, spider=spider)
        if not self.slot.scheduler.enqueue_request(request):
            self.signals.send_catch_log(signal=signals.request_dropped,
                                        request=request, spider=spider)
    def download(self, request, spider):
        """Download *request* directly (outside the scrape flow), following
        any Request the downloader chain returns (e.g. a redirect)."""
        d = self._download(request, spider)
        d.addBoth(self._downloaded, self.slot, request, spider)
        return d
    def _downloaded(self, response, slot, request, spider):
        slot.remove_request(request)
        return self.download(response, spider) \
                if isinstance(response, Request) else response
    def _download(self, request, spider):
        """Fetch *request* via the downloader, tracking it on the slot and
        firing response_received on success."""
        slot = self.slot
        slot.add_request(request)
        def _on_success(response):
            assert isinstance(response, (Response, Request))
            if isinstance(response, Response):
                response.request = request # tie request to response received
                logkws = self.logformatter.crawled(request, response, spider)
                logger.log(*logformatter_adapter(logkws), extra={'spider': spider})
                self.signals.send_catch_log(signal=signals.response_received, \
                    response=response, request=request, spider=spider)
            return response
        def _on_complete(_):
            # keep the loop going regardless of success/failure
            slot.nextcall.schedule()
            return _
        dwld = self.downloader.fetch(request, spider)
        dwld.addCallbacks(_on_success)
        dwld.addBoth(_on_complete)
        return dwld
    @defer.inlineCallbacks
    def open_spider(self, spider, start_requests=(), close_if_idle=True):
        # Instantiates the scheduler and the spider slot, runs the start
        # requests through the spider middlewares, and kicks off the crawl
        # loop plus its heartbeat.
        assert self.has_capacity(), "No free spider slot when opening %r" % \
            spider.name
        logger.info("Spider opened", extra={'spider': spider})
        nextcall = CallLaterOnce(self._next_request, spider) # drives the asynchronous recurring call loop
        scheduler = self.scheduler_cls.from_crawler(self.crawler) # instantiate the scheduler
        start_requests = yield self.scraper.spidermw.process_start_requests(start_requests, spider) # run the spider middlewares configured in settings over the start requests
        slot = Slot(start_requests, close_if_idle, nextcall, scheduler)
        self.slot = slot
        self.spider = spider
        yield scheduler.open(spider)
        yield self.scraper.open_spider(spider)
        self.crawler.stats.open_spider(spider)
        yield self.signals.send_catch_log_deferred(signals.spider_opened, spider=spider)
        slot.nextcall.schedule()
        slot.heartbeat.start(5)
    def _spider_idle(self, spider):
        """Called when a spider gets idle. This function is called when there
        are no remaining pages to download or schedule. It can be called
        multiple times. If some extension raises a DontCloseSpider exception
        (in the spider_idle signal handler) the spider is not closed until the
        next loop and this function is guaranteed to be called (at least) once
        again for this spider.
        """
        res = self.signals.send_catch_log(signal=signals.spider_idle, \
            spider=spider, dont_log=DontCloseSpider)
        if any(isinstance(x, Failure) and isinstance(x.value, DontCloseSpider) \
                for _, x in res):
            return
        if self.spider_is_idle(spider):
            self.close_spider(spider, reason='finished')
    def close_spider(self, spider, reason='cancelled'):
        """Close (cancel) spider and clear all its outstanding requests"""
        slot = self.slot
        if slot.closing:
            return slot.closing
        logger.info("Closing spider (%(reason)s)",
                    {'reason': reason},
                    extra={'spider': spider})
        dfd = slot.close()
        def log_failure(msg):
            # build an errback that logs *msg* without breaking the chain
            def errback(failure):
                logger.error(
                    msg,
                    exc_info=failure_to_exc_info(failure),
                    extra={'spider': spider}
                )
            return errback
        # teardown chain: each step runs regardless of earlier failures
        dfd.addBoth(lambda _: self.downloader.close())
        dfd.addErrback(log_failure('Downloader close failure'))
        dfd.addBoth(lambda _: self.scraper.close_spider(spider))
        dfd.addErrback(log_failure('Scraper close failure'))
        dfd.addBoth(lambda _: slot.scheduler.close(reason))
        dfd.addErrback(log_failure('Scheduler close failure'))
        dfd.addBoth(lambda _: self.signals.send_catch_log_deferred(
            signal=signals.spider_closed, spider=spider, reason=reason))
        dfd.addErrback(log_failure('Error while sending spider_close signal'))
        dfd.addBoth(lambda _: self.crawler.stats.close_spider(spider, reason=reason))
        dfd.addErrback(log_failure('Stats close failure'))
        dfd.addBoth(lambda _: logger.info("Spider closed (%(reason)s)",
                                          {'reason': reason},
                                          extra={'spider': spider}))
        dfd.addBoth(lambda _: setattr(self, 'slot', None))
        dfd.addErrback(log_failure('Error while unassigning slot'))
        dfd.addBoth(lambda _: setattr(self, 'spider', None))
        dfd.addErrback(log_failure('Error while unassigning spider'))
        dfd.addBoth(lambda _: self._spider_closed_callback(spider))
        return dfd
    def _close_all_spiders(self):
        dfds = [self.close_spider(s, reason='shutdown') for s in self.open_spiders]
        dlist = defer.DeferredList(dfds)
        return dlist
    @defer.inlineCallbacks
    def _finish_stopping_engine(self):
        yield self.signals.send_catch_log_deferred(signal=signals.engine_stopped)
        self._closewait.callback(None)
| 37.939828 | 130 | 0.624726 | import logging
from time import time
from twisted.internet import defer, task
from twisted.python.failure import Failure
from scrapy import signals
from scrapy.core.scraper import Scraper
from scrapy.exceptions import DontCloseSpider
from scrapy.http import Response, Request
from scrapy.utils.misc import load_object
from scrapy.utils.reactor import CallLaterOnce
from scrapy.utils.log import logformatter_adapter, failure_to_exc_info
logger = logging.getLogger(__name__)
class Slot(object):
def __init__(self, start_requests, close_if_idle, nextcall, scheduler):
self.closing = False
self.inprogress = set()
self.start_requests = iter(start_requests)
self.close_if_idle = close_if_idle
self.nextcall = nextcall
self.scheduler = scheduler
self.heartbeat = task.LoopingCall(nextcall.schedule)
def add_request(self, request):
self.inprogress.add(request)
def remove_request(self, request):
self.inprogress.remove(request)
self._maybe_fire_closing()
def close(self):
self.closing = defer.Deferred()
self._maybe_fire_closing()
return self.closing
def _maybe_fire_closing(self):
if self.closing and not self.inprogress:
if self.nextcall:
self.nextcall.cancel()
if self.heartbeat.running:
self.heartbeat.stop()
self.closing.callback(None)
class ExecutionEngine(object):
def __init__(self, crawler, spider_closed_callback):
self.crawler = crawler
self.settings = crawler.settings
self.signals = crawler.signals
self.logformatter = crawler.logformatter
self.slot = None
self.spider = None
self.running = False
self.paused = False
self.scheduler_cls = load_object(self.settings['SCHEDULER'])
downloader_cls = load_object(self.settings['DOWNLOADER'])
self.downloader = downloader_cls(crawler)
self.scraper = Scraper(crawler)
self._spider_closed_callback = spider_closed_callback
@defer.inlineCallbacks
def start(self):
assert not self.running, "Engine already running"
self.start_time = time()
yield self.signals.send_catch_log_deferred(signal=signals.engine_started)
self.running = True
self._closewait = defer.Deferred()
yield self._closewait
def stop(self):
assert self.running, "Engine not running"
self.running = False
dfd = self._close_all_spiders()
return dfd.addBoth(lambda _: self._finish_stopping_engine())
def close(self):
if self.running:
return self.stop()
elif self.open_spiders:
return self._close_all_spiders()
else:
return defer.succeed(self.downloader.close())
def pause(self):
self.paused = True
def unpause(self):
self.paused = False
def _next_request(self, spider):
slot = self.slot
if not slot:
return
if self.paused:
return
while not self._needs_backout(spider):
if not self._next_request_from_scheduler(spider):
break
if slot.start_requests and not self._needs_backout(spider):
try:
request = next(slot.start_requests)
except StopIteration:
slot.start_requests = None
except Exception:
slot.start_requests = None
logger.error('Error while obtaining start requests',
exc_info=True, extra={'spider': spider})
else:
self.crawl(request, spider)
if self.spider_is_idle(spider) and slot.close_if_idle:
self._spider_idle(spider)
def _needs_backout(self, spider):
slot = self.slot
return not self.running \
or slot.closing \
or self.downloader.needs_backout() \
or self.scraper.slot.needs_backout()
def _next_request_from_scheduler(self, spider):
slot = self.slot
request = slot.scheduler.next_request()
if not request:
return
d = self._download(request, spider)
d.addBoth(self._handle_downloader_output, request, spider)
d.addErrback(lambda f: logger.info('Error while handling downloader output',
exc_info=failure_to_exc_info(f),
extra={'spider': spider}))
d.addBoth(lambda _: slot.remove_request(request))
d.addErrback(lambda f: logger.info('Error while removing request from slot',
exc_info=failure_to_exc_info(f),
extra={'spider': spider}))
d.addBoth(lambda _: slot.nextcall.schedule())
d.addErrback(lambda f: logger.info('Error while scheduling new request',
exc_info=failure_to_exc_info(f),
extra={'spider': spider}))
return d
def _handle_downloader_output(self, response, request, spider):
assert isinstance(response, (Request, Response, Failure)), response
if isinstance(response, Request):
self.crawl(response, spider)
return
d = self.scraper.enqueue_scrape(response, request, spider)
d.addErrback(lambda f: logger.error('Error while enqueuing downloader output',
exc_info=failure_to_exc_info(f),
extra={'spider': spider}))
return d
def spider_is_idle(self, spider):
if not self.scraper.slot.is_idle():
return False
if self.downloader.active:
return False
if self.slot.start_requests is not None:
return False
if self.slot.scheduler.has_pending_requests():
return False
return True
@property
def open_spiders(self):
return [self.spider] if self.spider else []
def has_capacity(self):
return not bool(self.slot)
def crawl(self, request, spider):
assert spider in self.open_spiders, \
"Spider %r not opened when crawling: %s" % (spider.name, request)
self.schedule(request, spider)
self.slot.nextcall.schedule()
def schedule(self, request, spider):
self.signals.send_catch_log(signal=signals.request_scheduled,
request=request, spider=spider)
if not self.slot.scheduler.enqueue_request(request):
self.signals.send_catch_log(signal=signals.request_dropped,
request=request, spider=spider)
def download(self, request, spider):
d = self._download(request, spider)
d.addBoth(self._downloaded, self.slot, request, spider)
return d
def _downloaded(self, response, slot, request, spider):
slot.remove_request(request)
return self.download(response, spider) \
if isinstance(response, Request) else response
def _download(self, request, spider):
slot = self.slot
slot.add_request(request)
def _on_success(response):
assert isinstance(response, (Response, Request))
if isinstance(response, Response):
response.request = request
logkws = self.logformatter.crawled(request, response, spider)
logger.log(*logformatter_adapter(logkws), extra={'spider': spider})
self.signals.send_catch_log(signal=signals.response_received, \
response=response, request=request, spider=spider)
return response
def _on_complete(_):
slot.nextcall.schedule()
return _
dwld = self.downloader.fetch(request, spider)
dwld.addCallbacks(_on_success)
dwld.addBoth(_on_complete)
return dwld
@defer.inlineCallbacks
def open_spider(self, spider, start_requests=(), close_if_idle=True):
assert self.has_capacity(), "No free spider slot when opening %r" % \
spider.name
logger.info("Spider opened", extra={'spider': spider})
nextcall = CallLaterOnce(self._next_request, spider)
scheduler = self.scheduler_cls.from_crawler(self.crawler)
start_requests = yield self.scraper.spidermw.process_start_requests(start_requests, spider)
slot = Slot(start_requests, close_if_idle, nextcall, scheduler)
self.slot = slot
self.spider = spider
yield scheduler.open(spider)
yield self.scraper.open_spider(spider)
self.crawler.stats.open_spider(spider)
yield self.signals.send_catch_log_deferred(signals.spider_opened, spider=spider)
slot.nextcall.schedule()
slot.heartbeat.start(5)
def _spider_idle(self, spider):
res = self.signals.send_catch_log(signal=signals.spider_idle, \
spider=spider, dont_log=DontCloseSpider)
if any(isinstance(x, Failure) and isinstance(x.value, DontCloseSpider) \
for _, x in res):
return
if self.spider_is_idle(spider):
self.close_spider(spider, reason='finished')
def close_spider(self, spider, reason='cancelled'):
slot = self.slot
if slot.closing:
return slot.closing
logger.info("Closing spider (%(reason)s)",
{'reason': reason},
extra={'spider': spider})
dfd = slot.close()
def log_failure(msg):
def errback(failure):
logger.error(
msg,
exc_info=failure_to_exc_info(failure),
extra={'spider': spider}
)
return errback
dfd.addBoth(lambda _: self.downloader.close())
dfd.addErrback(log_failure('Downloader close failure'))
dfd.addBoth(lambda _: self.scraper.close_spider(spider))
dfd.addErrback(log_failure('Scraper close failure'))
dfd.addBoth(lambda _: slot.scheduler.close(reason))
dfd.addErrback(log_failure('Scheduler close failure'))
dfd.addBoth(lambda _: self.signals.send_catch_log_deferred(
signal=signals.spider_closed, spider=spider, reason=reason))
dfd.addErrback(log_failure('Error while sending spider_close signal'))
dfd.addBoth(lambda _: self.crawler.stats.close_spider(spider, reason=reason))
dfd.addErrback(log_failure('Stats close failure'))
dfd.addBoth(lambda _: logger.info("Spider closed (%(reason)s)",
{'reason': reason},
extra={'spider': spider}))
dfd.addBoth(lambda _: setattr(self, 'slot', None))
dfd.addErrback(log_failure('Error while unassigning slot'))
dfd.addBoth(lambda _: setattr(self, 'spider', None))
dfd.addErrback(log_failure('Error while unassigning spider'))
dfd.addBoth(lambda _: self._spider_closed_callback(spider))
return dfd
def _close_all_spiders(self):
dfds = [self.close_spider(s, reason='shutdown') for s in self.open_spiders]
dlist = defer.DeferredList(dfds)
return dlist
@defer.inlineCallbacks
def _finish_stopping_engine(self):
yield self.signals.send_catch_log_deferred(signal=signals.engine_stopped)
self._closewait.callback(None)
| true | true |
f71fb24b1ca2ef3817592da8e3c5f8b5ac48df99 | 780 | py | Python | nicos_mlz/kws2/setups/uvspectro.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_mlz/kws2/setups/uvspectro.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_mlz/kws2/setups/uvspectro.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | # -*- coding: utf-8 -*-
description = 'controlling the UV-vis spectrometer and LEDs'
group = 'optional'
# base URI of the Tango server exposing the spectrometer PLC devices
tango_base = 'tango://phys.kws2.frm2:10000/kws2/'
devices = dict(
    OceanView = device('nicos.devices.entangle.DigitalOutput',
        description = 'spectrometer trigger interval (0 to switch off)',
        tangodevice = tango_base + 'uvspectro/plc_trigger',
    ),
    LEDdelay = device('nicos.devices.entangle.DigitalOutput',
        description = 'delay for LEDs switching on',
        tangodevice = tango_base + 'uvspectro/plc_leddelay',
    ),
    LEDswitch = device('nicos.devices.entangle.NamedDigitalOutput',
        description = 'LED switcher',
        tangodevice = tango_base + 'uvspectro/plc_led',
        # human-readable LED states mapped to the PLC's raw values
        mapping = {'off': 0, 'uv': 1, 'blue': 2},
    ),
)
| 33.913043 | 72 | 0.65641 |
description = 'controlling the UV-vis spectrometer and LEDs'
group = 'optional'
tango_base = 'tango://phys.kws2.frm2:10000/kws2/'
devices = dict(
OceanView = device('nicos.devices.entangle.DigitalOutput',
description = 'spectrometer trigger interval (0 to switch off)',
tangodevice = tango_base + 'uvspectro/plc_trigger',
),
LEDdelay = device('nicos.devices.entangle.DigitalOutput',
description = 'delay for LEDs switching on',
tangodevice = tango_base + 'uvspectro/plc_leddelay',
),
LEDswitch = device('nicos.devices.entangle.NamedDigitalOutput',
description = 'LED switcher',
tangodevice = tango_base + 'uvspectro/plc_led',
mapping = {'off': 0, 'uv': 1, 'blue': 2},
),
)
| true | true |
f71fb300004e91ff987107bb558165bb8d7b340e | 14,538 | py | Python | chatto_transform/datastores/sqlalchemy_datastore.py | chatto-hub-test2/Spaceboy2 | 7b6b91baf06290e6b047ae75e7ea61cee4846b3a | [
"Unlicense",
"MIT"
] | null | null | null | chatto_transform/datastores/sqlalchemy_datastore.py | chatto-hub-test2/Spaceboy2 | 7b6b91baf06290e6b047ae75e7ea61cee4846b3a | [
"Unlicense",
"MIT"
] | null | null | null | chatto_transform/datastores/sqlalchemy_datastore.py | chatto-hub-test2/Spaceboy2 | 7b6b91baf06290e6b047ae75e7ea61cee4846b3a | [
"Unlicense",
"MIT"
] | null | null | null | import pandas
from ..schema.schema_base import *
from .datastore_base import DataStore
from .odo_datastore import OdoDataStore
from ..config import config
from functools import lru_cache, partial
from sqlalchemy import Table, MetaData, select
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import Select, and_
from sqlalchemy import sql
import io
import tempfile
import time
import os
import datetime
import ciso8601
import odo
metadatas = {}
def get_engine_metadata(engine):
    """Return the MetaData cached for *engine*, creating and binding one
    on first use. One MetaData is shared per engine for the process."""
    try:
        return metadatas[engine]
    except KeyError:
        fresh = MetaData()
        fresh.bind = engine
        metadatas[engine] = fresh
        return fresh
def get_reflected_metadata(engine, schema_name=None):
    """Reflect the existing tables of *engine* (optionally restricted to one
    database schema) into a freshly created, engine-bound MetaData."""
    reflected = MetaData()
    reflected.reflect(bind=engine, schema=schema_name)
    reflected.bind = engine
    return reflected
########################################################################
# The 'sqlalchemy' storage target reuses the pandas checks/transforms for
# these column types; cat and id_ get dedicated handlers below.
for col_type in [dt, delta, num, bool_]:
    col_type._storage_target_registry['sqlalchemy'] = col_type._storage_target_registry['pandas'].copy()
# NOTE: the functions below are all named `_` on purpose — the decorators
# register them and the name itself is never used again.
@cat.register_check('sqlalchemy')
def _(col):
    # categorical columns are held as plain object dtype for sqlalchemy
    return col.dtype == 'object'
@cat.register_transform('sqlalchemy')
def _(col):
    return col.astype('object')
@id_.register_check('sqlalchemy')
def _(col):
    # id columns are also handled as object dtype for sqlalchemy
    return col.dtype == 'object'
@id_.register_transform('sqlalchemy')
def _(col):
    return col.astype('object')
########################################################################
# Map each schema column type to the SQLAlchemy Column it produces when a
# Schema is turned into a Table (all columns are nullable).
@cat.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.Text, nullable=True)
@id_.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.Integer, nullable=True)
@dt.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.DateTime(timezone=True), nullable=True)
@delta.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.Interval, nullable=True)
@big_dt.register_metadata('sqlalchemy')
def _(self):
    # big_dt uses the same DateTime column as dt; the difference is only in
    # how values are parsed when loading (see fast_*_to_df)
    return sql.schema.Column(self.name, sql.sqltypes.DateTime(timezone=True), nullable=True)
@num.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.Float, nullable=True)
@bool_.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.Boolean, nullable=True)
########################################################################
@lru_cache()
def schema_as_table(schema, engine):
    """Build (and memoize per schema/engine pair) the SQLAlchemy Table
    corresponding to *schema*, using the engine's shared MetaData."""
    table_prefixes = ['TEMPORARY'] if schema.options.get('temporary', False) else []
    sa_columns = [col.metadata('sqlalchemy') for col in schema.cols]
    return Table(
        schema.name,
        get_engine_metadata(engine),
        *sa_columns,
        schema=schema.options.get('db_schema', None),
        prefixes=table_prefixes)
# SQLAlchemy type -> schema column type, used when reflecting a Table back
# into a Schema. NOTE(review): table_as_schema takes the FIRST isinstance
# match while iterating this dict, so insertion order can matter for types
# related by subclassing — confirm before reordering.
sa_type_2_col_type = {
    sql.sqltypes.Integer: num,
    sql.sqltypes.String: cat,
    sql.sqltypes.Date: dt,
    sql.sqltypes.DateTime: dt,
    sql.sqltypes.Interval: delta,
    sql.sqltypes.Numeric: num,
    sql.sqltypes.Boolean: bool_
}
def table_as_schema(table):
    """Derive a Schema from a SQLAlchemy Table.

    Each column maps to the first matching entry of sa_type_2_col_type,
    except integer primary/foreign keys, which become id_ columns. The
    table's database schema (if any) is recorded in the options.
    """
    derived_cols = []
    for sa_col in table.c:
        for sa_type, col_type in sa_type_2_col_type.items():
            if not isinstance(sa_col.type, sa_type):
                continue
            is_key = sa_col.primary_key or sa_col.foreign_keys
            if isinstance(sa_col.type, sql.sqltypes.Integer) and is_key:
                derived_cols.append(id_(sa_col.name))
            else:
                derived_cols.append(col_type(sa_col.name))
            break
    opts = {}
    if table.schema is not None:
        opts['db_schema'] = table.schema
    return Schema(table.name, derived_cols, options=opts)
########################################################################
def fast_sql_to_df(table, schema):
    """Load *table* into a DataFrame, preferring a dialect-specific bulk
    export (mysql OUTFILE / postgres COPY); otherwise fall back to odo."""
    dialect = table.bind.dialect.name
    if dialect == 'mysql':
        return fast_mysql_to_df(table, schema)
    if dialect == 'postgresql':
        return fast_postgresql_to_df(table, schema)
    # generic (slower) path through odo
    frame = OdoDataStore(schema, table).load()
    return frame[schema.col_names()]
def fast_mysql_to_df(table, schema):
    """Load *table* (a Table or selectable) into a DataFrame via MySQL's
    SELECT ... INTO OUTFILE, which is much faster than row-by-row fetching.

    The server writes a CSV to a temp file (so the server must share a
    filesystem with this process), pandas parses it, and datetime columns
    are converted afterwards: dt columns with the vectorized parser
    (unparseable values become NaT), big_dt columns via strptime so
    timestamps outside the datetime64[ns] range survive as python datetimes.
    """
    f = tempfile.NamedTemporaryFile('w', suffix='.csv', dir=config.data_dir+'tmp')
    try:
        # close immediately: only the unique name is needed; MySQL writes it
        f.close()
        table_name = str(table)
        if not isinstance(table, Table):
            # non-Table selectables must be parenthesized as a subquery
            table_name = '({})'.format(table_name)
        # BUG FIX: the OUTFILE path previously contained a literal placeholder
        # instead of '{filename}', so the filename kwarg was never used.
        sql = """SELECT {cols} FROM {table} INTO OUTFILE '{filename}'
            FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'
            ESCAPED BY '\\\\'
            LINES TERMINATED BY '\n'""".format(
            cols=', '.join('`'+colname+'`' for colname in schema.col_names()),
            filename=f.name,
            table=table_name)
        table.bind.execute(sql)
        # OUTFILE emits no header row; \N marks SQL NULLs
        df = pandas.read_csv(f.name, header=None, names=schema.col_names(), na_values=['\\N'])
    finally:
        os.remove(f.name)
    for col in schema.cols:
        if isinstance(col, dt):
            # BUG FIX: 'coerce=True' was removed from pandas.to_datetime;
            # errors='coerce' is the supported spelling (invalid -> NaT)
            df[col.name] = pandas.to_datetime(df[col.name], format="%Y-%m-%d %H:%M:%S", errors='coerce')
        if isinstance(col, big_dt):
            # out-of-range timestamps can't use datetime64; keep python datetimes
            strptime = datetime.datetime.strptime
            parse_func = (lambda x: strptime(x, "%Y-%m-%d %H:%M:%S"))
            df[col.name] = df[col.name].map(parse_func, na_action='ignore')
    return df
def fast_postgresql_to_df(table, schema):
    """Load *table* (a Table or selectable) into a DataFrame using postgres
    COPY ... TO STDOUT through psycopg2's copy_expert, buffered in memory.

    Datetime columns are converted after parsing: dt columns with the
    vectorized parser (unparseable values become NaT), big_dt columns via
    strptime so timestamps outside the datetime64[ns] range survive as
    python datetimes.
    """
    engine = table.bind
    conn = engine.raw_connection()
    with conn.cursor() as cur:
        with io.StringIO() as f:
            table_name = str(table)
            if not isinstance(table, Table):
                # non-Table selectables must be parenthesized inside COPY
                table_name = '({})'.format(table_name)
            sql = "COPY {table_name} TO STDOUT WITH (FORMAT CSV, HEADER TRUE)".format(
                table_name=table_name)
            cur.copy_expert(sql, f)
            f.seek(0)
            df = pandas.read_csv(f)
    for col in schema.cols:
        if isinstance(col, dt):
            # BUG FIX: 'coerce=True' was removed from pandas.to_datetime;
            # errors='coerce' is the supported spelling (invalid -> NaT)
            df[col.name] = pandas.to_datetime(df[col.name], format="%Y-%m-%d %H:%M:%S", errors='coerce')
        if isinstance(col, big_dt):
            # out-of-range timestamps can't use datetime64; keep python datetimes
            strptime = datetime.datetime.strptime
            parse_func = (lambda x: strptime(x, "%Y-%m-%d %H:%M:%S"))
            df[col.name] = df[col.name].map(parse_func, na_action='ignore')
    return df
def fast_postgresql_to_csv(table, file_path):
    """Stream *table* (a Table or selectable) into a local CSV file using
    postgres COPY ... TO STDOUT via psycopg2's copy_expert."""
    conn = table.bind.raw_connection()
    with conn.cursor() as cur:
        target = str(table)
        if not isinstance(table, Table):
            # non-Table selectables must be parenthesized inside COPY
            target = '({})'.format(target)
        copy_sql = "COPY {table_name} TO STDOUT WITH (FORMAT CSV, HEADER TRUE)".format(
            table_name=target)
        with open(file_path, 'w') as out:
            cur.copy_expert(copy_sql, out)
def fast_df_to_sql(df, table, schema):
    """Bulk-insert DataFrame *df* into *table* via odo, converting the frame
    through the 'sqlalchemy' storage target registered for *schema*."""
    ods = OdoDataStore(schema, table, storage_target_type='sqlalchemy')
    ods.store(df)
class SATableDataStore(DataStore):
def __init__(self, schema, engine, where_clauses=None):
super().__init__(schema)
self.engine = engine
self.table = schema_as_table(self.schema, self.engine)
self.where_clauses = where_clauses
def storage_target(self):
return 'sqlalchemy'
def _load(self):
query = self.table
if self.where_clauses is not None:
query = query.select()
for where_clause in self.where_clauses:
query = query.where(where_clause)
df = fast_sql_to_df(query, self.schema)
return df
def to_csv(self, file_path):
if self.engine.dialect.name != 'postgresql':
raise NotImplementedError('converting directly to csv not supported for non-postgres databases')
query = self.table
if self.where_clauses is not None:
query = query.select()
for where_clause in self.where_clauses:
query = query.where(where_clause)
fast_postgresql_to_csv(query, file_path)
def _store(self, df):
if self.where_clauses is not None:
raise NotImplementedError('Cannot store to a query (where_clauses must be left blank)')
df = df.copy()
fast_df_to_sql(self.table, self.schema)
def _update(self, df):
if self.where_clauses is not None:
raise NotImplementedError('Cannot update to a query (where_clauses must be left blank)')
df = df.copy()
with self.engine.connect() as conn:
temp_schema = Schema.rename(self.schema, 'temp_'+self.schema.name)
temp_schema.options['temporary'] = True
temp_table = schema_as_table(temp_schema, self.engine)
print('storing new df in temp table')
fast_df_to_sql(df, temp_table, temp_schema)
print('updating table from matching rows')
index = self.schema.options['index']
update = self.table.update(
values={
col_name: temp_table.c[col_name] for col_name in self.schema.col_names()
},
whereclause=self.table.c[index] == temp_table.c[index]
)
update_res = conn.execute(update)
print('inserting new rows into table')
exists_query = self.table.select().where(self.table.c[index] == temp_table.c[index]).exists()
insert = self.table.insert().from_select(
temp_schema.col_names(),
temp_table.select().where(~exists_query))
ins_res = conn.execute(insert)
    def delete(self):
        """Drop the backing table (refuses when this store is a filtered view)."""
        if self.where_clauses is not None:
            raise NotImplementedError('Cannot delete a query (where_clauses must be left blank)')
        self.table.drop(self.engine)
class SAJoinDataStore(DataStore):
    """Read-only DataStore over an outer join of a root table and related tables.

    has_schemas        -- child tables, joined on child.<root_prefix>_id == root.id
    belongs_to_schemas -- parent tables, joined on root.<their_prefix>_id == parent.id
    Either list may contain (schema, extra_join_conditions) tuples.
    """
    def __init__(self, root_schema, engine, has_schemas=None, belongs_to_schemas=None, root_conditions=None, where_clauses=None):
        self.engine = engine
        self.root_schema = root_schema
        self.root_table = schema_as_table(self.root_schema, self.engine)
        self.has_schemas, self.has_join_conditions = self._parse_schema_list(has_schemas)
        self.has_tables = [schema_as_table(h_schema, self.engine) for h_schema in self.has_schemas]
        self.belongs_to_schemas, self.belongs_to_join_conditions = self._parse_schema_list(belongs_to_schemas)
        self.belongs_to_tables = [schema_as_table(b_schema, self.engine) for b_schema in self.belongs_to_schemas]
        self.root_conditions = root_conditions
        self.where_clauses = where_clauses
        # the composite schema carries every joined column, prefixed per source table
        schema = Schema.union([self.root_schema] + self.has_schemas + self.belongs_to_schemas, with_prefix=True, schema_name=self.root_schema.name+'_join')
        super().__init__(schema)
    def _parse_schema_list(self, schema_list=None):
        """Split [(schema, conditions) | schema, ...] into (schemas, {schema: conditions})."""
        if schema_list is None:
            schema_list = []
        schemas = []
        join_conditions = {}
        for schema in schema_list:
            if isinstance(schema, tuple):
                schema, j_c = schema
                join_conditions[schema] = j_c
            schemas.append(schema)
        return schemas, join_conditions
    def storage_target(self):
        """Identify the backing storage type of this DataStore."""
        return 'sqlalchemy'
    def _load(self):
        """Materialize the join into a temp table, then read it back as a DataFrame."""
        root = self.root_table
        if self.root_conditions is not None:
            # pre-filter the root rows before joining
            root = root.select().where(and_(*self.root_conditions)).alias()
        join_clause = root
        select_clause = []
        root_col_prefix = self.root_schema.options['prefix']
        for col in root.c:
            select_clause.append(col.label("{}.{}".format(root_col_prefix, col.name)))
        # outer-join each child ("has") table on its <root_prefix>_id foreign key
        for h_table, h_schema in zip(self.has_tables, self.has_schemas):
            col_prefix = h_schema.options['prefix']
            h_join_conditions = [root.c.id == h_table.c['{}_id'.format(root_col_prefix)]]
            for join_condition in self.has_join_conditions.get(h_schema, []):
                h_join_conditions.append(join_condition)
            join_clause = join_clause.outerjoin(h_table, and_(*h_join_conditions))
            for col in h_table.c:
                select_clause.append(col.label("{}.{}".format(col_prefix, col.name)))
        # outer-join each parent ("belongs to") table on the root's FK to it
        for b_table, b_schema in zip(self.belongs_to_tables, self.belongs_to_schemas):
            col_prefix = b_schema.options['prefix']
            b_join_conditions = [root.c['{}_id'.format(col_prefix)] == b_table.c.id]
            for join_condition in self.belongs_to_join_conditions.get(b_schema, []):
                b_join_conditions.append(join_condition)
            join_clause = join_clause.outerjoin(b_table, and_(*b_join_conditions))
            for col in b_table.c:
                select_clause.append(col.label("{}.{}".format(col_prefix, col.name)))
        temp_schema = Schema.rename(self.schema, 'temp_'+self.schema.name)
        temp_table = schema_as_table(temp_schema, self.engine)
        try:
            temp_table.create(self.engine)
            query = select(select_clause).select_from(join_clause)
            if self.where_clauses is not None:
                query = query.where(and_(*self.where_clauses))
            # INSERT ... SELECT runs the join server-side into the temp table
            insert = temp_table.insert().from_select(temp_schema.col_names(), query)
            start = time.time()
            print('executing join into temp table')
            self.engine.execute(insert)
            joined = time.time()
            print('loading rows from temp table')
            df = fast_sql_to_df(temp_table, temp_schema)
            loaded = time.time()
        finally:
            temp_table.drop(self.engine)
        print('type checking and sorting')
        print('took', joined - start, 'seconds to perform the join')
        print('took', loaded - joined, 'seconds to load the results')
        return df
class SAQueryDataStore(DataStore):
    """Read-only DataStore that loads the result of an arbitrary SQL query."""
    def __init__(self, schema, engine, query):
        # Route the schema through the base class like the sibling stores
        # (SADataStore/SAJoinDataStore) do, instead of assigning self.schema
        # directly and skipping any base-class initialization.
        super().__init__(schema)
        self.engine = engine
        self.query = query
    def storage_target(self):
        """Identify the backing storage type, consistent with the sibling stores."""
        return 'sqlalchemy'
    def _load(self):
        """Execute the raw query and return the result set as a DataFrame."""
        df = pandas.read_sql(self.query, self.engine)
        return df
| 35.896296 | 155 | 0.623882 | import pandas
from ..schema.schema_base import *
from .datastore_base import DataStore
from .odo_datastore import OdoDataStore
from ..config import config
from functools import lru_cache, partial
from sqlalchemy import Table, MetaData, select
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import Select, and_
from sqlalchemy import sql
import io
import tempfile
import time
import os
import datetime
import ciso8601
import odo
metadatas = {}
def get_engine_metadata(engine):
    """Return the cached MetaData bound to *engine*, creating it on first use."""
    try:
        return metadatas[engine]
    except KeyError:
        md = MetaData()
        md.bind = engine
        metadatas[engine] = md
        return md
def get_reflected_metadata(engine, schema_name=None):
    """Build a fresh MetaData reflected from *engine* (optionally one DB schema)."""
    reflected = MetaData()
    reflected.reflect(bind=engine, schema=schema_name)
    reflected.bind = engine
    return reflected
)
return schemas, join_conditions
def storage_target(self):
return 'sqlalchemy'
def _load(self):
root = self.root_table
if self.root_conditions is not None:
root = root.select().where(and_(*self.root_conditions)).alias()
join_clause = root
select_clause = []
root_col_prefix = self.root_schema.options['prefix']
for col in root.c:
select_clause.append(col.label("{}.{}".format(root_col_prefix, col.name)))
for h_table, h_schema in zip(self.has_tables, self.has_schemas):
col_prefix = h_schema.options['prefix']
h_join_conditions = [root.c.id == h_table.c['{}_id'.format(root_col_prefix)]]
for join_condition in self.has_join_conditions.get(h_schema, []):
h_join_conditions.append(join_condition)
join_clause = join_clause.outerjoin(h_table, and_(*h_join_conditions))
for col in h_table.c:
select_clause.append(col.label("{}.{}".format(col_prefix, col.name)))
for b_table, b_schema in zip(self.belongs_to_tables, self.belongs_to_schemas):
col_prefix = b_schema.options['prefix']
b_join_conditions = [root.c['{}_id'.format(col_prefix)] == b_table.c.id]
for join_condition in self.belongs_to_join_conditions.get(b_schema, []):
b_join_conditions.append(join_condition)
join_clause = join_clause.outerjoin(b_table, and_(*b_join_conditions))
for col in b_table.c:
select_clause.append(col.label("{}.{}".format(col_prefix, col.name)))
temp_schema = Schema.rename(self.schema, 'temp_'+self.schema.name)
temp_table = schema_as_table(temp_schema, self.engine)
try:
temp_table.create(self.engine)
query = select(select_clause).select_from(join_clause)
if self.where_clauses is not None:
query = query.where(and_(*self.where_clauses))
insert = temp_table.insert().from_select(temp_schema.col_names(), query)
start = time.time()
print('executing join into temp table')
self.engine.execute(insert)
joined = time.time()
print('loading rows from temp table')
df = fast_sql_to_df(temp_table, temp_schema)
loaded = time.time()
finally:
temp_table.drop(self.engine)
print('type checking and sorting')
print('took', joined - start, 'seconds to perform the join')
print('took', loaded - joined, 'seconds to load the results')
return df
class SAQueryDataStore(DataStore):
def __init__(self, schema, engine, query):
self.engine = engine
self.query = query
self.schema = schema
def _load(self):
df = pandas.read_sql(self.query, self.engine)
return df
| true | true |
f71fb3203ce46c39849c5a3bac229726738a23a1 | 4,139 | py | Python | src/core/network/llnms-scan-network.py | marvins/LLNMS | ebc15418e1a5dddafdb3e55cea4e8cb71f619b2d | [
"MIT"
] | null | null | null | src/core/network/llnms-scan-network.py | marvins/LLNMS | ebc15418e1a5dddafdb3e55cea4e8cb71f619b2d | [
"MIT"
] | null | null | null | src/core/network/llnms-scan-network.py | marvins/LLNMS | ebc15418e1a5dddafdb3e55cea4e8cb71f619b2d | [
"MIT"
] | 1 | 2020-12-16T09:28:26.000Z | 2020-12-16T09:28:26.000Z | #!/usr/bin/env python
#
# File: llnms-scan-network.py
# Author: Marvin Smith
# Date: 6/13/2015
#
# Purpose: Scan LLNMS networks
#
__author__ = 'Marvin Smith'
# Python Libraries
import argparse, os, sys
# LLNMS Libraries
# Make the LLNMS python libraries importable when LLNMS_HOME is configured.
# BUG FIX: os.environ['LLNMS_HOME'] raises KeyError when the variable is unset,
# so the original `is not None` guard could never observe a missing variable.
if os.environ.get('LLNMS_HOME') is not None:
    sys.path.append(os.environ['LLNMS_HOME'] + '/lib')
import llnms
# --------------------------------------------- #
# - Parse Command-Line Arguments - #
# --------------------------------------------- #
def Parse_Command_Line():
    """Define and parse the command-line interface for the network-scan tool."""
    cli = argparse.ArgumentParser(description='Scan an LLNMS network.')

    #  -v / --version : print the version string and exit
    cli.add_argument('-v', '--version',
                     action='version',
                     version='%(prog)s ' + llnms.info.Get_Version_String(),
                     help='Print the version information.')

    #  --verbose : chatty output
    cli.add_argument('--verbose',
                     dest='verbose_flag',
                     required=False,
                     default=False,
                     action='store_true',
                     help='Print with verbose output.')

    #  --quiet : suppress output
    cli.add_argument('--quiet',
                     required=False,
                     default=False,
                     action='store_true',
                     help='Do not print output.')

    #  -n / --network : which network definition to scan (required)
    cli.add_argument('-n', '--network',
                     required=True,
                     dest='network_input',
                     help='ID of the network to scan.')

    #  -s / --scanner : which scanner definition to use (required)
    cli.add_argument('-s', '--scanner',
                     required=True,
                     dest='scanner_input',
                     help='ID of the scanner to use.')

    #  -om / --output-mode : where the results go
    cli.add_argument('-om', '--output-mode',
                     required=False,
                     dest='output_mode',
                     default=None,
                     help='Output mode. Supported options are xml and stdout. If xml provided, then user must provide filename.')

    return cli.parse_args()
# ---------------------------- #
# - Main Function - #
# ---------------------------- #
def Main():
    """Entry point: scan one LLNMS network with one of its registered scanners."""

    # Grab LLNMS_HOME.  BUG FIX: os.environ['LLNMS_HOME'] raises KeyError when
    # the variable is unset; .get() returns None as the original code intended.
    llnms_home = os.environ.get('LLNMS_HOME')

    # Parse Command-Line Arguments
    options = Parse_Command_Line()

    # Load the network definition
    network = llnms.Network.find_network(network_name=options.network_input,
                                         llnms_home=llnms_home)

    # Make sure we found a network
    if network is None:
        raise Exception('No network found matching name ' + options.network_input)

    # Print the Network if Verbose
    if options.verbose_flag is True:
        print(network.To_Debug_String())

    # Load the scanner definition
    scanner = llnms.Scanner.find_scanner(scanner_id=options.scanner_input,
                                         llnms_home=llnms_home)

    # Make sure we found a scanner
    if scanner is None:
        raise Exception('No scanner found matching name ' + options.scanner_input)

    # Print scanner if verbose
    if options.verbose_flag is True:
        print(scanner.To_Debug_String())

    # Validate the scanner is registered within the network
    if network.Has_Scanner(scanner_id=scanner.id) is False:
        raise Exception("Network does not have a scanner registered with id=" + scanner.id)

    # Run scan on network
    results = scanner.Run_Scan_Range(endpoint_list=network.Get_Network_Range(),
                                     arg_list=network.Get_Scanner_Args(scanner.id),
                                     num_threads=4)

    # Print results.  BUG FIX: `xrange` does not exist in Python 3 (this file
    # already uses urllib.request), so the original loop raised NameError;
    # pair the two lists instead.
    addresses = network.Get_Network_Range()
    for address, result in zip(addresses, results):
        print(address + ' - ' + str(result))
# ----------------------------- #
# - Run Main Script - #
# ----------------------------- #
if __name__ == '__main__':
Main()
| 32.590551 | 132 | 0.530563 |
__author__ = 'Marvin Smith'
import argparse, os, sys
if os.environ['LLNMS_HOME'] is not None:
sys.path.append(os.environ['LLNMS_HOME'] + '/lib')
import llnms
def Parse_Command_Line():
parser = argparse.ArgumentParser( description='Scan an LLNMS network.' )
parser.add_argument('-v', '--version',
action='version',
version='%(prog)s ' + llnms.info.Get_Version_String(),
help='Print the version information.')
parser.add_argument('--verbose',
dest='verbose_flag',
required=False,
default=False,
action='store_true',
help='Print with verbose output.')
parser.add_argument('--quiet',
required=False,
default=False,
action='store_true',
help='Do not print output.')
parser.add_argument('-n', '--network',
required=True,
dest='network_input',
help='ID of the network to scan.')
parser.add_argument('-s', '--scanner',
required=True,
dest='scanner_input',
help='ID of the scanner to use.')
parser.add_argument('-om', '--output-mode',
required=False,
dest='output_mode',
default=None,
help='Output mode. Supported options are xml and stdout. If xml provided, then user must provide filename.')
return parser.parse_args()
def Main():
llnms_home=None
if os.environ['LLNMS_HOME'] is not None:
llnms_home=os.environ['LLNMS_HOME']
options = Parse_Command_Line()
network = llnms.Network.find_network( network_name=options.network_input,
llnms_home=llnms_home)
if network is None:
raise Exception('No network found matching name ' + options.network_input)
if options.verbose_flag is True:
print(network.To_Debug_String())
scanner = llnms.Scanner.find_scanner( scanner_id=options.scanner_input,
llnms_home=llnms_home )
if scanner is None:
raise Exception('No scanner found matching name ' + options.scanner_input)
if options.verbose_flag is True:
print(scanner.To_Debug_String())
if network.Has_Scanner( scanner_id=scanner.id ) is False:
raise Exception("Network does not have a scanner registered with id=" + scanner.id )
results = scanner.Run_Scan_Range(endpoint_list=network.Get_Network_Range(),
arg_list=network.Get_Scanner_Args(scanner.id),
num_threads=4)
addresses = network.Get_Network_Range()
for x in xrange(0, len(results)):
print(addresses[x] + ' - ' + str(results[x]))
if __name__ == '__main__':
Main()
| true | true |
f71fb3d05a7fffde16b9485af0a723ccfc10ba6f | 16,760 | py | Python | maven_proj_graph/pkg1/mvnsortmod1.py | lg-alabris/swagger-ui | fdb06ad6dc3dd9c416b08c8f7909c37cfcf1ece4 | [
"Apache-2.0"
] | null | null | null | maven_proj_graph/pkg1/mvnsortmod1.py | lg-alabris/swagger-ui | fdb06ad6dc3dd9c416b08c8f7909c37cfcf1ece4 | [
"Apache-2.0"
] | null | null | null | maven_proj_graph/pkg1/mvnsortmod1.py | lg-alabris/swagger-ui | fdb06ad6dc3dd9c416b08c8f7909c37cfcf1ece4 | [
"Apache-2.0"
] | null | null | null | '''
======================================================================
Created on Jan 14, 2018
PURPOSE: this module provides classes to read Maven projects from git or other repos
specifically intended to create the graph of multiple project dependencies
ROADMAP: TODO -
1. review how properties are distributed and could break things
2. review subproject dependencies on top level, are props declared?
3. review parent POM, are props declared?
4. are external property files used?
@author: Larry
======================================================================
'''
import os
import subprocess
#import json
#import xml.etree.ElementTree as ET
#import urllib2
#import csv
import xml.etree.cElementTree as ET
import re
import urllib.request
#=======================================================================
# static functions and constants
class Util(object):
    """Namespace of static helpers shared by the POM-parsing classes."""

    # XML namespace map for Maven POM files (the default POM namespace).
    mvn_pom_ns = {"mvn": "http://maven.apache.org/POM/4.0.0"}

    def __init__(self):
        pass

    @staticmethod
    def get_tag_value(name, section):
        """Return the text of child element *name* under *section*, or '' if absent.

        Uses the mvn namespace prefix, so *name* is the bare POM tag name.
        """
        elem = section.find('mvn:%s' % name, Util.mvn_pom_ns)
        # `is None` instead of `== None`: Element.find returns None when the
        # tag is missing, and identity is the correct (and idiomatic) test.
        if elem is None:
            return ''
        return elem.text

    @staticmethod
    def get_path(dirs):
        """Join path components with '/' and no trailing slash."""
        # equivalent to the old append-then-trim loop, without the O(n^2) concat
        return '/'.join(dirs)

    @staticmethod
    def run_process_2(cmd_args):
        """Run *cmd_args* through the shell and echo its stdout (debug helper)."""
        # NOTE(review): the `cd ..` result is discarded (it runs in its own
        # shell), and shell=True is a command-injection risk if cmd_args ever
        # carries untrusted input.
        result = subprocess.run(['cd', '..'], stdout=subprocess.PIPE, shell=True)
        result = subprocess.run(cmd_args, stdout=subprocess.PIPE, shell=True)
        print(result.stdout.decode('utf-8'))

    @staticmethod
    def run_process(cmd_args, args_in):
        """Run *cmd_args*, optionally feeding *args_in* to its stdin; echo stdout."""
        cmd = subprocess.Popen(cmd_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
        if args_in:
            cmd.stdin.write(args_in.encode('utf-8'))
            cmd.stdin.flush()  # ensure the data is passed to the child process
        result = cmd.stdout.read()
        # NOTE(review): prints run unconditionally, so a None args_in would
        # crash on .encode() below — same as the original behavior.
        print(args_in.encode('utf-8'))
        print(result)
        # Left from experimentation with driving a Windows shell session:
        #   cmdline = ["cmd", "/q", "/k", "echo off"]
        #   batch = b"rem vsinstr ... vsperfcmd /start:coverage ... exit"

    def test_map_update(self):
        """Scratch experiment: dict.update overwrites duplicate keys."""
        A = {'a': 1, 'b': 2, 'c': 3}
        B = {'c': 99, 'd': 4, 'e': 5}
        A.update(B)
        print(A)
#=======================================================================
# identifies Maven coordinates for a project or dependnecy
class MavenCoords(object):
def __init__(self, element, props):
if (not element):
self.groupid =''
self.artifactid = ''
self.version = ''
self.scope = ''
self.relative_path = ''
self.key =''
return
self.groupid = Util.get_tag_value('groupId', element)
self.artifactid = Util.get_tag_value('artifactId', element)
self.version = Util.get_tag_value('version', element)
self.relative_path = Util.get_tag_value('relativePath', element)
self.scope = Util.get_tag_value('scope', element)
self.refresh_key(props)
def refresh_key(self, props):
if (props and self.version in props):
self.version = props[self.version]
self.key = '%s|%s|%s' % (self.groupid, self.artifactid, self.version)
#=======================================================================
# a maven project POM complete with properties and dependencies
class MavenProject(object):
    """A Maven POM: name, packaging, coordinates, properties, and dependencies.

    Constructing a project registers it in *project_map* (keyed by its Maven
    coordinate key) and recursively loads any <modules> sub-projects.
    """
    def __init__(self, pom_url, project_map):
        #dirs = pom_url.split('/')
        self.pom_url = pom_url;
        self.project_map = project_map
        # parse the POM from a local path or an http(s) URL
        self.pom_file = self.get_pom_file(self.pom_url)
        self.name = Util.get_tag_value('name', self.pom_file)
        self.packaging = Util.get_tag_value('packaging', self.pom_file)
        # inherit properties/dependencies from the parent POM (if any),
        # then let this POM's own sections override/extend them
        self.init_from_parent()
        self.properties.update(self.get_properties(self.pom_file))
        self.coord = MavenCoords(self.pom_file, self.properties)
        self.dependencies.update(self.get_dependencies(self.pom_file))
        self.project_map[self.coord.key] = self
        self.get_sub_modules(self.pom_file)
        self.history = []    # coordinate keys already reordered behind this project (cycle detection)
        self.consumers = []  # projects that depend on this one (filled by the graph)
        #if self.packaging =='pom':
    # parent pom's will always be pre-existent to child pom's. they will be looked
    # up by coord key from the global graph / project list
    def init_from_parent(self):
        """Copy properties/dependencies from the parent POM, or start empty."""
        parent_section = self.pom_file.findall('mvn:parent', Util.mvn_pom_ns)
        if (parent_section):
            self.parent_coord = MavenCoords(parent_section[0], None)
            parent = self.project_map[self.parent_coord.key]
            # NOTE(review): the dict lookup above raises KeyError for a missing
            # parent, so this else branch looks unreachable; if it were reached,
            # `parent.key` would itself fail on a falsy parent — verify.
            if (parent):
                self.properties = parent.properties.copy()
                self.dependencies = parent.dependencies.copy()
            else:
                print('Error: POM {} has unresolved parent POM reference {}'.format(self.name, parent.key))
        else:
            # no parent: start with empty maps and a blank coordinate
            self.dependencies = {}
            self.properties = {}
            self.coord = MavenCoords(None, None)
            dirs = self.pom_url.split('/')
            print(dirs)
            print (Util.get_path(dirs))
    def get_sub_modules(self, pom_file):
        """Load each <modules> entry as a MavenProject and register it."""
        section = pom_file.findall('mvn:modules', Util.mvn_pom_ns)
        self.modules = {}
        if (not section):
            return
        for elem in section[0].findall('*'):
            sub_proj = self.get_sub_module(elem.text)
            self.modules[sub_proj.coord.key] = sub_proj
            self.project_map[sub_proj.coord.key] = sub_proj
    def get_sub_module(self, sub_dir):
        """Build the sub-module's pom.xml path relative to this POM and load it."""
        dirs = self.pom_url.split('/')
        x = len(dirs)
        # replace the trailing component with pom.xml, inserting the module dir
        dirs[x-1] = 'pom.xml'
        dirs.insert(x-1, sub_dir)
        path = Util.get_path(dirs)
        module = MavenProject(path, self.project_map)
        return module
    def get_properties(self, pom):
        """Return {'${name}': value} for each child of the <properties> section."""
        section = pom.findall('mvn:properties', Util.mvn_pom_ns)
        props = {}
        if (len(section)==0):
            return props
        for elem in section[0].findall('*'):
            # strip the '{namespace}' prefix from the tag, then wrap as ${name}
            k = re.sub('{.*?}', '', elem.tag)
            k = '${%s}' % k
            props[k] = elem.text
        return props
    def get_dependencies(self, pom):
        """Return {coord_key: MavenCoords} for each declared <dependency>."""
        section = pom.findall('mvn:dependencies', Util.mvn_pom_ns)
        deps_map = {}
        if (len(section)==0):
            return deps_map
        for dep_section in section[0].findall('mvn:dependency', Util.mvn_pom_ns):
            obj = MavenCoords(dep_section, self.properties)
            deps_map[obj.key] = obj
        return deps_map
    @staticmethod
    def get_pom_file(pomfile):
        """Parse a POM from an http(s) URL or local path; return the root element."""
        if pomfile.find("http://") >=0 or pomfile.find("https://") >=0:
            opener = urllib.request.build_opener()
            pom = ET.parse( opener.open(pomfile) ).getroot()
        else:
            pom = ET.parse(pomfile).getroot()
        return pom
    def logx(self, level):
        """Print a debug dump; level 0 = header only, otherwise deps/props/consumers too."""
        print()
        print('---------Maven Project---------')
        #print('key: %s * Group: %s * Id: %s * Ver: %s' % (self.coord.key, self.coord.groupid, self.coord.artifactid, self.coord.version))
        print('key: {0} * Name: {1} * Group: {2} * Id: {3} * Ver: {4}'.format(self.coord.key, self.name, self.coord.groupid, self.coord.artifactid, self.coord.version))
        print()
        if level ==0:
            return
        print(' dependencies')
        for k, v in self.dependencies.items():
            print(' key: %s * Group: %s * Id: %s * Ver: %s' % (k, v.groupid, v.artifactid, v.version))
        print()
        print(' properties: ', self.properties)
        print (' consumers')
        for proj in self.consumers:
            print(' ', proj.coord.key)
class DAGerror(Exception):
    """Raised when dependency resolution detects a cycle in the project graph."""

    def __init__(self, arg):
        # BUG FIX: forward the message to Exception so str(e)/print(e) show it;
        # previously only .arg was stored, so `print(e)` emitted an empty string.
        super().__init__(arg)
        self.arg = arg
#=======================================================================
#
class MavenProjectGraph(object):
    """Loads a set of POM files and orders the projects into a dependency DAG."""
    def __init__(self, pom_url_list):
        self.pom_url_list = pom_url_list
        self.proj_list = []   # projects, eventually in dependency order
        self.proj_map = {}    # coord key -> MavenProject
        #self.validation = {}
    def generate_pom_list(self):
        """Load every POM (sub-modules included) and snapshot proj_map into proj_list."""
        for pom_url in self.pom_url_list:
            MavenProject(pom_url, self.proj_map)
            #self.proj_list.append(proj)
            #self.proj_map[proj.coord.key] = proj
        # constructing a MavenProject registers it (and its modules) in proj_map
        self.proj_list = list(self.proj_map.values())
        for proj in self.proj_list:
            proj.logx(1) #$$
        print()
    def set_options(self):
        pass
    # PURPOSE: sort the list in DAG dependency order and capture each project consumers
    #
    #
    def resolve_graph(self):
        """Sort the project list into dependency order, then record consumers."""
        self.resolve_dependencies()
        self.resolve_consumers()
    # PURPOSE: reorder the project list such that each projects dependencies appear before that project
    #
    # NOTE #1: iterate thru the list looking fwd in the list for each project's dependencies
    #          for each dependency found, move it behind that project
    #
    # NOTE #2: the DAG is complete when the list is scanned and no dependencies exist fwd of each project
    #
    # NOTE #3: a history of each dependency relocation is maintained for each project;
    #          a circular reference is detected when the same relocation repeats
    #
    def resolve_dependencies(self):
        """Bubble each project's dependencies in front of it; DAGerror on a cycle."""
        try:
            while True:
                for p in self.proj_list:
                    print(p.name)
                i = 0
                #dependency_found = False
                while i < len(self.proj_list):
                    dependency_found = False
                    proj_base = self.proj_list[i]
                    print('loop i={}, base={}'.format(i, proj_base.name))
                    j = i + 1
                    while j < len(self.proj_list):
                        print(' loop j {}'.format(j))
                        proj_scan = self.proj_list[j]
                        # a forward project dependency is found for the base project, move it behind the base project
                        if proj_scan.coord.key in proj_base.dependencies:
                            # dejavu - a repeated reorder indicates circular dependency
                            if proj_scan.coord.key in proj_base.history:
                                raise DAGerror("Error: base project - {} - encountered duplicate reorder for dependency - {} -".format
                                               ( proj_base.name, proj_scan.name))
                            # remove the fwd item first to avoid order issues
                            del self.proj_list[j] #self.proj_list.remove(j)
                            # insert behind the base project
                            self.proj_list.insert(i, proj_scan)
                            print(' reorded scan {} from j={} to i={}'.format( proj_scan.name, j, i))
                            for p in self.proj_list:
                                print(p.name)
                            proj_base.history.append(proj_scan.coord.key)
                            dependency_found = True
                            i = i -1
                            break
                        j =j+1 # while j
                    i=i+1 # while i
                # repeat outer loop until nothing is reordered
                # NOTE(review): dependency_found is only bound inside the while-i
                # body; an empty proj_list would raise NameError here — verify
                # callers never resolve an empty graph.
                if not dependency_found:
                    break
                else:
                    i = 0
        except DAGerror as e:
            print(e)
    # PURPOSE: for each project in the list, discover the set of consuming projects
    #
    # NOTE #1: call this method AFTER the dependency graph has been properly resolved
    #          consuming projects will be forward in the list
    #
    def resolve_consumers(self):
        """Record, for every project, the later projects that depend on it."""
        for i in range(len(self.proj_list)):
            proj_base = self.proj_list[i]
            j = i
            while j < len(self.proj_list)-1:
                j = j+1
                proj_scan = self.proj_list[j]
                if (proj_base.coord.key in proj_scan.dependencies):
                    proj_base.consumers.append(proj_scan)
    def list_projects(self):
        """Dump every project (dependencies, properties, consumers) to stdout."""
        for proj in self.proj_list:
            proj.logx(1)
#==========================================================================
def main():
    """Build, resolve, and dump the dependency graph for a fixed set of POM files."""
    # NOTE(review): hard-coded developer-machine paths; parameterize before real
    # use.  The superseded pom_files lists and the unused mvn command-line
    # scaffolding (s/s2 and the commented run_process calls) were dead code and
    # have been removed — only the final assignment was ever read.
    pom_files = ['C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-A.xml',
                 'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-B.xml',
                 'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-C.xml',
                 'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-D.xml',
                 ]
    graph = MavenProjectGraph(pom_files)
    graph.generate_pom_list()
    graph.resolve_graph()
    graph.list_projects()
#==========================================================================
# see this article for opening remote xml files
# https://stackoverflow.com/questions/28238713/python-xml-parsing-lxml-urllib-request
def main2():
    """Experimental scratch path: parse a local POM, then a remote GitHub POM."""
    cwd = os.getcwd()
    cwd = 'D:\\devspaces\\wks4\\py1\\'
    pom_file = cwd + 'snipits2.xml'
    pom_file = 'D:\\devspaces\\wks4\\py1\\snipits2.xml'
    pom = ET.parse(pom_file).getroot()
    # https://github.com/LeonardoZ/java-concurrency-patterns.git
    # this is the correct pattern for reading single files from github
    # https://raw.githubusercontent.com/user/repository/branch/filename
    # this is the web page containing the file
    # 'https://github.com/LeonardoZ/java-concurrency-patterns/blob/master/pom.xml'
    pom_file_url = 'https://raw.githubusercontent.com/LeonardoZ/java-concurrency-patterns/master/pom.xml'
    opener = urllib.request.build_opener()
    f = opener.open(pom_file_url)
    # ng, file=urllib.urlopen(file=urllib.urlopen())
    #parser = ET.HTMLParser()
    #with urlopen('https://pypi.python.org/simple') as f:
    #tree = ET.parse(f, parser)
    #pom_file = urllib.request.urlopen(pom_file)
    pom = ET.parse(opener.open(pom_file_url)).getroot()
    # NOTE(review): this experimental path is stale — MavenProject.__init__ now
    # takes (pom_url, project_map) and logx() requires a level argument, so both
    # calls below would raise TypeError if main2() were ever run.
    project = MavenProject(pom)
    project.logx()
if __name__ == '__main__':
main()
#main()
'''
=====================================================================
notes:
alternatives - use maven to get equiv pom
> mvn help:effective-pom
https://stackoverflow.com/questions/4760215/running-shell-command-from-python-and-capturing-the-output
'''
| 36.514161 | 178 | 0.515752 | import os
import subprocess
import xml.etree.cElementTree as ET
import re
import urllib.request
class Util(object):
mvn_pom_ns = {"mvn":"http://maven.apache.org/POM/4.0.0"}
def __init__(self):
pass
@staticmethod
def get_tag_value(name, section):
s = ('mvn:%s' % name)
elem = section.find(s, Util.mvn_pom_ns)
if elem ==None:
return''
return elem.text
@staticmethod
def get_path(dirs):
path = ''
for d in dirs:
path += d + '/'
return path[:len(path) -1]
@staticmethod
def run_process_2(cmd_args):
result = subprocess.run(['cd', '..'], stdout=subprocess.PIPE, shell=True)
result = subprocess.run(cmd_args, stdout=subprocess.PIPE, shell=True)
print(result.stdout.decode('utf-8'))
@staticmethod
def run_process(cmd_args, args_in):
cmd = subprocess.Popen(cmd_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
if (args_in):
cmd.stdin.write(args_in.encode('utf-8'))
cmd.stdin.flush()
result = cmd.stdout.read()
print(args_in.encode('utf-8'))
print(result)
def test_map_update(self):
A = {'a':1, 'b':2, 'c': 3}
B = {'c':99, 'd':4, 'e':5}
A.update(B)
print(A)
class MavenCoords(object):
def __init__(self, element, props):
if (not element):
self.groupid =''
self.artifactid = ''
self.version = ''
self.scope = ''
self.relative_path = ''
self.key =''
return
self.groupid = Util.get_tag_value('groupId', element)
self.artifactid = Util.get_tag_value('artifactId', element)
self.version = Util.get_tag_value('version', element)
self.relative_path = Util.get_tag_value('relativePath', element)
self.scope = Util.get_tag_value('scope', element)
self.refresh_key(props)
def refresh_key(self, props):
if (props and self.version in props):
self.version = props[self.version]
self.key = '%s|%s|%s' % (self.groupid, self.artifactid, self.version)
class MavenProject(object):
def __init__(self, pom_url, project_map):
self.pom_url = pom_url;
self.project_map = project_map
self.pom_file = self.get_pom_file(self.pom_url)
self.name = Util.get_tag_value('name', self.pom_file)
self.packaging = Util.get_tag_value('packaging', self.pom_file)
self.init_from_parent()
self.properties.update(self.get_properties(self.pom_file))
self.coord = MavenCoords(self.pom_file, self.properties)
self.dependencies.update(self.get_dependencies(self.pom_file))
self.project_map[self.coord.key] = self
self.get_sub_modules(self.pom_file)
self.history = []
self.consumers = []
def init_from_parent(self):
parent_section = self.pom_file.findall('mvn:parent', Util.mvn_pom_ns)
if (parent_section):
self.parent_coord = MavenCoords(parent_section[0], None)
parent = self.project_map[self.parent_coord.key]
if (parent):
self.properties = parent.properties.copy()
self.dependencies = parent.dependencies.copy()
else:
print('Error: POM {} has unresolved parent POM reference {}'.format(self.name, parent.key))
else:
self.dependencies = {}
self.properties = {}
self.coord = MavenCoords(None, None)
dirs = self.pom_url.split('/')
print(dirs)
print (Util.get_path(dirs))
def get_sub_modules(self, pom_file):
section = pom_file.findall('mvn:modules', Util.mvn_pom_ns)
self.modules = {}
if (not section):
return
for elem in section[0].findall('*'):
sub_proj = self.get_sub_module(elem.text)
self.modules[sub_proj.coord.key] = sub_proj
self.project_map[sub_proj.coord.key] = sub_proj
def get_sub_module(self, sub_dir):
dirs = self.pom_url.split('/')
x = len(dirs)
dirs[x-1] = 'pom.xml'
dirs.insert(x-1, sub_dir)
path = Util.get_path(dirs)
module = MavenProject(path, self.project_map)
return module
def get_properties(self, pom):
section = pom.findall('mvn:properties', Util.mvn_pom_ns)
props = {}
if (len(section)==0):
return props
for elem in section[0].findall('*'):
k = re.sub('{.*?}', '', elem.tag)
k = '${%s}' % k
props[k] = elem.text
return props
def get_dependencies(self, pom):
section = pom.findall('mvn:dependencies', Util.mvn_pom_ns)
deps_map = {}
if (len(section)==0):
return deps_map
for dep_section in section[0].findall('mvn:dependency', Util.mvn_pom_ns):
obj = MavenCoords(dep_section, self.properties)
deps_map[obj.key] = obj
return deps_map
@staticmethod
def get_pom_file(pomfile):
if pomfile.find("http://") >=0 or pomfile.find("https://") >=0:
opener = urllib.request.build_opener()
pom = ET.parse( opener.open(pomfile) ).getroot()
else:
pom = ET.parse(pomfile).getroot()
return pom
def logx(self, level):
print()
print('---------Maven Project---------')
print('key: {0} * Name: {1} * Group: {2} * Id: {3} * Ver: {4}'.format(self.coord.key, self.name, self.coord.groupid, self.coord.artifactid, self.coord.version))
print()
if level ==0:
return
print(' dependencies')
for k, v in self.dependencies.items():
print(' key: %s * Group: %s * Id: %s * Ver: %s' % (k, v.groupid, v.artifactid, v.version))
print()
print(' properties: ', self.properties)
print (' consumers')
for proj in self.consumers:
print(' ', proj.coord.key)
class DAGerror(Exception):
def __init__(self, arg):
self.arg = arg
class MavenProjectGraph(object):
    """Orders a set of Maven projects into dependency order (a DAG / topological
    order) by repeatedly moving each project's dependencies in front of it.
    """

    def __init__(self, pom_url_list):
        # POM locations (paths or URLs) to load.
        self.pom_url_list = pom_url_list
        # Projects in (eventually) dependency order.
        self.proj_list = []
        # Shared registry populated by MavenProject as POMs are parsed.
        self.proj_map = {}

    def generate_pom_list(self):
        """Parse every POM; MavenProject registers itself into self.proj_map."""
        for pom_url in self.pom_url_list:
            MavenProject(pom_url, self.proj_map)
        self.proj_list = list(self.proj_map.values())
        for proj in self.proj_list:
            proj.logx(1)
        print()

    def set_options(self):
        # Placeholder; no options implemented yet.
        pass

    def resolve_graph(self):
        """Resolve dependency order, then derive each project's consumers."""
        self.resolve_dependencies()
        self.resolve_consumers()

    # NOTE #2: the DAG is complete when the list is scanned and no dependencies exist fwd of each project
    #
    # NOTE #3: a history of each dependency relocation is maintained for each project
    #          a circular reference will be detected if that
    #
    def resolve_dependencies(self):
        """Bubble dependencies in front of their dependents until stable.

        A project seen twice in a base project's relocation history indicates
        a circular dependency and raises DAGerror (caught and printed here).

        NOTE(review): if self.proj_list is empty, ``dependency_found`` is never
        assigned before the ``if not dependency_found`` check — NameError.
        Confirm callers always populate the list first.
        """
        try:
            while True:
                for p in self.proj_list:
                    print(p.name)
                i = 0
                #dependency_found = False
                while i < len(self.proj_list):
                    dependency_found = False
                    proj_base = self.proj_list[i]
                    print('loop i={}, base={}'.format(i, proj_base.name))
                    j = i + 1
                    while j < len(self.proj_list):
                        print('    loop j {}'.format(j))
                        proj_scan = self.proj_list[j]
                        # a forward project dependency is found for the base project, move it behind the base project
                        if proj_scan.coord.key in proj_base.dependencies:
                            # dejavu - a repeated reorder indicates circular dependency
                            if proj_scan.coord.key in proj_base.history:
                                raise DAGerror("Error: base project - {} - encountered duplicate reorder for dependency - {} -".format
                                               (proj_base.name, proj_scan.name))
                            # remove the fwd item first to avoid order issues
                            del self.proj_list[j]  #self.proj_list.remove(j)
                            # insert behind the base project
                            self.proj_list.insert(i, proj_scan)
                            print('    reorded scan {} from j={} to i={}'.format( proj_scan.name, j, i))
                            for p in self.proj_list:
                                print(p.name)
                            proj_base.history.append(proj_scan.coord.key)
                            dependency_found = True
                            # step back so the relocated project is re-examined as base
                            i = i - 1
                            break
                        j = j + 1  # while j
                    i = i + 1  # while i
                # repeat outer loop until nothing is reordered
                if not dependency_found:
                    break
                else:
                    i = 0
        except DAGerror as e:
            print(e)

    # PURPOSE: for each project in the list, discover the set of consuming projects
    #
    # NOTE #1: call this method AFTER the dependency graph has been properly resolved
    #          consuming projects will be forward in the list
    #
    def resolve_consumers(self):
        """Record, for each project, every later project that depends on it."""
        for i in range(len(self.proj_list)):
            proj_base = self.proj_list[i]
            j = i
            while j < len(self.proj_list) - 1:
                j = j + 1
                proj_scan = self.proj_list[j]
                if (proj_base.coord.key in proj_scan.dependencies):
                    proj_base.consumers.append(proj_scan)

    def list_projects(self):
        """Print a verbose dump of every project in resolved order."""
        for proj in self.proj_list:
            proj.logx(1)
#==========================================================================
def main():
    """Entry point: build and resolve the dependency graph for a fixed POM set.

    The original body assigned ``pom_files`` three times (only the last value
    survived) and defined unused shell-command scratch variables ``s``/``s2``;
    that dead code is removed here — behavior is unchanged.

    NOTE(review): the POM paths are hard-coded developer-machine paths and
    will not resolve elsewhere.
    """
    pom_files = ['C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-A.xml',
                 'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-B.xml',
                 'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-C.xml',
                 'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-D.xml',
                 ]
    graph = MavenProjectGraph(pom_files)
    graph.generate_pom_list()
    graph.resolve_graph()
    graph.list_projects()
#==========================================================================
# see this article for opening remote xml files
# https://stackoverflow.com/questions/28238713/python-xml-parsing-lxml-urllib-request
def main2():
    """Scratch/experimental entry point for parsing a POM from disk and a URL.

    NOTE(review): this function appears to be dead experimental code — see
    the flagged calls at the bottom before reusing it.
    """
    cwd = os.getcwd()
    cwd = 'D:\\devspaces\\wks4\\py1\\'          # overwrites the real cwd with a fixed path
    pom_file = cwd + 'snipits2.xml'
    pom_file = 'D:\\devspaces\\wks4\\py1\\snipits2.xml'  # overwrites the previous value
    pom = ET.parse(pom_file).getroot()
    # https://github.com/LeonardoZ/java-concurrency-patterns.git
    # this is the correct patttern for reading single files from github
    # https://raw.githubusercontent.com/user/repository/branch/filename
    # this is the web page containing the file
    # 'https://github.com/LeonardoZ/java-concurrency-patterns/blob/master/pom.xml'
    pom_file_url = 'https://raw.githubusercontent.com/LeonardoZ/java-concurrency-patterns/master/pom.xml'
    opener = urllib.request.build_opener()
    f = opener.open(pom_file_url)  # NOTE(review): response `f` is never used or closed
    # ng, file=urllib.urlopen(file=urllib.urlopen())
    #parser = ET.HTMLParser()
    #with urlopen('https://pypi.python.org/simple') as f:
    #tree = ET.parse(f, parser)
    #pom_file = urllib.request.urlopen(pom_file)
    pom = ET.parse(opener.open(pom_file_url)).getroot()
    # NOTE(review): MavenProject is constructed elsewhere as
    # MavenProject(pom_url, proj_map); this one-argument call looks broken — confirm.
    project = MavenProject(pom)
    # NOTE(review): logx(self, level) requires a level argument; this call
    # would raise TypeError — confirm intended usage (e.g. project.logx(1)).
    project.logx()
# Script entry point: run the graph demo when executed directly.
if __name__ == '__main__':
    main()
| true | true |
f71fb4b548decad7d92f6c012d1d10217c8e029e | 2,063 | py | Python | Union_Find/1070.Accounts Merge/Solution.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 12 | 2019-05-04T04:21:27.000Z | 2022-03-02T07:06:57.000Z | Union_Find/1070.Accounts Merge/Solution.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 1 | 2019-07-24T18:43:53.000Z | 2019-07-24T18:43:53.000Z | Union_Find/1070.Accounts Merge/Solution.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 10 | 2019-07-01T04:03:04.000Z | 2022-03-09T03:57:37.000Z | from collections import defaultdict
class Solution:
    """LintCode 1070. Accounts Merge.

    Union-find over account indices: accounts sharing at least one email
    address are merged into a single record whose emails are de-duplicated
    and sorted.
    """

    def accountsMerge(self, accounts):
        """Merge accounts that share an email.

        @param accounts: List[List[str]] — each entry is [name, email, email, ...]
        @return: List[List[str]] — merged accounts; emails de-duplicated and sorted

        (This contract docstring was originally a stray class-level string;
        it now lives on the method it describes.)
        """
        merged = []
        if not accounts or len(accounts) == 0:
            return merged

        # forward index: account index -> its emails (grows as sets are unioned)
        self.forward_index = self.create_forward_index(accounts)
        # inverted index: email -> indices of every account listing it
        self.inverted_index = self.create_inverted_index(accounts)
        # union-find parent pointers over account indices
        self.parents = {i: i for i in range(len(accounts)) if len(accounts[i]) >= 1}

        # Union every pair of accounts that share an email.
        for email, people in self.inverted_index.items():
            if len(people) > 1:
                p1 = people[0]
                for i in range(1, len(people)):
                    self.connect(p1, people[i])

        # Emit one record per surviving root: indices whose email list was
        # folded away during union end up empty and are skipped.
        for people, email in self.forward_index.items():
            if len(email) > 0:
                curr = []
                curr.append(accounts[people][0])
                curr.extend(sorted(list(set(email))))
                merged.append(curr)
        return merged

    def create_forward_index(self, accounts):
        """Map each account index to a mutable list of its emails."""
        forward_index = defaultdict(list)
        for idx, account in enumerate(accounts):
            forward_index[idx].extend(account[1:])
        return forward_index

    def create_inverted_index(self, accounts):
        """Map each email to the indices of all accounts that contain it."""
        inverted_index = defaultdict(list)
        for idx, account in enumerate(accounts):
            for email in account[1:]:
                inverted_index[email].append(idx)
        return inverted_index

    def connect(self, p1, p2):
        """Union the sets of p1 and p2, folding p1's emails into p2's root."""
        parent1 = self.find(p1)
        parent2 = self.find(p2)
        if parent2 != parent1:
            self.parents[parent1] = parent2
            self.forward_index[parent2].extend(self.forward_index[parent1])
            self.forward_index[parent1] = []

    def find(self, p):
        """Return the root of p, compressing the path along the way."""
        path = []
        while p != self.parents[p]:
            path.append(p)
            p = self.parents[p]
        for ppl in path:
            self.parents[ppl] = p
        return p
class Solution:
def accountsMerge(self, accounts):
merged = []
if not accounts or len(accounts) == 0:
return merged
self.forward_index = self.create_forward_index(accounts)
self.inverted_index = self.create_inverted_index(accounts)
self.parents = {i : i for i in range(len(accounts)) if len(accounts[i]) >= 1}
for email, people in self.inverted_index.items():
if len(people) > 1:
p1 = people[0]
for i in range(1, len(people)):
self.connect(p1, people[i])
curr = None
for people, email in self.forward_index.items():
if len(email) > 0:
curr = []
curr.append(accounts[people][0])
curr.extend(sorted(list(set(email))))
merged.append(curr)
return merged
def create_forward_index(self, accounts):
forward_index = defaultdict(list)
for idx, account in enumerate(accounts):
forward_index[idx].extend(account[1:])
return forward_index
def create_inverted_index(self, accounts):
inverted_index = defaultdict(list)
for idx, account in enumerate(accounts):
name = account[0]
for email in account[1:]:
inverted_index[email].append(idx)
return inverted_index
def connect(self, p1, p2):
parent1 = self.find(p1)
parent2 = self.find(p2)
if parent2 != parent1:
self.parents[parent1] = parent2
self.forward_index[parent2].extend(self.forward_index[parent1])
self.forward_index[parent1] = []
def find(self, p):
path = []
while p != self.parents[p]:
path.append(p)
p = self.parents[p]
for ppl in path:
self.parents[ppl] = p
return p | true | true |
f71fb575e9c0c22da60dd6194084df2483a9ba88 | 3,979 | py | Python | tests/test_detect_score.py | pgftennis/tennis_analysis_tool | 9f43545fa2b502930ec27a4de634ebc45e65cb19 | [
"MIT"
] | 1 | 2022-01-14T10:35:00.000Z | 2022-01-14T10:35:00.000Z | tests/test_detect_score.py | pgftennis/tennis_analysis_tool | 9f43545fa2b502930ec27a4de634ebc45e65cb19 | [
"MIT"
] | null | null | null | tests/test_detect_score.py | pgftennis/tennis_analysis_tool | 9f43545fa2b502930ec27a4de634ebc45e65cb19 | [
"MIT"
] | null | null | null | import unittest
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent))
sys.path.append(str(Path(__file__).parent.parent / "src/predict"))
import src.predict.detect_score as detect_score
class TestDetectScore(unittest.TestCase):
    """Unit tests for detect_score.DetectScore OCR-text parsing helpers."""

    def setUp(self):  # set up: create a fresh detector for each test
        self.ds = detect_score.DetectScore()

    def test_fix_text(self):
        """fix_text splits multi-digit game tokens into single digits."""
        # bugfix: '10' should be split into '1 0'
        text = "3 6 10 6 3 4 15"
        text = self.ds.fix_text(text)
        self.assertEqual("3 6 1 0 6 3 4 15", text)

    def test_fix_in_ad(self):
        """fix_in_ad inserts the implied '40' opposite an 'Ad' score."""
        print("text_fix_in_ad")
        # 'Ad' last: the opponent's implied '40' is inserted before it
        text_array = ['3', '6', 'Ad']
        text_array = self.ds.fix_in_ad(text_array)
        self.assertEqual(['3', '40', '6', 'Ad'], text_array)
        # 'Ad' in the middle: '40' is appended for the other player
        text_array = ['3', 'Ad', '6']
        text_array = self.ds.fix_in_ad(text_array)
        self.assertEqual(['3', 'Ad', '6', '40'], text_array)
        # Same behavior with leading set scores present
        text_array = ['3', '6', '1', '6', '3', '4', 'Ad']
        text_array = self.ds.fix_in_ad(text_array)
        self.assertEqual(['3', '6', '1', '40', '6', '3', '4', 'Ad'], text_array)
        text_array = ['3', '6', '1', 'Ad', '6', '3', '4']
        text_array = self.ds.fix_in_ad(text_array)
        self.assertEqual(['3', '6', '1', 'Ad', '6', '3', '4', '40'], text_array)

    def test_text2score(self):
        """text2score extracts (game_a, game_b, score_a, score_b) from OCR text."""
        # get_set_text_num: number of whitespace-separated tokens per line
        text = "A 40"
        set_num = self.ds.get_set_text_num(text)
        self.assertEqual(2, set_num)
        text = "4 1 15\n6 1 15"
        set_num = self.ds.get_set_text_num(text)
        self.assertEqual(6, set_num)
        # games + point scores split across lines
        text = "1 15 \n0\n0"
        game_a, game_b, score_a, score_b = self.ds.text2score(text)
        self.assertEqual("1", game_a)
        self.assertEqual("0", game_b)
        self.assertEqual("15", score_a)
        self.assertEqual("0", score_b)
        # advantage scores pass through verbatim
        text = "1 A \n5\n40"
        game_a, game_b, score_a, score_b = self.ds.text2score(text)
        self.assertEqual("1", game_a)
        self.assertEqual("5", game_b)
        self.assertEqual("A", score_a)
        self.assertEqual("40", score_b)
        # point scores only: game counts default to "0"
        text = "30 15"
        game_a, game_b, score_a, score_b = self.ds.text2score(text)
        self.assertEqual("0", game_a)
        self.assertEqual("0", game_b)
        self.assertEqual("30", score_a)
        self.assertEqual("15", score_b)
        text = "A 40"
        game_a, game_b, score_a, score_b = self.ds.text2score(text)
        self.assertEqual("0", game_a)
        self.assertEqual("0", game_b)
        self.assertEqual("A", score_a)
        self.assertEqual("40", score_b)
        # partial input: missing fields come back empty
        text = "15 "
        game_a, game_b, score_a, score_b = self.ds.text2score(text)
        self.assertEqual("0", game_a)
        self.assertEqual("0", game_b)
        self.assertEqual("15", score_a)
        self.assertEqual("", score_b)
        text = ""
        game_a, game_b, score_a, score_b = self.ds.text2score(text)
        self.assertEqual("", game_a)
        self.assertEqual("", game_b)
        self.assertEqual("", score_a)
        self.assertEqual("", score_b)
        # multi-set lines: only the current (last) set and points are returned
        text = "4 1 15\n6 2 30"
        game_a, game_b, score_a, score_b = self.ds.text2score(text)
        self.assertEqual("1", game_a)
        self.assertEqual("15", score_a)
        self.assertEqual("2", game_b)
        self.assertEqual("30", score_b)
        text = "4 6 4 15\n6 2 2 30"
        game_a, game_b, score_a, score_b = self.ds.text2score(text)
        self.assertEqual("4", game_a)
        self.assertEqual("15", score_a)
        self.assertEqual("2", game_b)
        self.assertEqual("30", score_b)
        text = "6 4 6 4 15\n4 6 2 2 30"
        game_a, game_b, score_a, score_b = self.ds.text2score(text)
        self.assertEqual("4", game_a)
        self.assertEqual("15", score_a)
        self.assertEqual("2", game_b)
        self.assertEqual("30", score_b)
        text = "5 6 4 6 4 15\n7 4 6 2 2 30"
        game_a, game_b, score_a, score_b = self.ds.text2score(text)
        self.assertEqual("4", game_a)
        self.assertEqual("15", score_a)
        self.assertEqual("2", game_b)
        self.assertEqual("30", score_b)
# if __name__ == "__main__":
# unittest.main()
| 32.08871 | 78 | 0.60191 | import unittest
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent))
sys.path.append(str(Path(__file__).parent.parent / "src/predict"))
import src.predict.detect_score as detect_score
class TestDetectScore(unittest.TestCase):
def setUp(self):
self.ds=detect_score.DetectScore()
def test_fix_text(self):
text="3 6 10 6 3 4 15"
text=self.ds.fix_text(text)
self.assertEqual("3 6 1 0 6 3 4 15",text)
def test_fix_in_ad(self):
print("text_fix_in_ad")
text_array=['3','6','Ad']
text_array=self.ds.fix_in_ad(text_array)
self.assertEqual(['3', '40','6','Ad'],text_array)
text_array=['3','Ad','6']
text_array=self.ds.fix_in_ad(text_array)
self.assertEqual(['3','Ad','6', '40'],text_array)
text_array=['3', '6', '1', '6', '3', '4', 'Ad']
text_array=self.ds.fix_in_ad(text_array)
self.assertEqual(['3', '6', '1', '40','6', '3', '4', 'Ad'],text_array)
text_array=['3', '6', '1', 'Ad','6', '3', '4']
text_array=self.ds.fix_in_ad(text_array)
self.assertEqual(['3', '6', '1', 'Ad','6', '3', '4', '40'],text_array)
def test_text2score(self):
text="A 40"
set_num = self.ds.get_set_text_num(text)
self.assertEqual(2,set_num)
text="4 1 15\n6 1 15"
set_num = self.ds.get_set_text_num(text)
self.assertEqual(6,set_num)
text="1 15 \n0\n0"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("1",game_a)
self.assertEqual("0",game_b)
self.assertEqual("15",score_a)
self.assertEqual("0",score_b)
text="1 A \n5\n40"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("1",game_a)
self.assertEqual("5",game_b)
self.assertEqual("A",score_a)
self.assertEqual("40",score_b)
text="30 15"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("0",game_a)
self.assertEqual("0",game_b)
self.assertEqual("30",score_a)
self.assertEqual("15",score_b)
text="A 40"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("0",game_a)
self.assertEqual("0",game_b)
self.assertEqual("A",score_a)
self.assertEqual("40",score_b)
text="15 "
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("0",game_a)
self.assertEqual("0",game_b)
self.assertEqual("15",score_a)
self.assertEqual("",score_b)
text=""
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("",game_a)
self.assertEqual("",game_b)
self.assertEqual("",score_a)
self.assertEqual("",score_b)
text="4 1 15\n6 2 30"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("1",game_a)
self.assertEqual("15",score_a)
self.assertEqual("2",game_b)
self.assertEqual("30",score_b)
text="4 6 4 15\n6 2 2 30"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("4",game_a)
self.assertEqual("15",score_a)
self.assertEqual("2",game_b)
self.assertEqual("30",score_b)
text="6 4 6 4 15\n4 6 2 2 30"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("4",game_a)
self.assertEqual("15",score_a)
self.assertEqual("2",game_b)
self.assertEqual("30",score_b)
text="5 6 4 6 4 15\n7 4 6 2 2 30"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("4",game_a)
self.assertEqual("15",score_a)
self.assertEqual("2",game_b)
self.assertEqual("30",score_b)
| true | true |
f71fb5d233f60d4940a1f40506e107449b9cb848 | 837 | py | Python | aql/connector.py | ryansb/aql | bc9f11aaf06caabe15981fb33b5ef37a60ce700a | [
"MIT"
] | 24 | 2020-07-16T11:47:28.000Z | 2021-12-02T20:38:52.000Z | aql/connector.py | ryansb/aql | bc9f11aaf06caabe15981fb33b5ef37a60ce700a | [
"MIT"
] | 37 | 2020-10-17T14:04:05.000Z | 2022-02-05T05:01:08.000Z | aql/connector.py | ryansb/aql | bc9f11aaf06caabe15981fb33b5ef37a60ce700a | [
"MIT"
] | 1 | 2019-10-26T03:45:16.000Z | 2019-10-26T03:45:16.000Z | # Copyright 2020 John Reese
# Licensed under the MIT license
import re
from typing import Any, Pattern, Union
from .engines.base import Connection
from .errors import InvalidURI
from .types import Location
# Matches "engine://rest" connection URIs: group "engine" captures the backend
# name, group "location" captures everything after "://".
_uri_regex: Pattern = re.compile(r"(?P<engine>\w+)://(?P<location>.+)")
def connect(location: Union[str, Location], *args: Any, **kwargs: Any) -> Connection:
    """Connect to the specified database.

    A string location must look like "engine://database"; it is parsed into a
    Location. Extra args/kwargs are forwarded to the engine's connector.
    Raises InvalidURI for strings that do not match the URI pattern.
    """
    if isinstance(location, str):
        uri_match = _uri_regex.match(location)
        if uri_match is None:
            raise InvalidURI(f"Invalid database connection URI {location}")
        engine, database = uri_match.groups()
        location = Location(engine, database=database)
    factory, engine_cls = Connection.get_connector(location.engine)
    return factory(engine_cls(), location, *args, **kwargs)
| 32.192308 | 85 | 0.688172 |
import re
from typing import Any, Pattern, Union
from .engines.base import Connection
from .errors import InvalidURI
from .types import Location
_uri_regex: Pattern = re.compile(r"(?P<engine>\w+)://(?P<location>.+)")
def connect(location: Union[str, Location], *args: Any, **kwargs: Any) -> Connection:
if isinstance(location, str):
match = _uri_regex.match(location)
if match:
engine, database = match.groups()
location = Location(engine, database=database)
else:
raise InvalidURI(f"Invalid database connection URI {location}")
connector, engine_kls = Connection.get_connector(location.engine)
return connector(engine_kls(), location, *args, **kwargs)
| true | true |
f71fb6028cbb2b09f79e06a91c06d14c015af377 | 2,712 | py | Python | utils.py | TNLC/pycalc | a60e996c5e4e1b6fdae5da124864cdf9a7178d19 | [
"Apache-2.0"
] | null | null | null | utils.py | TNLC/pycalc | a60e996c5e4e1b6fdae5da124864cdf9a7178d19 | [
"Apache-2.0"
] | null | null | null | utils.py | TNLC/pycalc | a60e996c5e4e1b6fdae5da124864cdf9a7178d19 | [
"Apache-2.0"
] | null | null | null | import math
from rply import LexerGenerator, ParserGenerator
def build_lexer():
    """Build the calculator lexer (rply).

    Bug fixes versus the original:
    - The EXP rule was ``r'^|\\*\\*'``: the unescaped ``^`` is a regex anchor
      (it matches the empty string at the scan position), not a literal caret.
      It is now escaped: ``r'\\^|\\*\\*'``.
    - rply tries token rules in the order they are added, so EXP must be
      registered BEFORE MUL — otherwise ``**`` lexes as two MUL tokens and the
      parser's EXP production can never fire.
    """
    lexer_generator = LexerGenerator()

    # Ignore whitespace.
    lexer_generator.ignore(r'\s+')

    # Numbers: optional minus, optional integer part, mandatory trailing digits.
    # NOTE(review): the leading '-?' means "5-3" (no spaces) lexes the '-3' as
    # a NUM rather than SUB NUM — confirm whether unary minus is intended here.
    lexer_generator.add('NUM', r'-?[0-9]*\.?[0-9]+')

    # Operators (EXP first — see docstring).
    lexer_generator.add('EXP', r'\^|\*\*')    # 'EXP'   => EXPONENTIATE
    lexer_generator.add('ADD', r'\+')         # 'ADD'   => ADD
    lexer_generator.add('SUB', r'-')          # 'SUB'   => SUBTRACT
    lexer_generator.add('MUL', r'\*')         # 'MUL'   => MULTIPLY
    lexer_generator.add('DIV', r'/')          # 'DIV'   => DIVIDE
    lexer_generator.add('MOD', r'%')          # 'MOD'   => MODULO
    lexer_generator.add('BR_O', r'\(')        # 'BR_O'  => BRACKET OPEN
    lexer_generator.add('BR_C', r'\)')        # 'BR_C'  => BRACKET CLOSE
    lexer_generator.add('ABS_P', r'\|')       # 'ABS_P' => ABSOLUTE PART

    # Build and return the lexer.
    return lexer_generator.build()
def build_parser():
    """Build the calculator parser (rply).

    Bug fix versus the original: the absolute-value production returned
    ``x[0]`` — the leading '|' Token — instead of ``x[1]``, the evaluated
    expression, so ``|expr|`` raised a TypeError when compared to 0.
    The handler functions are also given distinct names (they all shadowed
    ``term_zahl``); rply registers handlers at decoration time, so naming is
    cosmetic but aids debugging.

    NOTE(review): EXP shares the factor level with DIV/MOD/MUL and is
    left-associative; conventional precedence would bind '**' tighter and
    right-associative. Left unchanged to preserve existing behavior.
    """
    parser_generator = ParserGenerator([
        'NUM',
        'ADD', 'SUB', 'MUL', 'DIV', 'MOD', 'EXP',
        'ABS_P',
        'BR_O', 'BR_C'
    ])

    @parser_generator.production('main : expr')
    def main_expr(x): return x[0]

    @parser_generator.production('expr : factor')
    def expr_factor(x): return x[0]

    @parser_generator.production('expr : expr SUB factor')
    def expr_sub(x): return x[0] - x[2]

    @parser_generator.production('expr : expr ADD factor')
    def expr_add(x): return x[0] + x[2]

    # Standard arithmetic operations at the factor level.
    @parser_generator.production('factor : term')
    def factor_term(x): return x[0]

    @parser_generator.production('factor : factor EXP term')
    def factor_exp(x): return x[0] ** x[2]

    @parser_generator.production('factor : factor DIV term')
    def factor_div(x): return x[0] / x[2]

    @parser_generator.production('factor : factor MOD term')
    def factor_mod(x): return x[0] % x[2]

    @parser_generator.production('factor : factor MUL term')
    def factor_mul(x): return x[0] * x[2]

    @parser_generator.production('term : NUM')
    def term_num(x): return float(x[0].getstr())

    # Parentheses.
    @parser_generator.production('term : BR_O expr BR_C')
    def term_parens(x): return x[1]

    # Absolute value: |expr| — BUG FIX: use x[1] (the expression), not x[0] (the '|' token).
    @parser_generator.production('term : ABS_P expr ABS_P')
    def term_abs(x): return x[1] if x[1] >= 0 else x[1] * -1

    return parser_generator.build()
# Module-level singletons shared by importers of this module.
lexer = build_lexer()
parser = build_parser()
| 32.674699 | 68 | 0.625369 | import math
from rply import LexerGenerator, ParserGenerator
def build_lexer():
lexer_generator = LexerGenerator()
lexer_generator.ignore(r'\s+')
lexer_generator.add('NUM', r'-?[0-9]*\.?[0-9]+')
lexer_generator.add('ADD', r'\+')
lexer_generator.add('SUB', r'-')
lexer_generator.add('MUL', r'\*')
lexer_generator.add('DIV', r'/')
lexer_generator.add('MOD', r'%')
lexer_generator.add('EXP', r'^|\*\*')
lexer_generator.add('BR_O', r'\(')
lexer_generator.add('BR_C', r'\)')
lexer_generator.add('ABS_P', r'\|')
return lexer_generator.build()
def build_parser():
parser_generator = ParserGenerator([
'NUM',
'ADD', 'SUB', 'MUL', 'DIV', 'MOD', 'EXP',
'ABS_P',
'BR_O', 'BR_C'
])
@parser_generator.production('main : expr')
def main(x): return x[0]
@parser_generator.production('expr : factor')
def term_zahl(x): return x[0]
@parser_generator.production('expr : expr SUB factor')
def term_zahl(x): return x[0] - x[2]
@parser_generator.production('expr : expr ADD factor')
def term_zahl(x): return x[0] + x[2]
@parser_generator.production('factor : term')
def term_zahl(x): return x[0]
@parser_generator.production('factor : factor EXP term')
def term_zahl(x): return x[0] ** x[2]
@parser_generator.production('factor : factor DIV term')
def term_zahl(x): return x[0] / x[2]
@parser_generator.production('factor : factor MOD term')
def term_zahl(x): return x[0] % x[2]
@parser_generator.production('factor : factor MUL term')
def term_zahl(x): return x[0] * x[2]
@parser_generator.production('term : NUM')
def term_zahl(x): return float(x[0].getstr())
@parser_generator.production('term : BR_O expr BR_C')
def term_zahl(x): return x[1]
@parser_generator.production('term : ABS_P expr ABS_P')
def term_zahl(x): return x[0] if x[0] >= 0 else x[0] * -1
return parser_generator.build()
lexer = build_lexer()
parser = build_parser()
| true | true |
f71fb63636601da0239ffd402fc4be7612c8b4ab | 4,587 | py | Python | ca_municipalities/people.py | dogooderapp/scrapers-ca | 5e852eea93f05f3f397eec318d3094a3b1b0b458 | [
"MIT"
] | 19 | 2015-05-26T03:18:50.000Z | 2022-01-31T03:27:41.000Z | ca_municipalities/people.py | dogooderapp/scrapers-ca | 5e852eea93f05f3f397eec318d3094a3b1b0b458 | [
"MIT"
] | 119 | 2015-01-09T06:09:35.000Z | 2022-01-20T23:05:05.000Z | ca_municipalities/people.py | dogooderapp/scrapers-ca | 5e852eea93f05f3f397eec318d3094a3b1b0b458 | [
"MIT"
] | 17 | 2015-11-23T05:00:10.000Z | 2021-09-15T16:03:33.000Z | from utils import CSVScraper, CanadianPerson as Person
from pupa.scrape import Organization, Post
from collections import defaultdict
import re
class CanadaMunicipalitiesPersonScraper(CSVScraper):
    """Scrapes municipal council memberships from a shared Google Sheets CSV.

    Fix versus the original: the string "Returns whether the row should be
    imported." sat at class level as a stray expression; it is now the
    docstring of ``is_valid_row``, which it describes.
    """

    # Published CSV export of the source spreadsheet.
    csv_url = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vRrGXQy8qk16OhuTjlccoGB4jL5e8X1CEqRbg896ufLdh67DQk9nuGm-oufIT0HRMPEnwePw2HDx1Vj/pub?gid=0&single=true&output=csv'
    encoding = 'utf-8'

    def is_valid_row(self, row):
        """Returns whether the row should be imported."""
        return super().is_valid_row(row) and row['organization']

    def scrape(self):
        """Generator yielding Organization, Post and Person objects per CSV row."""
        organizations = {}
        # role -> district -> running seat count, for multi-seat districts.
        seat_numbers = defaultdict(lambda: defaultdict(int))

        reader = self.csv_reader(self.csv_url, delimiter=self.delimiter, header=True, encoding=self.encoding, skip_rows=self.skip_rows)
        reader.fieldnames = [self.header_converter(field) for field in reader.fieldnames]
        for row in reader:
            # NOTE(review): the broad except below deliberately keeps the scrape
            # best-effort — a bad row is printed and skipped, not fatal.
            try:
                if self.is_valid_row(row):
                    # Apply per-column corrections (either a mapping of bad
                    # values or a callable transform).
                    for key, corrections in self.corrections.items():
                        if not isinstance(corrections, dict):
                            row[key] = corrections(row[key])
                        elif row[key] in corrections:
                            row[key] = corrections[row[key]]

                    organization_classification = 'legislature'
                    organization_name = row['organization']
                    organization_key = organization_name.lower()
                    # Yield each organization only once, keyed case-insensitively.
                    if organization_key in organizations:
                        organization = organizations[organization_key]
                    else:
                        organization = Organization(organization_name, classification=organization_classification)
                        organization.add_source(self.csv_url)
                        yield organization
                        organizations[organization_key] = organization

                    if not row['primary role']:
                        row['primary role'] = 'Councillor'
                    role = row['primary role']

                    post = Post(role=role, label=organization_name, organization_id=organization._id)
                    yield post

                    name = row['name'].strip(' .,')
                    district = row['district name']
                    # Disambiguate multi-member districts by appending a seat number.
                    if self.many_posts_per_area and role not in self.unique_roles:
                        seat_numbers[role][district] += 1
                        district = '{} (seat {})'.format(district, seat_numbers[role][district])

                    p = Person(primary_org=organization_classification, name=name, district=district, role=role, party=row.get('party name'))
                    p.add_source(self.csv_url)

                    # Optional attributes — set only when the column is present and non-empty.
                    if row.get('gender'):
                        p.gender = row['gender']
                    if row.get('photo url'):
                        p.image = row['photo url']
                    if row.get('source url'):
                        p.add_source(row['source url'].strip(' .,'))
                    if row.get('website'):
                        p.add_link(row['website'], note='web site')
                    if row.get('facebook'):
                        # Strip tracking fragments/query strings from Facebook URLs.
                        p.add_link(re.sub(r'[#?].+', '', row['facebook']))
                    if row.get('twitter'):
                        p.add_link(row['twitter'])
                    if row['email']:
                        p.add_contact('email', row['email'].strip(' .,'))
                    if row['address']:
                        p.add_contact('address', row['address'], 'legislature')
                    if row.get('phone'):
                        p.add_contact('voice', row['phone'], 'legislature')
                    if row.get('fax'):
                        p.add_contact('fax', row['fax'], 'legislature')
                    if row.get('cell'):
                        p.add_contact('cell', row['cell'], 'legislature')
                    if row.get('birth date'):
                        p.birth_date = row['birth date']
                    if row.get('incumbent'):
                        p.extras['incumbent'] = row['incumbent']
                    if name in self.other_names:
                        for other_name in self.other_names[name]:
                            p.add_name(other_name)

                    # Validate person entity so that we can catch the exception if needed.
                    p.validate()
                    yield p
            except Exception as e:
                print(repr(e))
                continue
| 42.869159 | 176 | 0.514716 | from utils import CSVScraper, CanadianPerson as Person
from pupa.scrape import Organization, Post
from collections import defaultdict
import re
class CanadaMunicipalitiesPersonScraper(CSVScraper):
csv_url = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vRrGXQy8qk16OhuTjlccoGB4jL5e8X1CEqRbg896ufLdh67DQk9nuGm-oufIT0HRMPEnwePw2HDx1Vj/pub?gid=0&single=true&output=csv'
encoding = 'utf-8'
def is_valid_row(self, row):
return super().is_valid_row(row) and row['organization']
def scrape(self):
organizations = {}
seat_numbers = defaultdict(lambda: defaultdict(int))
reader = self.csv_reader(self.csv_url, delimiter=self.delimiter, header=True, encoding=self.encoding, skip_rows=self.skip_rows)
reader.fieldnames = [self.header_converter(field) for field in reader.fieldnames]
for row in reader:
try:
if self.is_valid_row(row):
for key, corrections in self.corrections.items():
if not isinstance(corrections, dict):
row[key] = corrections(row[key])
elif row[key] in corrections:
row[key] = corrections[row[key]]
organization_classification = 'legislature'
organization_name = row['organization']
organization_key = organization_name.lower()
if organization_key in organizations:
organization = organizations[organization_key]
else:
organization = Organization(organization_name, classification=organization_classification)
organization.add_source(self.csv_url)
yield organization
organizations[organization_key] = organization
if not row['primary role']:
row['primary role'] = 'Councillor'
role = row['primary role']
post = Post(role=role, label=organization_name, organization_id=organization._id)
yield post
name = row['name'].strip(' .,')
district = row['district name']
if self.many_posts_per_area and role not in self.unique_roles:
seat_numbers[role][district] += 1
district = '{} (seat {})'.format(district, seat_numbers[role][district])
p = Person(primary_org=organization_classification, name=name, district=district, role=role, party=row.get('party name'))
p.add_source(self.csv_url)
if row.get('gender'):
p.gender = row['gender']
if row.get('photo url'):
p.image = row['photo url']
if row.get('source url'):
p.add_source(row['source url'].strip(' .,'))
if row.get('website'):
p.add_link(row['website'], note='web site')
if row.get('facebook'):
p.add_link(re.sub(r'[#?].+', '', row['facebook']))
if row.get('twitter'):
p.add_link(row['twitter'])
if row['email']:
p.add_contact('email', row['email'].strip(' .,'))
if row['address']:
p.add_contact('address', row['address'], 'legislature')
if row.get('phone'):
p.add_contact('voice', row['phone'], 'legislature')
if row.get('fax'):
p.add_contact('fax', row['fax'], 'legislature')
if row.get('cell'):
p.add_contact('cell', row['cell'], 'legislature')
if row.get('birth date'):
p.birth_date = row['birth date']
if row.get('incumbent'):
p.extras['incumbent'] = row['incumbent']
if name in self.other_names:
for other_name in self.other_names[name]:
p.add_name(other_name)
p.validate()
yield p
except Exception as e:
print(repr(e))
continue
| true | true |
f71fb66f5197af2f7a2dd9fb62e51560772987ee | 398 | py | Python | tests/connection/test_cursor.py | coverwallet/pysoni | 49d3a8acb101436ad0724749572be2ad9d86f3ae | [
"MIT"
] | 5 | 2019-07-08T15:38:06.000Z | 2022-03-24T20:36:19.000Z | tests/connection/test_cursor.py | coverwallet/pysoni | 49d3a8acb101436ad0724749572be2ad9d86f3ae | [
"MIT"
] | 2 | 2019-07-07T23:26:32.000Z | 2020-06-04T07:43:24.000Z | tests/connection/test_cursor.py | coverwallet/pysoni | 49d3a8acb101436ad0724749572be2ad9d86f3ae | [
"MIT"
] | 1 | 2019-05-31T09:11:22.000Z | 2019-05-31T09:11:22.000Z | def test_cursor_triggers_cursor_in_the_connection(open_connection):
open_connection.cursor()
open_connection._connection_handler.cursor.assert_called_once()
def test_cursor_returns_a_cursor_in_the_handler(open_connection, mocker):
cursor_mock = mocker.Mock()
open_connection._connection_handler.cursor.return_value = cursor_mock
assert open_connection.cursor() == cursor_mock | 39.8 | 73 | 0.831658 | def test_cursor_triggers_cursor_in_the_connection(open_connection):
open_connection.cursor()
open_connection._connection_handler.cursor.assert_called_once()
def test_cursor_returns_a_cursor_in_the_handler(open_connection, mocker):
cursor_mock = mocker.Mock()
open_connection._connection_handler.cursor.return_value = cursor_mock
assert open_connection.cursor() == cursor_mock | true | true |
f71fb695ceaa12f53778fae43e8d0268e9cde5f9 | 3,235 | py | Python | server/config/settings.py | sudosubin/playground-gunicorn | 770b2db062446e47a92b37fd3488f0e657157293 | [
"MIT"
] | null | null | null | server/config/settings.py | sudosubin/playground-gunicorn | 770b2db062446e47a92b37fd3488f0e657157293 | [
"MIT"
] | null | null | null | server/config/settings.py | sudosubin/playground-gunicorn | 770b2db062446e47a92b37fd3488f0e657157293 | [
"MIT"
] | null | null | null | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 4.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-n1vl4be=11s&5oo0^453rw&9(g3v0pjb6=t4ze@d_3j4i3)y+y'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Host/domain names this site may serve; must be populated before deploying
# with DEBUG = False.
ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'server.config.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# Dotted path to the WSGI callable used by runserver and production servers.
WSGI_APPLICATION = 'server.config.wsgi.application'


# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}


# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/

STATIC_URL = 'static/'

# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 26.08871 | 91 | 0.703246 |
from pathlib import Path
# Project root: two levels up from this settings module.
BASE_DIR = Path(__file__).resolve().parent.parent
# NOTE(review): hard-coded "django-insecure-" key and DEBUG=True are the
# startproject defaults — fine for development only; move the key to an
# environment variable and disable DEBUG before any deployment.
SECRET_KEY = 'django-insecure-n1vl4be=11s&5oo0^453rw&9(g3v0pjb6=t4ze@d_3j4i3)y+y'
DEBUG = True
# Empty list is acceptable while DEBUG is True; must be populated in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
# Middleware chain — order is significant.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'server.config.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'server.config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| true | true |
f71fb6ffafd818eedc205dc12f215cb79fa5ad0e | 680 | py | Python | sa/profiles/Vyatta/Vyatta/get_capabilities.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | sa/profiles/Vyatta/Vyatta/get_capabilities.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | sa/profiles/Vyatta/Vyatta/get_capabilities.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Vyatta.Vyatta.get_capabilities
# ---------------------------------------------------------------------
# Copyright (C) 2007-2015 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.sa.profiles.Generic.get_capabilities import Script as BaseScript
class Script(BaseScript):
    """Capability discovery script for Vyatta devices."""

    name = "Vyatta.Vyatta.get_capabilities"

    def has_lldp_cli(self):
        """Return True when LLDP is enabled on the box.

        The CLI prints "LLDP not configured" when the feature is off;
        any other output is taken to mean LLDP is active.
        """
        return "LLDP not configured" not in self.cli("show lldp neighbors")
| 30.909091 | 73 | 0.457353 |
from noc.sa.profiles.Generic.get_capabilities import Script as BaseScript
class Script(BaseScript):
    # Probes a Vyatta device for supported capabilities (see Generic base).
    name = "Vyatta.Vyatta.get_capabilities"
    def has_lldp_cli(self):
        """Return True if LLDP is enabled: the CLI replies with
        "LLDP not configured" when the feature is disabled."""
        r = self.cli("show lldp neighbors")
        return "LLDP not configured" not in r
| true | true |
f71fb8631c4b145396e2bba66374e05637da08a5 | 130 | py | Python | db_utils.py | bizmarcin/thefridge | 13bde29a57aea09fecf5ec2f28ce013adf6c4d08 | [
"MIT"
] | 1 | 2019-07-01T13:04:02.000Z | 2019-07-01T13:04:02.000Z | db_utils.py | bizmarcin/thefridge | 13bde29a57aea09fecf5ec2f28ce013adf6c4d08 | [
"MIT"
] | 2 | 2020-07-17T09:05:59.000Z | 2021-05-09T06:42:06.000Z | db_utils.py | bizmarcin/thefridge | 13bde29a57aea09fecf5ec2f28ce013adf6c4d08 | [
"MIT"
] | null | null | null | import sqlite3
def get_connection(db_path='fridge.db'):
    """Open a SQLite connection with dict-like row access.

    Parameters:
        db_path (str): path of the database file.  Defaults to the
            application's ``fridge.db`` so existing callers are unaffected;
            the parameter makes the helper reusable (e.g. ``":memory:"``
            in tests).

    Returns:
        sqlite3.Connection: connection whose rows support access by column
        name (``row['column']``) thanks to ``sqlite3.Row``.
    """
    conn = sqlite3.connect(db_path)
    conn.row_factory = sqlite3.Row
    return conn
| 16.25 | 39 | 0.7 | import sqlite3
def get_connection():
    """Return a connection to ``fridge.db`` whose rows allow access by
    column name (``sqlite3.Row`` factory)."""
    connection = sqlite3.connect('fridge.db')
    connection.row_factory = sqlite3.Row
    return connection
| true | true |
f71fb9c25aa2d31e0378e8ac7911871707f58f10 | 6,328 | py | Python | tests/components/switch/test_command_line.py | beschouten/home-assistant | f50c30bbbad4d92e342c8547630c63c0c7882803 | [
"MIT"
] | 1 | 2016-07-14T05:20:54.000Z | 2016-07-14T05:20:54.000Z | tests/components/switch/test_command_line.py | beschouten/home-assistant | f50c30bbbad4d92e342c8547630c63c0c7882803 | [
"MIT"
] | null | null | null | tests/components/switch/test_command_line.py | beschouten/home-assistant | f50c30bbbad4d92e342c8547630c63c0c7882803 | [
"MIT"
] | 1 | 2018-11-22T13:55:23.000Z | 2018-11-22T13:55:23.000Z | """the tests for the Command line switch platform."""
import json
import os
import tempfile
import unittest
from homeassistant.const import STATE_ON, STATE_OFF
import homeassistant.components.switch as switch
import homeassistant.components.switch.command_line as command_line
from tests.common import get_test_home_assistant
class TestCommandSwitch(unittest.TestCase):
    """Test the command switch."""
    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()
    def test_state_none(self):
        """Test with none state."""
        # No state command configured: the asserted behaviour is that the
        # reported state simply follows the last on/off command issued.
        with tempfile.TemporaryDirectory() as tempdirname:
            path = os.path.join(tempdirname, 'switch_status')
            test_switch = {
                'oncmd': 'echo 1 > {}'.format(path),
                'offcmd': 'echo 0 > {}'.format(path),
            }
            self.assertTrue(switch.setup(self.hass, {
                'switch': {
                    'platform': 'command_line',
                    'switches': {
                        'test': test_switch
                    }
                }
            }))
            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_OFF, state.state)
            switch.turn_on(self.hass, 'switch.test')
            # block_till_done flushes the service call before reading state
            self.hass.pool.block_till_done()
            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_ON, state.state)
            switch.turn_off(self.hass, 'switch.test')
            self.hass.pool.block_till_done()
            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_OFF, state.state)
    def test_state_value(self):
        """Test with state value."""
        # State is derived from the state command's stdout via value_template.
        with tempfile.TemporaryDirectory() as tempdirname:
            path = os.path.join(tempdirname, 'switch_status')
            test_switch = {
                'statecmd': 'cat {}'.format(path),
                'oncmd': 'echo 1 > {}'.format(path),
                'offcmd': 'echo 0 > {}'.format(path),
                'value_template': '{{ value=="1" }}'
            }
            self.assertTrue(switch.setup(self.hass, {
                'switch': {
                    'platform': 'command_line',
                    'switches': {
                        'test': test_switch
                    }
                }
            }))
            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_OFF, state.state)
            switch.turn_on(self.hass, 'switch.test')
            self.hass.pool.block_till_done()
            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_ON, state.state)
            switch.turn_off(self.hass, 'switch.test')
            self.hass.pool.block_till_done()
            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_OFF, state.state)
    def test_state_json_value(self):
        """Test with state JSON value."""
        # Same as above but parsing the command output as JSON
        # (value_json in the template).
        with tempfile.TemporaryDirectory() as tempdirname:
            path = os.path.join(tempdirname, 'switch_status')
            oncmd = json.dumps({'status': 'ok'})
            offcmd = json.dumps({'status': 'nope'})
            test_switch = {
                'statecmd': 'cat {}'.format(path),
                'oncmd': 'echo \'{}\' > {}'.format(oncmd, path),
                'offcmd': 'echo \'{}\' > {}'.format(offcmd, path),
                'value_template': '{{ value_json.status=="ok" }}'
            }
            self.assertTrue(switch.setup(self.hass, {
                'switch': {
                    'platform': 'command_line',
                    'switches': {
                        'test': test_switch
                    }
                }
            }))
            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_OFF, state.state)
            switch.turn_on(self.hass, 'switch.test')
            self.hass.pool.block_till_done()
            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_ON, state.state)
            switch.turn_off(self.hass, 'switch.test')
            self.hass.pool.block_till_done()
            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_OFF, state.state)
    def test_state_code(self):
        """Test with state code."""
        # State command but no value_template: the state is taken from the
        # command's exit status, not its output.
        with tempfile.TemporaryDirectory() as tempdirname:
            path = os.path.join(tempdirname, 'switch_status')
            test_switch = {
                'statecmd': 'cat {}'.format(path),
                'oncmd': 'echo 1 > {}'.format(path),
                'offcmd': 'echo 0 > {}'.format(path),
            }
            self.assertTrue(switch.setup(self.hass, {
                'switch': {
                    'platform': 'command_line',
                    'switches': {
                        'test': test_switch
                    }
                }
            }))
            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_OFF, state.state)
            switch.turn_on(self.hass, 'switch.test')
            self.hass.pool.block_till_done()
            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_ON, state.state)
            switch.turn_off(self.hass, 'switch.test')
            self.hass.pool.block_till_done()
            state = self.hass.states.get('switch.test')
            # NOTE: still ON after turn_off — presumably `cat` keeps exiting
            # successfully (code 0), which the platform reports as ON.
            self.assertEqual(STATE_ON, state.state)
    def test_assumed_state_should_be_true_if_command_state_is_false(self):
        """Test that assumed_state is True when no state command is set."""
        self.hass = get_test_home_assistant()
        # Set state command to false
        statecmd = False
        no_state_device = command_line.CommandSwitch(self.hass, "Test", "echo",
                                                     "echo", statecmd, None)
        self.assertTrue(no_state_device.assumed_state)
        # Set state command
        statecmd = 'cat {}'
        state_device = command_line.CommandSwitch(self.hass, "Test", "echo",
                                                  "echo", statecmd, None)
        self.assertFalse(state_device.assumed_state)
| 35.751412 | 79 | 0.533028 | import json
import os
import tempfile
import unittest
from homeassistant.const import STATE_ON, STATE_OFF
import homeassistant.components.switch as switch
import homeassistant.components.switch.command_line as command_line
from tests.common import get_test_home_assistant
class TestCommandSwitch(unittest.TestCase):
def setUp(self):
self.hass = get_test_home_assistant()
def tearDown(self):
self.hass.stop()
def test_state_none(self):
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'switch_status')
test_switch = {
'oncmd': 'echo 1 > {}'.format(path),
'offcmd': 'echo 0 > {}'.format(path),
}
self.assertTrue(switch.setup(self.hass, {
'switch': {
'platform': 'command_line',
'switches': {
'test': test_switch
}
}
}))
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
switch.turn_on(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
switch.turn_off(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
def test_state_value(self):
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'switch_status')
test_switch = {
'statecmd': 'cat {}'.format(path),
'oncmd': 'echo 1 > {}'.format(path),
'offcmd': 'echo 0 > {}'.format(path),
'value_template': '{{ value=="1" }}'
}
self.assertTrue(switch.setup(self.hass, {
'switch': {
'platform': 'command_line',
'switches': {
'test': test_switch
}
}
}))
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
switch.turn_on(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
switch.turn_off(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
def test_state_json_value(self):
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'switch_status')
oncmd = json.dumps({'status': 'ok'})
offcmd = json.dumps({'status': 'nope'})
test_switch = {
'statecmd': 'cat {}'.format(path),
'oncmd': 'echo \'{}\' > {}'.format(oncmd, path),
'offcmd': 'echo \'{}\' > {}'.format(offcmd, path),
'value_template': '{{ value_json.status=="ok" }}'
}
self.assertTrue(switch.setup(self.hass, {
'switch': {
'platform': 'command_line',
'switches': {
'test': test_switch
}
}
}))
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
switch.turn_on(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
switch.turn_off(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
def test_state_code(self):
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'switch_status')
test_switch = {
'statecmd': 'cat {}'.format(path),
'oncmd': 'echo 1 > {}'.format(path),
'offcmd': 'echo 0 > {}'.format(path),
}
self.assertTrue(switch.setup(self.hass, {
'switch': {
'platform': 'command_line',
'switches': {
'test': test_switch
}
}
}))
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
switch.turn_on(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
switch.turn_off(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
def test_assumed_state_should_be_true_if_command_state_is_false(self):
self.hass = get_test_home_assistant()
statecmd = False
no_state_device = command_line.CommandSwitch(self.hass, "Test", "echo",
"echo", statecmd, None)
self.assertTrue(no_state_device.assumed_state)
statecmd = 'cat {}'
state_device = command_line.CommandSwitch(self.hass, "Test", "echo",
"echo", statecmd, None)
self.assertFalse(state_device.assumed_state)
| true | true |
f71fb9e8ec48b2f9258997378e50488a34fc3cb5 | 2,033 | py | Python | scripts/utils/connection.py | CostaDiego/product-complaint-classification | 42d44210553577616dcf8ac4bf616b587fa02e8c | [
"MIT"
] | null | null | null | scripts/utils/connection.py | CostaDiego/product-complaint-classification | 42d44210553577616dcf8ac4bf616b587fa02e8c | [
"MIT"
] | null | null | null | scripts/utils/connection.py | CostaDiego/product-complaint-classification | 42d44210553577616dcf8ac4bf616b587fa02e8c | [
"MIT"
] | null | null | null | import psycopg2
from getpass import getpass
class DatabaseConection(object):
    """Thin wrapper around a psycopg2 connection.

    The password is requested interactively via ``getpass`` every time a
    connection is (re-)established.  ``self.connected`` mirrors whether the
    last connection attempt succeeded.
    """

    def __init__(self, host: str, database: str, user: str):
        """Store the connection parameters and attempt to connect at once."""
        self._con = None
        self._host = host
        self._database = database
        self._user = user
        self.connected = False
        self._establish()

    def _establish(self):
        """Open the psycopg2 connection, prompting for the password.

        Updates ``self._con`` and ``self.connected`` and prints the outcome.
        """
        try:
            self._con = psycopg2.connect(
                host=self._host,
                database=self._database,
                user=self._user,
                password=getpass(
                    prompt='Input the password:\n',
                    stream=None
                ))
            self.connected = True
            print('\tConnection established!')
        # FIX: was a bare `except:` which also swallowed SystemExit /
        # KeyboardInterrupt; any ordinary failure still means "not connected".
        except Exception:
            self.connected = False
            print('\tFailed to establish connection!')

    def send(self, sql: str):
        """Execute *sql* and commit; return True on success, False otherwise."""
        try:
            cursor = self._con.cursor()
            cursor.execute(str(sql))
            self._con.commit()
            return True
        except Exception:
            return False

    def request(self, sql: str):
        """Execute *sql* and return ``fetchall()`` rows, or None on failure."""
        try:
            cursor = self._con.cursor()
            cursor.execute(str(sql))
            return cursor.fetchall()
        except Exception:
            return None

    def closeConnection(self):
        """Close the underlying connection and mark the wrapper disconnected."""
        self._con.close()
        self.connected = False

    def connect(self, host=None, database=None, user=None):
        """Re-connect, optionally overriding any stored parameter."""
        if host:
            self._host = host
        if database:
            self._database = database
        if user:
            self._user = user
        self._establish()
from getpass import getpass
class DatabaseConection(object):
    """psycopg2 connection wrapper; prompts for the password via getpass."""
    def __init__(self, host: str, database: str, user: str):
        """Store connection parameters and try to connect immediately."""
        self._con = None
        self._host = host
        self._database = database
        self._user = user
        self.connected = False
        try:
            self._con = psycopg2.connect(
                host= self._host,
                database= self._database,
                user= self._user,
                password = getpass(
                    prompt= 'Input the password:\n',
                    stream= None
                ))
            self.connected = True
            print('\tConnection established!')
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
        except:
            print('\tFailed to establish connection!')
    def send(self, sql: str):
        """Execute *sql* and commit; True on success, False on any failure."""
        try:
            cursor = self._con.cursor()
            cursor.execute(str(sql))
            self._con.commit()
            return True
        except:
            return False
    def request(self, sql: str):
        """Execute *sql* and return fetchall() rows, or None on failure."""
        try:
            cursor = self._con.cursor()
            cursor.execute(str(sql))
            request = cursor.fetchall()
            return request
        except:
            return None
    def closeConnection(self):
        """Close the connection and mark the wrapper as disconnected."""
        self._con.close()
        self.connected = False
    def connect(self, host = None, database = None, user = None):
        """Re-connect, optionally overriding stored host/database/user."""
        if host:
            self._host = host
        if database:
            self._database = database
        if user:
            self._user = user
        try:
            self._con = psycopg2.connect(
                host= self._host,
                database= self._database,
                user= self._user,
                password = getpass(
                    prompt= 'Input the password:\n',
                    stream= None
                ))
            self.connected = True
            print('\tConnection established!')
        except:
            self.connected = False
            print('\tFailed to establish connection!')
f71fbb8b5e0dc57ce6f0bd235b70ff2b45cd4410 | 4,913 | py | Python | ilustrado/util.py | ml-evs/ilustrado | 3121ecaff9cb517f3946b2283bf50dce499caad9 | [
"MIT"
] | 3 | 2019-10-31T20:54:55.000Z | 2022-01-05T16:39:43.000Z | ilustrado/util.py | ml-evs/ilustrado | 3121ecaff9cb517f3946b2283bf50dce499caad9 | [
"MIT"
] | null | null | null | ilustrado/util.py | ml-evs/ilustrado | 3121ecaff9cb517f3946b2283bf50dce499caad9 | [
"MIT"
] | 2 | 2019-11-29T11:34:11.000Z | 2020-08-12T12:31:48.000Z | # coding: utf-8
""" Catch-all file for utility functions.
"""
import sys
import logging
import numpy as np
from matador.compute import ComputeTask
from matador.utils.cell_utils import cart2frac, cart2abc
LOG = logging.getLogger("ilustrado")
LOG.setLevel(logging.DEBUG)
def strip_useless(doc, to_run=False):
    """ Return a copy of a matador doc containing only whitelisted keys.

    Parameters:
        doc (dict): structure to strip information from.

    Arguments:
        to_run (bool): if True, keep only the fields needed to (re)run the
            structure, dropping results from any previous run.

    Returns:
        dict: matador document stripped of useless keys, with numpy arrays
            converted to plain lists.
    """
    run_keys = (
        "source",
        "parents",
        "mutations",
        "elems",
        "stoichiometry",
        "lattice_abc",
        "lattice_cart",
        "positions_frac",
        "num_atoms",
        "atom_types",
    )
    result_keys = (
        "source",
        "parents",
        "mutations",
        "elems",
        "stoichiometry",
        "lattice_abc",
        "lattice_cart",
        "cell_volume",
        "space_group",
        "positions_frac",
        "num_atoms",
        "atom_types",
        "enthalpy",
        "enthalpy_per_atom",
        "total_energy",
        "total_energy_per_atom",
        "pressure",
        "max_force_on_atom",
        "optimised",
        "date",
        "total_time_hrs",
        "peak_mem_MB",
    )
    wanted = run_keys if to_run else result_keys
    stripped_doc = {}
    for key in wanted:
        if key in doc:
            value = doc[key]
            # keep the result JSON-friendly: arrays become nested lists
            stripped_doc[key] = value.tolist() if isinstance(value, np.ndarray) else value
    return stripped_doc
class FakeComputeTask(ComputeTask):
    """ Fake Relaxer for testing, mirroring the parameters of the real
    matador.compute.ComputeTask.
    """

    def __init__(self, *args, **kwargs):
        """Keep only the structure and the queue used to report results."""
        self.structure = kwargs["res"]
        self.output_queue = kwargs["output_queue"]

    def relax(self):
        """Pretend to relax: burn a little CPU, then fabricate results."""
        crunch_numbers = True
        if crunch_numbers:
            # random-sized eigendecomposition stands in for real work
            dim = np.random.randint(low=3, high=50)
            np.linalg.eig(np.random.rand(dim, dim))
        fake_enthalpy_per_atom = -505 + np.random.rand()
        self.structure["enthalpy_per_atom"] = fake_enthalpy_per_atom
        self.structure["enthalpy"] = fake_enthalpy_per_atom * self.structure["num_atoms"]
        # report success for ~80% of the fake relaxations
        self.structure["optimised"] = bool(np.random.rand() < 0.8)
        self.output_queue.put(self.structure)
class NewbornProcess:
    """ Simple container of process data.

    Bundles the identifier of a newborn structure with the node and
    process handling its relaxation (``ncores`` is optional).
    """

    def __init__(self, newborn_id, node, process, ncores=None):
        self.newborn_id = newborn_id
        self.node = node
        self.process = process
        self.ncores = ncores

    def __repr__(self):
        # debugging aid: show all bookkeeping fields at a glance
        return (
            f"{type(self).__name__}(newborn_id={self.newborn_id!r}, "
            f"node={self.node!r}, process={self.process!r}, ncores={self.ncores!r})"
        )
class AseRelaxation:
    """ Perform a variable cell relaxation with ASE,
    using a predefined calculator.
    """
    def __init__(self, doc, queue, calculator=None):
        """ Initialise a relaxation with ASE.
        Parameters:
            doc (dict): the structure to optimise.
            queue (mp.Queue): the queue to push the result to.
        Keyword arguments:
            calculator (ase.Calculator): the calculator object
                to use for force/energy computation. Default is
                LennardJones.
        """
        # local imports keep ase an optional dependency of the module
        from copy import deepcopy
        from matador.utils.viz_utils import doc2ase
        from ase.constraints import UnitCellFilter
        if calculator is None:
            from ase.calculators.lj import LennardJones
            self.calc = LennardJones()
        else:
            self.calc = calculator
        self.doc = deepcopy(doc)
        self.atoms = doc2ase(doc)
        # NOTE(review): set_calculator is deprecated in newer ase in favour
        # of `atoms.calc = ...` — confirm the targeted ase version
        self.atoms.set_calculator(self.calc)
        # UnitCellFilter exposes cell degrees of freedom to the optimiser
        self.ucf = UnitCellFilter(self.atoms)
        self.queue = queue
    def relax(self):
        """Run LBFGS (<=100 steps), write results into the doc and push it
        onto the queue; `optimised` records whether fmax=0.05 was reached."""
        from ase.optimize import LBFGS
        # NOTE(review): saves sys.__stdout__ and re-assigns sys.stdout at the
        # end although nothing here redirects stdout — looks like leftover
        # from earlier output-silencing; confirm before removing
        cached = sys.__stdout__
        try:
            optimizer = LBFGS(self.ucf)
            optimizer.logfile = None
            optimised = optimizer.run(fmax=0.05, steps=100)
        except Exception:
            optimised = False
        self.doc["optimised"] = bool(optimised)
        self.doc["positions_abs"] = self.atoms.get_positions().tolist()
        self.doc["lattice_cart"] = self.atoms.get_cell().tolist()
        self.doc["lattice_abc"] = cart2abc(self.doc["lattice_cart"])
        self.doc["positions_frac"] = cart2frac(self.doc["lattice_cart"], self.doc["positions_abs"])
        self.doc["enthalpy_per_atom"] = float(self.calc.results["energy"] / len(
            self.doc["atom_types"]
        ))
        self.doc["enthalpy"] = float(self.calc.results["energy"])
        self.queue.put(self.doc)
        sys.stdout = cached
| 28.730994 | 102 | 0.574802 |
import sys
import logging
import numpy as np
from matador.compute import ComputeTask
from matador.utils.cell_utils import cart2frac, cart2abc
LOG = logging.getLogger("ilustrado")
LOG.setLevel(logging.DEBUG)
def strip_useless(doc, to_run=False):
    """Copy only the whitelisted matador keys out of *doc*.

    Arguments:
        doc (dict): structure document to filter.
        to_run (bool): when True, keep only what is needed to (re)run the
            structure (no results from a previous run).

    Returns:
        dict: filtered document with numpy arrays converted to lists.
    """
    if to_run:
        keys = [
            "source",
            "parents",
            "mutations",
            "elems",
            "stoichiometry",
            "lattice_abc",
            "lattice_cart",
            "positions_frac",
            "num_atoms",
            "atom_types",
        ]
    else:
        keys = [
            "source",
            "parents",
            "mutations",
            "elems",
            "stoichiometry",
            "lattice_abc",
            "lattice_cart",
            "cell_volume",
            "space_group",
            "positions_frac",
            "num_atoms",
            "atom_types",
            "enthalpy",
            "enthalpy_per_atom",
            "total_energy",
            "total_energy_per_atom",
            "pressure",
            "max_force_on_atom",
            "optimised",
            "date",
            "total_time_hrs",
            "peak_mem_MB",
        ]
    stripped_doc = {key: doc[key] for key in keys if key in doc}
    # normalise numpy arrays to JSON-friendly nested lists
    for key, value in stripped_doc.items():
        if isinstance(value, np.ndarray):
            stripped_doc[key] = value.tolist()
    return stripped_doc
class FakeComputeTask(ComputeTask):
    """Fake relaxer for testing; mirrors the real ComputeTask interface."""
    def __init__(self, *args, **kwargs):
        """Keep only the structure and the result queue."""
        self.structure = kwargs["res"]
        self.output_queue = kwargs["output_queue"]
    def relax(self):
        """Burn a little CPU, fabricate energies, and push the result."""
        fake_number_crunch = True
        if fake_number_crunch:
            # random-sized eigendecomposition stands in for real work
            size = np.random.randint(low=3, high=50)
            array = np.random.rand(size, size)
            np.linalg.eig(array)
        self.structure["enthalpy_per_atom"] = -505 + np.random.rand()
        self.structure["enthalpy"] = self.structure["enthalpy_per_atom"] * self.structure["num_atoms"]
        # ~80% of fake relaxations succeed
        if np.random.rand() < 0.8:
            self.structure["optimised"] = True
        else:
            self.structure["optimised"] = False
        self.output_queue.put(self.structure)
class NewbornProcess:
    """Simple record tying a newborn structure id to its process/node."""

    def __init__(self, newborn_id, node, process, ncores=None):
        """Store the bookkeeping data for a spawned relaxation."""
        self.newborn_id = newborn_id
        self.ncores = ncores
        self.node = node
        self.process = process
class AseRelaxation:
    """Variable-cell relaxation with ASE using a predefined calculator."""
    def __init__(self, doc, queue, calculator=None):
        """Set up the relaxation.

        Args:
            doc (dict): structure to optimise.
            queue: queue the result document is pushed onto.
            calculator: ase calculator; defaults to LennardJones.
        """
        from copy import deepcopy
        from matador.utils.viz_utils import doc2ase
        from ase.constraints import UnitCellFilter
        if calculator is None:
            from ase.calculators.lj import LennardJones
            self.calc = LennardJones()
        else:
            self.calc = calculator
        self.doc = deepcopy(doc)
        self.atoms = doc2ase(doc)
        # NOTE(review): set_calculator is deprecated in newer ase versions
        self.atoms.set_calculator(self.calc)
        # expose cell degrees of freedom to the optimiser
        self.ucf = UnitCellFilter(self.atoms)
        self.queue = queue
    def relax(self):
        """Run LBFGS (<=100 steps), record results in the doc, push to queue."""
        from ase.optimize import LBFGS
        # NOTE(review): stdout save/restore with no redirect in between —
        # looks like leftover output-silencing code; confirm before removing
        cached = sys.__stdout__
        try:
            optimizer = LBFGS(self.ucf)
            optimizer.logfile = None
            optimised = optimizer.run(fmax=0.05, steps=100)
        except Exception:
            optimised = False
        self.doc["optimised"] = bool(optimised)
        self.doc["positions_abs"] = self.atoms.get_positions().tolist()
        self.doc["lattice_cart"] = self.atoms.get_cell().tolist()
        self.doc["lattice_abc"] = cart2abc(self.doc["lattice_cart"])
        self.doc["positions_frac"] = cart2frac(self.doc["lattice_cart"], self.doc["positions_abs"])
        self.doc["enthalpy_per_atom"] = float(self.calc.results["energy"] / len(
            self.doc["atom_types"]
        ))
        self.doc["enthalpy"] = float(self.calc.results["energy"])
        self.queue.put(self.doc)
        sys.stdout = cached
| true | true |
f71fbc58ba45ce332cf36e06106fa51f669a4b79 | 16,543 | py | Python | services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/base_scheduler.py | mguidon/osparc-simcore | 77e64777728f20a5b21362372aefa0e0db5072cd | [
"MIT"
] | null | null | null | services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/base_scheduler.py | mguidon/osparc-simcore | 77e64777728f20a5b21362372aefa0e0db5072cd | [
"MIT"
] | 29 | 2018-11-13T09:39:29.000Z | 2022-03-22T10:11:32.000Z | services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/base_scheduler.py | mguidon/osparc-simcore | 77e64777728f20a5b21362372aefa0e0db5072cd | [
"MIT"
] | null | null | null | """The scheduler shall be run as a background task.
Based on oSparc pipelines, it monitors when to start the next celery task(s), either one at a time or as a group of tasks.
In principle the Scheduler maintains the comp_runs table in the database.
It contains how the pipeline was run and by whom.
It also contains the final result of the pipeline run.
When a pipeline is scheduled first all the tasks contained in the DAG are set to PUBLISHED state.
Once the scheduler determines a task shall run, its state is set to PENDING, so that the sidecar can pick up the task.
The sidecar will then change the state to STARTED, then to SUCCESS or FAILED.
"""
import asyncio
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Callable, Dict, List, Optional, Set, Tuple, cast
import networkx as nx
from aiopg.sa.engine import Engine
from models_library.projects import ProjectID
from models_library.projects_nodes_io import NodeID
from models_library.projects_state import RunningState
from pydantic import PositiveInt
from ...core.errors import (
ComputationalBackendNotConnectedError,
InsuficientComputationalResourcesError,
InvalidPipelineError,
MissingComputationalResourcesError,
PipelineNotFoundError,
SchedulerError,
)
from ...models.domains.comp_pipelines import CompPipelineAtDB
from ...models.domains.comp_runs import CompRunsAtDB
from ...models.domains.comp_tasks import CompTaskAtDB, Image
from ...models.schemas.constants import ClusterID, UserID
from ...utils.computations import get_pipeline_state_from_task_states
from ...utils.scheduler import COMPLETED_STATES, Iteration, get_repository
from ..db.repositories.comp_pipelines import CompPipelinesRepository
from ..db.repositories.comp_runs import CompRunsRepository
from ..db.repositories.comp_tasks import CompTasksRepository
logger = logging.getLogger(__name__)
@dataclass
class ScheduledPipelineParams:
    """Scheduling parameters tracked per (user, project, iteration)."""
    # cluster the pipeline's tasks are dispatched to (0 = default cluster)
    cluster_id: ClusterID
    # flipped to True by stop_pipeline() to request cancellation on the
    # next scheduling pass
    mark_for_cancellation: bool = False
@dataclass
class BaseCompScheduler(ABC):
    """Abstract computational-pipeline scheduler.

    Tracks scheduled pipeline iterations in memory and drives them through
    the comp_runs/comp_tasks database tables; backend-specific start/stop
    behaviour is delegated to the abstract methods below.
    """

    # (user_id, project_id, iteration) -> its scheduling parameters
    scheduled_pipelines: Dict[
        Tuple[UserID, ProjectID, Iteration], ScheduledPipelineParams
    ]
    db_engine: Engine
    # presumably set by _wake_up_scheduler_now() to trigger an immediate
    # scheduling pass; cleared at the start of schedule_all_pipelines()
    wake_up_event: asyncio.Event = field(default_factory=asyncio.Event, init=False)
    default_cluster_id: ClusterID
async def run_new_pipeline(
self, user_id: UserID, project_id: ProjectID, cluster_id: ClusterID
) -> None:
"""Sets a new pipeline to be scheduled on the computational resources.
Passing cluster_id=0 will use the default cluster. Passing an existing ID will instruct
the scheduler to run the tasks on the defined cluster"""
# ensure the pipeline exists and is populated with something
dag = await self._get_pipeline_dag(project_id)
if not dag:
logger.warning(
"project %s has no computational dag defined. not scheduled for a run."
)
return
runs_repo: CompRunsRepository = get_repository(
self.db_engine, CompRunsRepository
) # type: ignore
new_run: CompRunsAtDB = await runs_repo.create(
user_id=user_id,
project_id=project_id,
cluster_id=cluster_id,
default_cluster_id=self.default_cluster_id,
)
self.scheduled_pipelines[
(user_id, project_id, new_run.iteration)
] = ScheduledPipelineParams(cluster_id=cluster_id)
# ensure the scheduler starts right away
self._wake_up_scheduler_now()
async def stop_pipeline(
self, user_id: UserID, project_id: ProjectID, iteration: Optional[int] = None
) -> None:
if not iteration:
# if no iteration given find the latest one in the list
possible_iterations = {
it
for u_id, p_id, it in self.scheduled_pipelines
if u_id == user_id and p_id == project_id
}
if not possible_iterations:
raise SchedulerError(
f"There are no pipeline scheduled for {user_id}:{project_id}"
)
iteration = max(possible_iterations)
# mark the scheduled pipeline for stopping
self.scheduled_pipelines[
(user_id, project_id, iteration)
].mark_for_cancellation = True
# ensure the scheduler starts right away
self._wake_up_scheduler_now()
async def schedule_all_pipelines(self) -> None:
self.wake_up_event.clear()
# if one of the task throws, the other are NOT cancelled which is what we want
await asyncio.gather(
*[
self._schedule_pipeline(
user_id,
project_id,
pipeline_params.cluster_id,
iteration,
pipeline_params.mark_for_cancellation,
)
for (
user_id,
project_id,
iteration,
), pipeline_params in self.scheduled_pipelines.items()
]
)
    async def reconnect_backend(self) -> None:
        """Public wrapper delegating to the backend-specific reconnect."""
        await self._reconnect_backend()
async def _get_pipeline_dag(self, project_id: ProjectID) -> nx.DiGraph:
comp_pipeline_repo: CompPipelinesRepository = get_repository(
self.db_engine, CompPipelinesRepository
) # type: ignore
pipeline_at_db: CompPipelineAtDB = await comp_pipeline_repo.get_pipeline(
project_id
)
pipeline_dag = pipeline_at_db.get_graph()
return pipeline_dag
async def _get_pipeline_tasks(
self, project_id: ProjectID, pipeline_dag: nx.DiGraph
) -> Dict[str, CompTaskAtDB]:
comp_tasks_repo: CompTasksRepository = get_repository(
self.db_engine, CompTasksRepository
) # type: ignore
pipeline_comp_tasks: Dict[str, CompTaskAtDB] = {
str(t.node_id): t
for t in await comp_tasks_repo.get_comp_tasks(project_id)
if (str(t.node_id) in list(pipeline_dag.nodes()))
}
if len(pipeline_comp_tasks) != len(pipeline_dag.nodes()):
raise InvalidPipelineError(
f"{project_id}"
f"The tasks defined for {project_id} do not contain all the tasks defined in the pipeline [{list(pipeline_dag.nodes)}]! Please check."
)
return pipeline_comp_tasks
    async def _update_run_result_from_tasks(
        self,
        user_id: UserID,
        project_id: ProjectID,
        iteration: PositiveInt,
        pipeline_tasks: Dict[str, CompTaskAtDB],
    ) -> RunningState:
        """Derive the pipeline state from its task states, persist it in
        comp_runs and return it."""
        pipeline_state_from_tasks: RunningState = get_pipeline_state_from_task_states(
            list(pipeline_tasks.values()),
        )
        await self._set_run_result(
            user_id, project_id, iteration, pipeline_state_from_tasks
        )
        return pipeline_state_from_tasks
    async def _set_run_result(
        self,
        user_id: UserID,
        project_id: ProjectID,
        iteration: PositiveInt,
        run_result: RunningState,
    ) -> None:
        """Persist *run_result* for the run; marks it final when the state
        is one of the COMPLETED_STATES."""
        comp_runs_repo: CompRunsRepository = get_repository(
            self.db_engine, CompRunsRepository
        )  # type: ignore
        await comp_runs_repo.set_run_result(
            user_id=user_id,
            project_id=project_id,
            iteration=iteration,
            result_state=run_result,
            final_state=(run_result in COMPLETED_STATES),
        )
    @abstractmethod
    async def _start_tasks(
        self,
        user_id: UserID,
        project_id: ProjectID,
        cluster_id: ClusterID,
        scheduled_tasks: Dict[NodeID, Image],
        callback: Callable[[], None],
    ) -> None:
        """Start the given tasks on the backend (implemented per backend)."""
        ...
    @abstractmethod
    async def _stop_tasks(self, tasks: List[CompTaskAtDB]) -> None:
        """Request the backend to abort the given tasks."""
        ...
    @abstractmethod
    async def _reconnect_backend(self) -> None:
        """Re-establish the connection to the computational backend."""
        ...
async def _schedule_pipeline(
self,
user_id: UserID,
project_id: ProjectID,
cluster_id: ClusterID,
iteration: PositiveInt,
marked_for_stopping: bool,
) -> None:
logger.debug(
"checking run of project [%s:%s] for user [%s]",
project_id,
iteration,
user_id,
)
pipeline_dag = nx.DiGraph()
pipeline_tasks: Dict[str, CompTaskAtDB] = {}
pipeline_result: RunningState = RunningState.UNKNOWN
# 1. Update the run states
try:
pipeline_dag = await self._get_pipeline_dag(project_id)
pipeline_tasks: Dict[str, CompTaskAtDB] = await self._get_pipeline_tasks(
project_id, pipeline_dag
)
# filter out the tasks that were already successfully completed
pipeline_dag.remove_nodes_from(
{
node_id
for node_id, t in pipeline_tasks.items()
if t.state == RunningState.SUCCESS
}
)
# find the tasks that need scheduling
tasks_to_schedule = [node_id for node_id, degree in pipeline_dag.in_degree() if degree == 0] # type: ignore
tasks_to_mark_as_aborted: Set[NodeID] = set()
tasks_to_start: Set[NodeID] = set()
for node_id in tasks_to_schedule:
if pipeline_tasks[str(node_id)].state == RunningState.FAILED:
tasks_to_mark_as_aborted.update(nx.bfs_tree(pipeline_dag, node_id))
tasks_to_mark_as_aborted.remove(
node_id
) # do not mark the failed one as aborted
if pipeline_tasks[str(node_id)].state == RunningState.PUBLISHED:
# the nodes that are published shall be started
tasks_to_start.add(node_id)
comp_tasks_repo: CompTasksRepository = cast(
CompTasksRepository, get_repository(self.db_engine, CompTasksRepository)
)
if tasks_to_mark_as_aborted:
await comp_tasks_repo.set_project_tasks_state(
project_id, list(tasks_to_mark_as_aborted), RunningState.ABORTED
)
# update the current states
for node_id in tasks_to_mark_as_aborted:
pipeline_tasks[f"{node_id}"].state = RunningState.ABORTED
# compute and update the current status of the run
pipeline_result = await self._update_run_result_from_tasks(
user_id, project_id, iteration, pipeline_tasks
)
except PipelineNotFoundError:
logger.warning(
"pipeline %s does not exist in comp_pipeline table, it will be removed from scheduler",
project_id,
)
pipeline_result = RunningState.ABORTED
await self._set_run_result(user_id, project_id, iteration, pipeline_result)
except InvalidPipelineError as exc:
logger.warning(
"pipeline %s appears to be misconfigured, it will be removed from scheduler. Please check pipeline:\n%s",
project_id,
exc,
)
pipeline_result = RunningState.ABORTED
await self._set_run_result(user_id, project_id, iteration, pipeline_result)
# 2. Are we finished??
if not pipeline_dag.nodes() or pipeline_result in COMPLETED_STATES:
# there is nothing left, the run is completed, we're done here
self.scheduled_pipelines.pop((user_id, project_id, iteration))
logger.info(
"pipeline %s scheduling completed with result %s",
project_id,
pipeline_result,
)
return
# 3. Are we stopping??
if marked_for_stopping:
# get any running task and stop them
comp_tasks_repo: CompTasksRepository = get_repository(
self.db_engine, CompTasksRepository
) # type: ignore
await comp_tasks_repo.mark_project_tasks_as_aborted(project_id)
# stop any remaining running task
running_tasks = [
t
for t in pipeline_tasks.values()
if t.state in [RunningState.STARTED, RunningState.RETRY]
]
await self._stop_tasks(running_tasks)
logger.debug(
"pipeline '%s' is marked for cancellation. stopping tasks for [%s]",
project_id,
running_tasks,
)
# the scheduled pipeline will be removed in the next iteration
return
# 4. Schedule the next tasks,
# these tasks are in PUBLISHED state and all their preceeding tasks are completed
next_tasks: List[NodeID] = [
node_id
for node_id, degree in pipeline_dag.in_degree() # type: ignore
if degree == 0 and pipeline_tasks[node_id].state == RunningState.PUBLISHED
]
if not next_tasks:
# nothing to run at the moment
return
# let's schedule the tasks, mark them as PENDING so the sidecar will take them
await self._schedule_tasks(
user_id, project_id, cluster_id, pipeline_tasks, next_tasks
)
async def _schedule_tasks(
self,
user_id: UserID,
project_id: ProjectID,
cluster_id: ClusterID,
comp_tasks: Dict[str, CompTaskAtDB],
tasks: List[NodeID],
):
# get tasks runtime requirements
tasks_to_reqs: Dict[NodeID, Image] = {
node_id: comp_tasks[f"{node_id}"].image for node_id in tasks
}
# The sidecar only pick up tasks that are in PENDING state
comp_tasks_repo: CompTasksRepository = get_repository(
self.db_engine, CompTasksRepository
) # type: ignore
await comp_tasks_repo.set_project_tasks_state(
project_id, tasks, RunningState.PENDING
)
# we pass the tasks to the dask-client
results = await asyncio.gather(
*[
self._start_tasks(
user_id,
project_id,
cluster_id,
scheduled_tasks={t: r},
callback=self._wake_up_scheduler_now,
)
for t, r in tasks_to_reqs.items()
],
return_exceptions=True,
)
for r, t in zip(results, tasks_to_reqs):
if isinstance(
r,
(
MissingComputationalResourcesError,
InsuficientComputationalResourcesError,
),
):
logger.error(
"Project '%s''s task '%s' could not be scheduled due to the following: %s",
project_id,
r.node_id,
f"{r}",
)
await comp_tasks_repo.set_project_tasks_state(
project_id, [r.node_id], RunningState.FAILED
)
# TODO: we should set some specific state so the user may know what to do
elif isinstance(r, ComputationalBackendNotConnectedError):
logger.error(
"The computational backend is disconnected. Tasks are set back "
"to PUBLISHED state until scheduler comes back!"
)
# we should try re-connecting.
# in the meantime we cannot schedule tasks on the scheduler,
# let's put these tasks back to PUBLISHED, so they might be re-submitted later
await asyncio.gather(
comp_tasks_repo.set_project_tasks_state(
project_id, tasks, RunningState.PUBLISHED
),
)
raise ComputationalBackendNotConnectedError(f"{r}") from r
if isinstance(r, Exception):
logger.error(
"Unexpected error happened when scheduling task due to following error %s",
f"{r}",
)
await comp_tasks_repo.set_project_tasks_state(
project_id, [t], RunningState.FAILED
)
    def _wake_up_scheduler_now(self) -> None:
        """Set the wake-up event so a waiting scheduling loop resumes immediately."""
        self.wake_up_event.set()
| 38.651869 | 150 | 0.609684 |
import asyncio
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Callable, Dict, List, Optional, Set, Tuple, cast
import networkx as nx
from aiopg.sa.engine import Engine
from models_library.projects import ProjectID
from models_library.projects_nodes_io import NodeID
from models_library.projects_state import RunningState
from pydantic import PositiveInt
from ...core.errors import (
ComputationalBackendNotConnectedError,
InsuficientComputationalResourcesError,
InvalidPipelineError,
MissingComputationalResourcesError,
PipelineNotFoundError,
SchedulerError,
)
from ...models.domains.comp_pipelines import CompPipelineAtDB
from ...models.domains.comp_runs import CompRunsAtDB
from ...models.domains.comp_tasks import CompTaskAtDB, Image
from ...models.schemas.constants import ClusterID, UserID
from ...utils.computations import get_pipeline_state_from_task_states
from ...utils.scheduler import COMPLETED_STATES, Iteration, get_repository
from ..db.repositories.comp_pipelines import CompPipelinesRepository
from ..db.repositories.comp_runs import CompRunsRepository
from ..db.repositories.comp_tasks import CompTasksRepository
logger = logging.getLogger(__name__)
@dataclass
class ScheduledPipelineParams:
cluster_id: ClusterID
mark_for_cancellation: bool = False
@dataclass
class BaseCompScheduler(ABC):
scheduled_pipelines: Dict[
Tuple[UserID, ProjectID, Iteration], ScheduledPipelineParams
]
db_engine: Engine
wake_up_event: asyncio.Event = field(default_factory=asyncio.Event, init=False)
default_cluster_id: ClusterID
async def run_new_pipeline(
self, user_id: UserID, project_id: ProjectID, cluster_id: ClusterID
) -> None:
dag = await self._get_pipeline_dag(project_id)
if not dag:
logger.warning(
"project %s has no computational dag defined. not scheduled for a run."
)
return
runs_repo: CompRunsRepository = get_repository(
self.db_engine, CompRunsRepository
)
new_run: CompRunsAtDB = await runs_repo.create(
user_id=user_id,
project_id=project_id,
cluster_id=cluster_id,
default_cluster_id=self.default_cluster_id,
)
self.scheduled_pipelines[
(user_id, project_id, new_run.iteration)
] = ScheduledPipelineParams(cluster_id=cluster_id)
self._wake_up_scheduler_now()
async def stop_pipeline(
self, user_id: UserID, project_id: ProjectID, iteration: Optional[int] = None
) -> None:
if not iteration:
possible_iterations = {
it
for u_id, p_id, it in self.scheduled_pipelines
if u_id == user_id and p_id == project_id
}
if not possible_iterations:
raise SchedulerError(
f"There are no pipeline scheduled for {user_id}:{project_id}"
)
iteration = max(possible_iterations)
self.scheduled_pipelines[
(user_id, project_id, iteration)
].mark_for_cancellation = True
self._wake_up_scheduler_now()
async def schedule_all_pipelines(self) -> None:
self.wake_up_event.clear()
await asyncio.gather(
*[
self._schedule_pipeline(
user_id,
project_id,
pipeline_params.cluster_id,
iteration,
pipeline_params.mark_for_cancellation,
)
for (
user_id,
project_id,
iteration,
), pipeline_params in self.scheduled_pipelines.items()
]
)
async def reconnect_backend(self) -> None:
await self._reconnect_backend()
async def _get_pipeline_dag(self, project_id: ProjectID) -> nx.DiGraph:
comp_pipeline_repo: CompPipelinesRepository = get_repository(
self.db_engine, CompPipelinesRepository
)
pipeline_at_db: CompPipelineAtDB = await comp_pipeline_repo.get_pipeline(
project_id
)
pipeline_dag = pipeline_at_db.get_graph()
return pipeline_dag
async def _get_pipeline_tasks(
self, project_id: ProjectID, pipeline_dag: nx.DiGraph
) -> Dict[str, CompTaskAtDB]:
comp_tasks_repo: CompTasksRepository = get_repository(
self.db_engine, CompTasksRepository
)
pipeline_comp_tasks: Dict[str, CompTaskAtDB] = {
str(t.node_id): t
for t in await comp_tasks_repo.get_comp_tasks(project_id)
if (str(t.node_id) in list(pipeline_dag.nodes()))
}
if len(pipeline_comp_tasks) != len(pipeline_dag.nodes()):
raise InvalidPipelineError(
f"{project_id}"
f"The tasks defined for {project_id} do not contain all the tasks defined in the pipeline [{list(pipeline_dag.nodes)}]! Please check."
)
return pipeline_comp_tasks
async def _update_run_result_from_tasks(
self,
user_id: UserID,
project_id: ProjectID,
iteration: PositiveInt,
pipeline_tasks: Dict[str, CompTaskAtDB],
) -> RunningState:
pipeline_state_from_tasks: RunningState = get_pipeline_state_from_task_states(
list(pipeline_tasks.values()),
)
await self._set_run_result(
user_id, project_id, iteration, pipeline_state_from_tasks
)
return pipeline_state_from_tasks
async def _set_run_result(
self,
user_id: UserID,
project_id: ProjectID,
iteration: PositiveInt,
run_result: RunningState,
) -> None:
comp_runs_repo: CompRunsRepository = get_repository(
self.db_engine, CompRunsRepository
)
await comp_runs_repo.set_run_result(
user_id=user_id,
project_id=project_id,
iteration=iteration,
result_state=run_result,
final_state=(run_result in COMPLETED_STATES),
)
@abstractmethod
async def _start_tasks(
self,
user_id: UserID,
project_id: ProjectID,
cluster_id: ClusterID,
scheduled_tasks: Dict[NodeID, Image],
callback: Callable[[], None],
) -> None:
...
@abstractmethod
async def _stop_tasks(self, tasks: List[CompTaskAtDB]) -> None:
...
@abstractmethod
async def _reconnect_backend(self) -> None:
...
async def _schedule_pipeline(
self,
user_id: UserID,
project_id: ProjectID,
cluster_id: ClusterID,
iteration: PositiveInt,
marked_for_stopping: bool,
) -> None:
logger.debug(
"checking run of project [%s:%s] for user [%s]",
project_id,
iteration,
user_id,
)
pipeline_dag = nx.DiGraph()
pipeline_tasks: Dict[str, CompTaskAtDB] = {}
pipeline_result: RunningState = RunningState.UNKNOWN
try:
pipeline_dag = await self._get_pipeline_dag(project_id)
pipeline_tasks: Dict[str, CompTaskAtDB] = await self._get_pipeline_tasks(
project_id, pipeline_dag
)
pipeline_dag.remove_nodes_from(
{
node_id
for node_id, t in pipeline_tasks.items()
if t.state == RunningState.SUCCESS
}
)
tasks_to_schedule = [node_id for node_id, degree in pipeline_dag.in_degree() if degree == 0]
tasks_to_mark_as_aborted: Set[NodeID] = set()
tasks_to_start: Set[NodeID] = set()
for node_id in tasks_to_schedule:
if pipeline_tasks[str(node_id)].state == RunningState.FAILED:
tasks_to_mark_as_aborted.update(nx.bfs_tree(pipeline_dag, node_id))
tasks_to_mark_as_aborted.remove(
node_id
)
if pipeline_tasks[str(node_id)].state == RunningState.PUBLISHED:
tasks_to_start.add(node_id)
comp_tasks_repo: CompTasksRepository = cast(
CompTasksRepository, get_repository(self.db_engine, CompTasksRepository)
)
if tasks_to_mark_as_aborted:
await comp_tasks_repo.set_project_tasks_state(
project_id, list(tasks_to_mark_as_aborted), RunningState.ABORTED
)
for node_id in tasks_to_mark_as_aborted:
pipeline_tasks[f"{node_id}"].state = RunningState.ABORTED
pipeline_result = await self._update_run_result_from_tasks(
user_id, project_id, iteration, pipeline_tasks
)
except PipelineNotFoundError:
logger.warning(
"pipeline %s does not exist in comp_pipeline table, it will be removed from scheduler",
project_id,
)
pipeline_result = RunningState.ABORTED
await self._set_run_result(user_id, project_id, iteration, pipeline_result)
except InvalidPipelineError as exc:
logger.warning(
"pipeline %s appears to be misconfigured, it will be removed from scheduler. Please check pipeline:\n%s",
project_id,
exc,
)
pipeline_result = RunningState.ABORTED
await self._set_run_result(user_id, project_id, iteration, pipeline_result)
if not pipeline_dag.nodes() or pipeline_result in COMPLETED_STATES:
self.scheduled_pipelines.pop((user_id, project_id, iteration))
logger.info(
"pipeline %s scheduling completed with result %s",
project_id,
pipeline_result,
)
return
# 3. Are we stopping??
if marked_for_stopping:
# get any running task and stop them
comp_tasks_repo: CompTasksRepository = get_repository(
self.db_engine, CompTasksRepository
) # type: ignore
await comp_tasks_repo.mark_project_tasks_as_aborted(project_id)
# stop any remaining running task
running_tasks = [
t
for t in pipeline_tasks.values()
if t.state in [RunningState.STARTED, RunningState.RETRY]
]
await self._stop_tasks(running_tasks)
logger.debug(
"pipeline '%s' is marked for cancellation. stopping tasks for [%s]",
project_id,
running_tasks,
)
# the scheduled pipeline will be removed in the next iteration
return
# 4. Schedule the next tasks,
# these tasks are in PUBLISHED state and all their preceeding tasks are completed
next_tasks: List[NodeID] = [
node_id
for node_id, degree in pipeline_dag.in_degree() # type: ignore
if degree == 0 and pipeline_tasks[node_id].state == RunningState.PUBLISHED
]
if not next_tasks:
# nothing to run at the moment
return
# let's schedule the tasks, mark them as PENDING so the sidecar will take them
await self._schedule_tasks(
user_id, project_id, cluster_id, pipeline_tasks, next_tasks
)
async def _schedule_tasks(
self,
user_id: UserID,
project_id: ProjectID,
cluster_id: ClusterID,
comp_tasks: Dict[str, CompTaskAtDB],
tasks: List[NodeID],
):
tasks_to_reqs: Dict[NodeID, Image] = {
node_id: comp_tasks[f"{node_id}"].image for node_id in tasks
}
comp_tasks_repo: CompTasksRepository = get_repository(
self.db_engine, CompTasksRepository
)
await comp_tasks_repo.set_project_tasks_state(
project_id, tasks, RunningState.PENDING
)
results = await asyncio.gather(
*[
self._start_tasks(
user_id,
project_id,
cluster_id,
scheduled_tasks={t: r},
callback=self._wake_up_scheduler_now,
)
for t, r in tasks_to_reqs.items()
],
return_exceptions=True,
)
for r, t in zip(results, tasks_to_reqs):
if isinstance(
r,
(
MissingComputationalResourcesError,
InsuficientComputationalResourcesError,
),
):
logger.error(
"Project '%s''s task '%s' could not be scheduled due to the following: %s",
project_id,
r.node_id,
f"{r}",
)
await comp_tasks_repo.set_project_tasks_state(
project_id, [r.node_id], RunningState.FAILED
)
# TODO: we should set some specific state so the user may know what to do
elif isinstance(r, ComputationalBackendNotConnectedError):
logger.error(
"The computational backend is disconnected. Tasks are set back "
"to PUBLISHED state until scheduler comes back!"
)
# we should try re-connecting.
# in the meantime we cannot schedule tasks on the scheduler,
# let's put these tasks back to PUBLISHED, so they might be re-submitted later
await asyncio.gather(
comp_tasks_repo.set_project_tasks_state(
project_id, tasks, RunningState.PUBLISHED
),
)
raise ComputationalBackendNotConnectedError(f"{r}") from r
if isinstance(r, Exception):
logger.error(
"Unexpected error happened when scheduling task due to following error %s",
f"{r}",
)
await comp_tasks_repo.set_project_tasks_state(
project_id, [t], RunningState.FAILED
)
def _wake_up_scheduler_now(self) -> None:
self.wake_up_event.set()
| true | true |
f71fbf2af7ee4f4be9f4ac329665e4e092b4627e | 1,045 | py | Python | tests/pyre/filesystem/local_rootNotDirectory.py | BryanRiel/pyre | 179359634a7091979cced427b6133dd0ec4726ea | [
"BSD-3-Clause"
] | null | null | null | tests/pyre/filesystem/local_rootNotDirectory.py | BryanRiel/pyre | 179359634a7091979cced427b6133dd0ec4726ea | [
"BSD-3-Clause"
] | null | null | null | tests/pyre/filesystem/local_rootNotDirectory.py | BryanRiel/pyre | 179359634a7091979cced427b6133dd0ec4726ea | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
"""
Verify that attempts to create local filesystems with nonexistent roots fails as expected
"""
def test():
    """Check that mounting a local filesystem on a regular file raises MountPointError."""
    # framework support
    import pyre.primitives
    import pyre.filesystem
    # a regular file, i.e. an invalid mount point for a local filesystem
    mountpoint = pyre.primitives.path("./local_rootNotDirectory.py")
    try:
        # attempt to mount a filesystem there
        pyre.filesystem.local(root=mountpoint)
    # the expected failure mode
    except pyre.filesystem.MountPointError as error:
        # verify the complaint mentions the resolved path
        expected = "error while mounting '{}': invalid mount point".format(
            mountpoint.resolve())
        assert str(error) == expected
    else:
        # the mount unexpectedly succeeded
        assert False
    # all done
    return
# main
if __name__ == "__main__":
    # skip pyre initialization since we don't rely on the executive
    # (pyre_noboot presumably acts as a flag read by the framework -- see pyre docs)
    pyre_noboot = True
    # run the check
    test()
# end of file
| 21.770833 | 89 | 0.643062 |
def test():
import pyre.primitives
import pyre.filesystem
dummy = pyre.primitives.path("./local_rootNotDirectory.py")
try:
pyre.filesystem.local(root=dummy)
assert False
# if it fails as expected
except pyre.filesystem.MountPointError as error:
# check that the error message is correct
assert str(error) == (
"error while mounting '{}': invalid mount point".format(dummy.resolve()))
# all done
return
# main
if __name__ == "__main__":
# skip pyre initialization since we don't rely on the executive
pyre_noboot = True
test()
| true | true |
f71fc0279e6c6df13e14b65f4c5bea90b17f596c | 21,395 | py | Python | pytorchtools/ptnetworks/ResNetCIFAR.py | Criscraft/pytorch_classification | d5772963e55ce218ae4719fb7f85604263aab65f | [
"MIT"
] | null | null | null | pytorchtools/ptnetworks/ResNetCIFAR.py | Criscraft/pytorch_classification | d5772963e55ce218ae4719fb7f85604263aab65f | [
"MIT"
] | null | null | null | pytorchtools/ptnetworks/ResNetCIFAR.py | Criscraft/pytorch_classification | d5772963e55ce218ae4719fb7f85604263aab65f | [
"MIT"
] | null | null | null | from collections import OrderedDict
import torch
from torch import Tensor
import torch.nn as nn
from torch.utils.model_zoo import load_url as load_state_dict_from_url
from ptnetworks.ActivationTracker import ActivationTracker
from typing import Type, Any, Callable, Union, List, Optional
class ResNetCIFAR(nn.Module):
    """Wrapper around torchvision-style ResNet variants adapted for CIFAR-sized
    inputs, with optional feature freezing, block pruning and statedict loading.

    The wrapped network is stored in ``self.embedded_model``; ``forward``
    accepts either a raw tensor or a dict with a ``'data'`` entry.
    """

    def __init__(self,
        variant='resnet050',
        n_classes=100,
        pretrained=False,
        freeze_features_until='', #exclusive
        no_gradient_required=False,
        enforce_batchnorm_requires_gradient=False,
        n_layers_to_be_removed_from_blocks=None,
        no_classifier=False,
        activation='relu',
        init_mode='kaiming_normal',
        statedict='',
        strict_loading=True):
        """Build the requested ResNet variant and apply the configured tweaks.

        :param variant: one of the supported architecture names, e.g. 'resnet050'
        :param freeze_features_until: unfreeze from the classifier down to (and
            including) this module name; everything below stays frozen
        :param n_layers_to_be_removed_from_blocks: per-stage count of trailing
            residual blocks to replace by identity (default: none)
        :raises ValueError: on an unknown *variant* or *freeze_features_until*
        """
        super().__init__()

        # NOTE: default changed from a mutable `[]` to None (same behavior)
        if n_layers_to_be_removed_from_blocks is None:
            n_layers_to_be_removed_from_blocks = []

        arg_dict = {
            'pretrained' : pretrained,
            'num_classes' : n_classes,
            'init_mode' : init_mode,
            'activation' : activation,
        }

        if variant == 'resnet018':
            self.embedded_model = resnet18(**arg_dict)
        elif variant == 'resnet034':
            self.embedded_model = resnet34(**arg_dict)
        elif variant == 'resnet050':
            self.embedded_model = resnet50(**arg_dict)
        elif variant == 'resnet101':
            self.embedded_model = resnet101(**arg_dict)
        elif variant == 'resnet152':
            self.embedded_model = resnet152(**arg_dict)
        elif variant == 'resnext050_32x4d':
            self.embedded_model = resnext50_32x4d(**arg_dict)
        elif variant == 'resnext101_32x8d':
            self.embedded_model = resnext101_32x8d(**arg_dict)
        elif variant == 'wide_resnet050_2':
            self.embedded_model = wide_resnet50_2(**arg_dict)
        elif variant == 'wide_resnet101_2':
            self.embedded_model = wide_resnet101_2(**arg_dict)
        else:
            # BUGFIX: used to only print and continue, which later crashed with
            # an AttributeError when self.embedded_model was accessed
            raise ValueError('select valid model variant, got {}'.format(variant))

        if no_classifier:
            self.embedded_model.classifier = nn.Identity()

        # modules ordered from head to stem, matching the unfreezing direction
        module_dict = OrderedDict([
            ('classifier', self.embedded_model.classifier),
            ('layer4', self.embedded_model.layer4),
            ('layer3', self.embedded_model.layer3),
            ('layer2', self.embedded_model.layer2),
            ('layer1', self.embedded_model.layer1),
        ])

        if freeze_features_until:
            # freeze everything, then unfreeze head-first until the named module
            for param in self.embedded_model.parameters():
                param.requires_grad = False
            if freeze_features_until not in module_dict:
                # BUGFIX: the original message had a typo ('freeue_features_until')
                raise ValueError("freeze_features_until does not match any network module")
            for key, module in module_dict.items():
                for param in module.parameters():
                    param.requires_grad = True
                if freeze_features_until == key:
                    break

        if n_layers_to_be_removed_from_blocks:
            modules = [
                self.embedded_model.layer1,
                self.embedded_model.layer2,
                self.embedded_model.layer3,
                self.embedded_model.layer4,
            ]
            for n_layers, layer in zip(n_layers_to_be_removed_from_blocks, modules):
                for i in range(n_layers):
                    # replace the last n_layers residual blocks by pass-throughs
                    layer[-i-1] = nn.Identity()

        if statedict:
            pretrained_dict = torch.load(statedict, map_location=torch.device('cpu'))
            missing = self.load_state_dict(pretrained_dict, strict=strict_loading)
            print('Loading weights from statedict. Missing and unexpected keys:')
            print(missing)

        if enforce_batchnorm_requires_gradient:
            # re-enable gradients on all batchnorm parameters, even if frozen above
            for m in self.embedded_model.modules():
                if isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
                    for param in m.parameters():
                        param.requires_grad = True

        if no_gradient_required:
            for param in self.embedded_model.parameters():
                param.requires_grad = False

    def forward(self, batch):
        """Run the embedded model; dict input -> dict output with 'logits'."""
        if isinstance(batch, dict) and 'data' in batch:
            logits = self.embedded_model(batch['data'])
            out = {'logits' : logits}
            return out
        else:
            return self.embedded_model(batch)

    def forward_features(self, batch, module=None):
        """Forward pass that additionally records intermediate activations via
        ActivationTracker; requires dict input with a 'data' entry."""
        track_modules = ActivationTracker()

        assert isinstance(batch, dict) and 'data' in batch
        logits, activation_dict = track_modules.collect_stats(self.embedded_model, batch['data'], module)
        out = {'logits' : logits, 'activations' : activation_dict}
        return out

    def save(self, statedict_name):
        """Persist this wrapper's state dict to *statedict_name*."""
        torch.save(self.state_dict(), statedict_name)
# local cache directory used when downloading pretrained checkpoints
MODEL_DIR = '/nfshome/linse/NO_INB_BACKUP/ModelZoo'

# download locations of the ImageNet-pretrained torchvision checkpoints
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """Build a bias-free 3x3 convolution; padding equals the dilation so the
    spatial size is preserved at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """Build a bias-free 1x1 (pointwise) convolution."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Residual block with two 3x3 convolutions (ResNet-18/34 flavour)."""

    expansion: int = 1

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        activation_layer=nn.ReLU
    ) -> None:
        super(BasicBlock, self).__init__()
        norm_layer = nn.BatchNorm2d if norm_layer is None else norm_layer
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (together with the downsample shortcut) carries the stride
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu_1 = activation_layer(inplace=False)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.relu_2 = activation_layer(inplace=False)
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        # shortcut path: identity, or a projection when shapes differ
        shortcut = x if self.downsample is None else self.downsample(x)
        # residual path: conv-bn-act, conv-bn
        y = self.relu_1(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu_2(y)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50/101/152 flavour).

    As in torchvision, the stride sits on the 3x3 convolution (the "ResNet
    V1.5" variant) rather than on the first 1x1 as in the original paper
    ("Deep residual learning for image recognition",
    https://arxiv.org/abs/1512.03385); see
    https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    """

    expansion: int = 4

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        activation_layer=nn.ReLU
    ) -> None:
        super(Bottleneck, self).__init__()
        norm_layer = nn.BatchNorm2d if norm_layer is None else norm_layer
        # inner width of the bottleneck, scaled by base_width and groups
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (together with the downsample shortcut) carries the stride
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu_1 = activation_layer(inplace=False)
        self.relu_2 = activation_layer(inplace=False)
        self.relu_3 = activation_layer(inplace=False)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        # shortcut path: identity, or a projection when shapes differ
        shortcut = x if self.downsample is None else self.downsample(x)
        # residual path: 1x1 reduce, 3x3, 1x1 expand
        y = self.relu_1(self.bn1(self.conv1(x)))
        y = self.relu_2(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu_3(y)
class ResNet(nn.Module):
    """torchvision-style ResNet with a CIFAR-adapted stem (3x3 first conv, no
    max-pool after it); classifier head is named ``classifier``."""

    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        init_mode='kaiming_normal',
        activation='relu',
    ) -> None:
        super().__init__()
        self.ID = 'ResNet'
        # NOTE(review): an activation string other than 'relu'/'leaky_relu'
        # leaves activation_layer unbound and raises UnboundLocalError below --
        # presumably only those two values are ever passed; confirm
        if activation == 'relu':
            activation_layer = nn.ReLU
        elif activation == 'leaky_relu':
            activation_layer = nn.LeakyReLU
        self._activation_layer = activation_layer
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        #for CIFAR we choose a kernel size of 3 in the first convolutional layer
        # NOTE(review): padding=3 matches the original 7x7 ImageNet stem; with
        # kernel_size=3 one would expect padding=1 (and often stride=1 for
        # 32x32 inputs) -- confirm the oversized padding/stride is intentional
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=2, padding=3,
                               bias=False)
        self.conv1.ID = self.ID + '_first_layer'
        self.bn1 = norm_layer(self.inplanes)
        self.relu = self._activation_layer(inplace=False)
        #we do not apply maxpooling after the first layer for CIFAR
        self.maxpool = nn.Identity() #nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier = nn.Linear(512 * block.expansion, num_classes)

        self.reinitialize(init_mode, activation, zero_init_residual)

    def reinitialize(self, init_mode, activation, zero_init_residual):
        """(Re)initialize all conv weights per *init_mode* and reset all
        norm-layer affine parameters to (weight=1, bias=0)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if init_mode == 'kaiming_normal':
                    nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity=activation)
                elif init_mode == 'kaiming_uniform':
                    nn.init.kaiming_uniform_(m.weight, mode='fan_out', nonlinearity=activation)
                elif init_mode == 'sparse':
                    nn.init.sparse_(m.weight, sparsity=0.1, std=0.01)
                elif init_mode == 'orthogonal':
                    nn.init.orthogonal_(m.weight, gain=1)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]

    def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
                    stride: int = 1, dilate: bool = False) -> nn.Sequential:
        """Stack *blocks* residual blocks; only the first one may downsample
        (via *stride*) and therefore gets a projection shortcut if needed."""
        norm_layer = self._norm_layer
        downsample = None
        activation_layer = self._activation_layer
        previous_dilation = self.dilation
        if dilate:
            # keep spatial resolution: trade the stride for dilation
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # projection shortcut to match the residual branch's shape
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer, activation_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer, activation_layer=activation_layer))

        return nn.Sequential(*layers)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)

        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
def _resnet(
    arch: str,
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    pretrained: bool,
    progress: bool,
    **kwargs: Any
) -> ResNet:
    """Instantiate a ResNet and optionally load the ImageNet weights for *arch*."""
    model = ResNet(block, layers, **kwargs)
    if not pretrained:
        return model
    # non-strict loading: the CIFAR stem/classifier may differ from the checkpoint
    state_dict = load_state_dict_from_url(
        model_urls[arch], progress=progress, model_dir=MODEL_DIR
    )
    model.load_state_dict(state_dict, strict=False)
    return model
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-18, "Deep Residual Learning for Image Recognition"
    (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)
def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-34, "Deep Residual Learning for Image Recognition"
    (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr
    """
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-50, "Deep Residual Learning for Image Recognition"
    (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr
    """
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-101, "Deep Residual Learning for Image Recognition"
    (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr
    """
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-152, "Deep Residual Learning for Image Recognition"
    (https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr
    """
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs)
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNeXt-50 32x4d, "Aggregated Residual Transformation for Deep Neural
    Networks" (https://arxiv.org/pdf/1611.05431.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr
    """
    # Grouped convolutions: 32 groups, 4 channels per group in the base width.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNeXt-101 32x8d, "Aggregated Residual Transformation for Deep Neural
    Networks" (https://arxiv.org/pdf/1611.05431.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr
    """
    # Grouped convolutions: 32 groups, 8 channels per group in the base width.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Wide ResNet-50-2, "Wide Residual Networks"
    (https://arxiv.org/pdf/1605.07146.pdf).

    Same as ResNet-50 except the bottleneck inner channel count is doubled;
    the outer 1x1 widths are unchanged (e.g. last block 2048-1024-2048
    instead of 2048-512-2048).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Wide ResNet-101-2, "Wide Residual Networks"
    (https://arxiv.org/pdf/1605.07146.pdf).

    Same as ResNet-101 except the bottleneck inner channel count is doubled;
    the outer 1x1 widths are unchanged.

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
import torch
from torch import Tensor
import torch.nn as nn
from torch.utils.model_zoo import load_url as load_state_dict_from_url
from ptnetworks.ActivationTracker import ActivationTracker
from typing import Type, Any, Callable, Union, List, Optional
class ResNetCIFAR(nn.Module):
    """Wrapper that builds one of the ResNet variants below and applies
    optional model surgery: feature freezing, per-stage layer removal,
    classifier removal, batch-norm gradient enforcement and state-dict
    loading.  Driven entirely by constructor arguments.
    """

    def __init__(self,
                 variant='resnet050',
                 n_classes=100,
                 pretrained=False,
                 freeze_features_until='',
                 no_gradient_required=False,
                 enforce_batchnorm_requires_gradient=False,
                 # NOTE(review): mutable default argument; harmless here since it
                 # is only read, but should be None + fallback.
                 n_layers_to_be_removed_from_blocks=[],
                 no_classifier=False,
                 activation='relu',
                 init_mode='kaiming_normal',
                 statedict='',
                 strict_loading=True):
        """Build the embedded ResNet and apply the requested modifications.

        :param variant: architecture key, e.g. 'resnet050' or 'wide_resnet101_2'
        :param n_classes: output dimension of the classifier head
        :param pretrained: load ImageNet weights into the embedded model
        :param freeze_features_until: freeze everything, then unfreeze from the
            classifier down to (and including) the named stage
        :param no_gradient_required: finally freeze the whole embedded model
        :param enforce_batchnorm_requires_gradient: re-enable grads on all BN layers
        :param n_layers_to_be_removed_from_blocks: per-stage count of trailing
            residual blocks to replace with nn.Identity
        :param no_classifier: replace the classifier head with nn.Identity
        :param statedict: optional path of a checkpoint to load (CPU-mapped)
        :param strict_loading: passed to load_state_dict
        """
        super().__init__()
        # Arguments forwarded to the resnet constructor functions.
        arg_dict = {
            'pretrained' : pretrained,
            'num_classes' : n_classes,
            'init_mode' : init_mode,
            'activation' : activation,
        }
        if variant == 'resnet018':
            self.embedded_model = resnet18(**arg_dict)
        elif variant == 'resnet034':
            self.embedded_model = resnet34(**arg_dict)
        elif variant == 'resnet050':
            self.embedded_model = resnet50(**arg_dict)
        elif variant == 'resnet101':
            self.embedded_model = resnet101(**arg_dict)
        elif variant == 'resnet152':
            self.embedded_model = resnet152(**arg_dict)
        elif variant == 'resnext050_32x4d':
            self.embedded_model = resnext50_32x4d(**arg_dict)
        elif variant == 'resnext101_32x8d':
            self.embedded_model = resnext101_32x8d(**arg_dict)
        elif variant == 'wide_resnet050_2':
            self.embedded_model = wide_resnet50_2(**arg_dict)
        elif variant == 'wide_resnet101_2':
            self.embedded_model = wide_resnet101_2(**arg_dict)
        else:
            # NOTE(review): an unknown variant only prints and then crashes below
            # with AttributeError (self.embedded_model unset) — should raise.
            print('select valid model variant')
        if no_classifier:
            self.embedded_model.classifier = nn.Identity()
        # Ordered back-to-front: unfreezing walks from the classifier backwards.
        module_dict = OrderedDict([
            ('classifier', self.embedded_model.classifier),
            ('layer4', self.embedded_model.layer4),
            ('layer3', self.embedded_model.layer3),
            ('layer2', self.embedded_model.layer2),
            ('layer1', self.embedded_model.layer1),
        ])
        if freeze_features_until:
            # Freeze everything first, then selectively re-enable gradients.
            for param in self.embedded_model.parameters():
                param.requires_grad = False
            if freeze_features_until not in module_dict:
                # NOTE(review): typo "freeue" in the message (left unchanged here).
                raise ValueError("freeue_features_until does not match any network module")
            for key, module in module_dict.items():
                for param in module.parameters():
                    param.requires_grad = True
                if freeze_features_until == key:
                    break
        if n_layers_to_be_removed_from_blocks:
            modules = [
                self.embedded_model.layer1,
                self.embedded_model.layer2,
                self.embedded_model.layer3,
                self.embedded_model.layer4,
            ]
            # Replace the last n blocks of each stage with identity mappings.
            for n_layers, layer in zip(n_layers_to_be_removed_from_blocks, modules):
                for i in range(n_layers):
                    layer[-i-1] = nn.Identity()
        if statedict:
            pretrained_dict = torch.load(statedict, map_location=torch.device('cpu'))
            missing = self.load_state_dict(pretrained_dict, strict=strict_loading)
            print('Loading weights from statedict. Missing and unexpected keys:')
            print(missing)
        if enforce_batchnorm_requires_gradient:
            # Re-enable gradients on batch-norm parameters regardless of freezing.
            for m in self.embedded_model.modules():
                if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
                    for param in m.parameters():
                        param.requires_grad = True
        if no_gradient_required:
            # Applied last: overrides any unfreezing done above.
            for param in self.embedded_model.parameters():
                param.requires_grad = False

    def forward(self, batch):
        """Forward pass.

        Accepts either a raw tensor or a dict with a 'data' key; in the dict
        case the result is wrapped as {'logits': ...}.
        """
        if isinstance(batch, dict) and 'data' in batch:
            logits = self.embedded_model(batch['data'])
            out = {'logits' : logits}
            return out
        else:
            return self.embedded_model(batch)

    def forward_features(self, batch, module=None):
        """Forward pass that also collects per-module activation statistics
        via ActivationTracker; returns {'logits', 'activations'}.
        """
        track_modules = ActivationTracker()
        assert isinstance(batch, dict) and 'data' in batch
        logits, activation_dict = track_modules.collect_stats(self.embedded_model, batch['data'], module)
        out = {'logits' : logits, 'activations' : activation_dict}
        return out

    def save(self, statedict_name):
        """Serialize the full wrapper state dict to `statedict_name`."""
        torch.save(self.state_dict(), statedict_name)
# NOTE(review): hard-coded cluster path used as the download cache for
# pretrained checkpoints — should be made configurable.
MODEL_DIR = '/nfshome/linse/NO_INB_BACKUP/ModelZoo'

# Download URLs of ImageNet-pretrained checkpoints, keyed by architecture name
# (the `arch` argument of _resnet).
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """3x3 convolution without bias; padding equals dilation so the spatial
    size is preserved at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
    )
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """1x1 (pointwise) convolution without bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-convolution residual block (used by ResNet-18/34).

    Output channels = planes * expansion (expansion is 1 here).
    Separate activation instances (relu_1/relu_2) are kept so activation
    tracking can distinguish them.
    """
    # Channel multiplier of the block output relative to `planes`.
    expansion: int = 1

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        activation_layer=nn.ReLU
    ) -> None:
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # BasicBlock has no bottleneck, so grouped/widened variants are invalid.
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 carries the (optional) stride; conv2 keeps the resolution.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu_1 = activation_layer(inplace=False)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.relu_2 = activation_layer(inplace=False)
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        """conv-bn-act, conv-bn, add (projected) shortcut, final activation."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu_1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            # Project the shortcut when shape/stride differs from the output.
            identity = self.downsample(x)
        out += identity
        out = self.relu_2(out)
        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (used by ResNet-50+).

    The inner width scales with base_width and groups; output channels are
    planes * expansion (4).  Note: the stride sits on the 3x3 conv
    (torchvision's "ResNet V1.5" placement).
    """
    # Channel multiplier of the block output relative to `planes`.
    expansion: int = 4

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        activation_layer=nn.ReLU
    ) -> None:
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Inner width; scales with base_width (wide variants) and groups (ResNeXt).
        width = int(planes * (base_width / 64.)) * groups
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        # Separate activation instances so activation tracking can tell them apart.
        self.relu_1 = activation_layer(inplace=False)
        self.relu_2 = activation_layer(inplace=False)
        self.relu_3 = activation_layer(inplace=False)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        """Three conv-bn(-act) stages, add (projected) shortcut, final activation."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu_1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu_2(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # Project the shortcut when shape/stride differs from the output.
            identity = self.downsample(x)
        out += identity
        out = self.relu_3(out)
        return out
class ResNet(nn.Module):
    """ResNet backbone with a small-image stem.

    Differs from the standard torchvision ResNet stem: a 3x3 stride-2 first
    convolution and no stem max-pool (replaced by nn.Identity) — suited to
    CIFAR-sized inputs.
    # NOTE(review): conv1 uses padding=3 with kernel_size=3, which is unusual
    # (pads more than the kernel radius) — confirm intended.
    """

    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        init_mode='kaiming_normal',
        activation='relu',
    ) -> None:
        """Assemble the network and initialize the weights.

        Args:
            block: residual block class (BasicBlock or Bottleneck).
            layers: blocks per stage, e.g. [3, 4, 6, 3] for ResNet-50.
            num_classes: output dimension of the classifier.
            zero_init_residual: zero the last BN of each residual branch.
            groups / width_per_group: ResNeXt / wide-ResNet knobs.
            replace_stride_with_dilation: per-stage stride->dilation flags.
            norm_layer: normalization factory (default nn.BatchNorm2d).
            init_mode: weight init scheme, see reinitialize().
            activation: 'relu' or 'leaky_relu'.

        Raises:
            ValueError: on an unknown activation name or a malformed
                replace_stride_with_dilation list.
        """
        super().__init__()
        self.ID = 'ResNet'
        if activation == 'relu':
            activation_layer = nn.ReLU
        elif activation == 'leaky_relu':
            activation_layer = nn.LeakyReLU
        else:
            # Fail fast: previously an unknown name left activation_layer
            # unbound and crashed later with UnboundLocalError.
            raise ValueError(
                "activation must be 'relu' or 'leaky_relu', got {!r}".format(activation))
        self._activation_layer = activation_layer
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # Each element tells whether to replace the 2x2 stride with dilation.
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Small-image stem: 3x3 stride-2 conv, no max-pool.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=2, padding=3,
                               bias=False)
        self.conv1.ID = self.ID + '_first_layer'
        self.bn1 = norm_layer(self.inplanes)
        self.relu = self._activation_layer(inplace=False)
        self.maxpool = nn.Identity()
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier = nn.Linear(512 * block.expansion, num_classes)
        self.reinitialize(init_mode, activation, zero_init_residual)

    def reinitialize(self, init_mode, activation, zero_init_residual):
        """(Re)initialize conv and norm weights according to `init_mode`."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if init_mode == 'kaiming_normal':
                    nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity=activation)
                elif init_mode == 'kaiming_uniform':
                    nn.init.kaiming_uniform_(m.weight, mode='fan_out', nonlinearity=activation)
                elif init_mode == 'sparse':
                    nn.init.sparse_(m.weight, sparsity=0.1, std=0.01)
                elif init_mode == 'orthogonal':
                    nn.init.orthogonal_(m.weight, gain=1)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-init the last BN of each residual branch so every block starts
        # as an identity mapping (improves ~0.2-0.3%, arxiv.org/abs/1706.02677).
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
                    stride: int = 1, dilate: bool = False) -> nn.Sequential:
        """Build one residual stage of `blocks` stacked `block` modules.

        Mutates self.inplanes (and self.dilation when `dilate`).
        """
        norm_layer = self._norm_layer
        downsample = None
        activation_layer = self._activation_layer
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation: no downsampling, same receptive field.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Identity shortcut impossible: project with a strided 1x1 conv.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer, activation_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer, activation_layer=activation_layer))
        return nn.Sequential(*layers)

    def _forward_impl(self, x: Tensor) -> Tensor:
        """Stem, four residual stages, global pooling, classifier."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        """Public forward pass; delegates to _forward_impl."""
        return self._forward_impl(x)
def _resnet(
    arch: str,
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    pretrained: bool,
    progress: bool,
    **kwargs: Any
) -> ResNet:
    """Instantiate a ResNet and optionally load the pretrained weights for `arch`."""
    net = ResNet(block, layers, **kwargs)
    if not pretrained:
        return net
    # Non-strict load: tolerates e.g. a classifier head of a different size.
    weights = load_state_dict_from_url(model_urls[arch], progress=progress, model_dir=MODEL_DIR)
    net.load_state_dict(weights, strict=False)
    return net
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-18 (2-2-2-2 BasicBlocks); optionally ImageNet-pretrained."""
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)
def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-34 (3-4-6-3 BasicBlocks); optionally ImageNet-pretrained."""
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-50 (3-4-6-3 Bottlenecks); optionally ImageNet-pretrained."""
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-101 (3-4-23-3 Bottlenecks); optionally ImageNet-pretrained."""
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-152 (3-8-36-3 Bottlenecks); optionally ImageNet-pretrained."""
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs)
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNeXt-50 32x4d: grouped bottlenecks (32 groups, width 4 per group)."""
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNeXt-101 32x8d: grouped bottlenecks (32 groups, width 8 per group)."""
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Wide ResNet-50-2: ResNet-50 with doubled bottleneck inner width."""
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """Wide ResNet-101-2: ResNet-101 with doubled bottleneck inner width."""
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
f71fc26c9b8a2256095ae1cb28a68d64d9c6fef3 | 20,784 | py | Python | gym_minigrid/extendedminigrid.py | pierg/wiseml-patterns | 2decf2954001296bd04261b00ae144f53359a2b8 | [
"BSD-3-Clause"
] | null | null | null | gym_minigrid/extendedminigrid.py | pierg/wiseml-patterns | 2decf2954001296bd04261b00ae144f53359a2b8 | [
"BSD-3-Clause"
] | 6 | 2021-03-18T21:24:56.000Z | 2022-03-11T23:34:25.000Z | gym_minigrid/extendedminigrid.py | pierg/wiseml-patterns | 2decf2954001296bd04261b00ae144f53359a2b8 | [
"BSD-3-Clause"
] | null | null | null | from gym_minigrid.minigrid import *
from configurations import config_grabber as cg
import math
import operator
from functools import reduce
import traceback
import numpy as np
config = cg.Configuration.grab()
AGENT_VIEW_SIZE = config.agent_view_size
EXTRA_OBSERVATIONS_SIZE = 5
OBS_ARRAY_SIZE = (AGENT_VIEW_SIZE, AGENT_VIEW_SIZE)
def extended_dic(obj_names=()):
    """Extend the OBJECT_TO_IDX dictionary (in place) with additional objects.

    Each name not already present is assigned the next free index after the
    current maximum, so existing encodings are never disturbed.

    :param obj_names: iterable of object-type names (strings)
    """
    # Next free index = one past the largest index currently in use.
    # (Replaces the previous hand-rolled max over the values.)
    new_obj_idx = max(OBJECT_TO_IDX.values()) + 1
    for obj_name in obj_names:
        if obj_name not in OBJECT_TO_IDX:
            OBJECT_TO_IDX[obj_name] = obj_name and new_obj_idx
            new_obj_idx = new_obj_idx + 1
# Register the new world-object types and rebuild the reverse (idx -> name) map.
extended_dic(["water", "lightsw", "dirt", "vase"])
IDX_TO_OBJECT = dict(zip(OBJECT_TO_IDX.values(), OBJECT_TO_IDX.keys()))
class Room:
    """Axis-aligned rectangular room of the grid.

    Tracks its number, size, top-left position, light state and, optionally,
    the positions of its entry and exit doors.
    """

    def __init__(self, room, size, position, lightOn):
        self.number = room
        self.size = size
        self.position = position
        self.lightOn = lightOn

    def setLight(self, lightOn):
        self.lightOn = lightOn

    def setEntryDoor(self, position):
        self.entryDoor = position

    def setExitDoor(self, position):
        self.exitDoor = position

    def getLight(self):
        return self.lightOn

    def objectInRoom(self, position):
        """Return True when `position` lies inside the room (bounds inclusive)."""
        px, py = position
        width, height = self.size
        left, top = self.position
        return left <= px <= left + width and top <= py <= top + height
class Water(WorldObj):
    """Blue hazard tile; the agent may step onto it, which ends the episode
    with the death penalty (see ExMiniGridEnv.step)."""

    def __init__(self):
        super().__init__('water', 'blue')

    def can_overlap(self):
        # The agent is allowed to walk onto water (and then dies).
        return True

    def render(self, r):
        self._set_color(r)
        # Fill the whole cell with the object color.
        cell = CELL_PIXELS
        r.drawPolygon([(0, cell), (cell, cell), (cell, 0), (0, 0)])
class LightSwitch(WorldObj):
    """Switch that toggles the light of an associated Room.

    Rendering also darkens flagged room cells when the light is off, so the
    switch is responsible for the visual "dark room" effect.
    """

    def __init__(self):
        # Mirrors the room light state; exposed so gen_obs can report it.
        self.is_on = False
        super(LightSwitch, self).__init__('lightsw', 'yellow')

    def affectRoom(self, room):
        """Bind the Room controlled by this switch."""
        self.room = room

    def setSwitchPos(self, position):
        self.position = position

    def elements_in_room(self, room):
        # NOTE(review): despite the parameter name, this stores a sequence of
        # (x, y, flag) element tuples used by dark_light — confirm with callers.
        self.elements = room

    def toggle(self, env, pos):
        """Flip both the room light and the switch's own state."""
        self.room.setLight(not self.room.getLight())
        self.is_on = not self.is_on
        return True

    def getRoomNumber(self):
        return self.room.number

    def can_overlap(self):
        return False

    def render(self, r):
        self._set_color(r)
        # Cell background.
        r.drawPolygon([
            (0, CELL_PIXELS),
            (CELL_PIXELS, CELL_PIXELS),
            (CELL_PIXELS, 0),
            (0, 0)
        ])
        # Overlay the light-state indicator (and darkening when off).
        self.dark_light(r)

    def dark_light(self, r):
        """Draw a red (off) or green (on) indicator; when off, black out the
        room cells whose element flag is set."""
        if self.room.getLight() == False:
            r.setColor(255, 0, 0)
            r.drawCircle(0.5 * CELL_PIXELS, 0.5 * CELL_PIXELS, 0.2 * CELL_PIXELS)
            if hasattr(self, 'cur_pos'):
                if hasattr(self, 'elements'):
                    (xl, yl) = self.cur_pos
                    for i in range(0, len(self.elements)):
                        # Flag == 1 marks a cell that should be blacked out.
                        if self.elements[i][2] == 1:
                            r.setLineColor(10, 10, 10)
                            r.setColor(10, 10, 10)
                            # Draw the cell relative to the switch position.
                            r.drawPolygon([
                                (
                                    (self.elements[i][0] - xl) * CELL_PIXELS,
                                    (self.elements[i][1] - yl + 1) * CELL_PIXELS),
                                ((self.elements[i][0] - xl + 1) * CELL_PIXELS,
                                 (self.elements[i][1] - yl + 1) * CELL_PIXELS),
                                (
                                    (self.elements[i][0] - xl + 1) * CELL_PIXELS,
                                    (self.elements[i][1] - yl) * CELL_PIXELS),
                                ((self.elements[i][0] - xl) * CELL_PIXELS, (self.elements[i][1] - yl) * CELL_PIXELS)
                            ])
        else:
            r.setColor(0, 255, 0)
            r.drawCircle(0.5 * CELL_PIXELS, 0.5 * CELL_PIXELS, 0.2 * CELL_PIXELS)
            # NOTE(review): missing call parentheses — "r.pop" is a no-op
            # attribute access; presumably r.pop() was intended.
            r.pop
class Dirt(WorldObj):
    """Dirt tile produced by a Vase; can be cleaned by toggling it."""

    def __init__(self):
        super(Dirt, self).__init__('dirt', 'yellow')

    def can_overlap(self):
        return True

    def affect_list(self, list):
        # Shared bookkeeping list of outstanding Dirt objects (owned by the Vase).
        # NOTE(review): parameter shadows the builtin `list`.
        self.list = list

    def toggle(self, env, pos):
        """Clean this dirt: clear the cell in front of the agent and drop one
        entry from the shared dirt list."""
        # NOTE(review): uses the cell in front of the agent rather than `pos` —
        # confirm these always coincide for toggle calls.
        x, y = ExMiniGridEnv.get_grid_coords_from_view(env, (1, 0))
        env.grid.set(x, y, None)
        # Remove the most recent entry (equivalent to self.list.pop()).
        del self.list[len(self.list) - 1]
        return True

    def render(self, r):
        self._set_color(r)
        # Brown-ish fill covering the whole cell.
        r.setColor(240, 150, 0)
        r.setLineColor(81, 41, 0)
        r.drawPolygon([
            (0, CELL_PIXELS),
            (CELL_PIXELS, CELL_PIXELS),
            (CELL_PIXELS, 0),
            (0, 0)
        ])
class Vase(WorldObj):
    """Breakable vase.  Toggling it spills its Dirt content into the cell in
    front of the agent and records the new dirt in a shared list."""

    def __init__(self):
        super(Vase, self).__init__('vase', 'grey')
        # The Dirt object placed on the grid when the vase is toggled.
        self.content = Dirt()
        # Bookkeeping of outstanding Dirt objects, shared with `content`.
        self.list = []

    def can_overlap(self):
        return False

    def toggle(self, env, pos):
        """Spill dirt into the cell in front of the agent."""
        # NOTE(review): uses the cell in front of the agent rather than `pos`.
        x, y = ExMiniGridEnv.get_grid_coords_from_view(env, (1, 0))
        env.grid.set(x, y, self.content)
        self.list.append(Dirt())
        self.content.affect_list(self.list)

    def render(self, r):
        self._set_color(r)
        r.setColor(255, 255, 255)
        QUARTER_CELL = 0.25 * CELL_PIXELS
        DEMI_CELL = 0.5 * CELL_PIXELS
        # White vase body: circle with an inner square...
        r.drawCircle(DEMI_CELL, DEMI_CELL, DEMI_CELL)
        r.drawPolygon([
            (QUARTER_CELL, 3 * QUARTER_CELL),
            (3 * QUARTER_CELL, 3 * QUARTER_CELL),
            (3 * QUARTER_CELL, QUARTER_CELL),
            (QUARTER_CELL, QUARTER_CELL)
        ])
        # ...and a smaller dirt-colored square showing the content.
        r.setColor(240, 150, 0)
        r.drawPolygon([
            (0.32 * CELL_PIXELS, 0.7 * CELL_PIXELS),
            (0.7 * CELL_PIXELS, 0.7 * CELL_PIXELS),
            (0.7 * CELL_PIXELS, 0.32 * CELL_PIXELS),
            (0.32 * CELL_PIXELS, 0.32 * CELL_PIXELS)
        ])

    def list_dirt(self, list):
        # Replace the shared dirt list.  NOTE(review): shadows builtin `list`.
        self.list = list
def worldobj_name_to_object(worldobj_name):
    """Map a world-object type name to a fresh instance; None when unknown."""
    factories = {
        'water': Water,
        'wall': Wall,
        'lightsw': LightSwitch,
        'dirt': Dirt,
        'vase': Vase,
        'goal': Goal,
    }
    factory = factories.get(worldobj_name)
    return factory() if factory is not None else None
class ExGrid(Grid):
    """
    Extending Grid methods to support the new objects
    """

    # Add new worldobj that need to be decoded (Ex. water)
    # NOTE(review): defined without `self` and without @staticmethod — only
    # callable as ExGrid.decode(array), not on an instance.
    def decode(array):
        """
        Decode an array grid encoding back into a grid.

        Expects a flattened square observation: the side length is recovered
        as sqrt(array.shape[0]), yet cells are read as array[i, j, k] —
        NOTE(review): these two conventions look inconsistent; confirm the
        expected array shape with the producer (gen_obs).
        """
        flatten_dim = array.shape[0]
        width = int(math.sqrt(flatten_dim))
        height = width
        # width = array.shape[0]
        # height = array.shape[1]
        grid = ExGrid(width, height)
        for j in range(0, height):
            for i in range(0, width):
                typeIdx = array[i, j, 0]
                colorIdx = array[i, j, 1]
                openIdx = array[i, j, 2]
                # Type index 0 means "empty cell".
                if typeIdx == 0:
                    continue
                objType = IDX_TO_OBJECT[typeIdx]
                color = IDX_TO_COLOR[colorIdx]
                # NOTE(review): mixes types — True when open, integer 0 otherwise.
                is_open = True if openIdx == 1 else 0
                if objType == 'wall':
                    v = Wall(color)
                elif objType == 'ball':
                    v = Ball(color)
                elif objType == 'key':
                    v = Key(color)
                elif objType == 'box':
                    v = Box(color)
                elif objType == 'door':
                    v = Door(color, is_open)
                elif objType == 'locked_door':
                    v = LockedDoor(color, is_open)
                elif objType == 'goal':
                    v = Goal()
                elif objType == 'water':
                    v = Water()
                elif objType == 'lightsw':
                    v = LightSwitch()
                elif objType == 'dirt':
                    v = Dirt()
                elif objType == 'vase':
                    v = Vase()
                else:
                    assert False, "unknown obj type in decode '%s'" % objType
                grid.set(i, j, v)
        return grid
class ExMiniGridEnv(MiniGridEnv):
    """MiniGrid environment extended with water/light-switch/dirt/vase objects,
    configuration-driven rewards, and a flattened observation vector with
    extra scalar observations appended."""

    # Enumeration of possible actions
    class Actions(IntEnum):
        # Used to observe the environment in the step() before the action
        observe = -1
        # Action space
        left = 0
        right = 1
        forward = 2
        toggle = 3
        # Extra action (not used)
        pickup = 4
        drop = 5
        done = 6
        clean = 7

    def print_grid(self, grid):
        """Debug helper: print the grid cell types as a table.

        NOTE(review): wraps rows every grid.height cells — only correct for
        square grids (width == height); confirm if non-square grids are used.
        """
        for i, e in enumerate(grid.grid):
            if i % grid.height == 0:
                print("")
            if e is not None:
                print(str(e.type), end="\t")
            else:
                print("none", end="\t")
        print("")

    def strings_to_actions(self, actions):
        """Convert a list of action names to Actions values, in place
        (the input list is mutated and also returned)."""
        for i, action_name in enumerate(actions):
            if action_name == "left":
                actions[i] = self.actions.left
            elif action_name == "right":
                actions[i] = self.actions.right
            elif action_name == "forward":
                actions[i] = self.actions.forward
            elif action_name == "toggle":
                actions[i] = self.actions.toggle
            elif action_name == "done":
                actions[i] = self.actions.done
            elif action_name == "clean":
                actions[i] = self.actions.clean
            elif action_name == "observe":
                actions[i] = self.actions.observe
        return actions

    def action_to_string(self, action):
        """Return the name of an Actions value, or None when unknown."""
        if action == self.actions.left:
            return "left"
        elif action == self.actions.right:
            return "right"
        elif action == self.actions.forward:
            return "forward"
        elif action == self.actions.toggle:
            return "toggle"
        elif action == self.actions.done:
            return "done"
        elif action == self.actions.clean:
            return "clean"
        elif action == self.actions.observe:
            return "observe"
        return None

    def __init__(self, grid_size=16, max_steps=-1, see_through_walls=False, seed=1337):
        """Create the environment; config values override `max_steps` and
        define the observation/action spaces."""
        # Grab configuration
        self.config = cg.Configuration.grab()
        # Overriding the max_num_steps
        max_num_steps = max_steps
        if hasattr(self.config, 'max_num_steps'):
            max_num_steps = self.config.max_num_steps
        super().__init__(grid_size, max_num_steps, see_through_walls, seed)
        self.actions = ExMiniGridEnv.Actions
        """
        Observation Space
        low: lowest element value
        high: highest element value
        shape: imgSize tuple, each element can be of a value between 'low' and 'high'
        """
        # Flattened view (AGENT_VIEW_SIZE^2) plus the extra scalar observations.
        imgSize = reduce(operator.mul, OBS_ARRAY_SIZE, 1) + EXTRA_OBSERVATIONS_SIZE
        elemSize = len(IDX_TO_OBJECT)
        self.observation_space = spaces.Box(
            low=0,
            high=elemSize,
            shape=(imgSize,),
            dtype='uint8'
        )
        # Restricting action_space to the first N actions
        first_n_actions_available = 4
        self.action_space = spaces.Discrete(first_n_actions_available)

    def step(self, action):
        """Advance one step.

        Returns (obs, reward, done, info); info carries an "event" list
        ("died", "goal") and the current step count.  Rewards come from the
        configuration (death / goal / per-step / forward / clean).
        """
        self.step_count += 1
        reward = 0
        done = False
        info = {"event": [], "steps_count": self.step_count}
        # Get the position in front of the agent
        fwd_pos = self.front_pos
        # Get the contents of the cell in front of the agent
        fwd_cell = self.grid.get(*fwd_pos)
        # Rotate left
        if action == self.actions.left:
            self.agent_dir -= 1
            if self.agent_dir < 0:
                self.agent_dir += 4
        # Rotate right
        elif action == self.actions.right:
            self.agent_dir = (self.agent_dir + 1) % 4
        # Move forward
        elif action == self.actions.forward:
            if fwd_cell == None or fwd_cell.can_overlap():
                self.agent_pos = fwd_pos
            # Step into Water
            if fwd_cell is not None and fwd_cell.type == 'water':
                done = True
                reward = self.config.rewards.standard.death
                info["event"].append("died")
                # With an active safety envelope this should be unreachable.
                if self.config.envelope:
                    print("DIED!! >>>>>>> Problems with envelope!")
            # Step into Goal
            elif fwd_cell is not None and fwd_cell.type == 'goal':
                # NOTE(review): bare except treats any error in goal_enabled()
                # (including its default NotImplementedError) as "goal reached".
                try:
                    if self.goal_enabled():
                        done = True
                        reward = self.config.rewards.standard.goal
                        # reward = self.config.rewards.standard.goal - 0.9 * (self.step_count / self.max_steps)
                        info["event"].append("goal")
                except:
                    done = True
                    reward = self.config.rewards.standard.goal
                    # reward = self.config.rewards.standard.goal - 0.9 * (self.step_count / self.max_steps)
                    info["event"].append("goal")
            else:
                reward = self.config.rewards.actions.forward
        # Pick up an object
        elif action == self.actions.pickup:
            if fwd_cell and fwd_cell.can_pickup():
                if self.carrying is None:
                    self.carrying = fwd_cell
                    self.carrying.cur_pos = np.array([-1, -1])
                    self.grid.set(*fwd_pos, None)
        # Drop an object
        elif action == self.actions.drop:
            if not fwd_cell and self.carrying:
                self.grid.set(*fwd_pos, self.carrying)
                self.carrying.cur_pos = fwd_pos
                self.carrying = None
        # Toggle/activate an object
        elif action == self.actions.toggle:
            # Cleaning dirt earns the cleaning reward before the toggle happens.
            if fwd_cell is not None and fwd_cell.type == 'dirt':
                reward = self.config.rewards.cleaningenv.clean
            if fwd_cell:
                fwd_cell.toggle(self, fwd_pos)
        # Done action (not used by default)
        elif action == self.actions.done:
            pass
        else:
            assert False, "unknown action"
        # Adding reward for the step
        reward += self.config.rewards.standard.step
        if self.step_count == self.config.max_num_steps_episode:
            done = True
        obs = self.gen_obs()
        if self.config.debug_mode: print("reward: " + str(reward) + "\tinfo: " + str(info))
        return obs, reward, done, info

    def goal_enabled(self):
        """Hook for subclasses: whether reaching the goal currently counts."""
        raise NotImplementedError()

    def gen_obs_decoded(self):
        """
        Generate the agent's view (partially observable, low-resolution encoding)
        as a Grid plus the extra scalar observations tuple:
        (door open, light on, current room, current room light, next room light).
        Cells of dark rooms are blanked out of the view.
        """
        grid, vis_mask = self.gen_obs_grid()
        if self.config.debug_mode:
            print("\nAgent View Original")
            self.print_grid(grid)
        """if Perception.light_on_current_room(self):"""
        try:
            # Agent cell within its own view: bottom-center.
            agent_pos = (AGENT_VIEW_SIZE // 2, AGENT_VIEW_SIZE - 1)
            obs_door_open = 0
            obs_light_on = 0
            current_room = 0
            current_room_light = 0
            next_room_light = 0
            if self.roomList:
                for x in self.roomList:
                    # Save room number
                    if x.objectInRoom(self.agent_pos):
                        current_room = x.number
                        current_room_light = x.getLight()
                    else:
                        next_room_light = x.getLight()
                    # check if room is on the dark
                    if not x.getLight():
                        for j in range(0, grid.height):
                            for i in range(0, grid.width):
                                # pass the obs coordinates (i, j) into the absolute grid coordinates (xpos, ypos).
                                xpos = agent_pos[1] - j
                                ypos = i - agent_pos[0]
                                (xpos, ypos) = self.get_grid_coords_from_view((xpos, ypos))
                                # check if the object position is on the room
                                if x.objectInRoom((xpos, ypos)):
                                    # Dark room: hide the cell from the observation.
                                    if grid.grid[(j * AGENT_VIEW_SIZE) + i] is not None:
                                        grid.grid[i + (j * AGENT_VIEW_SIZE)] = None
            # Scan the (possibly blanked) view for open doors / switched-on lights.
            for j in range(0, grid.height):
                for i in range(0, grid.width):
                    v = grid.get(i, j)
                    if hasattr(v, 'is_open') and v.is_open:
                        obs_door_open = 1
                    if hasattr(v, 'is_on') and v.is_on:
                        obs_light_on = 1
            if self.config.debug_mode:
                print("\n\nobs_door_open\t\t" + str(obs_door_open))
                print("obs_light_on\t\t" + str(obs_light_on))
                print("current_room\t\t" + str(current_room))
                print("current_room_light\t" + str(current_room_light*1))
                print("next_room_light\t\t" + str(next_room_light*1) + "\n\n")
            # `*1` coerces booleans to ints for the observation vector.
            return grid, (obs_door_open, obs_light_on, current_room, current_room_light*1, next_room_light*1)
        except AttributeError:
            # NOTE(review): on error this logs and implicitly returns None,
            # which will break the caller's tuple unpacking.
            traceback.print_exc()
            print("ERROR!!!")

    def gen_obs(self):
        """
        Generate the agent's view (partially observable, low-resolution encoding)
        as a flat uint8 vector: the encoded view followed by the extra
        scalar observations.
        """
        grid, extra_observations = self.gen_obs_decoded()
        if self.config.debug_mode:
            print("\nAgent View Retreived")
            self.print_grid(grid)
        """if Perception.light_on_current_room(self):"""
        try:
            array = np.zeros(shape=(grid.width, grid.height, 1), dtype='uint8')
            obs_door_open = 0
            obs_light_on = 0
            for j in range(0, grid.height):
                for i in range(0, grid.width):
                    v = grid.get(i, j)
                    if v == None:
                        continue
                    # Encode only the object type index (no color/state channels).
                    array[i, j, 0] = OBJECT_TO_IDX[v.type]
                    if hasattr(v, 'is_open') and v.is_open:
                        obs_door_open = 1
                    if hasattr(v, 'is_on') and v.is_on:
                        obs_light_on = 1
            image = array
            flatten_image = image.flatten()
            obs = np.append(flatten_image, extra_observations)
            return obs
        except AttributeError:
            traceback.print_exc()
            print("ERROR!!!")
        # return super().gen_obs()

    def get_grid_coords_from_view(self, coordinates):
        """
        Dual of "get_view_coords". Translate and rotate relative to the agent coordinates (i, j) into the
        absolute grid coordinates.
        Need to have tuples of integers for the position of the agent and its direction
        :param coordinates: tuples of integers (vertical,horizontal) position from the agent relative to its position
        :return : coordinates translated into the absolute grid coordinates.
        """
        ax, ay = self.agent_pos
        ad = self.agent_dir
        x, y = coordinates
        # agent facing down
        if ad == 1:
            ax -= y
            ay += x
        # agent facing right
        elif ad == 0:
            ax += x
            ay += y
        # agent facing left
        elif ad == 2:
            ax -= x
            ay -= y
        # agent facing up
        elif ad == 3:
            ax += y
            ay -= x
        return ax, ay

    def worldobj_in_agent(self, front, side):
        """
        Returns the type of the worldobject in the 'front' cells in front and 'side' cells right (positive) or left (negative)
        with respect to the agent
        :param front: integer representing the number of cells in front of the agent
        :param side: integer, if positive represents the cells to the right, negative to the left of the agent
        :return: string: worldobj type
        """
        coordinates = (front, side)
        wx, wy = ExMiniGridEnv.get_grid_coords_from_view(self, coordinates)
        if 0 <= wx < self.grid.width and 0 <= wy < self.grid.height:
            worldobj = self.grid.get(wx, wy)
            if worldobj is not None:
                worldobj_type = worldobj.type
                return worldobj_type
        return None
| 31.301205 | 126 | 0.520737 | from gym_minigrid.minigrid import *
from configurations import config_grabber as cg
import math
import operator
from functools import reduce
import traceback
import numpy as np
config = cg.Configuration.grab()
AGENT_VIEW_SIZE = config.agent_view_size
EXTRA_OBSERVATIONS_SIZE = 5
OBS_ARRAY_SIZE = (AGENT_VIEW_SIZE, AGENT_VIEW_SIZE)
def extended_dic(obj_names=None):
    """
    Extend gym-minigrid's OBJECT_TO_IDX mapping with new object names.

    Each name that is not already registered is assigned the next free
    integer index; existing entries are never modified.

    :param obj_names: iterable of object-type names to register
    """
    if obj_names is None:  # avoid the mutable-default-argument pitfall
        obj_names = []
    # Next free index is one past the current maximum (the original scanned
    # the dict twice — once via list()[-1], once in a loop — to compute this).
    new_obj_idx = max(OBJECT_TO_IDX.values()) + 1
    for obj_name in obj_names:
        if obj_name not in OBJECT_TO_IDX:
            OBJECT_TO_IDX[obj_name] = new_obj_idx
            new_obj_idx += 1
extended_dic(["water", "lightsw", "dirt", "vase"])
IDX_TO_OBJECT = dict(zip(OBJECT_TO_IDX.values(), OBJECT_TO_IDX.keys()))
class Room:
    """Axis-aligned rectangular room in the grid world.

    Tracks the room's index, its (width, height) size, its top-left
    position and whether its light is currently on.
    """

    def __init__(self, room, size, position, lightOn):
        self.number = room
        self.size = size
        self.position = position
        self.lightOn = lightOn

    def setLight(self, lightOn):
        """Turn the room light on or off."""
        self.lightOn = lightOn

    def setEntryDoor(self, position):
        """Record the grid position of the door used to enter this room."""
        self.entryDoor = position

    def setExitDoor(self, position):
        """Record the grid position of the door used to leave this room."""
        self.exitDoor = position

    def getLight(self):
        """Return whether the room light is on."""
        return self.lightOn

    def objectInRoom(self, position):
        """Return True when `position` lies inside this room (inclusive bounds)."""
        px, py = position
        width, height = self.size
        left, top = self.position
        return left <= px <= left + width and top <= py <= top + height
class Water(WorldObj):
def __init__(self):
super(Water, self).__init__('water', 'blue')
def can_overlap(self):
return True
def render(self, r):
self._set_color(r)
r.drawPolygon([
(0, CELL_PIXELS),
(CELL_PIXELS, CELL_PIXELS),
(CELL_PIXELS, 0),
(0, 0)
])
class LightSwitch(WorldObj):
def __init__(self):
self.is_on = False
super(LightSwitch, self).__init__('lightsw', 'yellow')
def affectRoom(self, room):
self.room = room
def setSwitchPos(self, position):
self.position = position
def elements_in_room(self, room):
self.elements = room
def toggle(self, env, pos):
self.room.setLight(not self.room.getLight())
self.is_on = not self.is_on
return True
def getRoomNumber(self):
return self.room.number
def can_overlap(self):
return False
def render(self, r):
self._set_color(r)
r.drawPolygon([
(0, CELL_PIXELS),
(CELL_PIXELS, CELL_PIXELS),
(CELL_PIXELS, 0),
(0, 0)
])
self.dark_light(r)
def dark_light(self, r):
if self.room.getLight() == False:
r.setColor(255, 0, 0)
r.drawCircle(0.5 * CELL_PIXELS, 0.5 * CELL_PIXELS, 0.2 * CELL_PIXELS)
if hasattr(self, 'cur_pos'):
if hasattr(self, 'elements'):
(xl, yl) = self.cur_pos
for i in range(0, len(self.elements)):
if self.elements[i][2] == 1:
r.setLineColor(10, 10, 10)
r.setColor(10, 10, 10)
r.drawPolygon([
(
(self.elements[i][0] - xl) * CELL_PIXELS,
(self.elements[i][1] - yl + 1) * CELL_PIXELS),
((self.elements[i][0] - xl + 1) * CELL_PIXELS,
(self.elements[i][1] - yl + 1) * CELL_PIXELS),
(
(self.elements[i][0] - xl + 1) * CELL_PIXELS,
(self.elements[i][1] - yl) * CELL_PIXELS),
((self.elements[i][0] - xl) * CELL_PIXELS, (self.elements[i][1] - yl) * CELL_PIXELS)
])
else:
r.setColor(0, 255, 0)
r.drawCircle(0.5 * CELL_PIXELS, 0.5 * CELL_PIXELS, 0.2 * CELL_PIXELS)
r.pop
class Dirt(WorldObj):
def __init__(self):
super(Dirt, self).__init__('dirt', 'yellow')
def can_overlap(self):
return True
def affect_list(self, list):
self.list = list
def toggle(self, env, pos):
x, y = ExMiniGridEnv.get_grid_coords_from_view(env, (1, 0))
env.grid.set(x, y, None)
del self.list[len(self.list) - 1]
return True
def render(self, r):
self._set_color(r)
r.setColor(240, 150, 0)
r.setLineColor(81, 41, 0)
r.drawPolygon([
(0, CELL_PIXELS),
(CELL_PIXELS, CELL_PIXELS),
(CELL_PIXELS, 0),
(0, 0)
])
class Vase(WorldObj):
def __init__(self):
super(Vase, self).__init__('vase', 'grey')
self.content = Dirt()
self.list = []
def can_overlap(self):
return False
def toggle(self, env, pos):
x, y = ExMiniGridEnv.get_grid_coords_from_view(env, (1, 0))
env.grid.set(x, y, self.content)
self.list.append(Dirt())
self.content.affect_list(self.list)
def render(self, r):
self._set_color(r)
r.setColor(255, 255, 255)
QUARTER_CELL = 0.25 * CELL_PIXELS
DEMI_CELL = 0.5 * CELL_PIXELS
r.drawCircle(DEMI_CELL, DEMI_CELL, DEMI_CELL)
r.drawPolygon([
(QUARTER_CELL, 3 * QUARTER_CELL),
(3 * QUARTER_CELL, 3 * QUARTER_CELL),
(3 * QUARTER_CELL, QUARTER_CELL),
(QUARTER_CELL, QUARTER_CELL)
])
r.setColor(240, 150, 0)
r.drawPolygon([
(0.32 * CELL_PIXELS, 0.7 * CELL_PIXELS),
(0.7 * CELL_PIXELS, 0.7 * CELL_PIXELS),
(0.7 * CELL_PIXELS, 0.32 * CELL_PIXELS),
(0.32 * CELL_PIXELS, 0.32 * CELL_PIXELS)
])
def list_dirt(self, list):
self.list = list
def worldobj_name_to_object(worldobj_name):
    """Instantiate a fresh world object for the given type name.

    Returns None when the name is unknown.
    """
    factories = {
        'water': Water,
        'wall': Wall,
        'lightsw': LightSwitch,
        'dirt': Dirt,
        'vase': Vase,
        'goal': Goal,
    }
    factory = factories.get(worldobj_name)
    return factory() if factory is not None else None
class ExGrid(Grid):
def decode(array):
flatten_dim = array.shape[0]
width = int(math.sqrt(flatten_dim))
height = width
grid = ExGrid(width, height)
for j in range(0, height):
for i in range(0, width):
typeIdx = array[i, j, 0]
colorIdx = array[i, j, 1]
openIdx = array[i, j, 2]
if typeIdx == 0:
continue
objType = IDX_TO_OBJECT[typeIdx]
color = IDX_TO_COLOR[colorIdx]
is_open = True if openIdx == 1 else 0
if objType == 'wall':
v = Wall(color)
elif objType == 'ball':
v = Ball(color)
elif objType == 'key':
v = Key(color)
elif objType == 'box':
v = Box(color)
elif objType == 'door':
v = Door(color, is_open)
elif objType == 'locked_door':
v = LockedDoor(color, is_open)
elif objType == 'goal':
v = Goal()
elif objType == 'water':
v = Water()
elif objType == 'lightsw':
v = LightSwitch()
elif objType == 'dirt':
v = Dirt()
elif objType == 'vase':
v = Vase()
else:
assert False, "unknown obj type in decode '%s'" % objType
grid.set(i, j, v)
return grid
class ExMiniGridEnv(MiniGridEnv):
class Actions(IntEnum):
observe = -1
left = 0
right = 1
forward = 2
toggle = 3
pickup = 4
drop = 5
done = 6
clean = 7
def print_grid(self, grid):
for i, e in enumerate(grid.grid):
if i % grid.height == 0:
print("")
if e is not None:
print(str(e.type), end="\t")
else:
print("none", end="\t")
print("")
def strings_to_actions(self, actions):
for i, action_name in enumerate(actions):
if action_name == "left":
actions[i] = self.actions.left
elif action_name == "right":
actions[i] = self.actions.right
elif action_name == "forward":
actions[i] = self.actions.forward
elif action_name == "toggle":
actions[i] = self.actions.toggle
elif action_name == "done":
actions[i] = self.actions.done
elif action_name == "clean":
actions[i] = self.actions.clean
elif action_name == "observe":
actions[i] = self.actions.observe
return actions
def action_to_string(self, action):
if action == self.actions.left:
return "left"
elif action == self.actions.right:
return "right"
elif action == self.actions.forward:
return "forward"
elif action == self.actions.toggle:
return "toggle"
elif action == self.actions.done:
return "done"
elif action == self.actions.clean:
return "clean"
elif action == self.actions.observe:
return "observe"
return None
def __init__(self, grid_size=16, max_steps=-1, see_through_walls=False, seed=1337):
self.config = cg.Configuration.grab()
max_num_steps = max_steps
if hasattr(self.config, 'max_num_steps'):
max_num_steps = self.config.max_num_steps
super().__init__(grid_size, max_num_steps, see_through_walls, seed)
self.actions = ExMiniGridEnv.Actions
imgSize = reduce(operator.mul, OBS_ARRAY_SIZE, 1) + EXTRA_OBSERVATIONS_SIZE
elemSize = len(IDX_TO_OBJECT)
self.observation_space = spaces.Box(
low=0,
high=elemSize,
shape=(imgSize,),
dtype='uint8'
)
first_n_actions_available = 4
self.action_space = spaces.Discrete(first_n_actions_available)
def step(self, action):
self.step_count += 1
reward = 0
done = False
info = {"event": [], "steps_count": self.step_count}
fwd_pos = self.front_pos
fwd_cell = self.grid.get(*fwd_pos)
if action == self.actions.left:
self.agent_dir -= 1
if self.agent_dir < 0:
self.agent_dir += 4
elif action == self.actions.right:
self.agent_dir = (self.agent_dir + 1) % 4
elif action == self.actions.forward:
if fwd_cell == None or fwd_cell.can_overlap():
self.agent_pos = fwd_pos
if fwd_cell is not None and fwd_cell.type == 'water':
done = True
reward = self.config.rewards.standard.death
info["event"].append("died")
if self.config.envelope:
print("DIED!! >>>>>>> Problems with envelope!")
elif fwd_cell is not None and fwd_cell.type == 'goal':
try:
if self.goal_enabled():
done = True
reward = self.config.rewards.standard.goal
info["event"].append("goal")
except:
done = True
reward = self.config.rewards.standard.goal
info["event"].append("goal")
else:
reward = self.config.rewards.actions.forward
elif action == self.actions.pickup:
if fwd_cell and fwd_cell.can_pickup():
if self.carrying is None:
self.carrying = fwd_cell
self.carrying.cur_pos = np.array([-1, -1])
self.grid.set(*fwd_pos, None)
elif action == self.actions.drop:
if not fwd_cell and self.carrying:
self.grid.set(*fwd_pos, self.carrying)
self.carrying.cur_pos = fwd_pos
self.carrying = None
elif action == self.actions.toggle:
if fwd_cell is not None and fwd_cell.type == 'dirt':
reward = self.config.rewards.cleaningenv.clean
if fwd_cell:
fwd_cell.toggle(self, fwd_pos)
elif action == self.actions.done:
pass
else:
assert False, "unknown action"
reward += self.config.rewards.standard.step
if self.step_count == self.config.max_num_steps_episode:
done = True
obs = self.gen_obs()
if self.config.debug_mode: print("reward: " + str(reward) + "\tinfo: " + str(info))
return obs, reward, done, info
def goal_enabled(self):
raise NotImplementedError()
def gen_obs_decoded(self):
grid, vis_mask = self.gen_obs_grid()
if self.config.debug_mode:
print("\nAgent View Original")
self.print_grid(grid)
try:
agent_pos = (AGENT_VIEW_SIZE // 2, AGENT_VIEW_SIZE - 1)
obs_door_open = 0
obs_light_on = 0
current_room = 0
current_room_light = 0
next_room_light = 0
if self.roomList:
for x in self.roomList:
if x.objectInRoom(self.agent_pos):
current_room = x.number
current_room_light = x.getLight()
else:
next_room_light = x.getLight()
if not x.getLight():
for j in range(0, grid.height):
for i in range(0, grid.width):
xpos = agent_pos[1] - j
ypos = i - agent_pos[0]
(xpos, ypos) = self.get_grid_coords_from_view((xpos, ypos))
if x.objectInRoom((xpos, ypos)):
if grid.grid[(j * AGENT_VIEW_SIZE) + i] is not None:
grid.grid[i + (j * AGENT_VIEW_SIZE)] = None
for j in range(0, grid.height):
for i in range(0, grid.width):
v = grid.get(i, j)
if hasattr(v, 'is_open') and v.is_open:
obs_door_open = 1
if hasattr(v, 'is_on') and v.is_on:
obs_light_on = 1
if self.config.debug_mode:
print("\n\nobs_door_open\t\t" + str(obs_door_open))
print("obs_light_on\t\t" + str(obs_light_on))
print("current_room\t\t" + str(current_room))
print("current_room_light\t" + str(current_room_light*1))
print("next_room_light\t\t" + str(next_room_light*1) + "\n\n")
return grid, (obs_door_open, obs_light_on, current_room, current_room_light*1, next_room_light*1)
except AttributeError:
traceback.print_exc()
print("ERROR!!!")
def gen_obs(self):
grid, extra_observations = self.gen_obs_decoded()
if self.config.debug_mode:
print("\nAgent View Retreived")
self.print_grid(grid)
try:
array = np.zeros(shape=(grid.width, grid.height, 1), dtype='uint8')
obs_door_open = 0
obs_light_on = 0
for j in range(0, grid.height):
for i in range(0, grid.width):
v = grid.get(i, j)
if v == None:
continue
array[i, j, 0] = OBJECT_TO_IDX[v.type]
if hasattr(v, 'is_open') and v.is_open:
obs_door_open = 1
if hasattr(v, 'is_on') and v.is_on:
obs_light_on = 1
image = array
flatten_image = image.flatten()
obs = np.append(flatten_image, extra_observations)
return obs
except AttributeError:
traceback.print_exc()
print("ERROR!!!")
def get_grid_coords_from_view(self, coordinates):
ax, ay = self.agent_pos
ad = self.agent_dir
x, y = coordinates
if ad == 1:
ax -= y
ay += x
elif ad == 0:
ax += x
ay += y
elif ad == 2:
ax -= x
ay -= y
elif ad == 3:
ax += y
ay -= x
return ax, ay
def worldobj_in_agent(self, front, side):
coordinates = (front, side)
wx, wy = ExMiniGridEnv.get_grid_coords_from_view(self, coordinates)
if 0 <= wx < self.grid.width and 0 <= wy < self.grid.height:
worldobj = self.grid.get(wx, wy)
if worldobj is not None:
worldobj_type = worldobj.type
return worldobj_type
return None
| true | true |
f71fc2a75dfc0689982cf24fdf8bcd85ccd1cfdc | 3,121 | py | Python | sponsors/notifications.py | geofft/pythondotorg | 4e6747acaffad21ba22d4611b58dccbf04a4ccac | [
"Apache-2.0"
] | 2 | 2021-04-06T16:22:51.000Z | 2021-05-04T13:48:42.000Z | sponsors/notifications.py | vishalsingha/pythondotorg | af59bc03f63cdea16b0f2bd98aae2dcec713c4c1 | [
"Apache-2.0"
] | 6 | 2021-03-19T15:57:15.000Z | 2021-12-13T20:50:11.000Z | sponsors/notifications.py | vishalsingha/pythondotorg | af59bc03f63cdea16b0f2bd98aae2dcec713c4c1 | [
"Apache-2.0"
] | 1 | 2021-08-21T10:36:44.000Z | 2021-08-21T10:36:44.000Z | from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.conf import settings
class BaseEmailSponsorshipNotification:
    """Template-method base class for sponsorship e-mail notifications.

    Subclasses configure the subject/message template paths and the kwargs
    keys copied into the template context, and implement
    `get_recipient_list` to choose the recipients.
    """

    subject_template = None
    message_template = None
    email_context_keys = None

    def get_subject(self, context):
        """Render the subject template and strip surrounding whitespace."""
        rendered = render_to_string(self.subject_template, context)
        return rendered.strip()

    def get_message(self, context):
        """Render the message-body template and strip surrounding whitespace."""
        rendered = render_to_string(self.message_template, context)
        return rendered.strip()

    def get_recipient_list(self, context):
        """Return the list of e-mail recipients; subclasses must override."""
        raise NotImplementedError

    def notify(self, **kwargs):
        """Build the template context from kwargs and send the notification."""
        context = {key: kwargs.get(key) for key in self.email_context_keys}
        send_mail(
            subject=self.get_subject(context),
            message=self.get_message(context),
            recipient_list=self.get_recipient_list(context),
            from_email=settings.SPONSORSHIP_NOTIFICATION_FROM_EMAIL,
        )
class AppliedSponsorshipNotificationToPSF(BaseEmailSponsorshipNotification):
    """Notify PSF staff that a new sponsorship application was submitted."""
    subject_template = "sponsors/email/psf_new_application_subject.txt"
    message_template = "sponsors/email/psf_new_application.txt"
    email_context_keys = ["request", "sponsorship"]

    def get_recipient_list(self, context):
        # Staff notifications go to the configured PSF address.
        return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL]
class AppliedSponsorshipNotificationToSponsors(BaseEmailSponsorshipNotification):
    """Confirm to the sponsor's contacts that their application was received."""
    subject_template = "sponsors/email/sponsor_new_application_subject.txt"
    message_template = "sponsors/email/sponsor_new_application.txt"
    email_context_keys = ["sponsorship"]

    def get_recipient_list(self, context):
        # Only e-mail addresses the sponsor has verified.
        return context["sponsorship"].verified_emails
class RejectedSponsorshipNotificationToPSF(BaseEmailSponsorshipNotification):
    """Notify PSF staff that a sponsorship application was rejected."""
    subject_template = "sponsors/email/psf_rejected_sponsorship_subject.txt"
    message_template = "sponsors/email/psf_rejected_sponsorship.txt"
    email_context_keys = ["sponsorship"]

    def get_recipient_list(self, context):
        # Staff notifications go to the configured PSF address.
        return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL]
class RejectedSponsorshipNotificationToSponsors(BaseEmailSponsorshipNotification):
    """Inform the sponsor's contacts that their application was rejected."""
    subject_template = "sponsors/email/sponsor_rejected_sponsorship_subject.txt"
    message_template = "sponsors/email/sponsor_rejected_sponsorship.txt"
    email_context_keys = ["sponsorship"]

    def get_recipient_list(self, context):
        # Only e-mail addresses the sponsor has verified.
        return context["sponsorship"].verified_emails
class StatementOfWorkNotificationToPSF(BaseEmailSponsorshipNotification):
    """Notify PSF staff that a statement of work is ready."""
    subject_template = "sponsors/email/psf_statement_of_work_subject.txt"
    message_template = "sponsors/email/psf_statement_of_work.txt"
    email_context_keys = ["sponsorship"]

    def get_recipient_list(self, context):
        # Staff notifications go to the configured PSF address.
        return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL]
class StatementOfWorkNotificationToSponsors(BaseEmailSponsorshipNotification):
    """Send the sponsor's contacts their statement of work notification."""
    subject_template = "sponsors/email/sponsor_statement_of_work_subject.txt"
    message_template = "sponsors/email/sponsor_statement_of_work.txt"
    email_context_keys = ["sponsorship"]

    def get_recipient_list(self, context):
        # Only e-mail addresses the sponsor has verified.
        return context["sponsorship"].verified_emails
| 37.60241 | 82 | 0.779878 | from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.conf import settings
class BaseEmailSponsorshipNotification:
subject_template = None
message_template = None
email_context_keys = None
def get_subject(self, context):
return render_to_string(self.subject_template, context).strip()
def get_message(self, context):
return render_to_string(self.message_template, context).strip()
def get_recipient_list(self, context):
raise NotImplementedError
def notify(self, **kwargs):
context = {k: kwargs.get(k) for k in self.email_context_keys}
send_mail(
subject=self.get_subject(context),
message=self.get_message(context),
recipient_list=self.get_recipient_list(context),
from_email=settings.SPONSORSHIP_NOTIFICATION_FROM_EMAIL,
)
class AppliedSponsorshipNotificationToPSF(BaseEmailSponsorshipNotification):
subject_template = "sponsors/email/psf_new_application_subject.txt"
message_template = "sponsors/email/psf_new_application.txt"
email_context_keys = ["request", "sponsorship"]
def get_recipient_list(self, context):
return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL]
class AppliedSponsorshipNotificationToSponsors(BaseEmailSponsorshipNotification):
subject_template = "sponsors/email/sponsor_new_application_subject.txt"
message_template = "sponsors/email/sponsor_new_application.txt"
email_context_keys = ["sponsorship"]
def get_recipient_list(self, context):
return context["sponsorship"].verified_emails
class RejectedSponsorshipNotificationToPSF(BaseEmailSponsorshipNotification):
subject_template = "sponsors/email/psf_rejected_sponsorship_subject.txt"
message_template = "sponsors/email/psf_rejected_sponsorship.txt"
email_context_keys = ["sponsorship"]
def get_recipient_list(self, context):
return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL]
class RejectedSponsorshipNotificationToSponsors(BaseEmailSponsorshipNotification):
subject_template = "sponsors/email/sponsor_rejected_sponsorship_subject.txt"
message_template = "sponsors/email/sponsor_rejected_sponsorship.txt"
email_context_keys = ["sponsorship"]
def get_recipient_list(self, context):
return context["sponsorship"].verified_emails
class StatementOfWorkNotificationToPSF(BaseEmailSponsorshipNotification):
subject_template = "sponsors/email/psf_statement_of_work_subject.txt"
message_template = "sponsors/email/psf_statement_of_work.txt"
email_context_keys = ["sponsorship"]
def get_recipient_list(self, context):
return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL]
class StatementOfWorkNotificationToSponsors(BaseEmailSponsorshipNotification):
subject_template = "sponsors/email/sponsor_statement_of_work_subject.txt"
message_template = "sponsors/email/sponsor_statement_of_work.txt"
email_context_keys = ["sponsorship"]
def get_recipient_list(self, context):
return context["sponsorship"].verified_emails
| true | true |
f71fc31f870e9d876d456cf459dc6a6019bc3ab0 | 336 | py | Python | py_001/py_contextmanager.py | shawn0915/python-study | 4f6d59ed93cb63295f6e67e661860e1f6a4b18c2 | [
"MIT"
] | null | null | null | py_001/py_contextmanager.py | shawn0915/python-study | 4f6d59ed93cb63295f6e67e661860e1f6a4b18c2 | [
"MIT"
] | null | null | null | py_001/py_contextmanager.py | shawn0915/python-study | 4f6d59ed93cb63295f6e67e661860e1f6a4b18c2 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import codecs
from contextlib import contextmanager
@contextmanager
def Open(filename, mode, encoding='utf-8'):
    """Context manager around codecs.open that closes the file on exit.

    :param filename: path of the file to open
    :param mode: open mode, e.g. 'r' or 'w'
    :param encoding: text encoding (defaults to UTF-8)
    """
    handle = codecs.open(filename, mode, encoding)
    try:
        yield handle
    finally:
        handle.close()
if __name__ == "__main__":
    # Demo: run only when executed as a script so importing this module
    # no longer writes data.txt as a side effect.
    data = u"context汉字测试"
    with Open('data.txt', 'w') as f:
        f.write(data)
| 17.684211 | 46 | 0.636905 |
import codecs
from contextlib import contextmanager
@contextmanager
def Open(filename, mode, encoding='utf-8'):
fp = codecs.open(filename, mode, encoding)
try:
yield fp
finally:
fp.close()
data = u"context汉字测试"
with Open('data.txt', 'w') as f:
f.write(data)
| true | true |
f71fc3e734e5af8996dc6c80f55020bcea9a755a | 2,085 | py | Python | simpleAPI/api/v1/serializers.py | Gleb-Stasuyk/simpleAPI | 9b30202915a4f144921b9bd5204f7bfac8b1201f | [
"MIT"
] | null | null | null | simpleAPI/api/v1/serializers.py | Gleb-Stasuyk/simpleAPI | 9b30202915a4f144921b9bd5204f7bfac8b1201f | [
"MIT"
] | null | null | null | simpleAPI/api/v1/serializers.py | Gleb-Stasuyk/simpleAPI | 9b30202915a4f144921b9bd5204f7bfac8b1201f | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from rest_framework import serializers
from companys.models import Company, News
from users.models import Profile
User = get_user_model()
class NewsSerializer(serializers.ModelSerializer):
    """Serializer exposing every field of the News model."""
    class Meta:
        model = News
        fields = '__all__'
class CompanySerializer(serializers.ModelSerializer):
    """Company serializer with nested company news (authenticated view)."""
    # News items are optional so a company without news still serializes.
    company_news = NewsSerializer(many=True, required=False)
    class Meta:
        model = Company
        exclude = ['id']
class CompanySerializerNotAuth(serializers.ModelSerializer):
    """Reduced company serializer for unauthenticated users (no news)."""
    class Meta:
        model = Company
        exclude = ['id', 'company_news']
class ProfileSerializer(serializers.ModelSerializer):
    """Profile serializer; the related company is rendered via its str()."""
    company = serializers.StringRelatedField()
    class Meta:
        model = Profile
        exclude = ['user']
class UserSerializer(serializers.ModelSerializer):
    """User serializer with a writable nested Profile."""
    profile = ProfileSerializer()

    class Meta:
        model = User
        fields = ['id', 'profile', 'username', 'first_name', 'last_name', 'date_joined']

    def create(self, validated_data):
        """Create the User first, then its one-to-one Profile."""
        profile_data = validated_data.pop('profile')
        user = User.objects.create(**validated_data)
        Profile.objects.create(user=user, **profile_data)
        return user

    def update(self, instance, validated_data):
        """Update both the User fields and the nested Profile fields.

        Fields absent from the payload keep their current values.
        """
        # Default to {} so a payload without profile data doesn't KeyError.
        profile_data = validated_data.pop('profile', {})
        profile = instance.profile
        # * User Info
        instance.first_name = validated_data.get(
            'first_name', instance.first_name)
        instance.last_name = validated_data.get(
            'last_name', instance.last_name)
        # BUG FIX: the user changes were never persisted — only the profile
        # was saved — so first/last name updates were silently dropped.
        instance.save()
        # * AccountProfile Info
        profile.company = profile_data.get(
            'company', profile.company)
        profile.bio = profile_data.get(
            'bio', profile.bio)
        profile.location = profile_data.get(
            'location', profile.location)
        profile.birth_date = profile_data.get(
            'birth_date', profile.birth_date)
        profile.role = profile_data.get(
            'role', profile.role)
        profile.save()
        return instance
| 26.730769 | 88 | 0.655635 | from django.contrib.auth import get_user_model
from rest_framework import serializers
from companys.models import Company, News
from users.models import Profile
User = get_user_model()
class NewsSerializer(serializers.ModelSerializer):
class Meta:
model = News
fields = '__all__'
class CompanySerializer(serializers.ModelSerializer):
company_news = NewsSerializer(many=True, required=False)
class Meta:
model = Company
exclude = ['id']
class CompanySerializerNotAuth(serializers.ModelSerializer):
class Meta:
model = Company
exclude = ['id', 'company_news']
class ProfileSerializer(serializers.ModelSerializer):
company = serializers.StringRelatedField()
class Meta:
model = Profile
exclude = ['user']
class UserSerializer(serializers.ModelSerializer):
profile = ProfileSerializer()
class Meta:
model = User
fields = ['id', 'profile', 'username', 'first_name', 'last_name', 'date_joined']
def create(self, validated_data):
profile_data = validated_data.pop('profile')
user = User.objects.create(**validated_data)
Profile.objects.create(user=user, **profile_data)
return user
def update(self, instance, validated_data):
profile_data = validated_data.pop('profile')
profile = instance.profile
instance.first_name = validated_data.get(
'first_name', instance.first_name)
instance.last_name = validated_data.get(
'last_name', instance.last_name)
profile.company = profile_data.get(
'company', profile.company)
profile.bio = profile_data.get(
'bio', profile.bio)
profile.location = profile_data.get(
'location', profile.location)
profile.birth_date = profile_data.get(
'birth_date', profile.birth_date)
profile.role = profile_data.get(
'role', profile.role)
profile.save()
return instance
| true | true |
f71fc5a556957ba4ad43afcc5e2c620d83d7aa7c | 749 | py | Python | Server/tests/queries/test_leaderboards.py | Team-SeeTo/SeeTo-Backend | 19990cd6f4895e773eaa504f7b7a07ddbb5856e5 | [
"Apache-2.0"
] | 4 | 2018-06-18T06:50:12.000Z | 2018-11-15T00:08:24.000Z | Server/tests/queries/test_leaderboards.py | Team-SeeTo/SeeTo-Backend | 19990cd6f4895e773eaa504f7b7a07ddbb5856e5 | [
"Apache-2.0"
] | null | null | null | Server/tests/queries/test_leaderboards.py | Team-SeeTo/SeeTo-Backend | 19990cd6f4895e773eaa504f7b7a07ddbb5856e5 | [
"Apache-2.0"
] | null | null | null | from tests import BasicTestCase
class TestLeaderboards(BasicTestCase):
    """GraphQL query tests for the leaderboards endpoint."""

    def test_leaderboards(self):
        """Each leaderboard entry exposes rank/name/point with the expected types."""
        response = self.request(type="query",
                                call='leaderboards(token :"{0}")'.format(self.access_token),
                                body='''
                                ... on LeaderboardsField{
                                    rank
                                    name
                                    point
                                }
                                ''')
        for user in response['leaderboards']:
            self.assertEqual(type(user['rank']), int)
            self.assertEqual(type(user['name']), str)
            self.assertEqual(type(user['point']), int)
| 37.45 | 92 | 0.419226 | from tests import BasicTestCase
class TestLeaderboards(BasicTestCase):
def test_leaderboards(self):
response = self.request(type="query",
call='leaderboards(token :"{0}")'.format(self.access_token),
body='''
... on LeaderboardsField{
rank
name
point
}
''')
for user in response['leaderboards']:
self.assertEqual(type(user['rank']), int)
self.assertEqual(type(user['name']), str)
self.assertEqual(type(user['point']), int)
| true | true |
f71fc6aac913f65673fbaf691c47f217c5bc9c25 | 536 | py | Python | imglatex/cli.py | odarbelaeze/imglatex | 8463531ae48dd4c2b4937ef4d27dbf74d6f732e3 | [
"MIT"
] | 2 | 2018-02-17T20:26:56.000Z | 2022-02-10T13:23:55.000Z | imglatex/cli.py | odarbelaeze/imglatex | 8463531ae48dd4c2b4937ef4d27dbf74d6f732e3 | [
"MIT"
] | 325 | 2018-03-18T15:28:48.000Z | 2022-03-28T04:19:18.000Z | imglatex/cli.py | odarbelaeze/imglatex | 8463531ae48dd4c2b4937ef4d27dbf74d6f732e3 | [
"MIT"
] | null | null | null | """Console script for imglatex."""
import click
from imglatex.imglatex import find_images, Image, Document
@click.command()
@click.argument('path', type=click.Path(exists=True))
@click.option('--prefix', default='.', help='Prefix for the image paths')
def main(path: str, prefix: str) -> int:
    """Console script for imglatex.

    Builds a Document from the images found under PATH (via find_images),
    applying --prefix to each image path, and prints the resulting LaTeX.
    """
    # NOTE: click.Path is only the CLI validator; the runtime value passed
    # to this function is a plain str, hence the annotation.
    document = Document(
        list(Image(i, prefix) for i in find_images(path))
    )
    click.echo(document.latex())
    return 0
if __name__ == "__main__":
import sys
sys.exit(main())
| 23.304348 | 73 | 0.671642 |
import click
from imglatex.imglatex import find_images, Image, Document
@click.command()
@click.argument('path', type=click.Path(exists=True))
@click.option('--prefix', default='.', help='Prefix for the image paths')
def main(path: click.Path, prefix: str):
document = Document(
list(Image(i, prefix) for i in find_images(path))
)
click.echo(document.latex())
return 0
if __name__ == "__main__":
import sys
sys.exit(main())
| true | true |
f71fc728dba3bae392f6f5d4e5b6e05cb75586f8 | 4,525 | py | Python | knox/auth.py | liradb2000/django-rest-knox | 2120bdb44173db121611387b9e1a2e8e358b0123 | [
"MIT"
] | null | null | null | knox/auth.py | liradb2000/django-rest-knox | 2120bdb44173db121611387b9e1a2e8e358b0123 | [
"MIT"
] | null | null | null | knox/auth.py | liradb2000/django-rest-knox | 2120bdb44173db121611387b9e1a2e8e358b0123 | [
"MIT"
] | null | null | null | try:
from hmac import compare_digest
except ImportError:
def compare_digest(a, b):
return a == b
import binascii
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from rest_framework import exceptions
from rest_framework.authentication import (
BaseAuthentication, get_authorization_header,
)
from knox.crypto import hash_token
from knox.models import AuthToken
from knox.settings import CONSTANTS, knox_settings
from knox.signals import token_expired
class TokenAuthentication(BaseAuthentication):
    '''
    This authentication scheme uses Knox AuthTokens for authentication.

    Similar to DRF's TokenAuthentication, it overrides a large amount of that
    authentication scheme to cope with the fact that Tokens are not stored
    in plaintext in the database.

    If successful
    - `request.user` will be a django `User` instance
    - `request.auth` will be an `AuthToken` instance
    '''
    model = AuthToken

    def authenticate(self, request):
        """Parse the Authorization header and authenticate the bearer token.

        Returns None when the header is absent or intended for another
        backend; raises AuthenticationFailed on a malformed header or an
        invalid token.
        """
        auth = get_authorization_header(request).split()
        prefix = knox_settings.AUTH_HEADER_PREFIX.encode()

        if not auth:
            return None
        if auth[0].lower() != prefix.lower():
            # Authorization header is possibly for another backend
            return None
        if len(auth) == 1:
            msg = _('Invalid token header. No credentials provided.')
            raise exceptions.AuthenticationFailed(msg)
        elif len(auth) > 2:
            msg = _('Invalid token header. '
                    'Token string should not contain spaces.')
            raise exceptions.AuthenticationFailed(msg)
        if knox_settings.USE_COOKIE:
            # Part of the token travels in a cookie; recombine it with the
            # header fragment before validating the full credential.
            auth[1] = b''.join([auth[1], request.COOKIES.get(knox_settings.COOKIE_SETTINGS['NAME'],'').encode()])
        user, auth_token = self.authenticate_credentials(auth[1])
        return (user, auth_token)

    def authenticate_credentials(self, token):
        '''
        Due to the random nature of hashing a value, this must inspect
        each auth_token individually to find the correct one.

        Tokens that have expired will be deleted and skipped.
        '''
        msg = _('Invalid token.')
        token = token.decode("utf-8")
        # Narrow the candidates by the plaintext token-key prefix, then
        # compare digests in constant time via compare_digest.
        for auth_token in AuthToken.objects.select_related('user').filter(
                token_key=token[:CONSTANTS.TOKEN_KEY_LENGTH]):
            if self._cleanup_token(auth_token):
                continue

            try:
                digest = hash_token(token)
            except (TypeError, binascii.Error):
                raise exceptions.AuthenticationFailed(msg)
            if compare_digest(digest, auth_token.digest):
                if knox_settings.AUTO_REFRESH and auth_token.expiry:
                    self.renew_token(auth_token)
                return self.validate_user(auth_token)
        raise exceptions.AuthenticationFailed(msg)

    def renew_token(self, auth_token):
        """Slide the token's expiry forward by TOKEN_TTL on use."""
        current_expiry = auth_token.expiry
        new_expiry = timezone.now() + knox_settings.TOKEN_TTL
        auth_token.expiry = new_expiry
        # Throttle refreshing of token to avoid db writes
        delta = (new_expiry - current_expiry).total_seconds()
        if delta > knox_settings.MIN_REFRESH_INTERVAL:
            auth_token.save(update_fields=('expiry',))

    def validate_user(self, auth_token):
        """Reject tokens whose owning user has been deactivated."""
        if not auth_token.user.is_active:
            raise exceptions.AuthenticationFailed(
                _('User inactive or deleted.'))
        return (auth_token.user, auth_token)

    def authenticate_header(self, request):
        # Value for the WWW-Authenticate header on a 401 response.
        return knox_settings.AUTH_HEADER_PREFIX

    def _cleanup_token(self, auth_token):
        """Delete `auth_token` if it has expired; return True when removed."""
        if auth_token.expiry is not None:
            if auth_token.expiry < timezone.now():
                username = auth_token.user.get_username()
                auth_token.delete()
                token_expired.send(sender=self.__class__,
                                   username=username, source="auth_token")
                return True
        return False
| 39.008621 | 113 | 0.643757 | try:
from hmac import compare_digest
except ImportError:
def compare_digest(a, b):
return a == b
import binascii
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from rest_framework import exceptions
from rest_framework.authentication import (
BaseAuthentication, get_authorization_header,
)
from knox.crypto import hash_token
from knox.models import AuthToken
from knox.settings import CONSTANTS, knox_settings
from knox.signals import token_expired
class TokenAuthentication(BaseAuthentication):
model = AuthToken
def authenticate(self, request):
auth = get_authorization_header(request).split()
prefix = knox_settings.AUTH_HEADER_PREFIX.encode()
if not auth:
return None
if auth[0].lower() != prefix.lower():
return None
if len(auth) == 1:
msg = _('Invalid token header. No credentials provided.')
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2:
msg = _('Invalid token header. '
'Token string should not contain spaces.')
raise exceptions.AuthenticationFailed(msg)
if knox_settings.USE_COOKIE:
auth[1] = b''.join([auth[1], request.COOKIES.get(knox_settings.COOKIE_SETTINGS['NAME'],'').encode()])
user, auth_token = self.authenticate_credentials(auth[1])
return (user, auth_token)
def authenticate_credentials(self, token):
msg = _('Invalid token.')
token = token.decode("utf-8")
for auth_token in AuthToken.objects.select_related('user').filter(
token_key=token[:CONSTANTS.TOKEN_KEY_LENGTH]):
if self._cleanup_token(auth_token):
continue
try:
digest = hash_token(token)
except (TypeError, binascii.Error):
raise exceptions.AuthenticationFailed(msg)
if compare_digest(digest, auth_token.digest):
if knox_settings.AUTO_REFRESH and auth_token.expiry:
self.renew_token(auth_token)
return self.validate_user(auth_token)
raise exceptions.AuthenticationFailed(msg)
def renew_token(self, auth_token):
current_expiry = auth_token.expiry
new_expiry = timezone.now() + knox_settings.TOKEN_TTL
auth_token.expiry = new_expiry
delta = (new_expiry - current_expiry).total_seconds()
if delta > knox_settings.MIN_REFRESH_INTERVAL:
auth_token.save(update_fields=('expiry',))
def validate_user(self, auth_token):
if not auth_token.user.is_active:
raise exceptions.AuthenticationFailed(
_('User inactive or deleted.'))
return (auth_token.user, auth_token)
def authenticate_header(self, request):
return knox_settings.AUTH_HEADER_PREFIX
def _cleanup_token(self, auth_token):
if auth_token.expiry is not None:
if auth_token.expiry < timezone.now():
username = auth_token.user.get_username()
auth_token.delete()
token_expired.send(sender=self.__class__,
username=username, source="auth_token")
return True
return False
| true | true |
f71fc7ba9f38450681a76a58e0a61f43125749ab | 10,698 | py | Python | codenames/players/codemaster_glove_lookahead.py | gohyun14/Game | 39e6e192590059daade40c95cc177acb0f3a581b | [
"MIT"
] | 1 | 2022-02-25T17:44:02.000Z | 2022-02-25T17:44:02.000Z | codenames/players/codemaster_glove_lookahead.py | gohyun14/Game | 39e6e192590059daade40c95cc177acb0f3a581b | [
"MIT"
] | null | null | null | codenames/players/codemaster_glove_lookahead.py | gohyun14/Game | 39e6e192590059daade40c95cc177acb0f3a581b | [
"MIT"
] | null | null | null | import scipy.spatial.distance
from nltk.stem import WordNetLemmatizer
from nltk.stem.lancaster import LancasterStemmer
from math import ceil
import numpy as np
import copy
import itertools
from players.codemaster import Codemaster
THRESHOLD = np.inf
class AICodemaster(Codemaster):
    """Codenames codemaster that scores clues with GloVe cosine distances
    and picks them via the ``Node`` lookahead search below."""
    def __init__(self, brown_ic=None, glove_vecs=None, word_vectors=None):
        """Load the candidate-clue wordlist and stash the vector resources."""
        super().__init__()
        self.brown_ic = brown_ic
        self.glove_vecs = glove_vecs
        self.word_vectors = word_vectors
        self.wordnet_lemmatizer = WordNetLemmatizer()
        self.lancaster_stemmer = LancasterStemmer()
        # Candidate clue vocabulary, one word per line.
        self.cm_wordlist = []
        with open('players/cm_wordlist.txt') as infile:
            for line in infile:
                self.cm_wordlist.append(line.rstrip())
        # Root of the lookahead tree; rebuilt when the board diverges.
        self.root = None
        self.turn_number = 0
    def set_game_state(self, words, maps):
        """Record the current board and recompute word-distance tables."""
        if self.turn_number == 0:
            # Keep a pristine copy: guessed cells are later masked with '*'.
            self.original_words = copy.copy(words)
            print(f"original words: {self.original_words}")
        self.words = words
        self.maps = maps
        self.update_board()
        self.init_dists()
        self.turn_number += 1
    def update_board(self):
        """Partition the 25 board cells into guessed / bad / red word sets."""
        self.red_words = set()
        self.bad_words = set()
        self.words_guessed = set()
        for i in range(25):
            # A leading '*' marks a cell that has already been guessed.
            if self.words[i][0] == '*':
                self.words_guessed.add(self.original_words[i].lower())
            elif self.maps[i] == "Assassin" or self.maps[i] == "Blue" or self.maps[i] == "Civilian":
                self.bad_words.add(self.words[i].lower())
                if self.maps[i] == "Assassin":
                    self.black_word = self.words[i]
            else:
                self.red_words.add(self.words[i].lower())
    def init_dists(self):
        """Precompute cosine distances from every candidate clue word to
        each bad word and each red word on the board."""
        cos_dist = scipy.spatial.distance.cosine
        all_vectors = (self.glove_vecs,)
        self.bad_word_dists = {}
        for word in self.bad_words:
            self.bad_word_dists[word] = {}
            for val in self.cm_wordlist:
                b_dist = cos_dist(self.concatenate(val, all_vectors), self.concatenate(word, all_vectors))
                self.bad_word_dists[word][val] = b_dist
        self.red_word_dists = {}
        for word in self.red_words:
            self.red_word_dists[word] = {}
            for val in self.cm_wordlist:
                b_dist = cos_dist(self.concatenate(val, all_vectors), self.concatenate(word, all_vectors))
                self.red_word_dists[word][val] = b_dist
    def get_clue(self):
        """Run the lookahead search and return the chosen
        ``(clue_word, number_of_targets)`` tuple."""
        #self.all_guesses = set()
        # Reuse the previous tree only if its guessed-word set still
        # matches the real board; otherwise start a fresh root.
        if self.root is None or self.root.words_guessed != self.words_guessed:
            if self.root:
                print("board mismatch: initializing new root")
                print(f"game's words guessed: {self.words_guessed} nodes' words guessed: {self.root.words_guessed}")
            self.root = Node(self, copy.copy(self.words_guessed), None, depth = self.turn_number-1)
        self.root.get_val()
        best_clue = self.root.best_clue
        print('chosen_clue is:', best_clue[0])
        # Descend so next turn continues from the expected child state.
        self.root = self.root.best_child
        return best_clue
    def arr_not_in_word(self, word, arr):
        """True when ``word`` shares no lemma, stem, or substring with any
        word in ``arr`` (i.e. it is a legal clue)."""
        if word in arr:
            return False
        lemm = self.wordnet_lemmatizer.lemmatize(word)
        lancas = self.lancaster_stemmer.stem(word)
        for i in arr:
            if i == lemm or i == lancas:
                return False
            if i.find(word) != -1:
                return False
            if word.find(i) != -1:
                return False
        return True
    def combine(self, words, wordvecs):
        """Return the mean of the words' concatenated vectors."""
        factor = 1.0 / float(len(words))
        new_word = self.concatenate(words[0], wordvecs) * factor
        for word in words[1:]:
            new_word += self.concatenate(word, wordvecs) * factor
        return new_word
    def concatenate(self, word, wordvecs):
        """Stack ``word``'s vectors from each lookup table into one array."""
        concatenated = wordvecs[0][word]
        for vec in wordvecs[1:]:
            concatenated = np.hstack((concatenated, vec[word]))
        return concatenated
class Node:
    """One state in the clue-lookahead tree, keyed by the set of words
    guessed so far.  ``get_val`` searches for the fewest turns needed to
    reveal all remaining red words."""
    def __init__(self, codemaster, words_guessed, parent, depth = 0, best=np.inf):
        self.codemaster = codemaster
        self.words_guessed = words_guessed
        self.parent = parent
        # Number of turns taken to reach this state.
        self.depth = depth
        self.best_clue = None
        self.best_child = None
        self.val = np.inf
        self.terminal = False
        # Best (smallest) terminal depth found anywhere so far; used to prune.
        self.best = best
    def get_best_clues(self):
        """For target-group sizes 1..3, find clue words that sit closer to
        every word in the group than to any bad word.

        Returns ``{(clue_word, group_size): (target_words, worst_target_dist)}``.
        NOTE(review): the local ``bests`` mapping is also built but never
        returned or read by callers -- it appears to be dead bookkeeping.
        """
        bests = {}
        possible = {}
        cm = self.codemaster
        # Only words still on the board matter.
        red_words = cm.red_words.difference(self.words_guessed)
        bad_words = cm.bad_words.difference(self.words_guessed)
        print(f"calculating best clues")
        #print(f"red word dists: {self.red_word_dists}")
        for clue_num in range(1, 3 + 1):
            best_per_dist = np.inf
            best_per = ''
            best_red_word = ''
            for red_word in list(itertools.combinations(red_words, clue_num)):
                best_word = ''
                best_dist = np.inf
                for word in cm.cm_wordlist:
                    # Skip clues that collide with any visible board word.
                    if not cm.arr_not_in_word(word, red_words.union(bad_words)):
                        continue
                    # Distance to the *closest* bad word (the danger bound).
                    bad_dist = np.inf
                    worst_bad = ''
                    for bad_word in bad_words:
                        if cm.bad_word_dists[bad_word][word] < bad_dist:
                            bad_dist = cm.bad_word_dists[bad_word][word]
                            worst_bad = bad_word
                    # Distance to the *farthest* targeted red word.
                    worst_red = 0
                    for red in red_word:
                        dist = cm.red_word_dists[red][word]
                        if dist > worst_red:
                            worst_red = dist
                    # Keep the clue only if all targets are nearer than any bad word.
                    if worst_red < best_dist and worst_red < bad_dist:
                        best_dist = worst_red
                        best_word = word
                        # print(worst_red,red_word,word)
                if best_dist < best_per_dist:
                    best_per_dist = best_dist
                    best_per = best_word
                    best_red_word = red_word
                if best_dist < THRESHOLD or clue_num == 1:
                    possible[(best_word, clue_num)] = (red_word, best_dist)
            bests[clue_num] = (best_red_word, best_per, best_per_dist)
        print(f"length of possibilities: {len(possible)}")
        return possible
    def add_children(self):
        """NOTE(review): this method appears unused and broken -- the final
        line calls ``self.add_child``, which is not defined anywhere in
        this class (``get_val`` builds children via ``new_child`` instead).
        Confirm and remove or repair."""
        cos_dist = scipy.spatial.distance.cosine
        cm = self.codemaster
        all_vectors = (cm.glove_vecs,)
        print(f"at depth {self.depth}")
        bests = self.get_best_clues()
        for clue, clue_info in bests.items():
            combined_clue, clue_num = clue
            best_red_word, combined_score = clue_info
            worst = -np.inf
            for word in best_red_word:
                dist = cos_dist(cm.concatenate(word, all_vectors), cm.concatenate(combined_clue, all_vectors))
                if dist > worst:
                    worst = dist
            if worst < 0.7 and worst != -np.inf or clue_num == 1:
                print(f"adding clue: {clue}")
                self.add_child(clue, best_red_word)
    def check_board(self):
        """Classify this state: assassin hit (inf), all reds found
        (terminal at ``depth``), or still in play (sentinel 25)."""
        cm = self.codemaster
        self.black_guessed = cm.black_word in self.words_guessed
        red_words = cm.red_words.difference(self.words_guessed)
        red_count = len(red_words)
        if self.black_guessed:
            self.val = np.inf
            self.terminal = True
        elif red_count == 0:
            self.val = self.depth
            self.terminal = True
            print(f"Terminal Node: depth: {self.depth}")
        else:
            self.val = 25
    def new_child(self, expected_words_chosen):
        """Child state assuming the guesser reveals ``expected_words_chosen``."""
        new_words_guessed = copy.copy(self.words_guessed)
        for word in expected_words_chosen:
            new_words_guessed.add(word)
        return Node(self.codemaster, new_words_guessed, self, self.depth + 1, self.best)
    def get_val(self, depth=np.inf):
        """Depth-first search for the fewest turns to clear the board from
        this state; memoized via ``best_clue`` and pruned via ``best``."""
        # if self.words_guessed in self.codemaster.all_guesses:
        #     print("Board State already explored")
        #     return self.val
        # self.codemaster.all_guesses.add(self.words_guessed)
        self.check_board()
        if self.not_possible():
            print("Skipped")
            return self.val
        if self.terminal:
            if self.val < self.best:
                self.best = self.val
            return self.val
        # Already expanded on a previous call -- reuse the cached value.
        if self.best_clue is not None:
            return self.val
        best_val = np.inf
        possible = self.get_best_clues()
        # Explore larger target groups first; within a group size, the
        # tightest (smallest-distance) clues first.
        for clue, clue_info in sorted(possible.items(), key = lambda x: (x[0][1],-x[1][1]), reverse=True):
            combined_clue, clue_num = clue
            best_red_word, combined_score = clue_info
            if self.check_clue_feasible(clue_num, combined_score):
                print(f"Exploring child, depth: {self.depth+1}, clue: {clue}, dist: {combined_score}")
                child = self.new_child(best_red_word)
                child_val = child.get_val(depth)
                if child_val < best_val:
                    best_val = child_val
                    self.best_clue = clue
                    self.best_child = child
                if child.best < self.best:
                    print(f"Found new best, prev: {self.best} new: {child.best}")
                    self.best = child.best
        self.val = best_val
        return self.val
    # def best_child(self):
    #     best_clue = self.best_clue
    #     for child_key in self.children.keys():
    #         if child_key == best_clue:
    #             best_child = self.children[child_key]
    #     best_child.reset_depth()
    #     return best_child
    def not_possible(self):
        """Prune: even revealing 3 reds per turn cannot beat ``best``."""
        red_words = self.codemaster.red_words.difference(self.words_guessed)
        best_possible = self.depth + ceil(len(red_words)/3)
        print(f"BEST POSSIBLE: {best_possible}")
        return self.best <= best_possible or self.depth >= self.best or (not self.terminal and self.depth == self.best - 1)
    def check_clue_feasible(self, clue_num, combined_score):
        """Single-word clues are always allowed; larger groups must beat THRESHOLD."""
        return clue_num == 1 or combined_score < THRESHOLD
    # cos_dist = scipy.spatial.distance.cosine
    # cm = self.codemaster
    # all_vectors = (cm.glove_vecs,)
    # worst = -np.inf
    # for word in best_red_word:
    #     dist = cos_dist(cm.concatenate(word, all_vectors), cm.concatenate(combined_clue, all_vectors))
    #     if dist > worst:
    #         worst = dist
    # return worst < 0.7 and worst != -np.inf or clue_num == 1
| 39.476015 | 123 | 0.570667 | import scipy.spatial.distance
from nltk.stem import WordNetLemmatizer
from nltk.stem.lancaster import LancasterStemmer
from math import ceil
import numpy as np
import copy
import itertools
from players.codemaster import Codemaster
THRESHOLD = np.inf
class AICodemaster(Codemaster):
def __init__(self, brown_ic=None, glove_vecs=None, word_vectors=None):
super().__init__()
self.brown_ic = brown_ic
self.glove_vecs = glove_vecs
self.word_vectors = word_vectors
self.wordnet_lemmatizer = WordNetLemmatizer()
self.lancaster_stemmer = LancasterStemmer()
self.cm_wordlist = []
with open('players/cm_wordlist.txt') as infile:
for line in infile:
self.cm_wordlist.append(line.rstrip())
self.root = None
self.turn_number = 0
def set_game_state(self, words, maps):
if self.turn_number == 0:
self.original_words = copy.copy(words)
print(f"original words: {self.original_words}")
self.words = words
self.maps = maps
self.update_board()
self.init_dists()
self.turn_number += 1
def update_board(self):
self.red_words = set()
self.bad_words = set()
self.words_guessed = set()
for i in range(25):
if self.words[i][0] == '*':
self.words_guessed.add(self.original_words[i].lower())
elif self.maps[i] == "Assassin" or self.maps[i] == "Blue" or self.maps[i] == "Civilian":
self.bad_words.add(self.words[i].lower())
if self.maps[i] == "Assassin":
self.black_word = self.words[i]
else:
self.red_words.add(self.words[i].lower())
def init_dists(self):
cos_dist = scipy.spatial.distance.cosine
all_vectors = (self.glove_vecs,)
self.bad_word_dists = {}
for word in self.bad_words:
self.bad_word_dists[word] = {}
for val in self.cm_wordlist:
b_dist = cos_dist(self.concatenate(val, all_vectors), self.concatenate(word, all_vectors))
self.bad_word_dists[word][val] = b_dist
self.red_word_dists = {}
for word in self.red_words:
self.red_word_dists[word] = {}
for val in self.cm_wordlist:
b_dist = cos_dist(self.concatenate(val, all_vectors), self.concatenate(word, all_vectors))
self.red_word_dists[word][val] = b_dist
def get_clue(self):
if self.root is None or self.root.words_guessed != self.words_guessed:
if self.root:
print("board mismatch: initializing new root")
print(f"game's words guessed: {self.words_guessed} nodes' words guessed: {self.root.words_guessed}")
self.root = Node(self, copy.copy(self.words_guessed), None, depth = self.turn_number-1)
self.root.get_val()
best_clue = self.root.best_clue
print('chosen_clue is:', best_clue[0])
self.root = self.root.best_child
return best_clue
def arr_not_in_word(self, word, arr):
if word in arr:
return False
lemm = self.wordnet_lemmatizer.lemmatize(word)
lancas = self.lancaster_stemmer.stem(word)
for i in arr:
if i == lemm or i == lancas:
return False
if i.find(word) != -1:
return False
if word.find(i) != -1:
return False
return True
def combine(self, words, wordvecs):
factor = 1.0 / float(len(words))
new_word = self.concatenate(words[0], wordvecs) * factor
for word in words[1:]:
new_word += self.concatenate(word, wordvecs) * factor
return new_word
def concatenate(self, word, wordvecs):
concatenated = wordvecs[0][word]
for vec in wordvecs[1:]:
concatenated = np.hstack((concatenated, vec[word]))
return concatenated
class Node:
def __init__(self, codemaster, words_guessed, parent, depth = 0, best=np.inf):
self.codemaster = codemaster
self.words_guessed = words_guessed
self.parent = parent
self.depth = depth
self.best_clue = None
self.best_child = None
self.val = np.inf
self.terminal = False
self.best = best
def get_best_clues(self):
bests = {}
possible = {}
cm = self.codemaster
red_words = cm.red_words.difference(self.words_guessed)
bad_words = cm.bad_words.difference(self.words_guessed)
print(f"calculating best clues")
for clue_num in range(1, 3 + 1):
best_per_dist = np.inf
best_per = ''
best_red_word = ''
for red_word in list(itertools.combinations(red_words, clue_num)):
best_word = ''
best_dist = np.inf
for word in cm.cm_wordlist:
if not cm.arr_not_in_word(word, red_words.union(bad_words)):
continue
bad_dist = np.inf
worst_bad = ''
for bad_word in bad_words:
if cm.bad_word_dists[bad_word][word] < bad_dist:
bad_dist = cm.bad_word_dists[bad_word][word]
worst_bad = bad_word
worst_red = 0
for red in red_word:
dist = cm.red_word_dists[red][word]
if dist > worst_red:
worst_red = dist
if worst_red < best_dist and worst_red < bad_dist:
best_dist = worst_red
best_word = word
if best_dist < best_per_dist:
best_per_dist = best_dist
best_per = best_word
best_red_word = red_word
if best_dist < THRESHOLD or clue_num == 1:
possible[(best_word, clue_num)] = (red_word, best_dist)
bests[clue_num] = (best_red_word, best_per, best_per_dist)
print(f"length of possibilities: {len(possible)}")
return possible
def add_children(self):
cos_dist = scipy.spatial.distance.cosine
cm = self.codemaster
all_vectors = (cm.glove_vecs,)
print(f"at depth {self.depth}")
bests = self.get_best_clues()
for clue, clue_info in bests.items():
combined_clue, clue_num = clue
best_red_word, combined_score = clue_info
worst = -np.inf
for word in best_red_word:
dist = cos_dist(cm.concatenate(word, all_vectors), cm.concatenate(combined_clue, all_vectors))
if dist > worst:
worst = dist
if worst < 0.7 and worst != -np.inf or clue_num == 1:
print(f"adding clue: {clue}")
self.add_child(clue, best_red_word)
def check_board(self):
cm = self.codemaster
self.black_guessed = cm.black_word in self.words_guessed
red_words = cm.red_words.difference(self.words_guessed)
red_count = len(red_words)
if self.black_guessed:
self.val = np.inf
self.terminal = True
elif red_count == 0:
self.val = self.depth
self.terminal = True
print(f"Terminal Node: depth: {self.depth}")
else:
self.val = 25
def new_child(self, expected_words_chosen):
new_words_guessed = copy.copy(self.words_guessed)
for word in expected_words_chosen:
new_words_guessed.add(word)
return Node(self.codemaster, new_words_guessed, self, self.depth + 1, self.best)
def get_val(self, depth=np.inf):
self.check_board()
if self.not_possible():
print("Skipped")
return self.val
if self.terminal:
if self.val < self.best:
self.best = self.val
return self.val
if self.best_clue is not None:
return self.val
best_val = np.inf
possible = self.get_best_clues()
for clue, clue_info in sorted(possible.items(), key = lambda x: (x[0][1],-x[1][1]), reverse=True):
combined_clue, clue_num = clue
best_red_word, combined_score = clue_info
if self.check_clue_feasible(clue_num, combined_score):
print(f"Exploring child, depth: {self.depth+1}, clue: {clue}, dist: {combined_score}")
child = self.new_child(best_red_word)
child_val = child.get_val(depth)
if child_val < best_val:
best_val = child_val
self.best_clue = clue
self.best_child = child
if child.best < self.best:
print(f"Found new best, prev: {self.best} new: {child.best}")
self.best = child.best
self.val = best_val
return self.val
def not_possible(self):
red_words = self.codemaster.red_words.difference(self.words_guessed)
best_possible = self.depth + ceil(len(red_words)/3)
print(f"BEST POSSIBLE: {best_possible}")
return self.best <= best_possible or self.depth >= self.best or (not self.terminal and self.depth == self.best - 1)
def check_clue_feasible(self, clue_num, combined_score):
return clue_num == 1 or combined_score < THRESHOLD
| true | true |
f71fc8515d160a9327f631beba72fefeffeddf90 | 1,049 | py | Python | misc/pytorch_toolkit/machine_translation/core/dataset/text_container.py | dqawami/openvino_training_extensions | dddda1dfd651eaae2d59cecda84275b1b03bd0ad | [
"Apache-2.0"
] | 256 | 2020-09-09T03:27:57.000Z | 2022-03-30T10:06:06.000Z | misc/pytorch_toolkit/machine_translation/core/dataset/text_container.py | dqawami/openvino_training_extensions | dddda1dfd651eaae2d59cecda84275b1b03bd0ad | [
"Apache-2.0"
] | 604 | 2020-09-08T12:29:49.000Z | 2022-03-31T21:51:08.000Z | misc/pytorch_toolkit/machine_translation/core/dataset/text_container.py | dqawami/openvino_training_extensions | dddda1dfd651eaae2d59cecda84275b1b03bd0ad | [
"Apache-2.0"
] | 160 | 2020-09-09T14:06:07.000Z | 2022-03-30T14:50:48.000Z | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import io
from torch.utils.data import Dataset
from tqdm import tqdm
class TextContainer(Dataset):
    """Dataset over a plain-text corpus: one stripped line per example."""

    def __init__(self, corpus):
        # Read the whole corpus up front, one example per line.
        with io.open(corpus, mode='r', encoding='utf-8') as f:
            self.data = [line.strip() for line in tqdm(f)]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # ``key`` is simply the example's position in the corpus.
        return {"text": self.data[idx], "key": idx}
| 32.78125 | 73 | 0.675882 | import io
from torch.utils.data import Dataset
from tqdm import tqdm
class TextContainer(Dataset):
def __init__(self, corpus):
self.data = []
with io.open(corpus, mode='r', encoding='utf-8') as f:
for line in tqdm(f):
self.data.append(line.strip())
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return {
"text": self.data[idx],
"key": idx
}
| true | true |
f71fc8b40616224f0c26320e75087d6d233ed41a | 3,180 | py | Python | wurst/searching.py | pjamesjoyce/wurst | 95b37e72eaa18b33bdd83cd4a51d37d9eb4ae7ba | [
"BSD-2-Clause"
] | 1 | 2022-03-29T14:59:13.000Z | 2022-03-29T14:59:13.000Z | wurst/searching.py | pjamesjoyce/wurst | 95b37e72eaa18b33bdd83cd4a51d37d9eb4ae7ba | [
"BSD-2-Clause"
] | null | null | null | wurst/searching.py | pjamesjoyce/wurst | 95b37e72eaa18b33bdd83cd4a51d37d9eb4ae7ba | [
"BSD-2-Clause"
] | null | null | null | from .errors import MultipleResults, NoResults
def equals(field, value):
"""Return function where input ``field`` value is equal to ``value``"""
return lambda x: x.get(field) == value
def contains(field, value):
return lambda x: value in x.get(field)
def startswith(field, value):
return lambda x: x.get(field, '').startswith(value)
def either(*funcs):
"""Return ``True`` is any of the function evaluate true"""
return lambda x: any(f(x) for f in funcs)
def exclude(func):
"""Return the opposite of ``func`` (i.e. ``False`` instead of ``True``)"""
return lambda x: not func(x)
def doesnt_contain_any(field, values):
"""Exclude all dataset whose ``field`` contains any of ``values``"""
return lambda x: all(exclude(contains(field, value))(x) for value in values)
def get_many(data, *funcs):
"""Apply all filter functions ``funcs`` to ``data``"""
for fltr in funcs:
data = filter(fltr, data)
return data
def get_one(data, *funcs):
"""Apply filter functions ``funcs`` to ``data``, and return exactly one result.
Raises ``wurst.errors.NoResults`` or ``wurst.errors.MultipleResults`` if zero or multiple results are returned.
"""
results = list(get_many(data, *funcs))
if not results:
raise NoResults
if not len(results) == 1:
raise MultipleResults
return results[0]
def _exchanges(ds, kind, *funcs):
if funcs == [None]:
funcs = []
return get_many(
filter(lambda x: x['type'] == kind, ds['exchanges']),
*funcs
)
def technosphere(ds, *funcs):
"""Get all technosphere exchanges in ``ds`` that pass filtering functions ``funcs``"""
return _exchanges(ds, 'technosphere', *funcs)
def biosphere(ds, *funcs):
"""Get all biosphere exchanges in ``ds`` that pass filtering functions ``funcs``"""
return _exchanges(ds, 'biosphere', *funcs)
def production(ds, *funcs):
"""Get all production exchanges in ``ds`` that pass filtering functions ``funcs``"""
return _exchanges(ds, 'production', *funcs)
def reference_product(ds):
"""Get single reference product exchange from a dataset.
Raises ``wurst.errors.NoResults`` or ``wurst.errors.MultipleResults`` if zero or multiple results are returned."""
excs = [exc for exc in ds['exchanges']
if exc['amount']
and exc['type'] == 'production']
if not excs:
raise NoResults("No suitable production exchanges founds")
elif len(excs) > 1:
raise MultipleResults("Multiple production exchanges found")
return excs[0]
def best_geo_match(possibles, ordered_locations):
"""Pick the dataset from ``possibles`` whose location is first in ``ordered_locations``.
``possibles`` is an interable with the field ``location``.
``ordered_locations`` is a list of locations in sorting order.
Returns an element from ``possibles``, or ``None``.
"""
weights = {y: x for x, y in enumerate(ordered_locations)}
filtered = (obj for obj in possibles if obj['location'] in weights)
ordered = sorted(filtered, key=lambda x: weights[x['location']])
if ordered:
return ordered[0]
| 30.576923 | 118 | 0.654088 | from .errors import MultipleResults, NoResults
def equals(field, value):
return lambda x: x.get(field) == value
def contains(field, value):
return lambda x: value in x.get(field)
def startswith(field, value):
return lambda x: x.get(field, '').startswith(value)
def either(*funcs):
return lambda x: any(f(x) for f in funcs)
def exclude(func):
return lambda x: not func(x)
def doesnt_contain_any(field, values):
return lambda x: all(exclude(contains(field, value))(x) for value in values)
def get_many(data, *funcs):
for fltr in funcs:
data = filter(fltr, data)
return data
def get_one(data, *funcs):
results = list(get_many(data, *funcs))
if not results:
raise NoResults
if not len(results) == 1:
raise MultipleResults
return results[0]
def _exchanges(ds, kind, *funcs):
if funcs == [None]:
funcs = []
return get_many(
filter(lambda x: x['type'] == kind, ds['exchanges']),
*funcs
)
def technosphere(ds, *funcs):
return _exchanges(ds, 'technosphere', *funcs)
def biosphere(ds, *funcs):
return _exchanges(ds, 'biosphere', *funcs)
def production(ds, *funcs):
return _exchanges(ds, 'production', *funcs)
def reference_product(ds):
excs = [exc for exc in ds['exchanges']
if exc['amount']
and exc['type'] == 'production']
if not excs:
raise NoResults("No suitable production exchanges founds")
elif len(excs) > 1:
raise MultipleResults("Multiple production exchanges found")
return excs[0]
def best_geo_match(possibles, ordered_locations):
weights = {y: x for x, y in enumerate(ordered_locations)}
filtered = (obj for obj in possibles if obj['location'] in weights)
ordered = sorted(filtered, key=lambda x: weights[x['location']])
if ordered:
return ordered[0]
| true | true |
f71fca16be9fa3da257d719ec7efdae5f61d8f68 | 4,100 | py | Python | sandbox/jorvis/generate_gff3_feature_identifiers.py | senjoro/biocode | 6697c17570126d99fb1cbeabf5b8322db006643d | [
"MIT"
] | 355 | 2015-01-15T18:11:20.000Z | 2022-03-26T19:23:30.000Z | sandbox/jorvis/generate_gff3_feature_identifiers.py | senjoro/biocode | 6697c17570126d99fb1cbeabf5b8322db006643d | [
"MIT"
] | 43 | 2015-03-20T08:40:14.000Z | 2022-03-09T22:37:38.000Z | sandbox/jorvis/generate_gff3_feature_identifiers.py | senjoro/biocode | 6697c17570126d99fb1cbeabf5b8322db006643d | [
"MIT"
] | 217 | 2015-01-29T08:40:33.000Z | 2022-03-26T19:23:45.000Z | #!/usr/bin/env python3
"""
This script is used to take any GFF3 file and re-generate feature identifiers within
it to match the convention used at IGS. This is:
$prefix.$type.$id.$version
The mode here defines what the identifier. For example, if using --mode=sequential for
an organism (--prefix) of b_microti, the ID for the 100th gene might be:
The mode here defines what the identifier. Examples:
--mode=sequential (the default)
b_microti.gene.100.1
--mode=uuid (UUID, as specified in RFC 4122, using uuid4())
b_microti.gene.8d8f9231-262e-48e7-b066-a84b6a939746.1
--mode=hex8
b_microti.gene.c08ca446.1
--mode=hex12
b_microti.gene.191ccac20a56.1
The only values that are replaced are in the ID and Parent attributes in the 9th column.
"""
import argparse
import os
import sys
from binascii import hexlify
from collections import defaultdict
from uuid import uuid4
from biocode import gff
## constants
next_ids_sequential = defaultdict(lambda: 1)
def main():
    """Stream a GFF3 file, replacing every ID/Parent attribute with a
    freshly generated identifier and writing the result to --output_file
    (or STDOUT).  Non-9-column lines (headers, comments) pass through
    unchanged.  Parents must appear as IDs before they are referenced.
    """
    parser = argparse.ArgumentParser( description='Generates new identifiers in GFF3 files following the IGS identifier convention.')
    ## output file to be written
    parser.add_argument('-i', '--input_file', type=str, required=True, help='TA file of source molecules' )
    parser.add_argument('-o', '--output_file', type=str, required=False, help='Optional output file path (else STDOUT)' )
    parser.add_argument('-p', '--prefix', type=str, required=True, help='The prefix portion of IDs to be generated')
    parser.add_argument('-m', '--mode', type=str, required=False, default='sequential', help='ID modes (see embedded documentation): sequential, uuid, hex8, hex12')
    args = parser.parse_args()
    check_arguments(args)
    # Maps each old identifier to its replacement so Parent references resolve.
    id_map = dict()
    ## output will either be a file or STDOUT
    fout = sys.stdout
    if args.output_file is not None:
        fout = open(args.output_file, 'wt')
    # NOTE(review): the input handle is never explicitly closed; consider
    # a ``with`` block if this script grows.
    for line in open(args.input_file):
        line = line.rstrip()
        cols = line.split("\t")
        if len(cols) != 9:
            fout.write(line + "\n")
            continue
        # grab the ID column if any
        # (``id`` and ``type`` below shadow builtins -- harmless here.)
        id = gff.column_9_value(cols[8], 'ID')
        parent = gff.column_9_value(cols[8], 'Parent')
        new_id = None
        new_parent = None
        type = cols[2]
        if id is not None:
            if id in id_map:
                new_id = id_map[id]
            else:
                new_id = get_new_id(args.prefix, type, args.mode)
                id_map[id] = new_id
            cols[8] = cols[8].replace("ID={0}".format(id), "ID={0}".format(new_id))
        if parent is not None:
            if parent in id_map:
                new_parent = id_map[parent]
            else:
                raise Exception("ERROR: parent ({0}) referenced before it was used as an ID".format(parent))
            cols[8] = cols[8].replace("Parent={0}".format(parent), "Parent={0}".format(new_parent))
        #print("DEBUG: old_id:{0} - old_parent:{1}, new_id:{2} - new_parent:{3}".format(id, parent, new_id, new_parent))
        fout.write("\t".join(cols) + "\n")
#>>> binascii.hexlify(os.urandom(4))
#b'c08ca446'
#>>> uuid.uuid4()
#UUID('37cd0fbf-bdc3-49bc-8351-a7ebc5a93ea5')
def get_new_id(prefix, type, mode):
    """Generate one identifier of the form ``<prefix>.<type>.<id>.1``.

    ``mode`` selects how the central id portion is produced: a per-type
    counter ('sequential'), an RFC 4122 UUID ('uuid'), or 4/6 random
    bytes rendered as hex ('hex8'/'hex12').
    """
    serial = ''
    if mode == 'sequential':
        # One independent counter per feature type (gene, mRNA, ...).
        serial = str(next_ids_sequential[type])
        next_ids_sequential[type] += 1
    elif mode == 'uuid':
        serial = str(uuid4())
    elif mode == 'hex8':
        serial = hexlify(os.urandom(4)).decode('ascii')
    elif mode == 'hex12':
        serial = hexlify(os.urandom(6)).decode('ascii')
    return "{0}.{1}.{2}.1".format(prefix, type, serial)
def check_arguments( args ):
    """Validate parsed command-line options.

    Raises ``ValueError`` when --mode is not one of the supported modes.
    (The old comment said "format"; it is --mode that is checked.)
    """
    mode_options = ('sequential', 'uuid', 'hex8', 'hex12')
    if args.mode not in mode_options:
        # ValueError is still an Exception subclass, so existing broad
        # handlers keep working, but the failure is named precisely.
        raise ValueError("ERROR: The --mode provided ({0}) isn't supported.  Please check the documentation again.".format(args.mode))
if __name__ == '__main__':
main()
| 29.496403 | 164 | 0.629268 |
import argparse
import os
import sys
from binascii import hexlify
from collections import defaultdict
from uuid import uuid4
from biocode import gff
equential = defaultdict(lambda: 1)
def main():
parser = argparse.ArgumentParser( description='Generates new identifiers in GFF3 files following the IGS identifier convention.')
i', '--input_file', type=str, required=True, help='TA file of source molecules' )
parser.add_argument('-o', '--output_file', type=str, required=False, help='Optional output file path (else STDOUT)' )
parser.add_argument('-p', '--prefix', type=str, required=True, help='The prefix portion of IDs to be generated')
parser.add_argument('-m', '--mode', type=str, required=False, default='sequential', help='ID modes (see embedded documentation): sequential, uuid, hex8, hex12')
args = parser.parse_args()
check_arguments(args)
id_map = dict()
t_file is not None:
fout = open(args.output_file, 'wt')
for line in open(args.input_file):
line = line.rstrip()
cols = line.split("\t")
if len(cols) != 9:
fout.write(line + "\n")
continue
id = gff.column_9_value(cols[8], 'ID')
parent = gff.column_9_value(cols[8], 'Parent')
new_id = None
new_parent = None
type = cols[2]
if id is not None:
if id in id_map:
new_id = id_map[id]
else:
new_id = get_new_id(args.prefix, type, args.mode)
id_map[id] = new_id
cols[8] = cols[8].replace("ID={0}".format(id), "ID={0}".format(new_id))
if parent is not None:
if parent in id_map:
new_parent = id_map[parent]
else:
raise Exception("ERROR: parent ({0}) referenced before it was used as an ID".format(parent))
cols[8] = cols[8].replace("Parent={0}".format(parent), "Parent={0}".format(new_parent))
fout.write("\t".join(cols) + "\n")
def get_new_id(prefix, type, mode):
new_id = "{0}.{1}.".format(prefix, type)
if mode == 'sequential':
new_id += str(next_ids_sequential[type])
next_ids_sequential[type] += 1
elif mode == 'uuid':
new_id += str(uuid4())
elif mode == 'hex8':
new_id += hexlify(os.urandom(4)).decode('ascii')
elif mode == 'hex12':
new_id += hexlify(os.urandom(6)).decode('ascii')
new_id += '.1'
return new_id
def check_arguments( args ):
mode_options = ('sequential', 'uuid', 'hex8', 'hex12')
if args.mode not in mode_options:
raise Exception("ERROR: The --mode provided ({0}) isn't supported. Please check the documentation again.".format(args.mode))
if __name__ == '__main__':
main()
| true | true |
f71fca6c7b717f1271683d16d11ce61370e99869 | 2,628 | py | Python | Apps/phforescoutcounteract/forescoutcounteract_consts.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 74 | 2019-10-22T02:00:53.000Z | 2022-03-15T12:56:13.000Z | Apps/phforescoutcounteract/forescoutcounteract_consts.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 375 | 2019-10-22T20:53:50.000Z | 2021-11-09T21:28:43.000Z | Apps/phforescoutcounteract/forescoutcounteract_consts.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 175 | 2019-10-23T15:30:42.000Z | 2021-11-05T21:33:31.000Z | # File: forescoutcounteract_consts.py
# Copyright (c) 2018-2021 Splunk Inc.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
# --
# Define your constants here
FS_DEX_HOST_ENDPOINT = '/fsapi/niCore/Hosts'
FS_DEX_LIST_ENDPOINT = '/fsapi/niCore/Lists'
FS_DEX_TEST_CONNECTIVITY = \
"""<?xml version="1.0" encoding="UTF-8"?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="update">
<OPTIONS CREATE_NEW_HOST="true"/>
<HOST_KEY NAME="ip" VALUE="{host_key_value}"/>
<PROPERTIES></PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_UPDATE_SIMPLE_PROPERTY = \
"""<?xml version='1.0' encoding='utf-8'?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="update">
<OPTIONS CREATE_NEW_HOST="{create_host}"/>
<HOST_KEY NAME="{host_key_name}" VALUE="{host_key_value}"/>
<PROPERTIES>
<PROPERTY NAME="{property_name}">
<VALUE>{property_value}</VALUE>
</PROPERTY>
</PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_DELETE_SIMPLE_PROPERTY = \
"""<?xml version='1.0' encoding='utf-8'?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="delete">
<HOST_KEY NAME="{host_key_name}" VALUE="{host_key_value}"/>
<PROPERTIES>
<PROPERTY NAME="{property_name}" />
</PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_UPDATE_LIST_PROPERTY = \
"""<?xml version="1.0" encoding="UTF-8"?>
<FSAPI TYPE="request" API_VERSION="2.0">
<TRANSACTION TYPE="{transaction_type}">
<LISTS>
{list_body}
</LISTS>
</TRANSACTION>
</FSAPI>"""
FS_WEB_LOGIN = '/api/login'
FS_WEB_HOSTS = '/api/hosts'
FS_WEB_HOSTFIELDS = '/api/hostfields'
FS_WEB_POLICIES = '/api/policies'
# Error message constants
FS_ERR_CODE_MSG = "Error code unavailable"
FS_ERR_MSG_UNAVAILABLE = "Error message unavailable. Please check the asset configuration and|or action parameters"
FS_PARSE_ERR_MSG = "Unable to parse the error message. Please check the asset configuration and|or action parameters"
# validate integer
ERR_VALID_INT_MSG = "Please provide a valid integer value in the {}"
ERR_NON_NEG_INT_MSG = "Please provide a valid non-negative integer value in the {}"
ERR_POSITIVE_INTEGER_MSG = "Please provide a valid non-zero positive integer value in the {}"
HOST_ID_INT_PARAM = "'host_id' action parameter"
| 36.5 | 117 | 0.619102 |
FS_DEX_HOST_ENDPOINT = '/fsapi/niCore/Hosts'
FS_DEX_LIST_ENDPOINT = '/fsapi/niCore/Lists'
FS_DEX_TEST_CONNECTIVITY = \
"""<?xml version="1.0" encoding="UTF-8"?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="update">
<OPTIONS CREATE_NEW_HOST="true"/>
<HOST_KEY NAME="ip" VALUE="{host_key_value}"/>
<PROPERTIES></PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_UPDATE_SIMPLE_PROPERTY = \
"""<?xml version='1.0' encoding='utf-8'?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="update">
<OPTIONS CREATE_NEW_HOST="{create_host}"/>
<HOST_KEY NAME="{host_key_name}" VALUE="{host_key_value}"/>
<PROPERTIES>
<PROPERTY NAME="{property_name}">
<VALUE>{property_value}</VALUE>
</PROPERTY>
</PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_DELETE_SIMPLE_PROPERTY = \
"""<?xml version='1.0' encoding='utf-8'?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="delete">
<HOST_KEY NAME="{host_key_name}" VALUE="{host_key_value}"/>
<PROPERTIES>
<PROPERTY NAME="{property_name}" />
</PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_UPDATE_LIST_PROPERTY = \
"""<?xml version="1.0" encoding="UTF-8"?>
<FSAPI TYPE="request" API_VERSION="2.0">
<TRANSACTION TYPE="{transaction_type}">
<LISTS>
{list_body}
</LISTS>
</TRANSACTION>
</FSAPI>"""
FS_WEB_LOGIN = '/api/login'
FS_WEB_HOSTS = '/api/hosts'
FS_WEB_HOSTFIELDS = '/api/hostfields'
FS_WEB_POLICIES = '/api/policies'
FS_ERR_CODE_MSG = "Error code unavailable"
FS_ERR_MSG_UNAVAILABLE = "Error message unavailable. Please check the asset configuration and|or action parameters"
FS_PARSE_ERR_MSG = "Unable to parse the error message. Please check the asset configuration and|or action parameters"
ERR_VALID_INT_MSG = "Please provide a valid integer value in the {}"
ERR_NON_NEG_INT_MSG = "Please provide a valid non-negative integer value in the {}"
ERR_POSITIVE_INTEGER_MSG = "Please provide a valid non-zero positive integer value in the {}"
HOST_ID_INT_PARAM = "'host_id' action parameter"
| true | true |
f71fcd6b02e4155fea4b2f52039a9ce2c32ad6f6 | 7,009 | py | Python | nmeta2dpae/tc_policy_dpae.py | mattjhayes/nmeta2-dpae | f441d78f0cdbd63495d4f96cddb462b801d82925 | [
"Apache-2.0"
] | 1 | 2016-03-19T20:42:49.000Z | 2016-03-19T20:42:49.000Z | nmeta2dpae/tc_policy_dpae.py | mattjhayes/nmeta2-dpae | f441d78f0cdbd63495d4f96cddb462b801d82925 | [
"Apache-2.0"
] | 2 | 2016-04-09T01:17:39.000Z | 2016-04-09T01:30:58.000Z | nmeta2dpae/tc_policy_dpae.py | mattjhayes/nmeta2-dpae | f441d78f0cdbd63495d4f96cddb462b801d82925 | [
"Apache-2.0"
] | 4 | 2016-05-04T08:42:29.000Z | 2021-07-16T02:11:40.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is part of nmeta Data Plane Auxiliary Engine (DPAE)
.
It is used to contain the Traffic Classification (TC) policy and provide
methods and direct variables to access it
.
Version 2.x Toulouse Code
"""
#*** Logging imports:
import logging
import logging.handlers
import coloredlogs
import sys
#*** YAML for config and policy file parsing:
import yaml
#*** Keys that must exist under 'identity' in the policy:
IDENTITY_KEYS = ('arp',
'lldp',
'dns',
'dhcp')
class TCPolicy(object):
"""
This class is instantiated by nmeta2.py and provides methods
to ingest the policy file main_policy.yaml and validate
that it is correctly structured
"""
def __init__(self, _config):
#*** Get logging config values from config class:
_logging_level_s = _config.get_value \
('tc_policy_dpae_logging_level_s')
_logging_level_c = _config.get_value \
('tc_policy_dpae_logging_level_c')
_syslog_enabled = _config.get_value('syslog_enabled')
_loghost = _config.get_value('loghost')
_logport = _config.get_value('logport')
_logfacility = _config.get_value('logfacility')
_syslog_format = _config.get_value('syslog_format')
_console_log_enabled = _config.get_value('console_log_enabled')
_coloredlogs_enabled = _config.get_value('coloredlogs_enabled')
_console_format = _config.get_value('console_format')
#*** Set up Logging:
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
self.logger.propagate = False
#*** Syslog:
if _syslog_enabled:
#*** Log to syslog on host specified in config.yaml:
self.syslog_handler = logging.handlers.SysLogHandler(address=(
_loghost, _logport),
facility=_logfacility)
syslog_formatter = logging.Formatter(_syslog_format)
self.syslog_handler.setFormatter(syslog_formatter)
self.syslog_handler.setLevel(_logging_level_s)
#*** Add syslog log handler to logger:
self.logger.addHandler(self.syslog_handler)
#*** Console logging:
if _console_log_enabled:
#*** Log to the console:
if _coloredlogs_enabled:
#*** Colourise the logs to make them easier to understand:
coloredlogs.install(level=_logging_level_c,
logger=self.logger, fmt=_console_format, datefmt='%H:%M:%S')
else:
#*** Add console log handler to logger:
self.console_handler = logging.StreamHandler()
console_formatter = logging.Formatter(_console_format)
self.console_handler.setFormatter(console_formatter)
self.console_handler.setLevel(_logging_level_c)
self.logger.addHandler(self.console_handler)
#*** Object to hold Controller main policies per interface in YAML:
self.main_policy = dict()
#*** Object to hold Controller optimised TC rules per iface in YAML:
self.opt_rules = dict()
def ingest_main_policy(self, main_policy_text, if_name):
"""
Turn a plain text main policy file object into a YAML object
and store it as a class variable
"""
#*** Ingest the policy file:
try:
self.main_policy[if_name] = yaml.load(main_policy_text)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.logger.error("Failed to convert main policy to YAML "
"%s, %s, %s",
exc_type, exc_value, exc_traceback)
return 0
self.logger.debug("Successfully ingested main policy into YAML")
return 1
def ingest_optimised_rules(self, opt_rules_text, if_name):
"""
Turn a plain optimised TC rules file object into a YAML object
and store it as a class variable
"""
#*** Ingest the policy file:
try:
self.opt_rules[if_name] = yaml.load(opt_rules_text)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.logger.error("Failed to convert optimised TC rules to YAML "
"%s, %s, %s",
exc_type, exc_value, exc_traceback)
return 0
self.logger.debug("Successfully ingested optimised TC rules into YAML")
return 1
def get_id_flag(self, if_name, id_key):
"""
Get a value for an Identity Indicator harvesting flag
"""
if not id_key in IDENTITY_KEYS:
self.logger.error("The key %s is not valid", id_key)
return 0
return self.main_policy[if_name]['identity'][id_key]
def get_tc_classifiers(self, if_name):
"""
Return a list of traffic classifiers
that should be run against ingress packets on a sniff interface.
Each entry is a tuple of type (statistical or payload) and
classifier name, example:
[('statistical', 'statistical_qos_bandwidth_1')]
"""
classifiers = []
for idx, fe_match_list in enumerate(self.opt_rules[if_name]):
self.logger.info("Optimised fe_match_list %s is %s", idx,
fe_match_list)
if not 'install_type' in fe_match_list:
self.logger.error("no install_type key")
continue
if fe_match_list['install_type'] == 'to_dpae':
self.logger.debug("Matched a DPAE TC condition...")
classifiers.append((fe_match_list['type'],
fe_match_list['value']))
return classifiers
def tc_mode(self, if_name):
"""
Return the tc mode for the policy (active or passive)
"""
_tc_policies = self.main_policy[if_name]['tc_policies']
_tc_policies_keys = list(_tc_policies.keys())
_tc_policy_name = _tc_policies_keys[0]
tc_mode = _tc_policies[_tc_policy_name]['mode']
if tc_mode:
return tc_mode
else:
self.logger.error("Could not find tc mode in policy")
return 'passive'
| 40.75 | 79 | 0.612356 |
import logging
import logging.handlers
import coloredlogs
import sys
import yaml
IDENTITY_KEYS = ('arp',
'lldp',
'dns',
'dhcp')
class TCPolicy(object):
def __init__(self, _config):
_logging_level_s = _config.get_value \
('tc_policy_dpae_logging_level_s')
_logging_level_c = _config.get_value \
('tc_policy_dpae_logging_level_c')
_syslog_enabled = _config.get_value('syslog_enabled')
_loghost = _config.get_value('loghost')
_logport = _config.get_value('logport')
_logfacility = _config.get_value('logfacility')
_syslog_format = _config.get_value('syslog_format')
_console_log_enabled = _config.get_value('console_log_enabled')
_coloredlogs_enabled = _config.get_value('coloredlogs_enabled')
_console_format = _config.get_value('console_format')
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
self.logger.propagate = False
if _syslog_enabled:
self.syslog_handler = logging.handlers.SysLogHandler(address=(
_loghost, _logport),
facility=_logfacility)
syslog_formatter = logging.Formatter(_syslog_format)
self.syslog_handler.setFormatter(syslog_formatter)
self.syslog_handler.setLevel(_logging_level_s)
self.logger.addHandler(self.syslog_handler)
if _console_log_enabled:
if _coloredlogs_enabled:
coloredlogs.install(level=_logging_level_c,
logger=self.logger, fmt=_console_format, datefmt='%H:%M:%S')
else:
self.console_handler = logging.StreamHandler()
console_formatter = logging.Formatter(_console_format)
self.console_handler.setFormatter(console_formatter)
self.console_handler.setLevel(_logging_level_c)
self.logger.addHandler(self.console_handler)
self.main_policy = dict()
self.opt_rules = dict()
def ingest_main_policy(self, main_policy_text, if_name):
try:
self.main_policy[if_name] = yaml.load(main_policy_text)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.logger.error("Failed to convert main policy to YAML "
"%s, %s, %s",
exc_type, exc_value, exc_traceback)
return 0
self.logger.debug("Successfully ingested main policy into YAML")
return 1
def ingest_optimised_rules(self, opt_rules_text, if_name):
try:
self.opt_rules[if_name] = yaml.load(opt_rules_text)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.logger.error("Failed to convert optimised TC rules to YAML "
"%s, %s, %s",
exc_type, exc_value, exc_traceback)
return 0
self.logger.debug("Successfully ingested optimised TC rules into YAML")
return 1
def get_id_flag(self, if_name, id_key):
if not id_key in IDENTITY_KEYS:
self.logger.error("The key %s is not valid", id_key)
return 0
return self.main_policy[if_name]['identity'][id_key]
def get_tc_classifiers(self, if_name):
classifiers = []
for idx, fe_match_list in enumerate(self.opt_rules[if_name]):
self.logger.info("Optimised fe_match_list %s is %s", idx,
fe_match_list)
if not 'install_type' in fe_match_list:
self.logger.error("no install_type key")
continue
if fe_match_list['install_type'] == 'to_dpae':
self.logger.debug("Matched a DPAE TC condition...")
classifiers.append((fe_match_list['type'],
fe_match_list['value']))
return classifiers
def tc_mode(self, if_name):
_tc_policies = self.main_policy[if_name]['tc_policies']
_tc_policies_keys = list(_tc_policies.keys())
_tc_policy_name = _tc_policies_keys[0]
tc_mode = _tc_policies[_tc_policy_name]['mode']
if tc_mode:
return tc_mode
else:
self.logger.error("Could not find tc mode in policy")
return 'passive'
| true | true |
f71fcd8293089c972b431387d1197b53dd7b564d | 516 | py | Python | src/event_representations.py | ATTPC/VAE-event-classification | aae331d44bffffec2ca8a6cdef71208899db0052 | [
"MIT"
] | null | null | null | src/event_representations.py | ATTPC/VAE-event-classification | aae331d44bffffec2ca8a6cdef71208899db0052 | [
"MIT"
] | 2 | 2018-12-20T20:10:52.000Z | 2019-02-04T17:44:01.000Z | src/event_representations.py | ATTPC/VAE-event-classification | aae331d44bffffec2ca8a6cdef71208899db0052 | [
"MIT"
] | null | null | null | import numpy as np
def make_histograms(x, bins=40, interval=[1e-1, 1]):
intervals = np.linspace(interval[0], interval[1], bins)
flat_x = x.reshape((x.shape[0], -1))
hist_x = np.zeros((x.shape[0], bins))
for i in range(1, bins):
mask = flat_x <= intervals[i]
mask = np.logical_and(mask, flat_x > intervals[i-1])
hist_x[:, i] = mask.sum(1)
return hist_x
def make_net_count(x, **kwargs):
flat_x = x.reshape((x.shape[0], -1))
sum_x = flat_x.sum(1)
return sum_x
| 28.666667 | 60 | 0.604651 | import numpy as np
def make_histograms(x, bins=40, interval=[1e-1, 1]):
intervals = np.linspace(interval[0], interval[1], bins)
flat_x = x.reshape((x.shape[0], -1))
hist_x = np.zeros((x.shape[0], bins))
for i in range(1, bins):
mask = flat_x <= intervals[i]
mask = np.logical_and(mask, flat_x > intervals[i-1])
hist_x[:, i] = mask.sum(1)
return hist_x
def make_net_count(x, **kwargs):
flat_x = x.reshape((x.shape[0], -1))
sum_x = flat_x.sum(1)
return sum_x
| true | true |
f71fce257398e75ef7291d3d8d8cd6d620bff9ed | 122 | py | Python | zpy/classes/bases/utility/pretty.py | yu-ichiro/zpy | 65e5ae7616d6e7fce91a03f20f663caa4af834b5 | [
"MIT"
] | null | null | null | zpy/classes/bases/utility/pretty.py | yu-ichiro/zpy | 65e5ae7616d6e7fce91a03f20f663caa4af834b5 | [
"MIT"
] | null | null | null | zpy/classes/bases/utility/pretty.py | yu-ichiro/zpy | 65e5ae7616d6e7fce91a03f20f663caa4af834b5 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
class Pretty(ABC):
@abstractmethod
def __pretty__(self) -> str:
...
| 15.25 | 35 | 0.639344 | from abc import ABC, abstractmethod
class Pretty(ABC):
@abstractmethod
def __pretty__(self) -> str:
...
| true | true |
f71fcef13051a8d05b65c648d775148a97470b27 | 167,246 | py | Python | tensorflow/python/ops/nn_ops.py | handongke/tensorflow | c6bb5cd0447a0af2764c195fb14d218df8ae6471 | [
"Apache-2.0"
] | 5 | 2019-01-13T16:15:25.000Z | 2019-07-07T16:17:32.000Z | tensorflow/python/ops/nn_ops.py | handongke/tensorflow | c6bb5cd0447a0af2764c195fb14d218df8ae6471 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/nn_ops.py | handongke/tensorflow | c6bb5cd0447a0af2764c195fb14d218df8ae6471 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for primitive Neural Net (NN) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_nn_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
# Aliases for some automatically-generated names.
local_response_normalization = gen_nn_ops.lrn
# pylint: disable=protected-access
def _non_atrous_convolution(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
padding,
data_format=None, # pylint: disable=redefined-builtin
strides=None,
name=None):
"""Computes sums of N-D convolutions (actually cross correlation).
It is required that 1 <= N <= 3.
This is used to implement the more generic `convolution` function, which
extends the interface of this function with a `dilation_rate` parameter.
Args:
input: Rank N+2 tensor of type T of shape
`[batch_size] + input_spatial_shape + [in_channels]` if `data_format`
does not start with `"NC"`, or
`[batch_size, in_channels] + input_spatial_shape` if `data_format` starts
with `"NC"`.
filter: Rank N+2 tensor of type T of shape
`filter_spatial_shape + [in_channels, out_channels]`. Rank of either
`input` or `filter` must be known.
padding: Padding method to use, must be either "VALID" or "SAME".
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
strides: Sequence of N positive integers, defaults to `[1] * N`.
name: Name prefix to use.
Returns:
Rank N+2 tensor of type T of shape
`[batch_size] + output_spatial_shape + [out_channels]`, where
if padding == "SAME":
output_spatial_shape = input_spatial_shape
if padding == "VALID":
output_spatial_shape = input_spatial_shape - filter_spatial_shape + 1.
Raises:
ValueError: if ranks are incompatible.
"""
with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.get_shape()
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
filter_shape = filter.get_shape()
op = _NonAtrousConvolution(
input_shape,
filter_shape=filter_shape,
padding=padding,
data_format=data_format,
strides=strides,
name=scope)
return op(input, filter)
class _NonAtrousConvolution(object):
"""Helper class for _non_atrous_convolution.
Note that this class assumes that shapes of input and filter passed to
__call__ are compatible with input_shape and filter_shape passed to the
constructor.
Arguments:
input_shape: static input shape, i.e. input.get_shape().
filter_shape: static filter shape, i.e. filter.get_shape().
padding: see _non_atrous_convolution.
data_format: see _non_atrous_convolution.
strides: see _non_atrous_convolution.
name: see _non_atrous_convolution.
"""
def __init__(
self,
input_shape,
filter_shape, # pylint: disable=redefined-builtin
padding,
data_format=None,
strides=None,
name=None):
filter_shape = filter_shape.with_rank(input_shape.ndims)
self.padding = padding
self.name = name
input_shape = input_shape.with_rank(filter_shape.ndims)
if input_shape.ndims is None:
raise ValueError("Rank of convolution must be known")
if input_shape.ndims < 3 or input_shape.ndims > 5:
raise ValueError(
"`input` and `filter` must have rank at least 3 and at most 5")
conv_dims = input_shape.ndims - 2
if strides is None:
strides = [1] * conv_dims
elif len(strides) != conv_dims:
raise ValueError("len(strides)=%d, but should be %d" % (len(strides),
conv_dims))
if conv_dims == 1:
# conv1d uses the 2-d data format names
if data_format is None:
data_format = "NWC"
elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
self.strides = strides[0]
self.data_format = data_format
self.conv_op = self._conv1d
elif conv_dims == 2:
if data_format is None or data_format == "NHWC":
data_format = "NHWC"
strides = [1] + list(strides) + [1]
elif data_format == "NCHW":
strides = [1, 1] + list(strides)
else:
raise ValueError("data_format must be \"NHWC\" or \"NCHW\".")
self.strides = strides
self.data_format = data_format
self.conv_op = conv2d
elif conv_dims == 3:
if data_format is None or data_format == "NDHWC":
strides = [1] + list(strides) + [1]
elif data_format == "NCDHW":
strides = [1, 1] + list(strides)
else:
raise ValueError("data_format must be \"NDHWC\" or \"NCDHW\". Have: %s"
% data_format)
self.strides = strides
self.data_format = data_format
self.conv_op = gen_nn_ops.conv3d
# Note that we need this adapter since argument names for conv1d don't match
# those for gen_nn_ops.conv2d and gen_nn_ops.conv3d.
# pylint: disable=redefined-builtin
def _conv1d(self, input, filter, strides, padding, data_format, name):
return conv1d(
value=input,
filters=filter,
stride=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
return self.conv_op(
input=inp,
filter=filter,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
name=self.name)
@tf_export("nn.dilation2d", v1=[])
def dilation2d_v2(
input, # pylint: disable=redefined-builtin
filters, # pylint: disable=redefined-builtin
strides,
padding,
data_format,
dilations,
name=None):
"""Computes the grayscale dilation of 4-D `input` and 3-D `filters` tensors.
The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
`filters` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the output
tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D dilation is the max-sum correlation
(for consistency with `conv2d`, we use unmirrored filters):
output[b, y, x, c] =
max_{dy, dx} input[b,
strides[1] * y + rates[1] * dy,
strides[2] * x + rates[2] * dx,
c] +
filters[dy, dx, c]
Max-pooling is a special case when the filter has size equal to the pooling
kernel size and contains all zeros.
Note on duality: The dilation of `input` by the `filters` is equal to the
negation of the erosion of `-input` by the reflected `filters`.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
`uint32`, `uint64`.
4-D with shape `[batch, in_height, in_width, depth]`.
filters: A `Tensor`. Must have the same type as `input`.
3-D with shape `[filter_height, filter_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
The stride of the sliding window for each dimension of the input
tensor. Must be: `[1, stride_height, stride_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: A `string`, only `"NCHW"` is currently supported.
dilations: A list of `ints` that has length `>= 4`.
The input stride for atrous morphological dilation. Must be:
`[1, rate_height, rate_width, 1]`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if data_format != "NCHW":
raise ValueError("Data formats other than NCHW are not yet supported")
return gen_nn_ops.dilation2d(input=input,
filter=filters,
strides=strides,
rates=dilations,
padding=padding,
name=name)
@tf_export("nn.with_space_to_batch")
def with_space_to_batch(
input, # pylint: disable=redefined-builtin
dilation_rate,
padding,
op,
filter_shape=None,
spatial_dims=None,
data_format=None):
"""Performs `op` on the space-to-batch representation of `input`.
This has the effect of transforming sliding window operations into the
corresponding "atrous" operation in which the input is sampled at the
specified `dilation_rate`.
In the special case that `dilation_rate` is uniformly 1, this simply returns:
op(input, num_spatial_dims, padding)
Otherwise, it returns:
batch_to_space_nd(
op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings),
num_spatial_dims,
"VALID")
adjusted_dilation_rate,
adjusted_crops),
where:
adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)],
adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2]
defined as follows:
We first define two int64 tensors `paddings` and `crops` of shape
`[num_spatial_dims, 2]` based on the value of `padding` and the spatial
dimensions of the `input`:
If `padding = "VALID"`, then:
paddings, crops = required_space_to_batch_paddings(
input_shape[spatial_dims],
dilation_rate)
If `padding = "SAME"`, then:
dilated_filter_shape =
filter_shape + (filter_shape - 1) * (dilation_rate - 1)
paddings, crops = required_space_to_batch_paddings(
input_shape[spatial_dims],
dilation_rate,
[(dilated_filter_shape - 1) // 2,
dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2])
Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial
dimensions are contiguous starting at the second dimension, but the specified
`spatial_dims` may not be, we must adjust `dilation_rate`, `paddings` and
`crops` in order to be usable with these operations. For a given dimension,
if the block size is 1, and both the starting and ending padding and crop
amounts are 0, then space_to_batch_nd effectively leaves that dimension alone,
which is what is needed for dimensions not part of `spatial_dims`.
Furthermore, `space_to_batch_nd` and `batch_to_space_nd` handle this case
efficiently for any number of leading and trailing dimensions.
For 0 <= i < len(spatial_dims), we assign:
adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i]
adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :]
adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :]
All unassigned values of `adjusted_dilation_rate` default to 1, while all
unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0.
Note in the case that `dilation_rate` is not uniformly 1, specifying "VALID"
padding is equivalent to specifying `padding = "SAME"` with a filter_shape of
`[1]*N`.
Advanced usage. Note the following optimization: A sequence of
`with_space_to_batch` operations with identical (not uniformly 1)
`dilation_rate` parameters and "VALID" padding
net = with_space_to_batch(net, dilation_rate, "VALID", op_1)
...
net = with_space_to_batch(net, dilation_rate, "VALID", op_k)
can be combined into a single `with_space_to_batch` operation as follows:
def combined_op(converted_input, num_spatial_dims, _):
result = op_1(converted_input, num_spatial_dims, "VALID")
...
result = op_k(result, num_spatial_dims, "VALID")
net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)
This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and
`batch_to_space_nd`.
Similarly, a sequence of `with_space_to_batch` operations with identical (not
uniformly 1) `dilation_rate` parameters, "SAME" padding, and odd filter
dimensions
net = with_space_to_batch(net, dilation_rate, "SAME", op_1, filter_shape_1)
...
net = with_space_to_batch(net, dilation_rate, "SAME", op_k, filter_shape_k)
can be combined into a single `with_space_to_batch` operation as follows:
def combined_op(converted_input, num_spatial_dims, _):
result = op_1(converted_input, num_spatial_dims, "SAME")
...
result = op_k(result, num_spatial_dims, "SAME")
net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)
Args:
input: Tensor of rank > max(spatial_dims).
dilation_rate: int32 Tensor of *known* shape [num_spatial_dims].
padding: str constant equal to "VALID" or "SAME"
op: Function that maps (input, num_spatial_dims, padding) -> output
filter_shape: If padding = "SAME", specifies the shape of the convolution
kernel/pooling window as an integer Tensor of shape [>=num_spatial_dims].
If padding = "VALID", filter_shape is ignored and need not be specified.
spatial_dims: Monotonically increasing sequence of `num_spatial_dims`
integers (which are >= 1) specifying the spatial dimensions of `input`
and output. Defaults to: `range(1, num_spatial_dims+1)`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
Returns:
The output Tensor as described above, dimensions will vary based on the op
provided.
Raises:
ValueError: if `padding` is invalid or the arguments are incompatible.
ValueError: if `spatial_dims` are invalid.
"""
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.get_shape()
def build_op(num_spatial_dims, padding):
return lambda inp, _: op(inp, num_spatial_dims, padding)
new_op = _WithSpaceToBatch(
input_shape,
dilation_rate,
padding,
build_op,
filter_shape=filter_shape,
spatial_dims=spatial_dims,
data_format=data_format)
return new_op(input, None)
class _WithSpaceToBatch(object):
  """Helper class for with_space_to_batch.

  Lowers a dilated op to `space_to_batch_nd -> op -> batch_to_space_nd`;
  when the dilation rate is statically known to be all ones, the wrapped op
  is invoked directly with no space-to-batch transformation.

  Note that this class assumes that shapes of input and filter passed to
  __call__ are compatible with input_shape and filter_shape passed to the
  constructor.

  Arguments
    input_shape: static shape of input. i.e. input.get_shape().
    dilation_rate: see with_space_to_batch
    padding: see with_space_to_batch
    build_op: Function that maps (num_spatial_dims, paddings) -> (function that
      maps (input, filter) -> output).
    filter_shape: see with_space_to_batch
    spatial_dims: see with_space_to_batch
    data_format: see with_space_to_batch
  """

  def __init__(self,
               input_shape,
               dilation_rate,
               padding,
               build_op,
               filter_shape=None,
               spatial_dims=None,
               data_format=None):
    """Helper class for _with_space_to_batch."""
    # dilation_rate must be a rank-1 int32 tensor of statically known length;
    # its length determines the number of spatial dimensions N.
    dilation_rate = ops.convert_to_tensor(
        dilation_rate, dtypes.int32, name="dilation_rate")
    try:
      rate_shape = dilation_rate.get_shape().with_rank(1)
    except ValueError:
      raise ValueError("rate must be rank 1")

    if not dilation_rate.get_shape().is_fully_defined():
      raise ValueError("rate must have known shape")

    num_spatial_dims = rate_shape.dims[0].value

    # Channels-first ("NC*") layouts put spatial dims after batch AND channel,
    # so the default spatial dims start at index 2 instead of 1.
    if data_format is not None and data_format.startswith("NC"):
      starting_spatial_dim = 2
    else:
      starting_spatial_dim = 1

    if spatial_dims is None:
      spatial_dims = range(starting_spatial_dim,
                           num_spatial_dims + starting_spatial_dim)
    orig_spatial_dims = list(spatial_dims)
    spatial_dims = sorted(set(int(x) for x in orig_spatial_dims))
    # The sorted/deduplicated list must equal the original sequence, i.e. the
    # caller must supply strictly increasing positive indices.
    if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims):
      raise ValueError(
          "spatial_dims must be a montonically increasing sequence of positive "
          "integers")  # pylint: disable=line-too-long

    # The input must be at least deep enough to contain the largest requested
    # spatial dimension (plus the trailing channel dim for channels-last).
    if data_format is not None and data_format.startswith("NC"):
      expected_input_rank = spatial_dims[-1]
    else:
      expected_input_rank = spatial_dims[-1] + 1

    try:
      input_shape.with_rank_at_least(expected_input_rank)
    except ValueError:
      raise ValueError(
          "input tensor must have rank %d at least" % (expected_input_rank))

    # Prefer the statically-known (numpy) rate when available: it enables the
    # validation below and the all-ones fast path.
    const_rate = tensor_util.constant_value(dilation_rate)
    rate_or_const_rate = dilation_rate
    if const_rate is not None:
      rate_or_const_rate = const_rate
      if np.any(const_rate < 1):
        raise ValueError("dilation_rate must be positive")
      if np.all(const_rate == 1):
        # Fast path: no dilation, so no space-to-batch transform is needed;
        # __call__ dispatches straight to the built op.
        self.call = build_op(num_spatial_dims, padding)
        return

    # We have two padding contributions. The first is used for converting
    # "SAME" to "VALID". The second is required so that the height and width
    # of the zero-padded value tensor are multiples of rate.

    # Padding required to reduce to "VALID" convolution
    if padding == "SAME":
      if filter_shape is None:
        raise ValueError("filter_shape must be specified for SAME padding")
      filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape")
      const_filter_shape = tensor_util.constant_value(filter_shape)
      if const_filter_shape is not None:
        # Static filter shape: base paddings can be computed once, here.
        filter_shape = const_filter_shape
        self.base_paddings = _with_space_to_batch_base_paddings(
            const_filter_shape, num_spatial_dims, rate_or_const_rate)
      else:
        # Dynamic filter shape: defer base-padding computation to call time
        # (see _with_space_to_batch_call).
        self.num_spatial_dims = num_spatial_dims
        self.rate_or_const_rate = rate_or_const_rate
        self.base_paddings = None
    elif padding == "VALID":
      self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32)
    else:
      raise ValueError("Invalid padding method %r" % padding)

    self.input_shape = input_shape
    self.spatial_dims = spatial_dims
    self.dilation_rate = dilation_rate
    self.data_format = data_format
    # The wrapped op always runs with "VALID" padding; any "SAME" padding was
    # folded into base_paddings above.
    self.op = build_op(num_spatial_dims, "VALID")
    self.call = self._with_space_to_batch_call

  def _with_space_to_batch_call(self, inp, filter):  # pylint: disable=redefined-builtin
    """Call functionality for with_space_to_batch."""
    # Handle input whose shape is unknown during graph creation.
    input_spatial_shape = None
    input_shape = self.input_shape
    spatial_dims = self.spatial_dims
    if input_shape.ndims is not None:
      input_shape_list = input_shape.as_list()
      input_spatial_shape = [input_shape_list[i] for i in spatial_dims]
    if input_spatial_shape is None or None in input_spatial_shape:
      # Fall back to a dynamic shape tensor for the spatial extents.
      input_shape_tensor = array_ops.shape(inp)
      input_spatial_shape = array_ops.stack(
          [input_shape_tensor[i] for i in spatial_dims])

    base_paddings = self.base_paddings
    if base_paddings is None:
      # base_paddings could not be computed at build time since static filter
      # shape was not fully defined.
      filter_shape = array_ops.shape(filter)
      base_paddings = _with_space_to_batch_base_paddings(
          filter_shape, self.num_spatial_dims, self.rate_or_const_rate)

    # Extend base_paddings so the padded spatial extents become multiples of
    # the dilation rate (block shape), and compute the matching crops.
    paddings, crops = array_ops.required_space_to_batch_paddings(
        input_shape=input_spatial_shape,
        base_paddings=base_paddings,
        block_shape=self.dilation_rate)

    # Expand the per-spatial-dim quantities to cover every dimension up to
    # max(spatial_dims), using identity values (rate 1 / zero pad) elsewhere.
    dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,
                                                spatial_dims)
    paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)
    crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)
    input_converted = array_ops.space_to_batch_nd(
        input=inp, block_shape=dilation_rate, paddings=paddings)

    result = self.op(input_converted, filter)

    result_converted = array_ops.batch_to_space_nd(
        input=result, block_shape=dilation_rate, crops=crops)

    # Recover channel information for output shape if channels are not last.
    if self.data_format is not None and self.data_format.startswith("NC"):
      if not result_converted.shape.dims[1].value and filter is not None:
        # Channel count comes from the filter's output-channel dimension.
        output_shape = result_converted.shape.as_list()
        output_shape[1] = filter.shape[-1]
        result_converted.set_shape(output_shape)

    return result_converted

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    # `call` is either the direct op (all-ones rate) or the space-to-batch
    # wrapper, selected in __init__.
    return self.call(inp, filter)
def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims,
                                       rate_or_const_rate):
  """Computes the base paddings that reduce "SAME" to "VALID" padding.

  Args:
    filter_shape: shape of the filter; the leading `num_spatial_dims` entries
      are the spatial filter extents.
    num_spatial_dims: number of spatial dimensions N.
    rate_or_const_rate: per-dimension dilation rate (constant or tensor).

  Returns:
    An [N, 2] tensor of (pad_start, pad_end) pairs.
  """
  # Spatial extent of the upsampled filter, with (rate - 1) zeros inserted
  # between consecutive filter taps along each spatial dimension.
  spatial_shape = filter_shape[:num_spatial_dims]
  dilated_spatial_shape = (
      spatial_shape + (spatial_shape - 1) * (rate_or_const_rate - 1))
  # Total "SAME" padding per dimension.  When it is odd, the extra element is
  # placed at the end, following the same convention as conv2d.
  total_pad = dilated_spatial_shape - 1
  pad_start = total_pad // 2
  pad_end = total_pad - pad_start
  return array_ops.stack(
      [[pad_start[d], pad_end[d]] for d in range(num_spatial_dims)])
def _with_space_to_batch_adjust(orig, fill_value, spatial_dims):
  """Returns an `adjusted` version of `orig` based on `spatial_dims`.

  Tensor of the same type as `orig` and with shape
  `[max(spatial_dims), ...]` where:

    adjusted[spatial_dims[i] - 1, ...] = orig[i, ...]

  for 0 <= i < len(spatial_dims), and

    adjusted[j, ...] = fill_value

  for j != spatial_dims[i] - 1 for some i.

  If `orig` is a constant value, then the result will be a constant value.

  Args:
    orig: Tensor of rank > max(spatial_dims).
    fill_value: Numpy scalar (of same data type as `orig) specifying the fill
      value for non-spatial dimensions.
    spatial_dims: See with_space_to_batch.

  Returns:
    `adjusted` tensor.
  """
  # Shape of each appended fill segment beyond the leading dimension.
  fill_dims = orig.get_shape().as_list()[1:]
  dtype = orig.dtype.as_numpy_dtype
  parts = []
  # Work with a constant (numpy) value when possible so the result stays
  # constant; otherwise slice the tensor itself.
  const_orig = tensor_util.constant_value(orig)
  const_or_orig = const_orig if const_orig is not None else orig
  prev_spatial_dim = 0
  i = 0
  # Walk `spatial_dims` grouping maximal runs of consecutive indices; each
  # iteration of the outer loop handles one run plus the gap before it.
  while i < len(spatial_dims):
    start_i = i
    start_spatial_dim = spatial_dims[i]
    if start_spatial_dim > 1:
      # Fill in any gap from the previous spatial dimension (or dimension 1 if
      # this is the first spatial dimension) with `fill_value`.
      parts.append(
          np.full(
              [start_spatial_dim - 1 - prev_spatial_dim] + fill_dims,
              fill_value,
              dtype=dtype))
    # Find the largest value of i such that:
    #   [spatial_dims[start_i], ..., spatial_dims[i]]
    #     == [start_spatial_dim, ..., start_spatial_dim + i - start_i],
    # i.e. the end of a contiguous group of spatial dimensions.
    while (i + 1 < len(spatial_dims) and
           spatial_dims[i + 1] == spatial_dims[i] + 1):
      i += 1
    # Copy the whole contiguous run from `orig` in one slice.
    parts.append(const_or_orig[start_i:i + 1])
    prev_spatial_dim = spatial_dims[i]
    i += 1
  if const_orig is not None:
    return np.concatenate(parts)
  else:
    return array_ops.concat(parts, 0)
def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
"""Helper function for verifying strides and dilation_rate arguments.
This is used by `convolution` and `pool`.
Args:
num_spatial_dims: int
strides: Optional. List of N ints >= 1. Defaults to [1]*N. If any value
of strides is > 1, then all values of dilation_rate must be 1.
dilation_rate: Optional. List of N ints >= 1. Defaults to [1]*N. If any
value of dilation_rate is > 1, then all values of strides must be 1.
Returns:
Normalized (strides, dilation_rate) as int32 numpy arrays of shape
[num_spatial_dims].
Raises:
ValueError: if the parameters are invalid.
"""
if dilation_rate is None:
dilation_rate = [1] * num_spatial_dims
elif len(dilation_rate) != num_spatial_dims:
raise ValueError("len(dilation_rate)=%d but should be %d" %
(len(dilation_rate), num_spatial_dims))
dilation_rate = np.array(dilation_rate, dtype=np.int32)
if np.any(dilation_rate < 1):
raise ValueError("all values of dilation_rate must be positive")
if strides is None:
strides = [1] * num_spatial_dims
elif len(strides) != num_spatial_dims:
raise ValueError("len(strides)=%d but should be %d" % (len(strides),
num_spatial_dims))
strides = np.array(strides, dtype=np.int32)
if np.any(strides < 1):
raise ValueError("all values of strides must be positive")
if np.any(strides > 1) and np.any(dilation_rate > 1):
raise ValueError(
"strides > 1 not supported in conjunction with dilation_rate > 1")
return strides, dilation_rate
@tf_export(v1=["nn.convolution"])
def convolution(
    input,  # pylint: disable=redefined-builtin
    filter,  # pylint: disable=redefined-builtin
    padding,
    strides=None,
    dilation_rate=None,
    name=None,
    data_format=None):
  # pylint: disable=line-too-long
  """Computes sums of N-D convolutions (actually cross-correlation).

  Supports either output striding via the optional `strides` parameter or
  atrous convolution (also known as convolution with holes or dilated
  convolution, from the French word "trous" meaning holes in English) via the
  optional `dilation_rate` parameter.  Currently, however, output striding is
  not supported for atrous convolutions.

  When `data_format` does not start with "NC", `input` is a rank (N+2) Tensor
  of shape `[num_batches] + input_spatial_shape + [num_input_channels]` and
  `filter` is a rank (N+2) Tensor of shape
  `spatial_filter_shape + [num_input_channels, num_output_channels]`.  For
  each N-D spatial output position (x[0], ..., x[N-1]) this computes:

      output[b, x[0], ..., x[N-1], k] =
          sum_{z[0], ..., z[N-1], q}
              filter[z[0], ..., z[N-1], q, k] *
              padded_input[b,
                           x[0]*strides[0] + dilation_rate[0]*z[0],
                           ...,
                           x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1],
                           q]

  where b indexes the batch, k the output channel, q the input channel, and z
  the N-D spatial offset within the filter.  `padded_input` is obtained by
  zero padding the input using an effective spatial filter shape of
  `(spatial_filter_shape-1) * dilation_rate + 1` and output striding
  `strides`.

  When `data_format` starts with `"NC"`, the `input` and output (but not the
  `filter`) are simply transposed as follows:

      convolution(input, data_format, **kwargs) =
        tf.transpose(convolution(tf.transpose(input, [0] + range(2,N+2) + [1]),
                                 **kwargs),
                     [0, N+1] + range(1, N+1))

  It is required that 1 <= N <= 3.

  Args:
    input: An (N+2)-D `Tensor` of type `T`, of shape
      `[batch_size] + input_spatial_shape + [in_channels]` if data_format does
      not start with "NC" (default), or
      `[batch_size, in_channels] + input_spatial_shape` if data_format starts
      with "NC".
    filter: An (N+2)-D `Tensor` with the same type as `input` and shape
      `spatial_filter_shape + [in_channels, out_channels]`.
    padding: A string, either `"VALID"` or `"SAME"`. The padding algorithm.
    strides: Optional.  Sequence of N ints >= 1.  Specifies the output stride.
      Defaults to [1]*N.  If any value of strides is > 1, then all values of
      dilation_rate must be 1.
    dilation_rate: Optional.  Sequence of N ints >= 1.  Specifies the filter
      upsampling/input downsampling rate.  In the literature, the same
      parameter is sometimes called `input stride` or `dilation`.  The
      effective filter size used for the convolution will be
      `spatial_filter_shape + (spatial_filter_shape - 1) * (rate - 1)`,
      obtained by inserting (dilation_rate[i]-1) zeros between consecutive
      elements of the original filter in each spatial dimension i.  If any
      value of dilation_rate is > 1, then all values of strides must be 1.
    name: Optional name for the returned tensor.
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if
      `data_format` does not start with "NC"), or the second dimension (if
      `data_format` starts with "NC").  For N=1, the valid values are "NWC"
      (default) and "NCW".  For N=2, the valid values are "NHWC" (default) and
      "NCHW".  For N=3, the valid values are "NDHWC" (default) and "NCDHW".

  Returns:
    A `Tensor` with the same type as `input` of shape

        `[batch_size] + output_spatial_shape + [out_channels]`

    if data_format is None or does not start with "NC", or

        `[batch_size, out_channels] + output_spatial_shape`

    if data_format starts with "NC",
    where `output_spatial_shape` depends on the value of `padding`.

    If padding == "SAME":
      output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])

    If padding == "VALID":
      output_spatial_shape[i] =
        ceil((input_spatial_shape[i] -
              (spatial_filter_shape[i]-1) * dilation_rate[i])
             / strides[i]).

  Raises:
    ValueError: If input/output depth does not match `filter` shape, if padding
      is other than `"VALID"` or `"SAME"`, or if data_format is invalid.
  """
  # pylint: enable=line-too-long
  with ops.name_scope(name, "convolution", [input, filter]) as name:
    # Convert both operands up front; their static shapes drive all of the
    # rank/stride/dilation validation performed by the Convolution helper.
    input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin
    filter = ops.convert_to_tensor(filter, name="filter")  # pylint: disable=redefined-builtin
    conv_op = Convolution(
        input.get_shape(),
        filter.get_shape(),
        padding,
        strides=strides,
        dilation_rate=dilation_rate,
        name=name,
        data_format=data_format)
    return conv_op(input, filter)
@tf_export("nn.convolution", v1=[])
def convolution_v2(
    input,  # pylint: disable=redefined-builtin
    filters,
    strides=None,
    padding="VALID",
    data_format=None,
    dilations=None,
    name=None):
  # Thin v2 wrapper around the v1 implementation: the v2 argument names
  # `filters` and `dilations` map onto the v1 names `filter` and
  # `dilation_rate`.  (The docstring is derived from `convolution.__doc__`
  # via rewrite_argument_docstring at module load time.)
  return convolution(
      input,  # pylint: disable=redefined-builtin
      filters,
      strides=strides,
      padding=padding,
      dilation_rate=dilations,
      data_format=data_format,
      name=name)
# Derive the v2 docstring from the v1 `convolution` docstring by renaming the
# arguments that changed between API versions:
# `dilation_rate` -> `dilations` and `filter` -> `filters`.
convolution_v2.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        convolution.__doc__, "dilation_rate", "dilations"),
    "filter", "filters")
class Convolution(object):
  """Helper class for convolution.

  Validates shapes/arguments once at construction time, then builds a
  `_WithSpaceToBatch` wrapper that handles any dilation by lowering to
  space-to-batch around a non-atrous convolution.

  Note that this class assumes that shapes of input and filter passed to
  __call__ are compatible with input_shape and filter_shape passed to the
  constructor.

  Arguments
    input_shape: static shape of input. i.e. input.get_shape().
    filter_shape: static shape of the filter. i.e. filter.get_shape().
    padding: see convolution.
    strides: see convolution.
    dilation_rate: see convolution.
    name: see convolution.
    data_format: see convolution.
  """

  def __init__(self,
               input_shape,
               filter_shape,
               padding,
               strides=None,
               dilation_rate=None,
               name=None,
               data_format=None):
    """Helper function for convolution."""
    # Infer total rank (N+2) from the filter if known, else from the input;
    # at least one of the two must have a known rank.
    num_total_dims = filter_shape.ndims
    if num_total_dims is None:
      num_total_dims = input_shape.ndims
    if num_total_dims is None:
      raise ValueError("rank of input or filter must be known")

    num_spatial_dims = num_total_dims - 2

    try:
      input_shape.with_rank(num_spatial_dims + 2)
    except ValueError:
      raise ValueError(
          "input tensor must have rank %d" % (num_spatial_dims + 2))

    try:
      filter_shape.with_rank(num_spatial_dims + 2)
    except ValueError:
      raise ValueError(
          "filter tensor must have rank %d" % (num_spatial_dims + 2))

    # Locate the channel dimension of the input: last for channels-last
    # layouts, second ("NC*") for channels-first.
    if data_format is None or not data_format.startswith("NC"):
      input_channels_dim = tensor_shape.dimension_at_index(
          input_shape, num_spatial_dims + 1)
      spatial_dims = range(1, num_spatial_dims + 1)
    else:
      input_channels_dim = tensor_shape.dimension_at_index(input_shape, 1)
      spatial_dims = range(2, num_spatial_dims + 2)

    # The filter's in_channels dimension (index N) must agree with the input.
    if not input_channels_dim.is_compatible_with(
        filter_shape[num_spatial_dims]):
      raise ValueError(
          "number of input channels does not match corresponding dimension of "
          "filter, {} != {}".format(input_channels_dim,
                                    filter_shape[num_spatial_dims]))

    # Normalizes defaults and enforces the strides/dilation exclusivity rule.
    strides, dilation_rate = _get_strides_and_dilation_rate(
        num_spatial_dims, strides, dilation_rate)

    self.input_shape = input_shape
    self.filter_shape = filter_shape
    self.data_format = data_format
    self.strides = strides
    self.name = name
    # _WithSpaceToBatch calls _build_op to obtain the underlying (non-atrous)
    # convolution; it handles dilation_rate > 1 via space-to-batch lowering.
    self.conv_op = _WithSpaceToBatch(
        input_shape,
        dilation_rate=dilation_rate,
        padding=padding,
        build_op=self._build_op,
        filter_shape=filter_shape,
        spatial_dims=spatial_dims,
        data_format=data_format)

  def _build_op(self, _, padding):
    # Callback for _WithSpaceToBatch: builds the undilated convolution with
    # the (possibly rewritten) padding.  _NonAtrousConvolution is defined
    # elsewhere in this module.
    return _NonAtrousConvolution(
        self.input_shape,
        filter_shape=self.filter_shape,
        padding=padding,
        data_format=self.data_format,
        strides=self.strides,
        name=self.name)

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    return self.conv_op(inp, filter)
@tf_export(v1=["nn.pool"])
def pool(
    input,  # pylint: disable=redefined-builtin
    window_shape,
    pooling_type,
    padding,
    dilation_rate=None,
    strides=None,
    name=None,
    data_format=None):
  # pylint: disable=line-too-long
  """Performs an N-D pooling operation.

  In the case that `data_format` does not start with "NC", computes for
      0 <= b < batch_size,
      0 <= x[i] < output_spatial_shape[i],
      0 <= c < num_channels:

  ```
    output[b, x[0], ..., x[N-1], c] =
      REDUCE_{z[0], ..., z[N-1]}
        input[b,
              x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
              ...
              x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
              c],
  ```

  where the reduction function REDUCE depends on the value of `pooling_type`,
  and pad_before is defined based on the value of `padding` as described in
  the "returns" section of `tf.nn.convolution` for details.
  The reduction never includes out-of-bounds positions.

  In the case that `data_format` starts with `"NC"`, the `input` and output are
  simply transposed as follows:

  ```
    pool(input, data_format, **kwargs) =
      tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
                        **kwargs),
                   [0, N+1] + range(1, N+1))
  ```

  Args:
    input: Tensor of rank N+2, of shape
      `[batch_size] + input_spatial_shape + [num_channels]` if data_format does
      not start with "NC" (default), or
      `[batch_size, num_channels] + input_spatial_shape` if data_format starts
      with "NC".  Pooling happens over the spatial dimensions only.
    window_shape: Sequence of N ints >= 1.
    pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
    padding: The padding algorithm, must be "SAME" or "VALID".
      See the "returns" section of `tf.nn.convolution` for details.
    dilation_rate: Optional.  Dilation rate.  List of N ints >= 1.
      Defaults to [1]*N.  If any value of dilation_rate is > 1, then all
      values of strides must be 1.
    strides: Optional.  Sequence of N ints >= 1.  Defaults to [1]*N.
      If any value of strides is > 1, then all values of dilation_rate must be
      1.
    name: Optional. Name of the op.
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if
      `data_format` does not start with "NC"), or the second dimension (if
      `data_format` starts with "NC").  For N=1, the valid values are "NWC"
      (default) and "NCW".  For N=2, the valid values are "NHWC" (default) and
      "NCHW".  For N=3, the valid values are "NDHWC" (default) and "NCDHW".

  Returns:
    Tensor of rank N+2, of shape
      [batch_size] + output_spatial_shape + [num_channels]

    if data_format is None or does not start with "NC", or

      [batch_size, num_channels] + output_spatial_shape

    if data_format starts with "NC",
    where `output_spatial_shape` depends on the value of padding:

    If padding = "SAME":
      output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])

    If padding = "VALID":
      output_spatial_shape[i] =
        ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
             / strides[i]).

  Raises:
    ValueError: if arguments are invalid.
  """
  # pylint: enable=line-too-long
  with ops.name_scope(name, "%s_pool" % (pooling_type.lower()),
                      [input]) as scope:
    input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin

    num_spatial_dims = len(window_shape)
    if num_spatial_dims < 1 or num_spatial_dims > 3:
      raise ValueError("It is required that 1 <= num_spatial_dims <= 3.")

    input.get_shape().with_rank(num_spatial_dims + 2)

    # Normalizes defaults and enforces the strides/dilation exclusivity rule.
    strides, dilation_rate = _get_strides_and_dilation_rate(
        num_spatial_dims, strides, dilation_rate)

    if padding == "SAME" and np.any(dilation_rate > 1):
      raise ValueError(
          "pooling with SAME padding is not implemented for dilation_rate > 1")

    if np.any(strides > window_shape):
      raise ValueError(
          "strides > window_shape not supported due to inconsistency between "
          "CPU and GPU implementations")

    # Dispatch table: (pooling_type, num_spatial_dims) -> pooling op.  1-D
    # pooling reuses the 2-D op via the expand_dims/squeeze trick below.
    pooling_ops = {
        ("MAX", 1): max_pool,
        ("MAX", 2): max_pool,
        ("MAX", 3): max_pool3d,  # pylint: disable=undefined-variable
        ("AVG", 1): avg_pool,
        ("AVG", 2): avg_pool,
        ("AVG", 3): avg_pool3d,  # pylint: disable=undefined-variable
    }
    op_key = (pooling_type, num_spatial_dims)
    if op_key not in pooling_ops:
      raise ValueError("%d-D %s pooling is not supported." % (op_key[1],
                                                              op_key[0]))

    # Insert singleton batch/channel entries so window/strides cover all N+2
    # dimensions, positioned according to the data layout.
    if data_format is None or not data_format.startswith("NC"):
      adjusted_window_shape = [1] + list(window_shape) + [1]
      adjusted_strides = [1] + list(strides) + [1]
      spatial_dims = range(1, num_spatial_dims + 1)
    else:
      adjusted_window_shape = [1, 1] + list(window_shape)
      adjusted_strides = [1, 1] + list(strides)
      spatial_dims = range(2, num_spatial_dims + 2)

    if num_spatial_dims == 1:
      # 1-D pooling is implemented as 2-D pooling over a tensor with an extra
      # height-1 spatial dimension; translate the 1-D layout to its 2-D
      # counterpart and widen the window/strides accordingly.
      if data_format is None or data_format == "NWC":
        data_format_kwargs = dict(data_format="NHWC")
      elif data_format == "NCW":
        data_format_kwargs = dict(data_format="NCHW")
      else:
        raise ValueError("data_format must be either \"NWC\" or \"NCW\".")
      adjusted_window_shape = [1] + adjusted_window_shape
      adjusted_strides = [1] + adjusted_strides
    else:
      data_format_kwargs = dict(data_format=data_format)

    def op(converted_input, _, converted_padding):  # pylint: disable=missing-docstring
      # Callback invoked by with_space_to_batch; the ignored second argument
      # is num_spatial_dims.
      if num_spatial_dims == 1:
        converted_input = array_ops.expand_dims(converted_input,
                                                spatial_dims[0])
      result = pooling_ops[op_key](
          converted_input,
          adjusted_window_shape,
          adjusted_strides,
          converted_padding,
          name=scope,
          **data_format_kwargs)
      if num_spatial_dims == 1:
        # Undo the dummy dimension added for the 1-D case.
        result = array_ops.squeeze(result, [spatial_dims[0]])
      return result

    # with_space_to_batch handles dilation_rate > 1 (and is a no-op pass
    # through to `op` when all rates are 1).
    return with_space_to_batch(
        input=input,
        dilation_rate=dilation_rate,
        padding=padding,
        op=op,
        spatial_dims=spatial_dims,
        filter_shape=window_shape)
@tf_export("nn.pool", v1=[])
def pool_v2(
    input,  # pylint: disable=redefined-builtin
    window_shape,
    pooling_type,
    strides=None,
    padding="VALID",
    data_format=None,
    dilations=None,
    name=None):
  # pylint: disable=line-too-long
  """Performs an N-D pooling operation.

  In the case that `data_format` does not start with "NC", computes for
      0 <= b < batch_size,
      0 <= x[i] < output_spatial_shape[i],
      0 <= c < num_channels:

  ```
    output[b, x[0], ..., x[N-1], c] =
      REDUCE_{z[0], ..., z[N-1]}
        input[b,
              x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
              ...
              x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
              c],
  ```

  where the reduction function REDUCE depends on the value of `pooling_type`,
  and pad_before is defined based on the value of `padding` as described in
  the "returns" section of `tf.nn.convolution` for details.
  The reduction never includes out-of-bounds positions.

  In the case that `data_format` starts with `"NC"`, the `input` and output are
  simply transposed as follows:

  ```
    pool(input, data_format, **kwargs) =
      tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
                        **kwargs),
                   [0, N+1] + range(1, N+1))
  ```

  Args:
    input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
      [num_channels]` if data_format does not start with "NC" (default), or
      `[batch_size, num_channels] + input_spatial_shape` if data_format starts
      with "NC".  Pooling happens over the spatial dimensions only.
    window_shape: Sequence of N ints >= 1.
    pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
    strides: Optional. Sequence of N ints >= 1.  Defaults to [1]*N.  If any
      value of strides is > 1, then all values of dilation_rate must be 1.
    padding: The padding algorithm, must be "SAME" or "VALID".  Defaults to
      "SAME".  See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if
      `data_format` does not start with "NC"), or the second dimension (if
      `data_format` starts with "NC").  For N=1, the valid values are "NWC"
      (default) and "NCW".  For N=2, the valid values are "NHWC" (default) and
      "NCHW".  For N=3, the valid values are "NDHWC" (default) and "NCDHW".
    dilations: Optional.  Dilation rate.  List of N ints >= 1.  Defaults to
      [1]*N.  If any value of dilation_rate is > 1, then all values of strides
      must be 1.
    name: Optional. Name of the op.

  Returns:
    Tensor of rank N+2, of shape
      [batch_size] + output_spatial_shape + [num_channels]

    if data_format is None or does not start with "NC", or

      [batch_size, num_channels] + output_spatial_shape

    if data_format starts with "NC",
    where `output_spatial_shape` depends on the value of padding:

    If padding = "SAME":
      output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])

    If padding = "VALID":
      output_spatial_shape[i] =
        ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
             / strides[i]).

  Raises:
    ValueError: if arguments are invalid.
  """
  # pylint: enable=line-too-long
  # Delegate to the v1 implementation, translating the v2 argument name
  # `dilations` to the v1 name `dilation_rate`.
  return pool(
      input=input,
      window_shape=window_shape,
      pooling_type=pooling_type,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilation_rate=dilations,
      name=name)
@tf_export("nn.atrous_conv2d")
def atrous_conv2d(value, filters, rate, padding, name=None):
  """Atrous convolution (a.k.a. convolution with holes or dilated convolution).

  This function is a simpler wrapper around the more general
  `tf.nn.convolution`, and exists only for backwards compatibility.  You can
  use `tf.nn.convolution` to perform 1-D, 2-D, or 3-D atrous convolution.

  Computes a 2-D atrous convolution given 4-D `value` and `filters` tensors.
  When `rate` equals one this is ordinary 2-D convolution.  When `rate` is
  greater than one, input values are sampled every `rate` pixels along the
  `height` and `width` dimensions, which is equivalent to convolving the input
  with filters upsampled by inserting `rate - 1` zeros between consecutive
  filter values along those dimensions -- hence the name atrous convolution
  (the French word "trous" means holes in English).  More specifically:

  ```
  output[batch, height, width, out_channel] =
      sum_{dheight, dwidth, in_channel} (
          filters[dheight, dwidth, in_channel, out_channel] *
          value[batch, height + rate*dheight, width + rate*dwidth, in_channel]
      )
  ```

  Atrous convolution enlarges the effective field of view of filters without
  increasing the number of parameters or the amount of computation, and is
  used in dense prediction tasks such as semantic image segmentation, optical
  flow computation, or depth estimation (see e.g.
  http://arxiv.org/abs/1412.7062 and http://arxiv.org/abs/1511.07122).  The
  implementation reduces

  ```python
      atrous_conv2d(value, filters, rate, padding=padding)
  ```

  to space_to_batch -> conv2d(padding="VALID") -> batch_to_space.  Note that a
  pair of consecutive `space_to_batch` and `batch_to_space` ops with the same
  `block_size` cancel out when their respective `paddings` and `crops` inputs
  are identical, so a chain of same-`rate`, 'SAME'-padded `atrous_conv2d` ops
  with odd filter heights/widths can be fused into a single
  space_to_batch/batch_to_space pair around plain `conv2d` ops.

  Args:
    value: A 4-D `Tensor` of type `float`.  It needs to be in the default
      "NHWC" format.  Its shape is `[batch, in_height, in_width, in_channels]`.
    filters: A 4-D `Tensor` with the same type as `value` and shape
      `[filter_height, filter_width, in_channels, out_channels]`.  `filters`'
      `in_channels` dimension must match that of `value`.  Atrous convolution
      is equivalent to standard convolution with upsampled filters with
      effective height `filter_height + (filter_height - 1) * (rate - 1)` and
      effective width `filter_width + (filter_width - 1) * (rate - 1)`,
      produced by inserting `rate - 1` zeros along consecutive elements across
      the `filters`' spatial dimensions.
    rate: A positive int32.  The stride with which we sample input values
      across the `height` and `width` dimensions.  Equivalently, the rate by
      which we upsample the filter values by inserting zeros across the
      `height` and `width` dimensions.  In the literature, the same parameter
      is sometimes called `input stride` or `dilation`.
    padding: A string, either `'VALID'` or `'SAME'`.  The padding algorithm.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`.
    Output shape with `'VALID'` padding is:

        [batch, height - 2 * (filter_width - 1),
         width - 2 * (filter_height - 1), out_channels].

    Output shape with `'SAME'` padding is:

        [batch, height, width, out_channels].

  Raises:
    ValueError: If input/output depth does not match `filters`' shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  # Broadcasting the scalar rate to shape (2,) applies the same dilation to
  # both spatial (height/width) dimensions; `convolution` then performs the
  # space-to-batch lowering described above.
  spatial_rate = np.broadcast_to(rate, (2,))
  return convolution(
      input=value,
      filter=filters,
      padding=padding,
      dilation_rate=spatial_rate,
      name=name)
def _convert_padding(padding):
"""Converts Python padding to C++ padding for ops which take EXPLICIT padding.
Args:
padding: the `padding` argument for a Python op which supports EXPLICIT
padding.
Returns:
(padding, explicit_paddings) pair, which should be passed as attributes to a
C++ op.
Raises:
ValueError: If padding is invalid.
"""
explicit_paddings = []
if padding == "EXPLICIT":
# Give a better error message if EXPLICIT is passed.
raise ValueError('"EXPLICIT" is not a valid value for the padding '
"parameter. To use explicit padding, the padding "
"parameter must be a list.")
if isinstance(padding, (list, tuple)):
for i, dim_paddings in enumerate(padding):
if not isinstance(dim_paddings, (list, tuple)):
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding is not a list/tuple" % i)
if len(dim_paddings) != 2:
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding has size %d" % (i, len(dim_paddings)))
explicit_paddings.extend(dim_paddings)
if len(padding) != 4:
raise ValueError("When padding is a list, it must be of size 4. Got "
"padding of size: %d" % len(padding))
padding = "EXPLICIT"
return padding, explicit_paddings
@tf_export("nn.conv2d", v1=[])
def conv2d_v2(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
data_format="NHWC",
dilations=None,
name=None):
# pylint: disable=line-too-long
r"""Computes a 2-D convolution given 4-D `input` and `filters` tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`, this op
performs the following:
1. Flattens the filter to a 2-D matrix with shape
`[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
tensor of shape `[batch, out_height, out_width,
filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
vector.
In detail, with the default NHWC format,
output[b, i, j, k] =
sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertices strides, `strides = [1, stride, stride, 1]`.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
A 4-D tensor. The dimension order is interpreted according to the value
of `data_format`, see below for details.
filters: A `Tensor`. Must have the same type as `input`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: A list of `ints`.
1-D tensor of length 4. The stride of the sliding window for each
dimension of `input`. The dimension order is determined by the value of
`data_format`, see below for details.
padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by the
value of `data_format`, see above for details. Dilations in the batch and
depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
# pylint: enable=line-too-long
if dilations is None:
dilations = [1, 1, 1, 1]
return conv2d(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(v1=["nn.conv2d"])
def conv2d( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter,
strides,
padding,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`, this op
performs the following:
1. Flattens the filter to a 2-D matrix with shape
`[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
tensor of shape `[batch, out_height, out_width,
filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
vector.
In detail, with the default NHWC format,
output[b, i, j, k] =
sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q]
* filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertices strides, `strides = [1, stride, stride, 1]`.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
A 4-D tensor. The dimension order is interpreted according to the value
of `data_format`, see below for details.
filter: A `Tensor`. Must have the same type as `input`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: A list of `ints`.
1-D tensor of length 4. The stride of the sliding window for each
dimension of `input`. The dimension order is determined by the value of
`data_format`, see below for details.
padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by the
value of `data_format`, see above for details. Dilations in the batch and
depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
padding, explicit_paddings = _convert_padding(padding)
return gen_nn_ops.conv2d(input, # pylint: disable=redefined-builtin
filter,
strides,
padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export("nn.conv2d_backprop_filter", v1=[])
def conv2d_backprop_filter_v2(input, # pylint: disable=redefined-builtin
filter_sizes,
out_backprop,
strides,
padding,
data_format="NHWC",
dilations=None,
name=None):
r"""Computes the gradients of convolution with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape `[batch, in_height, in_width, in_channels]`.
filter_sizes: A `Tensor` of type `int32`.
An integer vector representing the tensor shape of `filter`,
where `filter` is a 4-D
`[filter_height, filter_width, in_channels, out_channels]` tensor.
out_backprop: A `Tensor`. Must have the same type as `input`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if dilations is None:
dilations = [1, 1, 1, 1]
return conv2d_backprop_filter(input, # pylint: disable=redefined-builtin
filter_sizes,
out_backprop,
strides,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(v1=["nn.conv2d_backprop_filter"])
def conv2d_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter_sizes,
out_backprop,
strides,
padding,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes the gradients of convolution with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape `[batch, in_height, in_width, in_channels]`.
filter_sizes: A `Tensor` of type `int32`.
An integer vector representing the tensor shape of `filter`,
where `filter` is a 4-D
`[filter_height, filter_width, in_channels, out_channels]` tensor.
out_backprop: A `Tensor`. Must have the same type as `input`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
padding, explicit_paddings = _convert_padding(padding)
return gen_nn_ops.conv2d_backprop_filter(
input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu,
explicit_paddings, data_format, dilations, name)
@tf_export("nn.conv2d_backprop_input", v1=[])
def conv2d_backprop_input_v2(input_sizes,
filters,
out_backprop,
strides,
padding,
data_format="NHWC",
dilations=None,
name=None):
r"""Computes the gradients of convolution with respect to the input.
Args:
input_sizes: A `Tensor` of type `int32`.
An integer vector representing the shape of `input`,
where `input` is a 4-D `[batch, height, width, channels]` tensor.
filters: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.
out_backprop: A `Tensor`. Must have the same type as `filters`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `filters`.
"""
if dilations is None:
dilations = [1, 1, 1, 1]
return conv2d_backprop_input(input_sizes,
filters,
out_backprop,
strides,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(v1=["nn.conv2d_backprop_input"])
def conv2d_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value
input_sizes,
filter,
out_backprop,
strides,
padding,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes the gradients of convolution with respect to the input.
Args:
input_sizes: A `Tensor` of type `int32`.
An integer vector representing the shape of `input`,
where `input` is a 4-D `[batch, height, width, channels]` tensor.
filter: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.
out_backprop: A `Tensor`. Must have the same type as `filter`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `filter`.
"""
padding, explicit_paddings = _convert_padding(padding)
return gen_nn_ops.conv2d_backprop_input(
input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu,
explicit_paddings, data_format, dilations, name)
@tf_export(v1=["nn.conv2d_transpose"])
def conv2d_transpose(
value,
filter, # pylint: disable=redefined-builtin
output_shape,
strides,
padding="SAME",
data_format="NHWC",
name=None):
"""The transpose of `conv2d`.
This operation is sometimes called "deconvolution" after [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
actually the transpose (gradient) of `conv2d` rather than an actual
deconvolution.
Args:
value: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]` for `NHWC` data format or
`[batch, in_channels, height, width]` for `NCHW` data format.
filter: A 4-D `Tensor` with the same type as `value` and shape
`[height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.name_scope(name, "conv2d_transpose",
[value, filter, output_shape]) as name:
if data_format not in ("NCHW", "NHWC"):
raise ValueError("data_format has to be either NCHW or NHWC.")
value = ops.convert_to_tensor(value, name="value")
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
axis = 3 if data_format == "NHWC" else 1
if not value.get_shape().dims[axis].is_compatible_with(
filter.get_shape()[3]):
raise ValueError("input channels does not match filter's input channels, "
"{} != {}".format(value.get_shape()[axis],
filter.get_shape()[3]))
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
raise ValueError("output_shape must have shape (4,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [4] if reached this point.
if not filter.get_shape().dims[2].is_compatible_with(
output_shape[axis]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[axis],
filter.get_shape()[2]))
if padding != "VALID" and padding != "SAME":
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
return gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape_,
filter=filter,
out_backprop=value,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.conv2d_transpose", v1=[])
def conv2d_transpose_v2(
    input,
    filters,  # pylint: disable=redefined-builtin
    output_shape,
    strides,
    padding="SAME",
    data_format="NHWC",
    name=None):
  # Thin v2 wrapper around the v1 endpoint; its docstring is generated from
  # conv2d_transpose's by the module-level rewrite below this definition.
  return conv2d_transpose(
      value=input,
      filter=filters,
      output_shape=output_shape,
      strides=strides,
      padding=padding,
      data_format=data_format,
      name=name)
# pylint: enable=redefined-builtin
# Reuse conv2d_transpose's docstring for the v2 endpoint, renaming the
# `filter`/`value` arguments to `filters`/`input` to match the v2 signature.
conv2d_transpose_v2.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        conv2d_transpose.__doc__, "filter", "filters"),
    "value", "input")
@tf_export("nn.atrous_conv2d_transpose")
def atrous_conv2d_transpose(value,
filters,
output_shape,
rate,
padding,
name=None):
"""The transpose of `atrous_conv2d`.
This operation is sometimes called "deconvolution" after [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
actually the transpose (gradient) of `atrous_conv2d` rather than an actual
deconvolution.
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC`
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, out_channels, in_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
inserting `rate - 1` zeros along consecutive elements across the
`filters`' spatial dimensions.
output_shape: A 1-D `Tensor` of shape representing the output shape of the
deconvolution op.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less
than one, or if the output_shape is not a tensor with 4 elements.
"""
with ops.name_scope(name, "atrous_conv2d_transpose",
[value, filters, output_shape]) as name:
value = ops.convert_to_tensor(value, name="value")
filters = ops.convert_to_tensor(filters, name="filters")
if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]):
raise ValueError(
"value's input channels does not match filters' input channels, "
"{} != {}".format(value.get_shape()[3],
filters.get_shape()[3]))
if rate < 1:
raise ValueError("rate {} cannot be less than one".format(rate))
if rate == 1:
return conv2d_transpose(
value,
filters,
output_shape,
strides=[1, 1, 1, 1],
padding=padding,
data_format="NHWC")
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
raise ValueError("output_shape must have shape (4,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [4] if reached this point.
if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[3],
filters.get_shape()[2]))
# We have two padding contributions. The first is used for converting "SAME"
# to "VALID". The second is required so that the height and width of the
# zero-padded value tensor are multiples of rate.
# Padding required to reduce to "VALID" convolution
if padding == "SAME":
# Handle filters whose shape is unknown during graph creation.
if filters.get_shape().is_fully_defined():
filter_shape = filters.get_shape().as_list()
else:
filter_shape = array_ops.shape(filters)
filter_height, filter_width = filter_shape[0], filter_shape[1]
# Spatial dimensions of the filters and the upsampled filters in which we
# introduce (rate - 1) zeros between consecutive filter values.
filter_height_up = filter_height + (filter_height - 1) * (rate - 1)
filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
pad_height = filter_height_up - 1
pad_width = filter_width_up - 1
# When pad_height (pad_width) is odd, we pad more to bottom (right),
# following the same convention as conv2d().
pad_top = pad_height // 2
pad_bottom = pad_height - pad_top
pad_left = pad_width // 2
pad_right = pad_width - pad_left
elif padding == "VALID":
pad_top = 0
pad_bottom = 0
pad_left = 0
pad_right = 0
else:
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
in_height = output_shape[1] + pad_top + pad_bottom
in_width = output_shape[2] + pad_left + pad_right
# More padding so that rate divides the height and width of the input.
pad_bottom_extra = (rate - in_height % rate) % rate
pad_right_extra = (rate - in_width % rate) % rate
# The paddings argument to space_to_batch is just the extra padding
# component.
space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]]
value = array_ops.space_to_batch(
input=value, paddings=space_to_batch_pad, block_size=rate)
input_sizes = [
rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate,
(in_width + pad_right_extra) // rate, output_shape[3]
]
value = gen_nn_ops.conv2d_backprop_input(
input_sizes=input_sizes,
filter=filters,
out_backprop=value,
strides=[1, 1, 1, 1],
padding="VALID",
data_format="NHWC")
# The crops argument to batch_to_space includes both padding components.
batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra],
[pad_left, pad_right + pad_right_extra]]
return array_ops.batch_to_space(
input=value, crops=batch_to_space_crop, block_size=rate)
@tf_export("nn.conv3d", v1=[])
def conv3d_v2(input, # pylint: disable=redefined-builtin,missing-docstring
filters,
strides,
padding,
data_format="NDHWC",
dilations=None,
name=None):
if dilations is None:
dilations = [1, 1, 1, 1, 1]
return gen_nn_ops.conv3d(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
data_format=data_format,
dilations=dilations,
name=name)
tf_export(v1=["nn.conv3d"])(gen_nn_ops.conv3d)
conv3d_v2.__doc__ = deprecation.rewrite_argument_docstring(
gen_nn_ops.conv3d.__doc__, "filter", "filters")
@tf_export(v1=["nn.conv3d_transpose"])
def conv3d_transpose(
value,
filter, # pylint: disable=redefined-builtin
output_shape,
strides,
padding="SAME",
data_format="NDHWC",
name=None):
"""The transpose of `conv3d`.
This operation is sometimes called "deconvolution" after [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
actually the transpose (gradient) of `conv3d` rather than an actual
deconvolution.
Args:
value: A 5-D `Tensor` of type `float` and shape
`[batch, depth, height, width, in_channels]`.
filter: A 5-D `Tensor` with the same type as `value` and shape
`[depth, height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string, either `'NDHWC'` or `'NCDHW`' specifying the layout
of the input and output tensors. Defaults to `'NDHWC'`.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.name_scope(name, "conv3d_transpose",
[value, filter, output_shape]) as name:
value = ops.convert_to_tensor(value, name="value")
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
axis = 1 if data_format == "NCDHW" else 4
if not value.get_shape().dims[axis].is_compatible_with(
filter.get_shape()[4]):
raise ValueError("input channels does not match filter's input channels, "
"{} != {}".format(value.get_shape()[axis],
filter.get_shape()[4]))
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(5)):
raise ValueError("output_shape must have shape (5,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [5] if reached this point.
if not filter.get_shape().dims[3].is_compatible_with(
output_shape[axis]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[axis],
filter.get_shape()[3]))
if padding != "VALID" and padding != "SAME":
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
return gen_nn_ops.conv3d_backprop_input_v2(
input_sizes=output_shape_,
filter=filter,
out_backprop=value,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.conv3d_transpose", v1=[])
def conv3d_transpose_v2(
    input,
    filters,
    output_shape,
    strides,
    padding="SAME",
    data_format="NDHWC",
    name=None):
  # Thin v2 wrapper around the v1 endpoint; its docstring is generated from
  # conv3d_transpose's by the module-level rewrite below this definition.
  return conv3d_transpose(
      value=input,
      filter=filters,
      output_shape=output_shape,
      strides=strides,
      padding=padding,
      data_format=data_format,
      name=name)
# pylint: enable=redefined-builtin
# Reuse conv3d_transpose's docstring for the v2 endpoint, renaming the
# `filter`/`value` arguments to `filters`/`input` to match the v2 signature.
conv3d_transpose_v2.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        conv3d_transpose.__doc__, "filter", "filters"),
    "value", "input")
@tf_export("nn.bias_add")
def bias_add(value, bias, data_format=None, name=None):
"""Adds `bias` to `value`.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
"""
with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
if not context.executing_eagerly():
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
return gen_nn_ops.bias_add(value, bias, data_format=data_format, name=name)
def bias_add_v1(value, bias, name=None):
  """Adds `bias` to `value` (deprecated; prefer `bias_add`).

  This is a deprecated version of bias_add and will soon be removed.
  Broadcasting is supported, so `value` may have any number of dimensions.
  Unlike `tf.add`, the type of `bias` may differ from `value` when both
  types are quantized.

  Args:
    value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
      `int16`, `int8`, `complex64`, or `complex128`.
    bias: A 1-D `Tensor` with size matching the last dimension of `value`.
      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.name_scope(name, "BiasAddV1", [value, bias]) as name:
    value_t = ops.convert_to_tensor(value, name="input")
    bias_t = ops.convert_to_tensor(bias, dtype=value_t.dtype, name="bias")
    return gen_nn_ops.bias_add_v1(value_t, bias_t, name=name)
@tf_export(v1=["nn.crelu"])
def crelu(features, name=None, axis=-1):
  """Computes Concatenated ReLU.

  Applies ReLU to the concatenation of `features` and `-features` along
  `axis`, thereby keeping both the positive and the *negative* parts of the
  activation. As a result this non-linearity doubles the depth of the
  activations.

  Source: [Understanding and Improving Convolutional Neural Networks via
  Concatenated Rectified Linear Units. W. Shang, et
  al.](https://arxiv.org/abs/1603.05201)

  Args:
    features: A `Tensor` with type `float`, `double`, `int32`, `int64`,
      `uint8`, `int16`, or `int8`.
    name: A name for the operation (optional).
    axis: The axis that the output values are concatenated along. Default is
      -1.

  Returns:
    A `Tensor` with the same type as `features`.
  """
  with ops.name_scope(name, "CRelu", [features]) as name:
    features = ops.convert_to_tensor(features, name="features")
    both_signs = array_ops.concat([features, -features], axis, name=name)
    return gen_nn_ops.relu(both_signs)
@tf_export("nn.crelu", v1=[])
def crelu_v2(features, axis=-1, name=None):
  # v2 endpoint: identical to `crelu`; only the argument order differs.
  return crelu(features, axis=axis, name=name)
crelu_v2.__doc__ = crelu.__doc__
@tf_export("nn.relu6")
def relu6(features, name=None):
  """Computes Rectified Linear 6: `min(max(features, 0), 6)`.

  Source: [Convolutional Deep Belief Networks on CIFAR-10. A.
  Krizhevsky](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf)

  Args:
    features: A `Tensor` with type `float`, `double`, `int32`, `int64`,
      `uint8`, `int16`, or `int8`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `features`.
  """
  with ops.name_scope(name, "Relu6", [features]) as name:
    converted = ops.convert_to_tensor(features, name="features")
    return gen_nn_ops.relu6(converted, name=name)
@tf_export("nn.leaky_relu")
def leaky_relu(features, alpha=0.2, name=None):
  """Compute the Leaky ReLU activation function.

  "Rectifier Nonlinearities Improve Neural Network Acoustic Models"
  AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013
  https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf

  Args:
    features: A `Tensor` representing preactivation values. Must be one of
      the following types: `float16`, `float32`, `float64`, `int32`, `int64`.
    alpha: Slope of the activation function at x < 0.
    name: A name for the operation (optional).

  Returns:
    The activation value.
  """
  with ops.name_scope(name, "LeakyRelu", [features, alpha]) as name:
    features = ops.convert_to_tensor(features, name="features")
    # The fused kernel only supports floating-point inputs.
    if features.dtype.is_integer:
      features = math_ops.to_float(features)
    if compat.forward_compatible(2018, 11, 1):
      if isinstance(alpha, np.ndarray):
        # np.asscalar was deprecated in NumPy 1.16 and removed in 1.23;
        # ndarray.item() is the supported equivalent for size-1 arrays.
        alpha = alpha.item()
      return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)
    # Fallback path for older runtimes without the fused LeakyRelu kernel.
    alpha = ops.convert_to_tensor(alpha, dtype=features.dtype, name="alpha")
    return math_ops.maximum(alpha * features, features, name=name)
def _flatten_outer_dims(logits):
  """Collapses all but the last dimension of `logits` into one dimension."""
  rank = array_ops.rank(logits)
  last_dim_size = array_ops.slice(
      array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
  output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))

  # In graph mode, propagate a static shape when every outer dimension is
  # known, since the dynamic reshape above erases it.
  if not context.executing_eagerly():
    shape = logits.get_shape()
    if shape is not None and shape.dims is not None:
      dims = shape.as_list()
      outer = dims[:-1]
      if all(d is not None for d in outer):
        flattened = 1
        for d in outer:
          flattened *= d
        output.set_shape([flattened, dims[-1]])
  return output
def _softmax(logits, compute_op, dim=-1, name=None):
  """Helper function for softmax and log_softmax.

  It reshapes and transposes the input logits into a 2-D Tensor and then
  invokes the tf.nn._softmax or tf.nn._log_softmax function. The output would
  be transposed and reshaped back.

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    compute_op: Either gen_nn_ops.softmax or gen_nn_ops.log_softmax
    dim: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`. Same shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `dim` is beyond the last
      dimension of `logits`.
  """

  def _swap_axis(logits, dim_index, last_index, name=None):
    """Swaps logits's dim_index and last_index."""
    return array_ops.transpose(
        logits,
        array_ops.concat([
            math_ops.range(dim_index), [last_index],
            math_ops.range(dim_index + 1, last_index), [dim_index]
        ], 0),
        name=name)

  logits = ops.convert_to_tensor(logits)

  # We need its original shape for shape inference.
  shape = logits.get_shape()
  # BUG FIX: the original test was `dim is -1`, an identity comparison with
  # an int literal that only works because CPython caches small ints (and
  # raises SyntaxWarning on Python 3.8+). Compare by value instead, treating
  # a Tensor `dim` as "not statically the last dim" — the same result the
  # identity-based comparison produced.
  if isinstance(dim, ops.Tensor):
    is_last_dim = False
  else:
    is_last_dim = dim == -1 or dim == shape.ndims - 1
  if is_last_dim:
    return compute_op(logits, name=name)

  dim_val = dim
  if isinstance(dim, ops.Tensor):
    dim_val = tensor_util.constant_value(dim)
  if dim_val is not None and (dim_val < -shape.ndims or dim_val >= shape.ndims):
    raise errors_impl.InvalidArgumentError(
        None, None,
        "Dimension (%d) must be in the range [%d, %d) where %d is the number of"
        " dimensions in the input." % (dim_val, -shape.ndims, shape.ndims,
                                       shape.ndims))

  # If dim is not the last dimension, we have to do a transpose so that we can
  # still perform softmax on its last dimension.

  # In case dim is negative (and is not last dimension -1), add shape.ndims
  ndims = array_ops.rank(logits)
  if not isinstance(dim, ops.Tensor):
    if dim < 0:
      dim += ndims
  else:
    dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim)

  # Swap logits' dimension of dim and its last dimension.
  input_rank = array_ops.rank(logits)
  dim_axis = dim % shape.ndims
  logits = _swap_axis(logits, dim_axis, math_ops.subtract(input_rank, 1))

  # Do the actual softmax on its last dimension.
  output = compute_op(logits)

  output = _swap_axis(
      output, dim_axis, math_ops.subtract(input_rank, 1), name=name)

  # Make shape inference work since transpose may erase its static shape.
  output.set_shape(shape)

  return output
@tf_export(v1=["nn.softmax", "math.softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax(logits, axis=None, name=None, dim=None):
  """Computes softmax activations.

  This function performs the equivalent of

      softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    axis: The dimension softmax would be performed on. The default is -1
      which indicates the last dimension.
    name: A name for the operation (optional).
    dim: Deprecated alias for `axis`.

  Returns:
    A `Tensor`. Has the same type and shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
      dimension of `logits`.
  """
  # Resolve the deprecated `dim` alias first, then default to the last dim.
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
  return _softmax(logits, gen_nn_ops.softmax, -1 if axis is None else axis,
                  name)
@tf_export("nn.softmax", "math.softmax", v1=[])
def softmax_v2(logits, axis=None, name=None):
  """Computes softmax activations.

  This function performs the equivalent of

      softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    axis: The dimension softmax would be performed on. The default is -1
      which indicates the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type and shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
      dimension of `logits`.
  """
  return _softmax(logits, gen_nn_ops.softmax, -1 if axis is None else axis,
                  name)
@tf_export(v1=["nn.log_softmax", "math.log_softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def log_softmax(logits, axis=None, name=None, dim=None):
  """Computes log softmax activations.

  For each batch `i` and class `j` we have

      logsoftmax = logits - log(reduce_sum(exp(logits), axis))

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    axis: The dimension softmax would be performed on. The default is -1
      which indicates the last dimension.
    name: A name for the operation (optional).
    dim: Deprecated alias for `axis`.

  Returns:
    A `Tensor`. Has the same type as `logits`. Same shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
      dimension of `logits`.
  """
  # Resolve the deprecated `dim` alias first, then default to the last dim.
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
  return _softmax(logits, gen_nn_ops.log_softmax, -1 if axis is None else axis,
                  name)
@tf_export("nn.log_softmax", "math.log_softmax", v1=[])
def log_softmax_v2(logits, axis=None, name=None):
  """Computes log softmax activations.

  For each batch `i` and class `j` we have

      logsoftmax = logits - log(reduce_sum(exp(logits), axis))

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    axis: The dimension softmax would be performed on. The default is -1
      which indicates the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`. Same shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
      dimension of `logits`.
  """
  return _softmax(logits, gen_nn_ops.log_softmax, -1 if axis is None else axis,
                  name)
def _ensure_xent_args(name, sentinel, labels, logits):
# Make sure that all arguments were passed as named arguments.
if sentinel is not None:
raise ValueError("Only call `%s` with "
"named arguments (labels=..., logits=..., ...)" % name)
if labels is None or logits is None:
raise ValueError("Both labels and logits must be provided.")
@tf_export("nn.softmax_cross_entropy_with_logits", v1=[])
def softmax_cross_entropy_with_logits_v2(labels, logits, axis=-1, name=None):
  """Computes softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which
  the classes are mutually exclusive (each entry is in exactly one class).

  **NOTE:** While the classes are mutually exclusive, their probabilities
  need not be; each row of `labels` only has to be a valid probability
  distribution. If using exclusive `labels` (one and only one class true at
  a time), see `sparse_softmax_cross_entropy_with_logits`.

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the output
  of `softmax`, as it will produce incorrect results.

  A common use case is to have logits and labels of shape
  `[batch_size, num_classes]`, but higher dimensions are supported, with the
  `axis` argument specifying the class dimension. `logits` and `labels` must
  have the same dtype (either `float16`, `float32`, or `float64`).

  Backpropagation will happen into both `logits` and `labels`. To disallow
  backpropagation into `labels`, pass label tensors through
  `tf.stop_gradient` before feeding it to this function.

  Args:
    labels: Each vector along the class dimension should hold a valid
      probability distribution.
    logits: Unscaled log probabilities.
    axis: The class dimension. Defaulted to -1 which is the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that contains the softmax cross entropy loss. Its type is the
    same as `logits` and its shape is the same as `labels` except that it
    does not have the last dimension of `labels`.
  """
  # All real work (dtype promotion, axis shuffling, shape restoration)
  # happens in the v1 helper.
  return softmax_cross_entropy_with_logits_v2_helper(
      labels=labels, logits=logits, axis=axis, name=name)
@tf_export(v1=["nn.softmax_cross_entropy_with_logits_v2"])
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax_cross_entropy_with_logits_v2_helper(
    labels, logits, axis=None, name=None, dim=None):
  """Computes softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which
  the classes are mutually exclusive (each entry is in exactly one class).

  **NOTE:** While the classes are mutually exclusive, their probabilities
  need not be; each row of `labels` only has to be a valid probability
  distribution. If using exclusive `labels`, see
  `sparse_softmax_cross_entropy_with_logits`.

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the output
  of `softmax`, as it will produce incorrect results.

  `logits` and `labels` must have the same dtype (either `float16`,
  `float32`, or `float64`). Backpropagation will happen into both `logits`
  and `labels`; to disallow backpropagation into `labels`, pass label
  tensors through `tf.stop_gradient` before feeding it to this function.

  **Note that to avoid confusion, it is required to pass only named
  arguments to this function.**

  Args:
    labels: Each vector along the class dimension should hold a valid
      probability distribution e.g. for the case in which labels are of
      shape `[batch_size, num_classes]`, each row of `labels[i]` must be a
      valid probability distribution.
    logits: Unscaled log probabilities.
    axis: The class dimension. Defaulted to -1 which is the last dimension.
    name: A name for the operation (optional).
    dim: Deprecated alias for axis.

  Returns:
    A `Tensor` that contains the softmax cross entropy loss. Its type is the
    same as `logits` and its shape is the same as `labels` except that it
    does not have the last dimension of `labels`.
  """
  # TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This
  # could break users who call this with bad labels, but disregard the bad
  # results.
  axis = deprecated_argument_lookup("axis", axis, "dim", dim)
  del dim
  if axis is None:
    axis = -1
  with ops.name_scope(name, "softmax_cross_entropy_with_logits",
                      [logits, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    labels = ops.convert_to_tensor(labels, name="labels")
    # Half-precision inputs are promoted to float32 for the computation and
    # cast back to the original dtype before returning.
    convert_to_float32 = (
        logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16)
    precise_logits = math_ops.cast(
        logits, dtypes.float32) if convert_to_float32 else logits
    # labels and logits must be of the same type
    labels = math_ops.cast(labels, precise_logits.dtype)
    input_rank = array_ops.rank(precise_logits)
    # For shape inference.
    shape = logits.get_shape()
    # Move the dim to the end if dim is not the last dimension.
    if axis != -1:
      def _move_dim_to_end(tensor, dim_index, rank):
        # Builds a permutation that moves `dim_index` to the last position
        # while preserving the relative order of all other dimensions.
        return array_ops.transpose(
            tensor,
            array_ops.concat([
                math_ops.range(dim_index),
                math_ops.range(dim_index + 1, rank), [dim_index]
            ], 0))
      precise_logits = _move_dim_to_end(precise_logits, axis, input_rank)
      labels = _move_dim_to_end(labels, axis, input_rank)
    input_shape = array_ops.shape(precise_logits)
    # Make precise_logits and labels into matrices.
    precise_logits = _flatten_outer_dims(precise_logits)
    labels = _flatten_outer_dims(labels)
    # Do the actual op computation.
    # The second output tensor contains the gradients. We use it in
    # _CrossEntropyGrad() in nn_grad but not here.
    cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
        precise_logits, labels, name=name)
    # The output cost shape should be the input minus axis.
    output_shape = array_ops.slice(input_shape, [0],
                                   [math_ops.subtract(input_rank, 1)])
    cost = array_ops.reshape(cost, output_shape)
    # Make shape inference work since reshape and transpose may erase its
    # static shape.
    if not context.executing_eagerly(
    ) and shape is not None and shape.dims is not None:
      shape = shape.as_list()
      del shape[axis]
      cost.set_shape(shape)
    if convert_to_float32:
      return math_ops.cast(cost, logits.dtype)
    else:
      return cost
# Deprecation instructions attached to the v1 endpoint below; rendered in
# the warning emitted by @deprecation.deprecated.
_XENT_DEPRECATION = """
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See `tf.nn.softmax_cross_entropy_with_logits_v2`.
"""
@tf_export(v1=["nn.softmax_cross_entropy_with_logits"])
@deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION)
def softmax_cross_entropy_with_logits(
    _sentinel=None,  # pylint: disable=invalid-name
    labels=None,
    logits=None,
    dim=-1,
    name=None):
  """Computes softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which
  the classes are mutually exclusive (each entry is in exactly one class).

  **NOTE:** While the classes are mutually exclusive, their probabilities
  need not be; each row of `labels` only has to be a valid probability
  distribution. If using exclusive `labels`, see
  `sparse_softmax_cross_entropy_with_logits`.

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the output
  of `softmax`, as it will produce incorrect results.

  A common use case is to have logits and labels of shape
  `[batch_size, num_classes]`, but higher dimensions are supported, with the
  `dim` argument specifying the class dimension.

  Backpropagation will happen only into `logits`. To calculate a cross
  entropy loss that allows backpropagation into both `logits` and `labels`,
  see `tf.nn.softmax_cross_entropy_with_logits_v2`.

  **Note that to avoid confusion, it is required to pass only named
  arguments to this function.**

  Args:
    _sentinel: Used to prevent positional parameters. Internal, do not use.
    labels: Each vector along the class dimension should hold a valid
      probability distribution.
    logits: Unscaled log probabilities.
    dim: The class dimension. Defaulted to -1 which is the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that contains the softmax cross entropy loss. Its type is the
    same as `logits` and its shape is the same as `labels` except that it
    does not have the last dimension of `labels`.
  """
  _ensure_xent_args("softmax_cross_entropy_with_logits", _sentinel, labels,
                    logits)

  with ops.name_scope(name, "softmax_cross_entropy_with_logits_sg",
                      [logits, labels]) as name:
    # v1 semantics: gradients never flow into the labels.
    labels = array_ops.stop_gradient(labels, name="labels_stop_gradient")

  return softmax_cross_entropy_with_logits_v2(
      labels=labels, logits=logits, axis=dim, name=name)
@tf_export("nn.sparse_softmax_cross_entropy_with_logits")
def sparse_softmax_cross_entropy_with_logits(
    _sentinel=None,  # pylint: disable=invalid-name
    labels=None,
    logits=None,
    name=None):
  """Computes sparse softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which the
  classes are mutually exclusive (each entry is in exactly one class). For
  example, each CIFAR-10 image is labeled with one and only one label: an image
  can be a dog or a truck, but not both.

  **NOTE:** For this operation, the probability of a given label is considered
  exclusive. That is, soft classes are not allowed, and the `labels` vector
  must provide a single specific index for the true class for each row of
  `logits` (each minibatch entry). For soft softmax classification with
  a probability distribution for each entry, see
  `softmax_cross_entropy_with_logits_v2`.

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the
  output of `softmax`, as it will produce incorrect results.

  A common use case is to have logits of shape
  `[batch_size, num_classes]` and have labels of shape
  `[batch_size]`, but higher dimensions are supported, in which
  case the `dim`-th dimension is assumed to be of size `num_classes`.
  `logits` must have the dtype of `float16`, `float32`, or `float64`, and
  `labels` must have the dtype of `int32` or `int64`.

  **Note that to avoid confusion, it is required to pass only named arguments to
  this function.**

  Args:
    _sentinel: Used to prevent positional parameters. Internal, do not use.
    labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
      `labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
      must be an index in `[0, num_classes)`. Other values will raise an
      exception when this op is run on CPU, and return `NaN` for corresponding
      loss and gradient rows on GPU.
    logits: Unscaled log probabilities of shape
      `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or
      `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `labels` and of the same type as `logits`
    with the softmax cross entropy loss.

  Raises:
    ValueError: If logits are scalars (need to have rank >= 1) or if the rank
      of the labels is not equal to the rank of the logits minus one.
  """
  _ensure_xent_args("sparse_softmax_cross_entropy_with_logits", _sentinel,
                    labels, logits)
  # TODO(pcmurray) Raise an error when the label is not an index in
  # [0, num_classes). Note: This could break users who call this with bad
  # labels, but disregard the bad results.
  # Reshape logits and labels to rank 2.
  with ops.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits",
                      [labels, logits]):
    labels = ops.convert_to_tensor(labels)
    logits = ops.convert_to_tensor(logits)
    # float16 logits are promoted to float32 for the computation; the result
    # is cast back to float16 before returning.
    precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype(
        logits.dtype) == dtypes.float16) else logits
    # Store label shape for result later.
    labels_static_shape = labels.get_shape()
    labels_shape = array_ops.shape(labels)
    static_shapes_fully_defined = (
        labels_static_shape.is_fully_defined() and
        logits.get_shape()[:-1].is_fully_defined())
    # Static (graph-construction-time) validation, where shapes are known.
    if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:
      raise ValueError(
          "Logits cannot be scalars - received shape %s." % logits.get_shape())
    if logits.get_shape().ndims is not None and (
        labels_static_shape.ndims is not None and
        labels_static_shape.ndims != logits.get_shape().ndims - 1):
      raise ValueError("Rank mismatch: Rank of labels (received %s) should "
                       "equal rank of logits minus 1 (received %s)." %
                       (labels_static_shape.ndims, logits.get_shape().ndims))
    if (static_shapes_fully_defined and
        labels_static_shape != logits.get_shape()[:-1]):
      raise ValueError("Shape mismatch: The shape of labels (received %s) "
                       "should equal the shape of logits except for the last "
                       "dimension (received %s)." % (labels_static_shape,
                                                     logits.get_shape()))
    # Check if no reshapes are required.
    if logits.get_shape().ndims == 2:
      cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
          precise_logits, labels, name=name)
      if logits.dtype == dtypes.float16:
        return math_ops.cast(cost, dtypes.float16)
      else:
        return cost
    # Perform a check of the dynamic shapes if the static shapes are not fully
    # defined.
    shape_checks = []
    if not static_shapes_fully_defined:
      shape_checks.append(
          check_ops.assert_equal(
              array_ops.shape(labels),
              array_ops.shape(logits)[:-1]))
    with ops.control_dependencies(shape_checks):
      # Reshape logits to 2 dim, labels to 1 dim.
      num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1]
      precise_logits = array_ops.reshape(precise_logits, [-1, num_classes])
      labels = array_ops.reshape(labels, [-1])
      # The second output tensor contains the gradients. We use it in
      # _CrossEntropyGrad() in nn_grad but not here.
      cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
          precise_logits, labels, name=name)
      # Restore the caller-visible label shape, both dynamically and
      # statically.
      cost = array_ops.reshape(cost, labels_shape)
      cost.set_shape(labels_static_shape)
      if logits.dtype == dtypes.float16:
        return math_ops.cast(cost, dtypes.float16)
      else:
        return cost
@tf_export("nn.avg_pool")
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs the average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize` window
  in `value`.

  Args:
    value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and
      type `float32`, `float64`, `qint8`, `quint8`, or `qint32`.
    ksize: A list or tuple of 4 ints. The size of the window for each
      dimension of the input tensor.
    strides: A list or tuple of 4 ints. The stride of the sliding window for
      each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` with the same type as `value`. The average pooled output
    tensor.
  """
  with ops.name_scope(name, "AvgPool", [value]) as name:
    value = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops.avg_pool(
        value, ksize=ksize, strides=strides, padding=padding,
        data_format=data_format, name=name)
@tf_export("nn.max_pool")
def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs the max pooling on the input.

  Args:
    value: A 4-D `Tensor` of the format specified by `data_format`.
    ksize: A list or tuple of 4 ints. The size of the window for each
      dimension of the input tensor.
    strides: A list or tuple of 4 ints. The stride of the sliding window for
      each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` of format specified by `data_format`. The max pooled output
    tensor.
  """
  with ops.name_scope(name, "MaxPool", [value]) as name:
    value = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops.max_pool(
        value, ksize=ksize, strides=strides, padding=padding,
        data_format=data_format, name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool_with_argmax", v1=[])
def max_pool_with_argmax_v2(input,
                            ksize,
                            strides,
                            padding,
                            data_format="NHWC",
                            output_dtype=dtypes.int64,
                            name=None):
  """Performs max pooling on the input and outputs both max values and indices.

  The indices in `argmax` are flattened, so that a maximum value at position
  `[b, y, x, c]` becomes flattened index
  `((b * height + y) * width + x) * channels + c`.

  The indices returned are always in `[0, height) x [0, width)` before
  flattening, even if padding is involved and the mathematically correct
  answer is outside (either negative or too large). This is a bug, but fixing
  it is difficult to do in a safe backwards compatible way, especially due to
  flattening.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`,
      `uint16`, `half`, `uint32`, `uint64`.
      4-D with shape `[batch, height, width, channels]`. Input to pool over.
    ksize: A list of `ints` that has length `>= 4`. The size of the window
      for each dimension of the input tensor.
    strides: A list of `ints` that has length `>= 4`. The stride of the
      sliding window for each dimension of the input tensor.
    padding: A `string` from: `"SAME", "VALID"`. The type of padding
      algorithm to use.
    data_format: An optional `string`, must be set to `"NHWC"`. Defaults to
      `"NHWC"`. Specify the data format of the input and output data.
    output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults
      to `tf.int64`. The dtype of the returned argmax tensor.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, argmax).

    output: A `Tensor`. Has the same type as `input`.
    argmax: A `Tensor` of type `output_dtype`.

  Raises:
    ValueError: if `data_format` is not `"NHWC"`.
  """
  # The underlying kernel only implements the NHWC layout.
  if data_format != "NHWC":
    raise ValueError("Data formats other than 'NHWC' are not yet supported")

  return gen_nn_ops.max_pool_with_argmax(
      input=input, ksize=ksize, strides=strides, padding=padding,
      Targmax=output_dtype, name=name)
# pylint: enable=redefined-builtin
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
  """Calculates the compute resources needed for Conv2D."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  filter_shape = graph_util.tensor_shape_from_node_def_name(
      graph, node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  height = int(filter_shape[0])
  width = int(filter_shape[1])
  in_depth = int(filter_shape[2])
  num_outputs = np.prod(output_shape.as_list(), dtype=np.int64)
  # One multiply plus one add per filter tap per output element.
  return ops.OpStats("flops", num_outputs * in_depth * height * width * 2)
@ops.RegisterStatistics("DepthwiseConv2dNative", "flops")
def _calc_depthwise_conv_flops(graph, node):
  """Calculates the compute resources needed for DepthwiseConv2dNative."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  filter_shape = graph_util.tensor_shape_from_node_def_name(
      graph, node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  height = int(filter_shape[0])
  width = int(filter_shape[1])
  num_outputs = np.prod(output_shape.as_list(), dtype=np.int64)
  # Depthwise: one multiply plus one add per filter tap per output element
  # (no reduction over an input-depth dimension).
  return ops.OpStats("flops", num_outputs * height * width * 2)
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
  """Calculates the computing needed for BiasAdd: one add per input element."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  return ops.OpStats("flops", np.prod(input_shape.as_list()))
@tf_export(v1=["nn.xw_plus_b"])
def xw_plus_b(x, weights, biases, name=None):  # pylint: disable=invalid-name
  """Computes matmul(x, weights) + biases.

  Args:
    x: a 2D tensor. Dimensions typically: batch, in_units
    weights: a 2D tensor. Dimensions typically: in_units, out_units
    biases: a 1D tensor. Dimensions: out_units
    name: A name for the operation (optional). If not specified "xw_plus_b"
      is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name:
    x_t = ops.convert_to_tensor(x, name="x")
    w_t = ops.convert_to_tensor(weights, name="weights")
    b_t = ops.convert_to_tensor(biases, name="biases")
    # bias_add broadcasts the 1-D bias over the batch dimension.
    return bias_add(math_ops.matmul(x_t, w_t), b_t, name=name)
def xw_plus_b_v1(x, weights, biases, name=None):  # pylint: disable=invalid-name
  """Computes matmul(x, weights) + biases.

  This is a deprecated version of that will soon be removed.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units.
    weights: a 2D tensor.  Dimensions typically: in_units, out_units.
    biases: a 1D tensor.  Dimensions: out_units.
    name: A name for the operation (optional).  If not specified
      "xw_plus_b_v1" is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name:
    # Same as xw_plus_b, except the bias is applied with bias_add_v1 (which
    # does not require a rank-1 bias).
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    return bias_add_v1(math_ops.matmul(x, weights), biases, name=name)
def _get_noise_shape(x, noise_shape):
  """Resolves the noise shape used for dropout's random keep/drop mask."""
  # With no explicit noise_shape, make a per-element decision on x.
  if noise_shape is None:
    return array_ops.shape(x)

  try:
    # Best effort to interpret the requested shape.  If it cannot be parsed,
    # hand it to the op unchanged and let the op validate it (in eager mode
    # the failure surfaces there).
    parsed = tensor_shape.as_shape(noise_shape)
  except (TypeError, ValueError):
    return noise_shape

  x_dims = x.shape.dims
  if x_dims is not None and len(x_dims) == len(parsed.dims):
    # Fill unknown noise dimensions from the known static dimensions of x.
    merged = [
        x_dim.value
        if noise_dim.value is None and x_dim.value is not None
        else noise_dim.value
        for x_dim, noise_dim in zip(x_dims, parsed.dims)
    ]
    return tensor_shape.TensorShape(merged)

  return noise_shape
@tf_export(v1=["nn.dropout"])
@deprecation.deprecated_args(None, "Please use `rate` instead of `keep_prob`. "
                             "Rate should be set to `rate = 1 - keep_prob`.",
                             "keep_prob")
def dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None,
            rate=None):  # pylint: disable=invalid-name
  """Computes dropout (v1 wrapper supporting the deprecated `keep_prob`).

  With probability `rate`, each element of `x` is replaced by `0`; surviving
  elements are scaled up by `1 / (1 - rate)` so that the expected sum is
  unchanged.

  If `noise_shape` is specified it must be
  [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  to the shape of `x`, and only dimensions with
  `noise_shape[i] == shape(x)[i]` make independent keep/drop decisions.

  Args:
    x: A floating point tensor.
    keep_prob: (deprecated) A deprecated alias for `(1-rate)`.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
      randomly generated keep/drop flags.
    seed: A Python integer.  Used to create random seeds.  See
      `tf.set_random_seed` for behavior.
    name: A name for this operation (optional).
    rate: A scalar `Tensor` with the same type as `x`.  The probability that
      each element of `x` is discarded.

  Returns:
    A Tensor of the same shape of `x`.

  Raises:
    ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
      point tensor.
  """
  # Translate the legacy keep_prob argument into the equivalent rate.
  if keep_prob is None:
    keep = None
  else:
    try:
      keep = 1. - keep_prob
    except TypeError:
      raise ValueError("keep_prob must be a floating point number or Tensor "
                       "(got %r)" % keep_prob)
  rate = deprecation.deprecated_argument_lookup(
      "rate", rate,
      "keep_prob", keep)
  if rate is None:
    raise ValueError("You must provide a rate to dropout.")
  return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
@tf_export("nn.dropout", v1=[])
def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):  # pylint: disable=invalid-name
  """Computes dropout.

  With probability `rate`, drops elements of `x`. Input that are kept are
  scaled up by `1 / (1 - rate)`, otherwise outputs `0`. The scaling is so that
  the expected sum is unchanged.

  By default, each element is kept or dropped independently.  If `noise_shape`
  is specified, it must be
  [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
  will make independent decisions.  For example, if `shape(x) = [k, l, m, n]`
  and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
  kept independently and each row and column will be kept or not kept together.

  Args:
    x: A floating point tensor.
    rate: A scalar `Tensor` with the same type as x. The probability
      that each element is dropped. For example, setting rate=0.1 would drop
      10% of input elements.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed`
      for behavior.
    name: A name for this operation (optional).

  Returns:
    A Tensor of the same shape of `x`.

  Raises:
    ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
      point tensor.
  """
  with ops.name_scope(name, "dropout", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if not x.dtype.is_floating:
      raise ValueError("x has to be a floating point tensor since it's going to"
                       " be scaled. Got a %s tensor instead." % x.dtype)
    # Only a Python number can be range-checked here; a tensor rate is
    # validated below (graph mode) or by the ops themselves (eager mode).
    if isinstance(rate, numbers.Real) and not (rate >= 0 and rate < 1):
      raise ValueError("rate must be a scalar tensor or a float in the "
                       "range [0, 1), got %g" % rate)

    # Early return if nothing needs to be dropped.
    if isinstance(rate, numbers.Real) and rate == 0:
      return x
    if context.executing_eagerly():
      # In eager mode, avoid the convert/assert below; only peek at a
      # concrete EagerTensor value for the rate == 0 fast path.
      if isinstance(rate, ops.EagerTensor):
        if rate.numpy() == 0:
          return x
    else:
      rate = ops.convert_to_tensor(
          rate, dtype=x.dtype, name="rate")
      rate.get_shape().assert_is_compatible_with(tensor_shape.scalar())

      # Do nothing if we know rate == 0
      if tensor_util.constant_value(rate) == 0:
        return x

    noise_shape = _get_noise_shape(x, noise_shape)
    keep_prob = 1 - rate
    # uniform [keep_prob, 1.0 + keep_prob)
    random_tensor = keep_prob
    random_tensor += random_ops.random_uniform(
        noise_shape, seed=seed, dtype=x.dtype)
    # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
    # i.e. each element is kept with probability keep_prob.
    binary_tensor = math_ops.floor(random_tensor)
    # Scale the survivors so the expected sum matches the input's.
    ret = math_ops.divide(x, keep_prob) * binary_tensor
    if not context.executing_eagerly():
      # Dropout never changes the shape; restore the (possibly more precise)
      # static shape lost through the elementwise ops above.
      ret.set_shape(x.get_shape())
    return ret
@tf_export("math.top_k", "nn.top_k")
def top_k(input, k=1, sorted=True, name=None):  # pylint: disable=redefined-builtin
  """Finds values and indices of the `k` largest entries for the last dimension.

  For a vector input (rank=1), returns the `k` largest entries and their
  indices as vectors, so `values[j]` is the `j`-th largest entry of `input`
  and `indices[j]` is its position.  For matrices (and higher-rank input),
  the top `k` entries are computed along the last dimension of each slice:

      values.shape = indices.shape = input.shape[:-1] + [k]

  Ties are broken in favor of the lower index.

  Args:
    input: 1-D or higher `Tensor` with last dimension at least `k`.
    k: 0-D `int32` `Tensor`.  Number of top elements to look for along the
      last dimension (along each row for matrices).
    sorted: If true the resulting `k` elements will be sorted by the values
      in descending order.
    name: Optional name for the operation.

  Returns:
    values: The `k` largest elements along each last dimensional slice.
    indices: The indices of `values` within the last dimension of `input`.
  """
  # Delegate to the v2 kernel, which returns both values and indices.
  result = gen_nn_ops.top_kv2(input, k=k, sorted=sorted, name=name)
  return result
def nth_element(input, n, reverse=False, name=None):  # pylint: disable=redefined-builtin
  r"""Finds values of the `n`-th order statistic for the last dimension.

  If the input is a vector (rank-1), finds the entry which is the nth-smallest
  value in the vector and outputs its value as a scalar tensor.

  For matrices (resp. higher rank input), computes the entry which is the
  nth-smallest value in each row (resp. vector along the last dimension). Thus,

      values.shape = input.shape[:-1]

  Args:
    input: 1-D or higher `Tensor` with last dimension at least `n+1`.
    n: A `Tensor` of type `int32`.
      0-D. Position of sorted vector to select along the last dimension (along
      each row for matrices). Valid range of n is `[0, input.shape[:-1])`
    reverse: An optional `bool`. Defaults to `False`.
      When set to True, find the nth-largest value in the vector and vice
      versa.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
    The `n`-th order statistic along each last dimensional slice.
  """
  return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)
@tf_export(v1=["nn.fractional_max_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
                        "args are deprecated. Use fractional_max_pool_v2.")
def fractional_max_pool(value,
                        pooling_ratio,
                        pseudo_random=False,
                        overlapping=False,
                        deterministic=False,
                        seed=0,
                        seed2=0,
                        name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional max pooling on the input (deprecated form).

  Unlike regular max pooling, which reduces each spatial dimension by an
  integer factor N, fractional max pooling permits a non-integer reduction
  ratio.  Pooling-region boundaries are generated randomly but fairly
  uniformly: for the height dimension with `alpha = input_row_length /
  output_row_length` and `K = floor(alpha)`, the boundary sequence starts at
  0, ends at `input_row_length`, has every interval of size K or K+1, and has
  `output_row_length + 1` entries.  See [Benjamin Graham, Fractional
  Max-Pooling](http://arxiv.org/abs/1412.6071) for details.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4.  Pooling ratio
      for each dimension of `value`; only the row and col dimensions are
      currently supported and must be >= 1.0.  The first and last elements
      must be 1.0 since pooling on batch and channels is not allowed, e.g.
      [1.0, 1.44, 1.73, 1.0].
    pseudo_random: An optional `bool`.  Defaults to `False`.  When `True`,
      generates the pooling sequence pseudorandomly rather than randomly;
      see the paper above for the distinction.
    overlapping: An optional `bool`.  Defaults to `False`.  When `True`,
      values on the boundary of adjacent pooling cells are used by both
      cells.  E.g. for values [20, 5, 16, 3, 7] and pooling sequence
      [0, 2, 4], index 2 (16) is used twice, giving [20, 16].
    deterministic: An optional `bool`.  Deprecated; use
      `fractional_max_pool_v2` instead.
    seed: An optional `int`.  Defaults to `0`.  If non-zero, seeds the random
      number generator; otherwise a random seed is used.
    seed2: An optional `int`.  Deprecated; use `fractional_max_pool_v2`
      instead.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).
      output: Output `Tensor` after fractional max pooling.  Has the same
        type as `value`.
      row_pooling_sequence: A `Tensor` of type `int64`.
      col_pooling_sequence: A `Tensor` of type `int64`.
  """
  # Forward every argument (including the deprecated ones) to the kernel.
  return gen_nn_ops.fractional_max_pool(
      value,
      pooling_ratio,
      pseudo_random=pseudo_random,
      overlapping=overlapping,
      deterministic=deterministic,
      seed=seed,
      seed2=seed2,
      name=name)
@tf_export("nn.fractional_max_pool", v1=[])
def fractional_max_pool_v2(value,
                           pooling_ratio,
                           pseudo_random=False,
                           overlapping=False,
                           seed=0,
                           name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional max pooling on the input.

  Unlike regular max pooling, which reduces each spatial dimension by an
  integer factor N, fractional max pooling permits a non-integer reduction
  ratio.  Pooling-region boundaries are generated randomly but fairly
  uniformly: for the height dimension with `alpha = input_row_length /
  output_row_length` and `K = floor(alpha)`, the boundary sequence starts at
  0, ends at `input_row_length`, has every interval of size K or K+1, and has
  `output_row_length + 1` entries.  See [Benjamin Graham, Fractional
  Max-Pooling](http://arxiv.org/abs/1412.6071) for details.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4.  Pooling ratio
      for each dimension of `value`; only the row and col dimensions are
      currently supported and must be >= 1.0.  The first and last elements
      must be 1.0 since pooling on batch and channels is not allowed, e.g.
      [1.0, 1.44, 1.73, 1.0].
    pseudo_random: An optional `bool`.  Defaults to `False`.  When `True`,
      generates the pooling sequence pseudorandomly rather than randomly;
      see the paper above for the distinction.
    overlapping: An optional `bool`.  Defaults to `False`.  When `True`,
      values on the boundary of adjacent pooling cells are used by both
      cells.  E.g. for values [20, 5, 16, 3, 7] and pooling sequence
      [0, 2, 4], index 2 (16) is used twice, giving [20, 16].
    seed: An optional `int`.  Defaults to `0`.  If non-zero, seeds the random
      number generator; otherwise a random seed is used.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).
      output: Output `Tensor` after fractional max pooling.  Has the same
        type as `value`.
      row_pooling_sequence: A `Tensor` of type `int64`.
      col_pooling_sequence: A `Tensor` of type `int64`.
  """
  # seed == 0 requests non-deterministic pooling; otherwise split the seed
  # into the op-level (seed, seed2) pair and run deterministically.
  if seed == 0:
    deterministic = False
    seed1 = 0
    seed2 = 0
  else:
    deterministic = True
    seed1, seed2 = random_seed.get_seed(seed)
  return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
                                        overlapping,
                                        deterministic=deterministic,
                                        seed=seed1, seed2=seed2, name=name)
@tf_export(v1=["nn.fractional_avg_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
                        "args are deprecated. Use fractional_avg_pool_v2.")
def fractional_avg_pool(value,
                        pooling_ratio,
                        pseudo_random=False,
                        overlapping=False,
                        deterministic=False,
                        seed=0,
                        seed2=0,
                        name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional average pooling on the input (deprecated form).

  Fractional average pooling generates pooling regions exactly like
  fractional max pooling; the only difference is that each region is reduced
  with a mean instead of a max.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4.  Pooling ratio
      for each dimension of `value`; only the row and col dimensions are
      currently supported and must be >= 1.0.  The first and last elements
      must be 1.0 since pooling on batch and channels is not allowed, e.g.
      [1.0, 1.44, 1.73, 1.0].
    pseudo_random: An optional `bool`.  Defaults to `False`.  When `True`,
      generates the pooling sequence pseudorandomly rather than randomly;
      see [Benjamin Graham, Fractional
      Max-Pooling](http://arxiv.org/abs/1412.6071) for the distinction.
    overlapping: An optional `bool`.  Defaults to `False`.  When `True`,
      values on the boundary of adjacent pooling cells are used by both
      cells.  E.g. for values [20, 5, 16, 3, 7] and pooling sequence
      [0, 2, 4], index 2 (16) is used twice, giving [20, 16].
    deterministic: An optional `bool`.  Deprecated; use
      `fractional_avg_pool_v2` instead.
    seed: An optional `int`.  Defaults to `0`.  If non-zero, seeds the random
      number generator; otherwise a random seed is used.
    seed2: An optional `int`.  Deprecated; use `fractional_avg_pool_v2`
      instead.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).
      output: Output `Tensor` after fractional avg pooling.  Has the same
        type as `value`.
      row_pooling_sequence: A `Tensor` of type `int64`.
      col_pooling_sequence: A `Tensor` of type `int64`.
  """
  # Forward every argument (including the deprecated ones) to the kernel.
  return gen_nn_ops.fractional_avg_pool(
      value,
      pooling_ratio,
      pseudo_random=pseudo_random,
      overlapping=overlapping,
      deterministic=deterministic,
      seed=seed,
      seed2=seed2,
      name=name)
@tf_export("nn.fractional_avg_pool", v1=[])
def fractional_avg_pool_v2(value,
                           pooling_ratio,
                           pseudo_random=False,
                           overlapping=False,
                           seed=0,
                           name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional average pooling on the input.

  Fractional average pooling generates pooling regions exactly like
  fractional max pooling; the only difference is that each region is reduced
  with a mean instead of a max.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4.  Pooling ratio
      for each dimension of `value`; only the row and col dimensions are
      currently supported and must be >= 1.0.  The first and last elements
      must be 1.0 since pooling on batch and channels is not allowed, e.g.
      [1.0, 1.44, 1.73, 1.0].
    pseudo_random: An optional `bool`.  Defaults to `False`.  When `True`,
      generates the pooling sequence pseudorandomly rather than randomly;
      see [Benjamin Graham, Fractional
      Max-Pooling](http://arxiv.org/abs/1412.6071) for the distinction.
    overlapping: An optional `bool`.  Defaults to `False`.  When `True`,
      values on the boundary of adjacent pooling cells are used by both
      cells.  E.g. for values [20, 5, 16, 3, 7] and pooling sequence
      [0, 2, 4], index 2 (16) is used twice, giving [20, 16].
    seed: An optional `int`.  Defaults to `0`.  If non-zero, seeds the random
      number generator; otherwise a random seed is used.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).
      output: Output `Tensor` after fractional avg pooling.  Has the same
        type as `value`.
      row_pooling_sequence: A `Tensor` of type `int64`.
      col_pooling_sequence: A `Tensor` of type `int64`.
  """
  # seed == 0 requests non-deterministic pooling; otherwise split the seed
  # into the op-level (seed, seed2) pair and run deterministically.
  if seed == 0:
    deterministic = False
    seed1 = 0
    seed2 = 0
  else:
    deterministic = True
    seed1, seed2 = random_seed.get_seed(seed)
  return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
                                        overlapping,
                                        deterministic=deterministic,
                                        seed=seed1, seed2=seed2, name=name)
@tf_export(v1=["nn.conv1d"])
@deprecation.deprecated_arg_values(
    None,
    "`NCHW` for data_format is deprecated, use `NCW` instead",
    warn_once=True,
    data_format="NCHW")
@deprecation.deprecated_arg_values(
    None,
    "`NHWC` for data_format is deprecated, use `NWC` instead",
    warn_once=True,
    data_format="NHWC")
def conv1d(value,
           filters,
           stride,
           padding,
           use_cudnn_on_gpu=None,
           data_format=None,
           name=None):
  r"""Computes a 1-D convolution given 3-D input and filter tensors.

  Given an input tensor of shape
    [batch, in_width, in_channels]
  if data_format is "NWC", or
    [batch, in_channels, in_width]
  if data_format is "NCW",
  and a filter / kernel tensor of shape
  [filter_width, in_channels, out_channels], this op reshapes the arguments
  to pass them to conv2d to perform the equivalent convolution operation.

  Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
  For example, if `data_format` does not start with "NC", a tensor of shape
    [batch, in_width, in_channels]
  is reshaped to
    [batch, 1, in_width, in_channels],
  the filter is reshaped to
    [1, filter_width, in_channels, out_channels],
  and the result is reshaped back to
    [batch, out_width, out_channels]
  \(where out_width is a function of the stride and padding as in conv2d\).

  Args:
    value: A 3D `Tensor`.  Must be of type `float16`, `float32`, or
      `float64`.
    filters: A 3D `Tensor`.  Must have the same type as `value`.
    stride: An `integer`.  The number of entries by which the filter is moved
      right at each step.
    padding: 'SAME' or 'VALID'
    use_cudnn_on_gpu: An optional `bool`.  Defaults to `True`.
    data_format: An optional `string` from `"NWC", "NCW"`.  Defaults to
      `"NWC"`, the data is stored in the order of
      [batch, in_width, in_channels].  The `"NCW"` format stores data as
      [batch, in_channels, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.  Has the same type as input.

  Raises:
    ValueError: if `data_format` is invalid.
  """
  with ops.name_scope(name, "conv1d", [value, filters]) as name:
    # Map the 1-D layout onto the matching 2-D layout by inserting a dummy
    # spatial dimension of size 1, then run a regular 2-D convolution.
    if data_format in (None, "NHWC", "NWC"):
      data_format = "NHWC"
      spatial_start_dim = 1
      strides = [1, 1, stride, 1]
    elif data_format in ("NCHW", "NCW"):
      data_format = "NCHW"
      spatial_start_dim = 2
      strides = [1, 1, 1, stride]
    else:
      raise ValueError("data_format must be \"NWC\" or \"NCW\".")
    expanded_value = array_ops.expand_dims(value, spatial_start_dim)
    expanded_filters = array_ops.expand_dims(filters, 0)
    conv_out = gen_nn_ops.conv2d(
        expanded_value,
        expanded_filters,
        strides,
        padding,
        use_cudnn_on_gpu=use_cudnn_on_gpu,
        data_format=data_format)
    # Drop the dummy spatial dimension to recover the 1-D result.
    return array_ops.squeeze(conv_out, [spatial_start_dim])
@tf_export("nn.conv1d", v1=[])
def conv1d_v2(input,  # pylint: disable=redefined-builtin
              filters,
              stride,
              padding,
              data_format=None,
              name=None):
  r"""Computes a 1-D convolution given 3-D input and filter tensors.

  Given an input tensor of shape
    [batch, in_width, in_channels]
  if data_format is "NWC", or
    [batch, in_channels, in_width]
  if data_format is "NCW",
  and a filter / kernel tensor of shape
  [filter_width, in_channels, out_channels], this op reshapes the arguments
  to pass them to conv2d to perform the equivalent convolution operation.

  Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
  For example, if `data_format` does not start with "NC", a tensor of shape
    [batch, in_width, in_channels]
  is reshaped to
    [batch, 1, in_width, in_channels],
  the filter is reshaped to
    [1, filter_width, in_channels, out_channels],
  and the result is reshaped back to
    [batch, out_width, out_channels]
  \(where out_width is a function of the stride and padding as in conv2d\).

  Args:
    input: A 3D `Tensor`.  Must be of type `float16`, `float32`, or
      `float64`.
    filters: A 3D `Tensor`.  Must have the same type as `input`.
    stride: An `integer`.  The number of entries by which the filter is moved
      right at each step.
    padding: 'SAME' or 'VALID'
    data_format: An optional `string` from `"NWC", "NCW"`.  Defaults to
      `"NWC"`, the data is stored in the order of
      [batch, in_width, in_channels].  The `"NCW"` format stores data as
      [batch, in_channels, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.  Has the same type as input.

  Raises:
    ValueError: if `data_format` is invalid.
  """
  # The v2 API drops use_cudnn_on_gpu; delegate to the v1 implementation
  # with that option pinned to True.
  return conv1d(
      input,  # pylint: disable=redefined-builtin
      filters,
      stride,
      padding,
      use_cudnn_on_gpu=True,
      data_format=data_format,
      name=name)
def conv1d_transpose(
    value,
    filter,  # pylint: disable=redefined-builtin
    output_shape,
    stride,
    padding="SAME",
    data_format="NWC",
    name=None):
  """The transpose of `conv1d`.

  This operation is sometimes called "deconvolution" after [Deconvolutional
  Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
  actually the transpose (gradient) of `conv1d` rather than an actual
  deconvolution.

  Args:
    value: A 3-D `Tensor` of type `float` and shape
      `[batch, in_width, in_channels]` for `NWC` data format or
      `[batch, in_channels, in_width]` for `NCW` data format.
    filter: A 3-D `Tensor` with the same type as `value` and shape
      `[filter_width, output_channels, in_channels]`.  `filter`'s
      `in_channels` dimension must match that of `value`.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
    stride: An `integer`.  The number of entries by which
      the filter is moved right at each step.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NWC' and 'NCW' are supported.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  with ops.name_scope(name, "conv1d_transpose",
                      [value, filter, output_shape]) as name:
    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(3)):
      raise ValueError("output_shape must have shape (3,), got {}".format(
          output_shape_.get_shape()))

    # The format could be either NWC or NCW, map to NHWC or NCHW
    if data_format is None or data_format == "NWC":
      data_format_2d = "NHWC"
      axis = 2  # channel axis for NWC
    elif data_format == "NCW":
      data_format_2d = "NCHW"
      axis = 1  # channel axis for NCW
    else:
      raise ValueError("data_format must be \"NWC\" or \"NCW\".")

    # The input's channel count must equal filter's in_channels (dim 2).
    if not value.get_shape().dims[axis].is_compatible_with(
        filter.get_shape()[2]):
      raise ValueError("input channels does not match filter's input channels, "
                       "{} != {}".format(value.get_shape()[axis],
                                         filter.get_shape()[2]))

    # Static check only possible when output_shape is a concrete list/array
    # (for a tensor output_shape the op validates at run time).
    if isinstance(output_shape, (list, np.ndarray)):
      # output_shape's shape should be == [3] if reached this point.
      if not filter.get_shape().dims[1].is_compatible_with(
          output_shape[axis]):
        raise ValueError(
            "output_shape does not match filter's output channels, "
            "{} != {}".format(output_shape[axis],
                              filter.get_shape()[1]))

    if padding != "VALID" and padding != "SAME":
      raise ValueError("padding must be either VALID or SAME:"
                       " {}".format(padding))

    # Reshape the input tensor to [batch, 1, in_width, in_channels] by
    # inserting a dummy spatial dimension, so the 2-D gradient kernel can
    # implement the 1-D transpose convolution.
    if data_format_2d == "NHWC":
      output_shape_ = array_ops.concat(
          [output_shape_[:1], [1], output_shape_[1:]], axis=0)
      spatial_start_dim = 1
      strides = [1, 1, stride, 1]
    else:
      output_shape_ = array_ops.concat(
          [output_shape_[:2], [1], output_shape_[2:]], axis=0)
      spatial_start_dim = 2
      strides = [1, 1, 1, stride]
    value = array_ops.expand_dims(value, spatial_start_dim)
    filter = array_ops.expand_dims(filter, 0)  # pylint: disable=redefined-builtin

    # conv1d_transpose is the gradient of conv1d w.r.t. its input, so it is
    # implemented via the conv2d input-gradient kernel.
    result = gen_nn_ops.conv2d_backprop_input(
        input_sizes=output_shape_,
        filter=filter,
        out_backprop=value,
        strides=strides,
        padding=padding,
        data_format=data_format_2d,
        name=name)
    # Drop the dummy spatial dimension again.
    return array_ops.squeeze(result, [spatial_start_dim])
@ops.RegisterStatistics("Dilation2D", "flops")
def _calc_dilation2d_flops(graph, node):
  """Returns the FLOP estimate for a Dilation2D node."""
  # All three shapes must be statically known; the asserts raise otherwise.
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  kernel_shape = graph_util.tensor_shape_from_node_def_name(
      graph, node.input[1])
  kernel_shape.assert_is_fully_defined()
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  kernel_height = int(kernel_shape[0])
  kernel_width = int(kernel_shape[1])
  # Two ops (one add, one compare) per kernel tap for every output element.
  num_outputs = np.prod(out_shape.as_list(), dtype=np.int64)
  return ops.OpStats("flops", num_outputs * kernel_height * kernel_width * 2)
@tf_export(v1=["nn.erosion2d"])
def erosion2d(value, kernel, strides, rates, padding, name=None):
  """Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors.

  The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
  `kernel` tensor has shape `[kernel_height, kernel_width, depth]`, i.e. each
  input channel is processed independently with its own structuring function.
  The `output` tensor has shape `[batch, out_height, out_width, depth]`; its
  spatial dimensions depend on the `padding` algorithm.  Only the default
  "NHWC" `data_format` is currently supported.

  In detail, the grayscale morphological 2-D erosion is given by:

      output[b, y, x, c] =
         min_{dy, dx} value[b,
                            strides[1] * y - rates[1] * dy,
                            strides[2] * x - rates[2] * dx,
                            c] -
                      kernel[dy, dx, c]

  Duality: the erosion of `value` by `kernel` equals the negation of the
  dilation of `-value` by the reflected `kernel`.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
    kernel: A `Tensor`.  Must have the same type as `value`.  3-D with shape
      `[kernel_height, kernel_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.  1-D of length 4.  The
      stride of the sliding window for each dimension of the input tensor.
      Must be: `[1, stride_height, stride_width, 1]`.
    rates: A list of `ints` that has length `>= 4`.  1-D of length 4.  The
      input stride for atrous morphological dilation.  Must be:
      `[1, rate_height, rate_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.  The type of padding
      algorithm to use.
    name: A name for the operation (optional).  If not specified "erosion2d"
      is used.

  Returns:
    A `Tensor`.  Has the same type as `value`.  4-D with shape
    `[batch, out_height, out_width, depth]`.

  Raises:
    ValueError: If the `value` depth does not match `kernel`' shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  with ops.name_scope(name, "erosion2d", [value, kernel]) as name:
    # Reduce erosion to dilation by duality:
    #   erosion(value, kernel) == -dilation(-value, reflect(kernel)).
    reflected_kernel = array_ops.reverse_v2(kernel, [0, 1])
    dilated = gen_nn_ops.dilation2d(
        input=math_ops.negative(value),
        filter=reflected_kernel,
        strides=strides,
        rates=rates,
        padding=padding,
        name=name)
    return math_ops.negative(dilated)
@tf_export("nn.erosion2d", v1=[])
def erosion2d_v2(value,
filters,
strides,
padding,
data_format,
dilations,
name=None):
"""Computes the grayscale erosion of 4-D `value` and 3-D `filters` tensors.
The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
`filters` tensor has shape `[filters_height, filters_width, depth]`, i.e.,
each input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the
output tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D erosion is given by:
output[b, y, x, c] =
min_{dy, dx} value[b,
strides[1] * y - dilations[1] * dy,
strides[2] * x - dilations[2] * dx,
c] -
filters[dy, dx, c]
Duality: The erosion of `value` by the `filters` is equal to the negation of
the dilation of `-value` by the reflected `filters`.
Args:
value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
filters: A `Tensor`. Must have the same type as `value`.
3-D with shape `[filters_height, filters_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
1-D of length 4. The stride of the sliding window for each dimension of
the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: A `string`, only `"NHWC"` is currently supported.
dilations: A list of `ints` that has length `>= 4`.
1-D of length 4. The input stride for atrous morphological dilation.
Must be: `[1, rate_height, rate_width, 1]`.
name: A name for the operation (optional). If not specified "erosion2d"
is used.
Returns:
A `Tensor`. Has the same type as `value`.
4-D with shape `[batch, out_height, out_width, depth]`.
Raises:
ValueError: If the `value` depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
if data_format != "NHWC":
raise ValueError("Data formats other than NHWC are not yet supported")
with ops.name_scope(name, "erosion2d", [value, filters]) as name:
# Reduce erosion to dilation by duality.
return math_ops.negative(
gen_nn_ops.dilation2d(
input=math_ops.negative(value),
filter=array_ops.reverse_v2(filters, [0, 1]),
strides=strides,
rates=dilations,
padding=padding,
name=name))
@tf_export(v1=["math.in_top_k", "nn.in_top_k"])
def in_top_k(predictions, targets, k, name=None):
r"""Says whether the targets are in the top `K` predictions.
This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
prediction for the target class is among the top `k` predictions among
all predictions for example `i`. Note that the behavior of `InTopK` differs
from the `TopK` op in its handling of ties; if multiple classes have the
same prediction value and straddle the top-`k` boundary, all of those
classes are considered to be in the top `k`.
More formally, let
\\(predictions_i\\) be the predictions for all classes for example `i`,
\\(targets_i\\) be the target class for example `i`,
\\(out_i\\) be the output for example `i`,
$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
Args:
predictions: A `Tensor` of type `float32`.
A `batch_size` x `classes` tensor.
targets: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A `batch_size` vector of class ids.
k: An `int`. Number of top elements to look at for computing precision.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`. Computed Precision at `k` as a `bool Tensor`.
"""
with ops.name_scope(name, "in_top_k"):
return gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)
@tf_export("math.in_top_k", "nn.in_top_k", v1=[])
def in_top_k_v2(targets, predictions, k, name=None):
return in_top_k(predictions, targets, k, name)
in_top_k_v2.__doc__ = in_top_k.__doc__
tf_export(v1=["nn.quantized_avg_pool"])(gen_nn_ops.quantized_avg_pool)
tf_export(v1=["nn.quantized_conv2d"])(gen_nn_ops.quantized_conv2d)
tf_export(v1=["nn.quantized_relu_x"])(gen_nn_ops.quantized_relu_x)
tf_export(v1=["nn.quantized_max_pool"])(gen_nn_ops.quantized_max_pool)
| 40.941493 | 96 | 0.668919 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.gen_nn_ops import *
from tensorflow.python.util import deprecation
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
# Readable alias: `lrn` is the generated-op name for local response
# normalization.
local_response_normalization = gen_nn_ops.lrn
def _non_atrous_convolution(
    input,
    filter,
    padding,
    data_format=None,
    strides=None,
    name=None):
  """Computes sums of N-D convolutions with no dilation.

  Thin functional wrapper around `_NonAtrousConvolution`: the static shapes
  are read from the converted tensors and the configured op is applied
  immediately.

  Args:
    input: Rank N+2 input `Tensor` (N in 1..3, enforced by the helper class).
    filter: Rank N+2 filter `Tensor`.
    padding: Padding algorithm, `"SAME"` or `"VALID"`.
    data_format: Optional data format string (valid values depend on N).
    strides: Optional sequence of N positive ints; defaults to all ones.
    name: Optional name for the returned operation.

  Returns:
    The convolution `Tensor`.
  """
  with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope:
    input = ops.convert_to_tensor(input, name="input")
    input_shape = input.get_shape()
    filter = ops.convert_to_tensor(filter, name="filter")
    filter_shape = filter.get_shape()
    op = _NonAtrousConvolution(
        input_shape,
        filter_shape=filter_shape,
        padding=padding,
        data_format=data_format,
        strides=strides,
        name=scope)
    return op(input, filter)
class _NonAtrousConvolution(object):
  """Helper class for `_non_atrous_convolution`.

  Resolves the rank- and data_format-dependent details once in the
  constructor — the stride layout and which op to call (`conv1d`, `conv2d`
  or `gen_nn_ops.conv3d`) — so an instance can be applied to tensors
  repeatedly without re-doing the shape checks.
  """

  def __init__(
      self,
      input_shape,
      filter_shape,
      padding,
      data_format=None,
      strides=None,
      name=None):
    """Validates shapes and selects the concrete convolution op.

    Raises:
      ValueError: If the rank is unknown or outside 3..5, if `strides` has
        the wrong length, or if `data_format` is invalid for the rank.
    """
    filter_shape = filter_shape.with_rank(input_shape.ndims)
    self.padding = padding
    self.name = name
    input_shape = input_shape.with_rank(filter_shape.ndims)
    if input_shape.ndims is None:
      raise ValueError("Rank of convolution must be known")
    if input_shape.ndims < 3 or input_shape.ndims > 5:
      raise ValueError(
          "`input` and `filter` must have rank at least 3 and at most 5")
    # Number of spatial dimensions: total rank minus batch and channel dims.
    conv_dims = input_shape.ndims - 2
    if strides is None:
      strides = [1] * conv_dims
    elif len(strides) != conv_dims:
      raise ValueError("len(strides)=%d, but should be %d" % (len(strides),
                                                              conv_dims))
    if conv_dims == 1:
      # 1-D: delegate to the `_conv1d` adapter, which takes a scalar stride.
      if data_format is None:
        data_format = "NWC"
      elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}:
        raise ValueError("data_format must be \"NWC\" or \"NCW\".")
      self.strides = strides[0]
      self.data_format = data_format
      self.conv_op = self._conv1d
    elif conv_dims == 2:
      # 2-D: pad strides with batch/channel entries per the data format.
      if data_format is None or data_format == "NHWC":
        data_format = "NHWC"
        strides = [1] + list(strides) + [1]
      elif data_format == "NCHW":
        strides = [1, 1] + list(strides)
      else:
        raise ValueError("data_format must be \"NHWC\" or \"NCHW\".")
      self.strides = strides
      self.data_format = data_format
      self.conv_op = conv2d
    elif conv_dims == 3:
      if data_format is None or data_format == "NDHWC":
        strides = [1] + list(strides) + [1]
      elif data_format == "NCDHW":
        strides = [1, 1] + list(strides)
      else:
        raise ValueError("data_format must be \"NDHWC\" or \"NCDHW\". Have: %s"
                         % data_format)
      self.strides = strides
      self.data_format = data_format
      self.conv_op = gen_nn_ops.conv3d

  # Adapter needed because the argument names of conv1d do not match
  # those for gen_nn_ops.conv2d and gen_nn_ops.conv3d.
  # pylint: disable=redefined-builtin
  def _conv1d(self, input, filter, strides, padding, data_format, name):
    return conv1d(
        value=input,
        filters=filter,
        stride=strides,
        padding=padding,
        data_format=data_format,
        name=name)
  # pylint: enable=redefined-builtin

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    # Apply the op selected in the constructor with the stored parameters.
    return self.conv_op(
        input=inp,
        filter=filter,
        strides=self.strides,
        padding=self.padding,
        data_format=self.data_format,
        name=self.name)
@tf_export("nn.dilation2d", v1=[])
def dilation2d_v2(
input, # pylint: disable=redefined-builtin
filters, # pylint: disable=redefined-builtin
strides,
padding,
data_format,
dilations,
name=None):
if data_format != "NCHW":
raise ValueError("Data formats other than NCHW are not yet supported")
return gen_nn_ops.dilation2d(input=input,
filter=filters,
strides=strides,
rates=dilations,
padding=padding,
name=name)
@tf_export("nn.with_space_to_batch")
def with_space_to_batch(
input, # pylint: disable=redefined-builtin
dilation_rate,
padding,
op,
filter_shape=None,
spatial_dims=None,
data_format=None):
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.get_shape()
def build_op(num_spatial_dims, padding):
return lambda inp, _: op(inp, num_spatial_dims, padding)
new_op = _WithSpaceToBatch(
input_shape,
dilation_rate,
padding,
build_op,
filter_shape=filter_shape,
spatial_dims=spatial_dims,
data_format=data_format)
return new_op(input, None)
class _WithSpaceToBatch(object):
  """Helper class for `with_space_to_batch`.

  Wraps an op built for "VALID" padding so that, when the dilation rate is
  greater than one, the input is transformed with `space_to_batch_nd`, the
  op is applied, and the result is restored with `batch_to_space_nd`.
  Validation and any statically computable padding are hoisted into the
  constructor so an instance can be applied repeatedly.
  """

  def __init__(self,
               input_shape,
               dilation_rate,
               padding,
               build_op,
               filter_shape=None,
               spatial_dims=None,
               data_format=None):
    """Validates arguments and precomputes paddings where possible.

    Args:
      input_shape: Static `TensorShape` of the input.
      dilation_rate: Rank-1 int32 tensor of fully defined shape
        [num_spatial_dims].
      padding: `"SAME"` or `"VALID"`.
      build_op: Callable `(num_spatial_dims, padding) -> (inp, filter) -> out`.
      filter_shape: Optional filter shape; required for `"SAME"` padding.
      spatial_dims: Optional monotonically increasing positive dim indices.
      data_format: Optional; "NC*"-style formats shift spatial dims by one.

    Raises:
      ValueError: On malformed rate, spatial_dims, rank, padding or rate
        values below one.
    """
    dilation_rate = ops.convert_to_tensor(
        dilation_rate, dtypes.int32, name="dilation_rate")
    try:
      rate_shape = dilation_rate.get_shape().with_rank(1)
    except ValueError:
      raise ValueError("rate must be rank 1")
    if not dilation_rate.get_shape().is_fully_defined():
      raise ValueError("rate must have known shape")
    num_spatial_dims = rate_shape.dims[0].value
    # Channels-first formats place channels before the spatial dims, so the
    # first spatial dim is index 2 rather than 1.
    if data_format is not None and data_format.startswith("NC"):
      starting_spatial_dim = 2
    else:
      starting_spatial_dim = 1
    if spatial_dims is None:
      spatial_dims = range(starting_spatial_dim,
                           num_spatial_dims + starting_spatial_dim)
    orig_spatial_dims = list(spatial_dims)
    spatial_dims = sorted(set(int(x) for x in orig_spatial_dims))
    if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims):
      # Typo fix: error message previously read "montonically".
      raise ValueError(
          "spatial_dims must be a monotonically increasing sequence of "
          "positive integers")
    if data_format is not None and data_format.startswith("NC"):
      expected_input_rank = spatial_dims[-1]
    else:
      expected_input_rank = spatial_dims[-1] + 1
    try:
      input_shape.with_rank_at_least(expected_input_rank)
    except ValueError:
      raise ValueError(
          "input tensor must have rank %d at least" % (expected_input_rank))
    const_rate = tensor_util.constant_value(dilation_rate)
    rate_or_const_rate = dilation_rate
    if const_rate is not None:
      rate_or_const_rate = const_rate
      if np.any(const_rate < 1):
        raise ValueError("dilation_rate must be positive")
      if np.all(const_rate == 1):
        # Dilation of one everywhere: space-to-batch is a no-op, so just use
        # the op built with the caller's padding directly.
        self.call = build_op(num_spatial_dims, padding)
        return
    # We have two padding contributions. The first is used for converting "SAME"
    # to "VALID". The second is required so that the height and width of the
    # zero-padded value tensor are multiples of rate.
    # Padding required to reduce to "VALID" convolution
    if padding == "SAME":
      if filter_shape is None:
        raise ValueError("filter_shape must be specified for SAME padding")
      filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape")
      const_filter_shape = tensor_util.constant_value(filter_shape)
      if const_filter_shape is not None:
        filter_shape = const_filter_shape
        self.base_paddings = _with_space_to_batch_base_paddings(
            const_filter_shape, num_spatial_dims, rate_or_const_rate)
      else:
        # Filter shape unknown at build time; defer to call time.
        self.num_spatial_dims = num_spatial_dims
        self.rate_or_const_rate = rate_or_const_rate
        self.base_paddings = None
    elif padding == "VALID":
      self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32)
    else:
      raise ValueError("Invalid padding method %r" % padding)
    self.input_shape = input_shape
    self.spatial_dims = spatial_dims
    self.dilation_rate = dilation_rate
    self.data_format = data_format
    self.op = build_op(num_spatial_dims, "VALID")
    self.call = self._with_space_to_batch_call

  def _with_space_to_batch_call(self, inp, filter):  # pylint: disable=redefined-builtin
    """Space-to-batch `inp`, apply `self.op`, then batch-to-space back."""
    # Handle input whose shape is unknown during graph creation.
    input_spatial_shape = None
    input_shape = self.input_shape
    spatial_dims = self.spatial_dims
    if input_shape.ndims is not None:
      input_shape_list = input_shape.as_list()
      input_spatial_shape = [input_shape_list[i] for i in spatial_dims]
    if input_spatial_shape is None or None in input_spatial_shape:
      # Fall back to the dynamic shape for any statically unknown dim.
      input_shape_tensor = array_ops.shape(inp)
      input_spatial_shape = array_ops.stack(
          [input_shape_tensor[i] for i in spatial_dims])
    base_paddings = self.base_paddings
    if base_paddings is None:
      # base_paddings could not be computed at build time since static filter
      # shape was not fully defined.
      filter_shape = array_ops.shape(filter)
      base_paddings = _with_space_to_batch_base_paddings(
          filter_shape, self.num_spatial_dims, self.rate_or_const_rate)
    paddings, crops = array_ops.required_space_to_batch_paddings(
        input_shape=input_spatial_shape,
        base_paddings=base_paddings,
        block_shape=self.dilation_rate)
    # Expand rate/paddings/crops from the spatial dims to all dims.
    dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,
                                                spatial_dims)
    paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)
    crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)
    input_converted = array_ops.space_to_batch_nd(
        input=inp, block_shape=dilation_rate, paddings=paddings)
    result = self.op(input_converted, filter)
    result_converted = array_ops.batch_to_space_nd(
        input=result, block_shape=dilation_rate, crops=crops)
    # Recover channel information for output shape if channels are not last.
    if self.data_format is not None and self.data_format.startswith("NC"):
      if not result_converted.shape.dims[1].value and filter is not None:
        output_shape = result_converted.shape.as_list()
        output_shape[1] = filter.shape[-1]
        result_converted.set_shape(output_shape)
    return result_converted

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    # `self.call` is either the direct op (rate == 1) or the
    # space-to-batch-wrapped path.
    return self.call(inp, filter)
def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims,
                                       rate_or_const_rate):
  """Computes the base paddings that reduce "SAME" to "VALID" convolution.

  Args:
    filter_shape: Filter shape (static array or shape tensor); only the
      first `num_spatial_dims` entries are used.
    num_spatial_dims: Number of spatial dimensions N.
    rate_or_const_rate: Dilation rate (static value when known, else tensor).

  Returns:
    An `[N, 2]` stack of (pad_before, pad_after) amounts.
  """
  # Spatial dimensions of the filters and the upsampled filters in which we
  # introduce (rate - 1) zeros between consecutive filter values.
  filter_spatial_shape = filter_shape[:num_spatial_dims]
  dilated_filter_spatial_shape = (
      filter_spatial_shape + (filter_spatial_shape - 1) *
      (rate_or_const_rate - 1))
  pad_extra_shape = dilated_filter_spatial_shape - 1
  # When full_padding_shape is odd, we pad more at end, following the same
  # convention as conv2d.
  pad_extra_start = pad_extra_shape // 2
  pad_extra_end = pad_extra_shape - pad_extra_start
  base_paddings = array_ops.stack(
      [[pad_extra_start[i], pad_extra_end[i]] for i in range(num_spatial_dims)])
  return base_paddings
def _with_space_to_batch_adjust(orig, fill_value, spatial_dims):
  """Expands `orig`, indexed by spatial dims, to cover all dimensions.

  Builds an output whose entry for each dimension listed in `spatial_dims`
  comes from the corresponding row of `orig`, while the entries for skipped
  dimensions are filled with `fill_value`.

  Args:
    orig: Tensor/array with one leading entry per spatial dimension.
    fill_value: Scalar used for dimensions not listed in `spatial_dims`.
    spatial_dims: Monotonically increasing sequence of 1-based dim indices
      (validated upstream by `_WithSpaceToBatch`).

  Returns:
    A numpy array when `orig` has a statically known value, otherwise a
    `Tensor` built with `array_ops.concat`.
  """
  fill_dims = orig.get_shape().as_list()[1:]
  dtype = orig.dtype.as_numpy_dtype
  parts = []
  const_orig = tensor_util.constant_value(orig)
  const_or_orig = const_orig if const_orig is not None else orig
  prev_spatial_dim = 0
  i = 0
  while i < len(spatial_dims):
    start_i = i
    start_spatial_dim = spatial_dims[i]
    if start_spatial_dim > 1:
      # Fill in any gap from the previous spatial dimension (or dimension 1 if
      # this is the first spatial dimension) with `fill_value`.
      parts.append(
          np.full(
              [start_spatial_dim - 1 - prev_spatial_dim] + fill_dims,
              fill_value,
              dtype=dtype))
    # Find the largest value of i such that:
    #   [spatial_dims[start_i], ..., spatial_dims[i]]
    #     == [start_spatial_dim, ..., start_spatial_dim + i - start_i],
    # i.e. the end of a contiguous group of spatial dimensions.
    while (i + 1 < len(spatial_dims) and
           spatial_dims[i + 1] == spatial_dims[i] + 1):
      i += 1
    parts.append(const_or_orig[start_i:i + 1])
    prev_spatial_dim = spatial_dims[i]
    i += 1
  if const_orig is not None:
    return np.concatenate(parts)
  else:
    return array_ops.concat(parts, 0)
def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
if dilation_rate is None:
dilation_rate = [1] * num_spatial_dims
elif len(dilation_rate) != num_spatial_dims:
raise ValueError("len(dilation_rate)=%d but should be %d" %
(len(dilation_rate), num_spatial_dims))
dilation_rate = np.array(dilation_rate, dtype=np.int32)
if np.any(dilation_rate < 1):
raise ValueError("all values of dilation_rate must be positive")
if strides is None:
strides = [1] * num_spatial_dims
elif len(strides) != num_spatial_dims:
raise ValueError("len(strides)=%d but should be %d" % (len(strides),
num_spatial_dims))
strides = np.array(strides, dtype=np.int32)
if np.any(strides < 1):
raise ValueError("all values of strides must be positive")
if np.any(strides > 1) and np.any(dilation_rate > 1):
raise ValueError(
"strides > 1 not supported in conjunction with dilation_rate > 1")
return strides, dilation_rate
@tf_export(v1=["nn.convolution"])
def convolution(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
padding,
strides=None,
dilation_rate=None,
name=None,
data_format=None):
# pylint: disable=line-too-long
# pylint: enable=line-too-long
with ops.name_scope(name, "convolution", [input, filter]) as name:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.get_shape()
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
filter_shape = filter.get_shape()
op = Convolution(
input_shape,
filter_shape,
padding,
strides=strides,
dilation_rate=dilation_rate,
name=name,
data_format=data_format)
return op(input, filter)
@tf_export("nn.convolution", v1=[])
def convolution_v2(
input, # pylint: disable=redefined-builtin
filters,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None):
return convolution(
input, # pylint: disable=redefined-builtin
filters,
padding=padding,
strides=strides,
dilation_rate=dilations,
name=name,
data_format=data_format)
convolution_v2.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
convolution.__doc__, "dilation_rate", "dilations"),
"filter", "filters")
class Convolution(object):
  """Helper class for `convolution`.

  Validates input/filter ranks and channel compatibility once, then wires a
  `_NonAtrousConvolution` through `_WithSpaceToBatch` so that dilation is
  handled via space-to-batch while the inner convolution always runs with
  "VALID" padding (or the caller's padding when the rate is all ones).
  """

  def __init__(self,
               input_shape,
               filter_shape,
               padding,
               strides=None,
               dilation_rate=None,
               name=None,
               data_format=None):
    """Validates shapes/arguments and builds the composed op.

    Raises:
      ValueError: If ranks are unknown/mismatched, channel counts disagree,
        or strides/dilation_rate are malformed.
    """
    num_total_dims = filter_shape.ndims
    if num_total_dims is None:
      num_total_dims = input_shape.ndims
    if num_total_dims is None:
      raise ValueError("rank of input or filter must be known")
    # Total rank is N spatial dims plus batch and channel dims.
    num_spatial_dims = num_total_dims - 2
    try:
      input_shape.with_rank(num_spatial_dims + 2)
    except ValueError:
      raise ValueError(
          "input tensor must have rank %d" % (num_spatial_dims + 2))
    try:
      filter_shape.with_rank(num_spatial_dims + 2)
    except ValueError:
      raise ValueError(
          "filter tensor must have rank %d" % (num_spatial_dims + 2))
    # Locate the channel dim: last for channels-last formats, index 1 for
    # "NC*" formats; spatial dims follow accordingly.
    if data_format is None or not data_format.startswith("NC"):
      input_channels_dim = tensor_shape.dimension_at_index(
          input_shape, num_spatial_dims + 1)
      spatial_dims = range(1, num_spatial_dims + 1)
    else:
      input_channels_dim = tensor_shape.dimension_at_index(input_shape, 1)
      spatial_dims = range(2, num_spatial_dims + 2)
    if not input_channels_dim.is_compatible_with(
        filter_shape[num_spatial_dims]):
      raise ValueError(
          "number of input channels does not match corresponding dimension of "
          "filter, {} != {}".format(input_channels_dim,
                                    filter_shape[num_spatial_dims]))
    strides, dilation_rate = _get_strides_and_dilation_rate(
        num_spatial_dims, strides, dilation_rate)
    self.input_shape = input_shape
    self.filter_shape = filter_shape
    self.data_format = data_format
    self.strides = strides
    self.name = name
    self.conv_op = _WithSpaceToBatch(
        input_shape,
        dilation_rate=dilation_rate,
        padding=padding,
        build_op=self._build_op,
        filter_shape=filter_shape,
        spatial_dims=spatial_dims,
        data_format=data_format)

  def _build_op(self, _, padding):
    # Factory handed to _WithSpaceToBatch: builds the inner non-dilated
    # convolution with the padding chosen by the wrapper.
    return _NonAtrousConvolution(
        self.input_shape,
        filter_shape=self.filter_shape,
        padding=padding,
        data_format=self.data_format,
        strides=self.strides,
        name=self.name)

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    return self.conv_op(inp, filter)
@tf_export(v1=["nn.pool"])
def pool(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
padding,
dilation_rate=None,
strides=None,
name=None,
data_format=None):
# pylint: disable=line-too-long
# pylint: enable=line-too-long
with ops.name_scope(name, "%s_pool" % (pooling_type.lower()),
[input]) as scope:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
num_spatial_dims = len(window_shape)
if num_spatial_dims < 1 or num_spatial_dims > 3:
raise ValueError("It is required that 1 <= num_spatial_dims <= 3.")
input.get_shape().with_rank(num_spatial_dims + 2)
strides, dilation_rate = _get_strides_and_dilation_rate(
num_spatial_dims, strides, dilation_rate)
if padding == "SAME" and np.any(dilation_rate > 1):
raise ValueError(
"pooling with SAME padding is not implemented for dilation_rate > 1")
if np.any(strides > window_shape):
raise ValueError(
"strides > window_shape not supported due to inconsistency between "
"CPU and GPU implementations")
pooling_ops = {
("MAX", 1): max_pool,
("MAX", 2): max_pool,
("MAX", 3): max_pool3d, # pylint: disable=undefined-variable
("AVG", 1): avg_pool,
("AVG", 2): avg_pool,
("AVG", 3): avg_pool3d, # pylint: disable=undefined-variable
}
op_key = (pooling_type, num_spatial_dims)
if op_key not in pooling_ops:
raise ValueError("%d-D %s pooling is not supported." % (op_key[1],
op_key[0]))
if data_format is None or not data_format.startswith("NC"):
adjusted_window_shape = [1] + list(window_shape) + [1]
adjusted_strides = [1] + list(strides) + [1]
spatial_dims = range(1, num_spatial_dims + 1)
else:
adjusted_window_shape = [1, 1] + list(window_shape)
adjusted_strides = [1, 1] + list(strides)
spatial_dims = range(2, num_spatial_dims + 2)
if num_spatial_dims == 1:
if data_format is None or data_format == "NWC":
data_format_kwargs = dict(data_format="NHWC")
elif data_format == "NCW":
data_format_kwargs = dict(data_format="NCHW")
else:
raise ValueError("data_format must be either \"NWC\" or \"NCW\".")
adjusted_window_shape = [1] + adjusted_window_shape
adjusted_strides = [1] + adjusted_strides
else:
data_format_kwargs = dict(data_format=data_format)
def op(converted_input, _, converted_padding): # pylint: disable=missing-docstring
if num_spatial_dims == 1:
converted_input = array_ops.expand_dims(converted_input,
spatial_dims[0])
result = pooling_ops[op_key](
converted_input,
adjusted_window_shape,
adjusted_strides,
converted_padding,
name=scope,
**data_format_kwargs)
if num_spatial_dims == 1:
result = array_ops.squeeze(result, [spatial_dims[0]])
return result
return with_space_to_batch(
input=input,
dilation_rate=dilation_rate,
padding=padding,
op=op,
spatial_dims=spatial_dims,
filter_shape=window_shape)
@tf_export("nn.pool", v1=[])
def pool_v2(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None):
# pylint: disable=line-too-long
return pool(
input=input,
window_shape=window_shape,
pooling_type=pooling_type,
padding=padding,
dilation_rate=dilations,
strides=strides,
name=name,
data_format=data_format)
@tf_export("nn.atrous_conv2d")
def atrous_conv2d(value, filters, rate, padding, name=None):
return convolution(
input=value,
filter=filters,
padding=padding,
dilation_rate=np.broadcast_to(rate, (2,)),
name=name)
def _convert_padding(padding):
explicit_paddings = []
if padding == "EXPLICIT":
# Give a better error message if EXPLICIT is passed.
raise ValueError('"EXPLICIT" is not a valid value for the padding '
"parameter. To use explicit padding, the padding "
"parameter must be a list.")
if isinstance(padding, (list, tuple)):
for i, dim_paddings in enumerate(padding):
if not isinstance(dim_paddings, (list, tuple)):
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding is not a list/tuple" % i)
if len(dim_paddings) != 2:
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding has size %d" % (i, len(dim_paddings)))
explicit_paddings.extend(dim_paddings)
if len(padding) != 4:
raise ValueError("When padding is a list, it must be of size 4. Got "
"padding of size: %d" % len(padding))
padding = "EXPLICIT"
return padding, explicit_paddings
@tf_export("nn.conv2d", v1=[])
def conv2d_v2(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
data_format="NHWC",
dilations=None,
name=None):
# pylint: disable=line-too-long
# pylint: enable=line-too-long
if dilations is None:
dilations = [1, 1, 1, 1]
return conv2d(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(v1=["nn.conv2d"])
def conv2d( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter,
strides,
padding,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
padding, explicit_paddings = _convert_padding(padding)
return gen_nn_ops.conv2d(input, # pylint: disable=redefined-builtin
filter,
strides,
padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export("nn.conv2d_backprop_filter", v1=[])
def conv2d_backprop_filter_v2(input, # pylint: disable=redefined-builtin
filter_sizes,
out_backprop,
strides,
padding,
data_format="NHWC",
dilations=None,
name=None):
if dilations is None:
dilations = [1, 1, 1, 1]
return conv2d_backprop_filter(input, # pylint: disable=redefined-builtin
filter_sizes,
out_backprop,
strides,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(v1=["nn.conv2d_backprop_filter"])
def conv2d_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter_sizes,
out_backprop,
strides,
padding,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
padding, explicit_paddings = _convert_padding(padding)
return gen_nn_ops.conv2d_backprop_filter(
input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu,
explicit_paddings, data_format, dilations, name)
@tf_export("nn.conv2d_backprop_input", v1=[])
def conv2d_backprop_input_v2(input_sizes,
filters,
out_backprop,
strides,
padding,
data_format="NHWC",
dilations=None,
name=None):
if dilations is None:
dilations = [1, 1, 1, 1]
return conv2d_backprop_input(input_sizes,
filters,
out_backprop,
strides,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(v1=["nn.conv2d_backprop_input"])
def conv2d_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value
input_sizes,
filter,
out_backprop,
strides,
padding,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
padding, explicit_paddings = _convert_padding(padding)
return gen_nn_ops.conv2d_backprop_input(
input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu,
explicit_paddings, data_format, dilations, name)
@tf_export(v1=["nn.conv2d_transpose"])
def conv2d_transpose(
value,
filter, # pylint: disable=redefined-builtin
output_shape,
strides,
padding="SAME",
data_format="NHWC",
name=None):
with ops.name_scope(name, "conv2d_transpose",
[value, filter, output_shape]) as name:
if data_format not in ("NCHW", "NHWC"):
raise ValueError("data_format has to be either NCHW or NHWC.")
value = ops.convert_to_tensor(value, name="value")
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
axis = 3 if data_format == "NHWC" else 1
if not value.get_shape().dims[axis].is_compatible_with(
filter.get_shape()[3]):
raise ValueError("input channels does not match filter's input channels, "
"{} != {}".format(value.get_shape()[axis],
filter.get_shape()[3]))
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
raise ValueError("output_shape must have shape (4,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
if not filter.get_shape().dims[2].is_compatible_with(
output_shape[axis]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[axis],
filter.get_shape()[2]))
if padding != "VALID" and padding != "SAME":
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
return gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape_,
filter=filter,
out_backprop=value,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
@tf_export("nn.conv2d_transpose", v1=[])
def conv2d_transpose_v2(
input,
filters,
output_shape,
strides,
padding="SAME",
data_format="NHWC",
name=None):
return conv2d_transpose(
input,
filters,
output_shape,
strides,
padding=padding,
data_format=data_format,
name=name)
conv2d_transpose_v2.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
conv2d_transpose.__doc__, "filter", "filters"),
"value", "input")
@tf_export("nn.atrous_conv2d_transpose")
def atrous_conv2d_transpose(value,
filters,
output_shape,
rate,
padding,
name=None):
with ops.name_scope(name, "atrous_conv2d_transpose",
[value, filters, output_shape]) as name:
value = ops.convert_to_tensor(value, name="value")
filters = ops.convert_to_tensor(filters, name="filters")
if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]):
raise ValueError(
"value's input channels does not match filters' input channels, "
"{} != {}".format(value.get_shape()[3],
filters.get_shape()[3]))
if rate < 1:
raise ValueError("rate {} cannot be less than one".format(rate))
if rate == 1:
return conv2d_transpose(
value,
filters,
output_shape,
strides=[1, 1, 1, 1],
padding=padding,
data_format="NHWC")
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
raise ValueError("output_shape must have shape (4,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[3],
filters.get_shape()[2]))
if padding == "SAME":
if filters.get_shape().is_fully_defined():
filter_shape = filters.get_shape().as_list()
else:
filter_shape = array_ops.shape(filters)
filter_height, filter_width = filter_shape[0], filter_shape[1]
filter_height_up = filter_height + (filter_height - 1) * (rate - 1)
filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
pad_height = filter_height_up - 1
pad_width = filter_width_up - 1
pad_top = pad_height // 2
pad_bottom = pad_height - pad_top
pad_left = pad_width // 2
pad_right = pad_width - pad_left
elif padding == "VALID":
pad_top = 0
pad_bottom = 0
pad_left = 0
pad_right = 0
else:
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
in_height = output_shape[1] + pad_top + pad_bottom
in_width = output_shape[2] + pad_left + pad_right
pad_bottom_extra = (rate - in_height % rate) % rate
pad_right_extra = (rate - in_width % rate) % rate
space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]]
value = array_ops.space_to_batch(
input=value, paddings=space_to_batch_pad, block_size=rate)
input_sizes = [
rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate,
(in_width + pad_right_extra) // rate, output_shape[3]
]
value = gen_nn_ops.conv2d_backprop_input(
input_sizes=input_sizes,
filter=filters,
out_backprop=value,
strides=[1, 1, 1, 1],
padding="VALID",
data_format="NHWC")
batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra],
[pad_left, pad_right + pad_right_extra]]
return array_ops.batch_to_space(
input=value, crops=batch_to_space_crop, block_size=rate)
@tf_export("nn.conv3d", v1=[])
def conv3d_v2(input,
filters,
strides,
padding,
data_format="NDHWC",
dilations=None,
name=None):
if dilations is None:
dilations = [1, 1, 1, 1, 1]
return gen_nn_ops.conv3d(input,
filters,
strides,
padding,
data_format=data_format,
dilations=dilations,
name=name)
tf_export(v1=["nn.conv3d"])(gen_nn_ops.conv3d)
conv3d_v2.__doc__ = deprecation.rewrite_argument_docstring(
gen_nn_ops.conv3d.__doc__, "filter", "filters")
@tf_export(v1=["nn.conv3d_transpose"])
def conv3d_transpose(
    value,
    filter,
    output_shape,
    strides,
    padding="SAME",
    data_format="NDHWC",
    name=None):
  """The transpose of `conv3d` (often called "deconvolution").

  Implemented as the gradient of `conv3d` with respect to its input
  (`gen_nn_ops.conv3d_backprop_input_v2`), not as an actual deconvolution.

  Args:
    value: A 5-D tensor; its channel axis is 1 for "NCDHW", otherwise 4.
    filter: A 5-D filter tensor; axis 4 holds its input channels and axis 3
      its output channels.
    output_shape: A 1-D tensor of length 5 describing the output shape.
    strides: Strides forwarded to the backprop op.
    padding: Either "VALID" or "SAME".
    data_format: "NDHWC" (default) or "NCDHW".
    name: Optional op name.

  Returns:
    The tensor produced by `gen_nn_ops.conv3d_backprop_input_v2`.

  Raises:
    ValueError: On channel mismatch, malformed `output_shape`, or an
      unsupported `padding` value.
  """
  with ops.name_scope(name, "conv3d_transpose",
                      [value, filter, output_shape]) as name:
    value = ops.convert_to_tensor(value, name="value")
    filter = ops.convert_to_tensor(filter, name="filter")
    # Channel axis depends on layout: NCDHW keeps channels at axis 1.
    axis = 1 if data_format == "NCDHW" else 4
    if not value.get_shape().dims[axis].is_compatible_with(
        filter.get_shape()[4]):
      raise ValueError("input channels does not match filter's input channels, "
                       "{} != {}".format(value.get_shape()[axis],
                                         filter.get_shape()[4]))
    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(5)):
      raise ValueError("output_shape must have shape (5,), got {}".format(
          output_shape_.get_shape()))
    # Static channel check is only possible when output_shape is a concrete
    # Python/NumPy sequence (not a Tensor).
    if isinstance(output_shape, (list, np.ndarray)):
      # output_shape's shape should be == [5] if reached this point.
      if not filter.get_shape().dims[3].is_compatible_with(
          output_shape[axis]):
        raise ValueError(
            "output_shape does not match filter's output channels, "
            "{} != {}".format(output_shape[axis],
                              filter.get_shape()[3]))
    if padding != "VALID" and padding != "SAME":
      raise ValueError("padding must be either VALID or SAME:"
                       " {}".format(padding))
    return gen_nn_ops.conv3d_backprop_input_v2(
        input_sizes=output_shape_,
        filter=filter,
        out_backprop=value,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.conv3d_transpose", v1=[])
def conv3d_transpose_v2(
    input,
    filters,
    output_shape,
    strides,
    padding="SAME",
    data_format="NDHWC",
    name=None):
  """Transposed 3-D convolution; delegates to the v1 implementation."""
  # The v1 function takes the same positional arguments, just under the older
  # `value`/`filter` parameter names.
  return conv3d_transpose(input, filters, output_shape, strides,
                          padding=padding, data_format=data_format, name=name)
# pylint: enable=redefined-builtin
conv3d_transpose_v2.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
conv3d_transpose.__doc__, "filter", "filters"),
"value", "input")
@tf_export("nn.bias_add")
def bias_add(value, bias, data_format=None, name=None):
  """Adds the 1-D `bias` tensor to `value` via the BiasAdd kernel."""
  with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
    # Eager tensors can be handed straight to the kernel; in graph mode we
    # convert explicitly so `bias` can adopt `value`'s dtype.
    if context.executing_eagerly():
      return gen_nn_ops.bias_add(value, bias, data_format=data_format,
                                 name=name)
    value = ops.convert_to_tensor(value, name="input")
    bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
    return gen_nn_ops.bias_add(value, bias, data_format=data_format, name=name)
def bias_add_v1(value, bias, name=None):
  """Adds `bias` to `value` using the legacy BiasAddV1 kernel.

  Unlike `bias_add`, this signature has no `data_format` argument.
  """
  with ops.name_scope(name, "BiasAddV1", [value, bias]) as name:
    tensor_in = ops.convert_to_tensor(value, name="input")
    bias_vec = ops.convert_to_tensor(bias, dtype=tensor_in.dtype, name="bias")
    return gen_nn_ops.bias_add_v1(tensor_in, bias_vec, name=name)
@tf_export(v1=["nn.crelu"])
def crelu(features, name=None, axis=-1):
  """Concatenated ReLU: ReLU applied to `features` and `-features` joined
  along `axis`, so the output doubles the size of that dimension."""
  with ops.name_scope(name, "CRelu", [features]) as name:
    features = ops.convert_to_tensor(features, name="features")
    stacked = array_ops.concat([features, -features], axis, name=name)
    return gen_nn_ops.relu(stacked)
@tf_export("nn.crelu", v1=[])
def crelu_v2(features, axis=-1, name=None):
  # V2 alias of `crelu` with `axis` listed before `name`; the module assigns
  # `crelu.__doc__` to this function right after definition.
  return crelu(features, axis=axis, name=name)
crelu_v2.__doc__ = crelu.__doc__
@tf_export("nn.relu6")
def relu6(features, name=None):
  """Computes Rectified Linear 6: `min(max(features, 0), 6)`."""
  with ops.name_scope(name, "Relu6", [features]) as name:
    tensor = ops.convert_to_tensor(features, name="features")
    return gen_nn_ops.relu6(tensor, name=name)
@tf_export("nn.leaky_relu")
def leaky_relu(features, alpha=0.2, name=None):
  """Computes the leaky ReLU: `max(alpha * features, features)`.

  Args:
    features: A tensor-like value; integer inputs are cast to float32.
    alpha: Slope of the activation for negative inputs; a Python number,
      NumPy scalar/0-d array, or Tensor.
    name: Optional op name.

  Returns:
    The activation tensor, from the fused kernel when the forward-compat
    window allows it, otherwise from the `maximum` fallback.
  """
  with ops.name_scope(name, "LeakyRelu", [features, alpha]) as name:
    features = ops.convert_to_tensor(features, name="features")
    if features.dtype.is_integer:
      features = math_ops.to_float(features)
    if compat.forward_compatible(2018, 11, 1):
      if isinstance(alpha, np.ndarray):
        # `np.asscalar` was deprecated in NumPy 1.16 and removed in 1.23;
        # `ndarray.item()` is its documented equivalent.
        alpha = alpha.item()
      return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)
    alpha = ops.convert_to_tensor(alpha, dtype=features.dtype, name="alpha")
    return math_ops.maximum(alpha * features, features, name=name)
def _flatten_outer_dims(logits):
  """Flattens logits' outer dimensions and keeps its last dimension.

  Reshapes `logits` to rank 2 `[-1, last_dim]`, then — in graph mode — sets
  a fully static output shape whenever every outer dimension is statically
  known, so downstream shape inference does not degrade.
  """
  rank = array_ops.rank(logits)
  last_dim_size = array_ops.slice(
      array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
  output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))
  # Set output shape if known.
  if not context.executing_eagerly():
    shape = logits.get_shape()
    if shape is not None and shape.dims is not None:
      shape = shape.as_list()
      # Multiply out the outer dims; bail out if any of them is unknown.
      product = 1
      product_valid = True
      for d in shape[:-1]:
        if d is None:
          product_valid = False
          break
        else:
          product *= d
      if product_valid:
        output_shape = [product, shape[-1]]
        output.set_shape(output_shape)
  return output
def _softmax(logits, compute_op, dim=-1, name=None):
  """Applies `compute_op` (softmax or log_softmax) along dimension `dim`.

  The underlying kernels only operate on the last dimension, so for any
  other `dim` the tensor is transposed to put `dim` last, the op is applied,
  and the result is transposed back.

  Args:
    logits: A non-empty `Tensor`.
    compute_op: `gen_nn_ops.softmax` or `gen_nn_ops.log_softmax`.
    dim: Dimension to operate on; a Python int or a scalar `Tensor`.
    name: Optional op name.

  Returns:
    A `Tensor` with the same shape as `logits`.

  Raises:
    InvalidArgumentError: if a statically known `dim` is out of range.
  """
  def _swap_axis(logits, dim_index, last_index, name=None):
    """Swaps dimension `dim_index` with the last dimension via transpose."""
    return array_ops.transpose(
        logits,
        array_ops.concat([
            math_ops.range(dim_index), [last_index],
            math_ops.range(dim_index + 1, last_index), [dim_index]
        ], 0),
        name=name)
  logits = ops.convert_to_tensor(logits)
  # We need its original shape for shape inference.
  shape = logits.get_shape()
  # Bug fix: this used `dim is -1`, an identity comparison against an int
  # literal.  That relies on CPython's small-int caching, fails for e.g.
  # NumPy integers, and raises a SyntaxWarning on CPython >= 3.8.  Value
  # equality is what is meant (and matches the `==` test right next to it).
  is_last_dim = (dim == -1) or (dim == shape.ndims - 1)
  if is_last_dim:
    return compute_op(logits, name=name)
  dim_val = dim
  if isinstance(dim, ops.Tensor):
    dim_val = tensor_util.constant_value(dim)
  if dim_val is not None and (dim_val < -shape.ndims or dim_val >= shape.ndims):
    raise errors_impl.InvalidArgumentError(
        None, None,
        "Dimension (%d) must be in the range [%d, %d) where %d is the number of"
        " dimensions in the input." % (dim_val, -shape.ndims, shape.ndims,
                                       shape.ndims))
  # If dim is not the last dimension, we have to do a transpose so that we can
  # still perform softmax on its last dimension.
  # In case dim is negative (and is not last dimension -1), add shape.ndims
  ndims = array_ops.rank(logits)
  if not isinstance(dim, ops.Tensor):
    if dim < 0:
      dim += ndims
  else:
    dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim)
  # Swap logits' dimension of dim and its last dimension.
  input_rank = array_ops.rank(logits)
  dim_axis = dim % shape.ndims
  logits = _swap_axis(logits, dim_axis, math_ops.subtract(input_rank, 1))
  output = compute_op(logits)
  output = _swap_axis(
      output, dim_axis, math_ops.subtract(input_rank, 1), name=name)
  # Restore the static shape lost through the transposes.
  output.set_shape(shape)
  return output
@tf_export(v1=["nn.softmax", "math.softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax(logits, axis=None, name=None, dim=None):
  """Computes softmax activations along `axis` (default: last dimension)."""
  # `dim` is the deprecated spelling of `axis`; reconcile the two first.
  resolved_axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                         "dim", dim)
  return _softmax(logits, gen_nn_ops.softmax,
                  -1 if resolved_axis is None else resolved_axis, name)
@tf_export("nn.softmax", "math.softmax", v1=[])
def softmax_v2(logits, axis=None, name=None):
  """Computes softmax activations along `axis` (default: last dimension)."""
  return _softmax(logits, gen_nn_ops.softmax,
                  -1 if axis is None else axis, name)
@tf_export(v1=["nn.log_softmax", "math.log_softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def log_softmax(logits, axis=None, name=None, dim=None):
  """Computes log-softmax activations along `axis` (default: last dim)."""
  # `dim` is the deprecated spelling of `axis`; reconcile the two first.
  resolved_axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                         "dim", dim)
  return _softmax(logits, gen_nn_ops.log_softmax,
                  -1 if resolved_axis is None else resolved_axis, name)
@tf_export("nn.log_softmax", "math.log_softmax", v1=[])
def log_softmax_v2(logits, axis=None, name=None):
  """Computes log-softmax activations along `axis` (default: last dim)."""
  return _softmax(logits, gen_nn_ops.log_softmax,
                  -1 if axis is None else axis, name)
def _ensure_xent_args(name, sentinel, labels, logits):
  """Validates that `name` was called with keyword labels/logits.

  `sentinel` is the value captured by the `_sentinel` positional slot of the
  public cross-entropy functions; anything but None means the caller passed
  arguments positionally.
  """
  if sentinel is not None:
    raise ValueError("Only call `%s` with "
                     "named arguments (labels=..., logits=..., ...)" % name)
  if logits is None or labels is None:
    raise ValueError("Both labels and logits must be provided.")
@tf_export("nn.softmax_cross_entropy_with_logits", v1=[])
def softmax_cross_entropy_with_logits_v2(labels, logits, axis=-1, name=None):
  """Computes softmax cross entropy between `labels` and `logits`.

  Thin v2 wrapper; all of the work happens in the shared helper.
  """
  return softmax_cross_entropy_with_logits_v2_helper(
      labels=labels, logits=logits, axis=axis, name=name)
@tf_export(v1=["nn.softmax_cross_entropy_with_logits_v2"])
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax_cross_entropy_with_logits_v2_helper(
    labels, logits, axis=None, name=None, dim=None):
  """Computes softmax cross entropy between `labels` and `logits`.

  Casts fp16/bfloat16 inputs to float32 for numerical precision, moves
  `axis` to the last position when needed, flattens the outer dims so the
  rank-2 kernel can run, then restores the original shape and dtype.

  Args:
    labels: Per-class probability distributions along `axis`.
    logits: Unscaled log probabilities, same shape as `labels`.
    axis: The class dimension (default -1, the last dimension).
    name: Optional op name.
    dim: Deprecated alias of `axis`.

  Returns:
    The per-example loss tensor: `labels`/`logits` with `axis` removed.
  """
  axis = deprecated_argument_lookup("axis", axis, "dim", dim)
  del dim
  if axis is None:
    axis = -1
  with ops.name_scope(name, "softmax_cross_entropy_with_logits",
                      [logits, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    labels = ops.convert_to_tensor(labels, name="labels")
    # Low-precision dtypes are computed in float32 and cast back at the end.
    convert_to_float32 = (
        logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16)
    precise_logits = math_ops.cast(
        logits, dtypes.float32) if convert_to_float32 else logits
    labels = math_ops.cast(labels, precise_logits.dtype)
    input_rank = array_ops.rank(precise_logits)
    # For shape inference.
    shape = logits.get_shape()
    # The kernel reduces over the last dimension, so rotate `axis` there.
    if axis != -1:
      def _move_dim_to_end(tensor, dim_index, rank):
        return array_ops.transpose(
            tensor,
            array_ops.concat([
                math_ops.range(dim_index),
                math_ops.range(dim_index + 1, rank), [dim_index]
            ], 0))
      precise_logits = _move_dim_to_end(precise_logits, axis, input_rank)
      labels = _move_dim_to_end(labels, axis, input_rank)
    input_shape = array_ops.shape(precise_logits)
    # The second output tensor contains the gradients.  We use it in
    # _CrossEntropyGrad() in nn_grad but not here.  The kernel needs rank-2
    # inputs, hence the flatten/reshape pair around the call.
    precise_logits = _flatten_outer_dims(precise_logits)
    labels = _flatten_outer_dims(labels)
    cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
        precise_logits, labels, name=name)
    # The output shape is the input shape without the class dimension.
    output_shape = array_ops.slice(input_shape, [0],
                                   [math_ops.subtract(input_rank, 1)])
    cost = array_ops.reshape(cost, output_shape)
    # Make shape inference work since reshape and transpose may erase its
    # static shape.
    if not context.executing_eagerly(
    ) and shape is not None and shape.dims is not None:
      shape = shape.as_list()
      del shape[axis]
      cost.set_shape(shape)
    if convert_to_float32:
      return math_ops.cast(cost, logits.dtype)
    else:
      return cost
_XENT_DEPRECATION = """
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See `tf.nn.softmax_cross_entropy_with_logits_v2`.
"""
@tf_export(v1=["nn.softmax_cross_entropy_with_logits"])
@deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION)
def softmax_cross_entropy_with_logits(
    _sentinel=None,
    labels=None,
    logits=None,
    dim=-1,
    name=None):
  """Deprecated v1 entry point for softmax cross entropy.

  Differs from the v2 function only in that gradients are blocked from
  flowing into `labels` (via `stop_gradient`) before delegating.
  `_sentinel` exists solely to force keyword-argument calls.
  """
  _ensure_xent_args("softmax_cross_entropy_with_logits", _sentinel, labels,
                    logits)
  with ops.name_scope(name, "softmax_cross_entropy_with_logits_sg",
                      [logits, labels]) as name:
    # v1 semantics: labels are treated as constants w.r.t. backprop.
    labels = array_ops.stop_gradient(labels, name="labels_stop_gradient")
  return softmax_cross_entropy_with_logits_v2(
      labels=labels, logits=logits, axis=dim, name=name)
@tf_export("nn.sparse_softmax_cross_entropy_with_logits")
def sparse_softmax_cross_entropy_with_logits(
    _sentinel=None,
    labels=None,
    logits=None,
    name=None):
  """Computes sparse softmax cross entropy between `labels` and `logits`.

  `labels` holds integer class indices and must have the shape of `logits`
  minus its last (class) dimension.  fp16 logits are computed in float32 and
  the loss is cast back to fp16.  `_sentinel` forces keyword-argument calls.

  Raises:
    ValueError: For scalar logits, a labels/logits rank mismatch, or a
      statically detectable shape mismatch.
  """
  _ensure_xent_args("sparse_softmax_cross_entropy_with_logits", _sentinel,
                    labels, logits)
  # TODO(review): the reshape path below assumes the kernel only accepts
  # rank-2 logits; confirmed by the explicit rank-2 fast path.
  with ops.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits",
                      [labels, logits]):
    labels = ops.convert_to_tensor(labels)
    logits = ops.convert_to_tensor(logits)
    precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype(
        logits.dtype) == dtypes.float16) else logits
    # Store label shapes for validation and for restoring the loss shape.
    labels_static_shape = labels.get_shape()
    labels_shape = array_ops.shape(labels)
    static_shapes_fully_defined = (
        labels_static_shape.is_fully_defined() and
        logits.get_shape()[:-1].is_fully_defined())
    if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:
      raise ValueError(
          "Logits cannot be scalars - received shape %s." % logits.get_shape())
    if logits.get_shape().ndims is not None and (
        labels_static_shape.ndims is not None and
        labels_static_shape.ndims != logits.get_shape().ndims - 1):
      raise ValueError("Rank mismatch: Rank of labels (received %s) should "
                       "equal rank of logits minus 1 (received %s)." %
                       (labels_static_shape.ndims, logits.get_shape().ndims))
    if (static_shapes_fully_defined and
        labels_static_shape != logits.get_shape()[:-1]):
      raise ValueError("Shape mismatch: The shape of labels (received %s) "
                       "should equal the shape of logits except for the last "
                       "dimension (received %s)." % (labels_static_shape,
                                                     logits.get_shape()))
    # Fast path: the kernel takes rank-2 logits directly, no reshaping.
    if logits.get_shape().ndims == 2:
      cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
          precise_logits, labels, name=name)
      if logits.dtype == dtypes.float16:
        return math_ops.cast(cost, dtypes.float16)
      else:
        return cost
    # General path: flatten to rank 2, run the kernel, restore label shape.
    # When shapes are not static, a runtime assertion guards the reshape.
    shape_checks = []
    if not static_shapes_fully_defined:
      shape_checks.append(
          check_ops.assert_equal(
              array_ops.shape(labels),
              array_ops.shape(logits)[:-1]))
    with ops.control_dependencies(shape_checks):
      num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1]
      precise_logits = array_ops.reshape(precise_logits, [-1, num_classes])
      labels = array_ops.reshape(labels, [-1])
      cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
          precise_logits, labels, name=name)
      cost = array_ops.reshape(cost, labels_shape)
      cost.set_shape(labels_static_shape)
      if logits.dtype == dtypes.float16:
        return math_ops.cast(cost, dtypes.float16)
      else:
        return cost
@tf_export("nn.avg_pool")
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs average pooling on `value`."""
  with ops.name_scope(name, "AvgPool", [value]) as name:
    inputs = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops.avg_pool(
        inputs, ksize=ksize, strides=strides, padding=padding,
        data_format=data_format, name=name)
@tf_export("nn.max_pool")
def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs max pooling on `value`."""
  with ops.name_scope(name, "MaxPool", [value]) as name:
    inputs = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops.max_pool(
        inputs, ksize=ksize, strides=strides, padding=padding,
        data_format=data_format, name=name)
@tf_export("nn.max_pool_with_argmax", v1=[])
def max_pool_with_argmax_v2(input,
                            ksize,
                            strides,
                            padding,
                            data_format="NHWC",
                            output_dtype=dtypes.int64,
                            name=None):
  """Performs max pooling and also returns flattened argmax indices."""
  # Only the NHWC layout is accepted here; anything else is rejected before
  # reaching the kernel.
  if data_format != "NHWC":
    raise ValueError("Data formats other than 'NHWC' are not yet supported")
  return gen_nn_ops.max_pool_with_argmax(
      input=input, ksize=ksize, strides=strides, padding=padding,
      Targmax=output_dtype, name=name)
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
  """Reports the FLOP count of a Conv2D node (2 flops per multiply-add)."""
  shape_of = graph_util.tensor_shape_from_node_def_name
  in_shape = shape_of(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  kernel_shape = shape_of(graph, node.input[1])
  kernel_shape.assert_is_fully_defined()
  out_shape = shape_of(graph, node.name)
  out_shape.assert_is_fully_defined()
  k_h = int(kernel_shape[0])
  k_w = int(kernel_shape[1])
  k_in = int(kernel_shape[2])
  n_outputs = np.prod(out_shape.as_list(), dtype=np.int64)
  # Each output element needs k_h * k_w * k_in multiply-adds.
  return ops.OpStats("flops", (n_outputs * k_in * k_h * k_w * 2))
@ops.RegisterStatistics("DepthwiseConv2dNative", "flops")
def _calc_depthwise_conv_flops(graph, node):
  """Reports the FLOP count of a DepthwiseConv2dNative node."""
  shape_of = graph_util.tensor_shape_from_node_def_name
  in_shape = shape_of(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  kernel_shape = shape_of(graph, node.input[1])
  kernel_shape.assert_is_fully_defined()
  out_shape = shape_of(graph, node.name)
  out_shape.assert_is_fully_defined()
  k_h = int(kernel_shape[0])
  k_w = int(kernel_shape[1])
  n_outputs = np.prod(out_shape.as_list(), dtype=np.int64)
  # Depthwise: each output element needs one k_h * k_w window of
  # multiply-adds (2 flops each); no input-depth factor.
  return ops.OpStats("flops", (n_outputs * k_h * k_w * 2))
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
  """Reports the FLOP count of a BiasAdd node: one add per input element."""
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  return ops.OpStats("flops", np.prod(in_shape.as_list()))
@tf_export(v1=["nn.xw_plus_b"])
def xw_plus_b(x, weights, biases, name=None):
  """Computes `matmul(x, weights) + biases`."""
  with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    return bias_add(math_ops.matmul(x, weights), biases, name=name)
def xw_plus_b_v1(x, weights, biases, name=None):
  """Computes `matmul(x, weights) + biases`, using the BiasAddV1 kernel."""
  with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    return bias_add_v1(math_ops.matmul(x, weights), biases, name=name)
def _get_noise_shape(x, noise_shape):
  """Resolves the dropout noise shape for input `x`.

  Returns `x`'s dynamic shape when `noise_shape` is None; otherwise tries to
  fill unknown (`None`) entries of `noise_shape` from `x`'s static shape.
  Anything that cannot be interpreted as a `TensorShape` (e.g. a dynamic
  shape tensor) is returned unchanged.
  """
  if noise_shape is None:
    return array_ops.shape(x)
  try:
    noise_shape_ = tensor_shape.as_shape(noise_shape)
  except (TypeError, ValueError):
    # Not a static shape (e.g. a Tensor); let the caller use it as-is.
    return noise_shape
  if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims):
    new_dims = []
    for i, dim in enumerate(x.shape.dims):
      # Prefer x's known dimension when noise_shape leaves it unspecified.
      if noise_shape_.dims[i].value is None and dim.value is not None:
        new_dims.append(dim.value)
      else:
        new_dims.append(noise_shape_.dims[i].value)
    return tensor_shape.TensorShape(new_dims)
  return noise_shape
@tf_export(v1=["nn.dropout"])
@deprecation.deprecated_args(None, "Please use `rate` instead of `keep_prob`. "
                             "Rate should be set to `rate = 1 - keep_prob`.",
                             "keep_prob")
def dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None,
            rate=None):
  """V1 dropout: translates the deprecated `keep_prob` into `rate`.

  `rate = 1 - keep_prob`; exactly one of the two must be given.  All real
  work is delegated to `dropout_v2`.

  Raises:
    ValueError: if `keep_prob` is not number-like, or neither argument
      was provided.
  """
  try:
    # A TypeError here means keep_prob does not support float arithmetic.
    keep = 1. - keep_prob if keep_prob is not None else None
  except TypeError:
    raise ValueError("keep_prob must be a floating point number or Tensor "
                     "(got %r)" % keep_prob)
  rate = deprecation.deprecated_argument_lookup(
      "rate", rate,
      "keep_prob", keep)
  if rate is None:
    raise ValueError("You must provide a rate to dropout.")
  return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
@tf_export("nn.dropout", v1=[])
def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):
  """Zeros elements of `x` with probability `rate`, scaling survivors.

  Kept elements are scaled by `1 / (1 - rate)` so the expected sum is
  preserved.  `noise_shape` lets the keep/drop decision broadcast across
  dimensions (e.g. whole-channel dropout).

  Raises:
    ValueError: if `x` is not floating point, or a Python-number `rate`
      falls outside [0, 1).
  """
  with ops.name_scope(name, "dropout", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if not x.dtype.is_floating:
      raise ValueError("x has to be a floating point tensor since it's going to"
                       " be scaled. Got a %s tensor instead." % x.dtype)
    if isinstance(rate, numbers.Real) and not (rate >= 0 and rate < 1):
      raise ValueError("rate must be a scalar tensor or a float in the "
                       "range [0, 1), got %g" % rate)
    # Early return if nothing needs to be dropped.
    if isinstance(rate, numbers.Real) and rate == 0:
      return x
    if context.executing_eagerly():
      if isinstance(rate, ops.EagerTensor):
        if rate.numpy() == 0:
          return x
    else:
      rate = ops.convert_to_tensor(
          rate, dtype=x.dtype, name="rate")
      rate.get_shape().assert_is_compatible_with(tensor_shape.scalar())
      # Do nothing if we know rate == 0
      if tensor_util.constant_value(rate) == 0:
        return x
    noise_shape = _get_noise_shape(x, noise_shape)
    keep_prob = 1 - rate
    # uniform [keep_prob, 1.0 + keep_prob)
    random_tensor = keep_prob
    random_tensor += random_ops.random_uniform(
        noise_shape, seed=seed, dtype=x.dtype)
    # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
    binary_tensor = math_ops.floor(random_tensor)
    ret = math_ops.divide(x, keep_prob) * binary_tensor
    if not context.executing_eagerly():
      # The floor + reshape path can lose the static shape; restore it.
      ret.set_shape(x.get_shape())
    return ret
@tf_export("math.top_k", "nn.top_k")
def top_k(input, k=1, sorted=True, name=None):  # pylint: disable=redefined-builtin
  """Finds values and indices of the `k` largest entries along the last
  dimension; pass-through to `gen_nn_ops.top_kv2`."""
  return gen_nn_ops.top_kv2(input, k=k, sorted=sorted, name=name)
def nth_element(input, n, reverse=False, name=None):  # pylint: disable=redefined-builtin
  """Finds the value of the `n`-th order statistic along the last dimension
  (largest-first when `reverse`); pass-through to `gen_nn_ops.nth_element`."""
  return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)
@tf_export(v1=["nn.fractional_max_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
                        "args are deprecated. Use fractional_max_pool_v2.")
def fractional_max_pool(value,
                        pooling_ratio,
                        pseudo_random=False,
                        overlapping=False,
                        deterministic=False,
                        seed=0,
                        seed2=0,
                        name=None):   # pylint: disable=redefined-builtin
  """Deprecated v1 fractional max pooling; forwards every argument —
  including the deprecated `deterministic`/`seed2` — straight to the kernel.
  Prefer `fractional_max_pool_v2`."""
  return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
                                        overlapping, deterministic, seed, seed2,
                                        name)
@tf_export("nn.fractional_max_pool", v1=[])
def fractional_max_pool_v2(value,
                           pooling_ratio,
                           pseudo_random=False,
                           overlapping=False,
                           seed=0,
                           name=None):  # pylint: disable=redefined-builtin
  """Performs fractional max pooling on `value`.

  A zero `seed` requests non-deterministic pooling regions; any other seed
  is split into the (seed, seed2) pair the kernel expects.
  """
  if seed != 0:
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
                                          overlapping, deterministic=True,
                                          seed=seed1, seed2=seed2, name=name)
  return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
                                        overlapping, deterministic=False,
                                        seed=0, seed2=0, name=name)
@tf_export(v1=["nn.fractional_avg_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
                        "args are deprecated. Use fractional_avg_pool_v2.")
def fractional_avg_pool(value,
                        pooling_ratio,
                        pseudo_random=False,
                        overlapping=False,
                        deterministic=False,
                        seed=0,
                        seed2=0,
                        name=None):  # pylint: disable=redefined-builtin
  """Deprecated v1 fractional average pooling; forwards every argument —
  including the deprecated `deterministic`/`seed2` — straight to the kernel.
  Prefer `fractional_avg_pool_v2`."""
  return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
                                        overlapping, deterministic, seed, seed2,
                                        name=name)
@tf_export("nn.fractional_avg_pool", v1=[])
def fractional_avg_pool_v2(value,
                           pooling_ratio,
                           pseudo_random=False,
                           overlapping=False,
                           seed=0,
                           name=None):  # pylint: disable=redefined-builtin
  """Performs fractional average pooling on `value`.

  A zero `seed` requests non-deterministic pooling regions; any other seed
  is split into the (seed, seed2) pair the kernel expects.
  """
  if seed != 0:
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
                                          overlapping, deterministic=True,
                                          seed=seed1, seed2=seed2, name=name)
  return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
                                        overlapping, deterministic=False,
                                        seed=0, seed2=0, name=name)
@tf_export(v1=["nn.conv1d"])
@deprecation.deprecated_arg_values(
    None,
    "`NCHW` for data_format is deprecated, use `NCW` instead",
    warn_once=True,
    data_format="NCHW")
@deprecation.deprecated_arg_values(
    None,
    "`NHWC` for data_format is deprecated, use `NWC` instead",
    warn_once=True,
    data_format="NHWC")
def conv1d(value,
           filters,
           stride,
           padding,
           use_cudnn_on_gpu=None,
           data_format=None,
           name=None):
  """Computes a 1-D convolution given 3-D input and filter tensors.

  Implemented by inserting a dummy spatial dimension of size 1 and calling
  the 2-D convolution kernel, then squeezing the dummy dimension back out.

  Raises:
    ValueError: if `data_format` is not one of None/"NWC"/"NHWC"/"NCW"/"NCHW".
  """
  with ops.name_scope(name, "conv1d", [value, filters]) as name:
    # Reshape the input tensor to [batch, 1, in_width, in_channels]
    if data_format is None or data_format == "NHWC" or data_format == "NWC":
      data_format = "NHWC"
      spatial_start_dim = 1
      strides = [1, 1, stride, 1]
    elif data_format == "NCHW" or data_format == "NCW":
      data_format = "NCHW"
      spatial_start_dim = 2
      strides = [1, 1, 1, stride]
    else:
      raise ValueError("data_format must be \"NWC\" or \"NCW\".")
    value = array_ops.expand_dims(value, spatial_start_dim)
    # Filters gain a leading height-1 dimension to match the 2-D kernel.
    filters = array_ops.expand_dims(filters, 0)
    result = gen_nn_ops.conv2d(
        value,
        filters,
        strides,
        padding,
        use_cudnn_on_gpu=use_cudnn_on_gpu,
        data_format=data_format)
    return array_ops.squeeze(result, [spatial_start_dim])
@tf_export("nn.conv1d", v1=[])
def conv1d_v2(input,  # pylint: disable=redefined-builtin
              filters,
              stride,
              padding,
              data_format=None,
              name=None):
  """Computes a 1-D convolution given 3-D input and filter tensors.

  Thin v2 wrapper: cuDNN is always requested, and the v1 implementation
  handles the data-format plumbing.
  """
  return conv1d(input, filters, stride, padding, use_cudnn_on_gpu=True,
                data_format=data_format, name=name)
def conv1d_transpose(
    value,
    filter,  # pylint: disable=redefined-builtin
    output_shape,
    stride,
    padding="SAME",
    data_format="NWC",
    name=None):
  """The transpose of `conv1d` (often called "deconvolution").

  Implemented by mapping the 1-D problem to a 2-D one with a dummy spatial
  dimension and calling `conv2d_backprop_input` (conv1d_transpose is the
  gradient of conv1d, not an actual deconvolution).

  Raises:
    ValueError: On malformed `output_shape`, an unknown `data_format`,
      channel mismatches, or an unsupported `padding` value.
  """
  with ops.name_scope(name, "conv1d_transpose",
                      [value, filter, output_shape]) as name:
    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(3)):
      raise ValueError("output_shape must have shape (3,), got {}".format(
          output_shape_.get_shape()))
    # The format could be either NWC or NCW, map to NHWC or NCHW
    if data_format is None or data_format == "NWC":
      data_format_2d = "NHWC"
      axis = 2
    elif data_format == "NCW":
      data_format_2d = "NCHW"
      axis = 1
    else:
      raise ValueError("data_format must be \"NWC\" or \"NCW\".")
    if not value.get_shape().dims[axis].is_compatible_with(
        filter.get_shape()[2]):
      raise ValueError("input channels does not match filter's input channels, "
                       "{} != {}".format(value.get_shape()[axis],
                                         filter.get_shape()[2]))
    # Static channel check only when output_shape is a concrete sequence.
    if isinstance(output_shape, (list, np.ndarray)):
      if not filter.get_shape().dims[1].is_compatible_with(
          output_shape[axis]):
        raise ValueError(
            "output_shape does not match filter's output channels, "
            "{} != {}".format(output_shape[axis],
                              filter.get_shape()[1]))
    if padding != "VALID" and padding != "SAME":
      raise ValueError("padding must be either VALID or SAME:"
                       " {}".format(padding))
    # Insert the dummy spatial dimension (height 1) into the output shape,
    # strides, input and filter so the 2-D backprop kernel can be used.
    if data_format_2d == "NHWC":
      output_shape_ = array_ops.concat(
          [output_shape_[:1], [1], output_shape_[1:]], axis=0)
      spatial_start_dim = 1
      strides = [1, 1, stride, 1]
    else:
      output_shape_ = array_ops.concat(
          [output_shape_[:2], [1], output_shape_[2:]], axis=0)
      spatial_start_dim = 2
      strides = [1, 1, 1, stride]
    value = array_ops.expand_dims(value, spatial_start_dim)
    filter = array_ops.expand_dims(filter, 0)
    result = gen_nn_ops.conv2d_backprop_input(
        input_sizes=output_shape_,
        filter=filter,
        out_backprop=value,
        strides=strides,
        padding=padding,
        data_format=data_format_2d,
        name=name)
    return array_ops.squeeze(result, [spatial_start_dim])
@ops.RegisterStatistics("Dilation2D", "flops")
def _calc_dilation2d_flops(graph, node):
  """Reports the FLOP count of a Dilation2D node."""
  shape_of = graph_util.tensor_shape_from_node_def_name
  in_shape = shape_of(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  kernel_shape = shape_of(graph, node.input[1])
  kernel_shape.assert_is_fully_defined()
  out_shape = shape_of(graph, node.name)
  out_shape.assert_is_fully_defined()
  k_h = int(kernel_shape[0])
  k_w = int(kernel_shape[1])
  n_outputs = np.prod(out_shape.as_list(), dtype=np.int64)
  # Two ops (add + compare) per kernel element, per output element.
  return ops.OpStats("flops", (n_outputs * k_h * k_w * 2))
@tf_export(v1=["nn.erosion2d"])
def erosion2d(value, kernel, strides, rates, padding, name=None):
  """Computes grayscale erosion of 4-D `value` by 3-D `kernel`.

  Uses the duality erosion(x, k) == -dilation(-x, reverse(k)) so the
  dilation kernel can do the work.
  """
  with ops.name_scope(name, "erosion2d", [value, kernel]) as name:
    flipped_kernel = array_ops.reverse_v2(kernel, [0, 1])
    dilated = gen_nn_ops.dilation2d(
        input=math_ops.negative(value),
        filter=flipped_kernel,
        strides=strides,
        rates=rates,
        padding=padding,
        name=name)
    return math_ops.negative(dilated)
@tf_export("nn.erosion2d", v1=[])
def erosion2d_v2(value,
                 filters,
                 strides,
                 padding,
                 data_format,
                 dilations,
                 name=None):
  """Computes grayscale erosion of 4-D `value` by 3-D `filters` (v2 API).

  Uses the duality erosion(x, k) == -dilation(-x, reverse(k)); only the
  NHWC layout is accepted.
  """
  if data_format != "NHWC":
    raise ValueError("Data formats other than NHWC are not yet supported")
  with ops.name_scope(name, "erosion2d", [value, filters]) as name:
    flipped_filters = array_ops.reverse_v2(filters, [0, 1])
    dilated = gen_nn_ops.dilation2d(
        input=math_ops.negative(value),
        filter=flipped_filters,
        strides=strides,
        rates=dilations,
        padding=padding,
        name=name)
    return math_ops.negative(dilated)
@tf_export(v1=["math.in_top_k", "nn.in_top_k"])
def in_top_k(predictions, targets, k, name=None):
  """Says whether each target class is within the top `k` predictions;
  pass-through to `gen_nn_ops.in_top_kv2`."""
  with ops.name_scope(name, "in_top_k"):
    return gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)
@tf_export("math.in_top_k", "nn.in_top_k", v1=[])
def in_top_k_v2(targets, predictions, k, name=None):
  # V2 alias with (targets, predictions) argument order swapped relative to
  # v1; the module copies `in_top_k.__doc__` onto this function below.
  return in_top_k(predictions, targets, k, name)
in_top_k_v2.__doc__ = in_top_k.__doc__
tf_export(v1=["nn.quantized_avg_pool"])(gen_nn_ops.quantized_avg_pool)
tf_export(v1=["nn.quantized_conv2d"])(gen_nn_ops.quantized_conv2d)
tf_export(v1=["nn.quantized_relu_x"])(gen_nn_ops.quantized_relu_x)
tf_export(v1=["nn.quantized_max_pool"])(gen_nn_ops.quantized_max_pool)
| true | true |
f71fd03a67d84d601549326e242f921bbfd460d7 | 3,974 | py | Python | alipay/aop/api/request/ZhimaCreditEpProductCodeQueryRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/request/ZhimaCreditEpProductCodeQueryRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/request/ZhimaCreditEpProductCodeQueryRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ZhimaCreditEpProductCodeQueryModel import ZhimaCreditEpProductCodeQueryModel
class ZhimaCreditEpProductCodeQueryRequest(object):
    """Request wrapper for the Alipay `zhima.credit.ep.product.code.query` API.

    Holds the business payload (`biz_model`/`biz_content`), common gateway
    fields (version, terminal info, notify/return URLs) and user-defined
    extra parameters, and serializes them with `get_params()`.
    """

    def __init__(self, biz_model=None):
        # Business model object; serialized into biz_content if set.
        self._biz_model = biz_model
        # Alternative payload; coerced into the model type by its setter.
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        # Optional dict of extra text parameters merged into get_params().
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict to convert.
        if isinstance(value, ZhimaCreditEpProductCodeQueryModel):
            self._biz_content = value
        else:
            self._biz_content = ZhimaCreditEpProductCodeQueryModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Non-dict values are silently ignored by design.
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Adds one user-defined text parameter, creating the dict lazily."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Builds the flat parameter dict sent to the Alipay gateway.

        `biz_model` takes the standard biz-content slot; an explicitly set
        `biz_content` is written under the literal 'biz_content' key.
        Falsy fields are omitted; udf_params are merged in last.
        """
        params = dict()
        params[P_METHOD] = 'zhima.credit.ep.product.code.query'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """Returns multipart (file) parameters; this API uploads none."""
        multipart_params = dict()
        return multipart_params
| 27.406897 | 148 | 0.64469 |
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ZhimaCreditEpProductCodeQueryModel import ZhimaCreditEpProductCodeQueryModel
class ZhimaCreditEpProductCodeQueryRequest(object):
    """Request wrapper for the Alipay `zhima.credit.ep.product.code.query` API.

    Holds the business payload (`biz_model`/`biz_content`), common gateway
    fields (version, terminal info, notify/return URLs) and user-defined
    extra parameters, and serializes them with `get_params()`.
    """

    def __init__(self, biz_model=None):
        # Business model object; serialized into biz_content if set.
        self._biz_model = biz_model
        # Alternative payload; coerced into the model type by its setter.
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        # Optional dict of extra text parameters merged into get_params().
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict to convert.
        if isinstance(value, ZhimaCreditEpProductCodeQueryModel):
            self._biz_content = value
        else:
            self._biz_content = ZhimaCreditEpProductCodeQueryModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Non-dict values are silently ignored by design.
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Adds one user-defined text parameter, creating the dict lazily."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Builds the flat parameter dict sent to the Alipay gateway.

        `biz_model` takes the standard biz-content slot; an explicitly set
        `biz_content` is written under the literal 'biz_content' key.
        Falsy fields are omitted; udf_params are merged in last.
        """
        params = dict()
        params[P_METHOD] = 'zhima.credit.ep.product.code.query'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """Returns multipart (file) parameters; this API uploads none."""
        multipart_params = dict()
        return multipart_params
| true | true |
f71fd0d406c540386851f557a8dd36be11fad94d | 37,269 | py | Python | pandas/core/indexes/datetimes.py | cgangwar11/pandas | 972f491cb7fdcc3c1c2cb9f05644128f13457f87 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2020-09-05T07:09:39.000Z | 2020-09-05T07:09:39.000Z | pandas/core/indexes/datetimes.py | cgangwar11/pandas | 972f491cb7fdcc3c1c2cb9f05644128f13457f87 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/indexes/datetimes.py | cgangwar11/pandas | 972f491cb7fdcc3c1c2cb9f05644128f13457f87 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | from datetime import date, datetime, time, timedelta, tzinfo
import operator
from typing import Optional
import warnings
import numpy as np
from pandas._libs import NaT, Period, Timestamp, index as libindex, lib
from pandas._libs.tslibs import (
Resolution,
ints_to_pydatetime,
parsing,
timezones,
to_offset,
)
from pandas._libs.tslibs.offsets import prefix_mapping
from pandas._typing import DtypeObj, Label
from pandas.errors import InvalidIndexError
from pandas.util._decorators import cache_readonly, doc
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_float,
is_integer,
is_scalar,
)
from pandas.core.dtypes.missing import is_valid_nat_for_dtype
from pandas.core.arrays.datetimes import DatetimeArray, tz_to_dtype
import pandas.core.common as com
from pandas.core.indexes.base import Index, maybe_extract_name
from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
from pandas.core.indexes.extension import inherit_names
from pandas.core.tools.times import to_time
def _new_DatetimeIndex(cls, d):
    """
    This is called upon unpickling, rather than the default which doesn't
    have arguments and breaks __new__
    """
    if "data" in d and not isinstance(d["data"], DatetimeIndex):
        # Avoid need to verify integrity by calling simple_new directly
        data = d.pop("data")
        if not isinstance(data, DatetimeArray):
            # For backward compat with older pickles, we may need to construct
            # a DatetimeArray to adapt to the newer _simple_new signature
            tz = d.pop("tz")
            freq = d.pop("freq")
            dta = DatetimeArray._simple_new(data, dtype=tz_to_dtype(tz), freq=freq)
        else:
            dta = data
            for key in ["tz", "freq"]:
                # These are already stored in our DatetimeArray; if they are
                # also in the pickle and don't match, we have a problem.
                if key in d:
                    assert d.pop(key) == getattr(dta, key)
        result = cls._simple_new(dta, **d)
    else:
        with warnings.catch_warnings():
            # TODO: If we knew what was going in to **d, we might be able to
            # go through _simple_new instead
            warnings.simplefilter("ignore")
            result = cls.__new__(cls, **d)

    return result
@inherit_names(
["to_perioddelta", "to_julian_date", "strftime", "isocalendar"]
+ DatetimeArray._field_ops
+ [
method
for method in DatetimeArray._datetimelike_methods
if method not in ("tz_localize",)
],
DatetimeArray,
wrap=True,
)
@inherit_names(["is_normalized", "_resolution_obj"], DatetimeArray, cache=True)
@inherit_names(
[
"_bool_ops",
"_object_ops",
"_field_ops",
"_datetimelike_ops",
"_datetimelike_methods",
"tz",
"tzinfo",
"dtype",
"to_pydatetime",
"_has_same_tz",
"_format_native_types",
"date",
"time",
"timetz",
]
+ DatetimeArray._bool_ops,
DatetimeArray,
)
class DatetimeIndex(DatetimeTimedeltaMixin):
"""
Immutable ndarray-like of datetime64 data.
Represented internally as int64, and which can be boxed to Timestamp objects
that are subclasses of datetime and carry metadata.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with.
freq : str or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation.
tz : pytz.timezone or dateutil.tz.tzfile or datetime.tzinfo or str
Set the Timezone of the data.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
closed : {'left', 'right'}, optional
Set whether to include `start` and `end` that are on the
boundary. The default includes boundary points on either end.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from 03:00
DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC
and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter
dictates how ambiguous times should be handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for ambiguous
times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous times.
dayfirst : bool, default False
If True, parse dates in `data` with the day first order.
yearfirst : bool, default False
If True parse dates in `data` with the year first order.
dtype : numpy.dtype or DatetimeTZDtype or str, default None
Note that the only NumPy dtype allowed is ‘datetime64[ns]’.
copy : bool, default False
Make a copy of input ndarray.
name : label, default None
Name to be stored in the index.
Attributes
----------
year
month
day
hour
minute
second
microsecond
nanosecond
date
time
timetz
dayofyear
weekofyear
week
dayofweek
weekday
quarter
tz
freq
freqstr
is_month_start
is_month_end
is_quarter_start
is_quarter_end
is_year_start
is_year_end
is_leap_year
inferred_freq
Methods
-------
normalize
strftime
snap
tz_convert
tz_localize
round
floor
ceil
to_period
to_perioddelta
to_pydatetime
to_series
to_frame
month_name
day_name
mean
See Also
--------
Index : The base pandas Index type.
TimedeltaIndex : Index of timedelta64 data.
PeriodIndex : Index of Period data.
to_datetime : Convert argument to datetime.
date_range : Create a fixed-frequency DatetimeIndex.
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
"""
_typ = "datetimeindex"
_engine_type = libindex.DatetimeEngine
_supports_partial_string_indexing = True
_comparables = ["name", "freqstr", "tz"]
_attributes = ["name", "tz", "freq"]
_is_numeric_dtype = False
_data: DatetimeArray
tz: Optional[tzinfo]
# --------------------------------------------------------------------
# methods that dispatch to array and wrap result in DatetimeIndex

@doc(DatetimeArray.tz_localize)
def tz_localize(
    self, tz, ambiguous="raise", nonexistent="raise"
) -> "DatetimeIndex":
    # Delegate to the backing DatetimeArray, then re-wrap as an Index
    # carrying the same name.
    arr = self._data.tz_localize(tz, ambiguous, nonexistent)
    return type(self)._simple_new(arr, name=self.name)

@doc(DatetimeArray.to_period)
def to_period(self, freq=None) -> "DatetimeIndex":
    # Delegate to the backing array; result is wrapped by _simple_new.
    arr = self._data.to_period(freq)
    return type(self)._simple_new(arr, name=self.name)
# --------------------------------------------------------------------
# Constructors
def __new__(
    cls,
    data=None,
    freq=lib.no_default,
    tz=None,
    normalize=False,
    closed=None,
    ambiguous="raise",
    dayfirst=False,
    yearfirst=False,
    dtype=None,
    copy=False,
    name=None,
):
    # An Index must be built from a collection; reject scalars early.
    if is_scalar(data):
        raise TypeError(
            f"{cls.__name__}() must be called with a "
            f"collection of some kind, {repr(data)} was passed"
        )

    # - Cases checked above all return/raise before reaching here - #

    name = maybe_extract_name(name, data, cls)

    # All parsing/validation (tz handling, freq inference/verification,
    # dayfirst/yearfirst parsing, DST ambiguity) is delegated to the
    # array constructor.
    dtarr = DatetimeArray._from_sequence(
        data,
        dtype=dtype,
        copy=copy,
        tz=tz,
        freq=freq,
        dayfirst=dayfirst,
        yearfirst=yearfirst,
        ambiguous=ambiguous,
    )

    subarr = cls._simple_new(dtarr, name=name)
    return subarr

@classmethod
def _simple_new(cls, values: DatetimeArray, name: Label = None):
    """Fastpath constructor: wrap an already-validated DatetimeArray."""
    assert isinstance(values, DatetimeArray), type(values)

    result = object.__new__(cls)
    result._data = values
    result.name = name
    result._cache = {}
    result._no_setting_name = False
    # For groupby perf. See note in indexes/base about _index_data
    result._index_data = values._data
    result._reset_identity()
    return result
# --------------------------------------------------------------------
@cache_readonly
def _is_dates_only(self) -> bool:
    """
    Return a boolean if we are only dates (and don't have a timezone)

    Returns
    -------
    bool
    """
    # Local import to avoid a circular dependency with io.formats.
    from pandas.io.formats.format import _is_dates_only

    return self.tz is None and _is_dates_only(self._values)

def __reduce__(self):

    # we use a special reduce here because we need
    # to simply set the .tz (and not reinterpret it)

    d = dict(data=self._data)
    d.update(self._get_attributes_dict())
    return _new_DatetimeIndex, (type(self), d), None

def _convert_for_op(self, value):
    """
    Convert value to be insertable to ndarray.
    """
    if self._has_same_tz(value):
        return Timestamp(value).asm8
    raise ValueError("Passed item and index have different timezone")

def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
    """
    Can we compare values of the given dtype to our own?
    """
    if not is_datetime64_any_dtype(dtype):
        return False
    if self.tz is not None:
        # If we have tz, we can compare to tzaware
        return is_datetime64tz_dtype(dtype)
    # if we dont have tz, we can only compare to tznaive
    return is_datetime64_dtype(dtype)

# --------------------------------------------------------------------
# Rendering Methods

def _mpl_repr(self):
    # how to represent ourselves to matplotlib
    return ints_to_pydatetime(self.asi8, self.tz)

@property
def _formatter_func(self):
    # Local import to avoid a circular dependency with io.formats.
    from pandas.io.formats.format import _get_format_datetime64

    formatter = _get_format_datetime64(is_dates_only=self._is_dates_only)
    return lambda x: f"'{formatter(x, tz=self.tz)}'"
# --------------------------------------------------------------------
# Set Operation Methods

def union_many(self, others):
    """
    A bit of a hack to accelerate unioning a collection of indexes.
    """
    this = self

    for other in others:
        if not isinstance(this, DatetimeIndex):
            # A previous iteration fell back to a generic Index; from here
            # on only generic unions are possible.
            this = Index.union(this, other)
            continue

        if not isinstance(other, DatetimeIndex):
            try:
                other = DatetimeIndex(other)
            except TypeError:
                # not datetime-like; let Index.union below handle it
                pass

        this, other = this._maybe_utc_convert(other)

        if this._can_fast_union(other):
            # overlapping/adjacent with compatible freq: cheap concatenation
            this = this._fast_union(other)
        else:
            this = Index.union(this, other)
    return this

# --------------------------------------------------------------------

def _get_time_micros(self):
    """
    Return the number of microseconds since midnight.

    Returns
    -------
    ndarray[int64_t]
    """
    values = self.asi8
    if self.tz is not None and not timezones.is_utc(self.tz):
        # use wall-clock values so times compare in local terms
        values = self._data._local_timestamps()

    # nanoseconds since midnight, then truncate to microseconds
    nanos = values % (24 * 3600 * 1_000_000_000)
    micros = nanos // 1000

    # sentinel for NaT entries; never matches a real time-of-day
    micros[self._isnan] = -1
    return micros
def to_series(self, keep_tz=lib.no_default, index=None, name=None):
    """
    Create a Series with both index and values equal to the index keys
    useful with map for returning an indexer based on an index.

    Parameters
    ----------
    keep_tz : optional, defaults True
        Return the data keeping the timezone.

        If keep_tz is True:

          If the timezone is not set, the resulting
          Series will have a datetime64[ns] dtype.

          Otherwise the Series will have an datetime64[ns, tz] dtype; the
          tz will be preserved.

        If keep_tz is False:

          Series will have a datetime64[ns] dtype. TZ aware
          objects will have the tz removed.

        .. versionchanged:: 1.0.0
            The default value is now True.  In a future version,
            this keyword will be removed entirely.  Stop passing the
            argument to obtain the future behavior and silence the warning.

    index : Index, optional
        Index of resulting Series. If None, defaults to original index.
    name : str, optional
        Name of resulting Series. If None, defaults to name of original
        index.

    Returns
    -------
    Series
    """
    from pandas import Series

    if index is None:
        index = self._shallow_copy()
    if name is None:
        name = self.name

    # lib.no_default distinguishes "not passed" from an explicit True/False;
    # any explicit value triggers a deprecation warning.
    if keep_tz is not lib.no_default:
        if keep_tz:
            warnings.warn(
                "The 'keep_tz' keyword in DatetimeIndex.to_series "
                "is deprecated and will be removed in a future version. "
                "You can stop passing 'keep_tz' to silence this warning.",
                FutureWarning,
                stacklevel=2,
            )
        else:
            warnings.warn(
                "Specifying 'keep_tz=False' is deprecated and this "
                "option will be removed in a future release. If "
                "you want to remove the timezone information, you "
                "can do 'idx.tz_convert(None)' before calling "
                "'to_series'.",
                FutureWarning,
                stacklevel=2,
            )
    else:
        keep_tz = True

    if keep_tz and self.tz is not None:
        # preserve the tz & copy
        values = self.copy(deep=True)
    else:
        # drop any tz by reinterpreting as naive datetime64[ns]
        values = self._values.view("M8[ns]").copy()

    return Series(values, index=index, name=name)
def snap(self, freq="S"):
    """
    Snap time stamps to nearest occurring frequency.

    Returns
    -------
    DatetimeIndex
    """
    # Superdumb, punting on any optimizing
    offset = to_offset(freq)

    snapped = np.empty(len(self), dtype=DT64NS_DTYPE)

    for pos, stamp in enumerate(self):
        if offset.is_on_offset(stamp):
            snapped[pos] = stamp
            continue
        prev_tick = offset.rollback(stamp)
        next_tick = offset.rollforward(stamp)
        # keep whichever boundary is closer; ties roll forward
        if abs(stamp - prev_tick) < abs(next_tick - stamp):
            snapped[pos] = prev_tick
        else:
            snapped[pos] = next_tick

    dta = DatetimeArray(snapped, dtype=self.dtype)
    return DatetimeIndex._simple_new(dta, name=self.name)
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
    """
    Calculate datetime bounds for parsed time string and its resolution.

    Parameters
    ----------
    reso : Resolution
        Resolution provided by parsed string.
    parsed : datetime
        Datetime from parsed string.

    Returns
    -------
    lower, upper: pd.Timestamp

    Raises
    ------
    KeyError
        If the resolution is not supported for partial-string bounds.
    ValueError
        If ``parsed`` carries a UTC offset but the index is tz-naive.
    """
    assert isinstance(reso, Resolution), (type(reso), reso)
    # Cleanup: the original set literal listed "minute" and "second" twice;
    # duplicates in a set literal are redundant, so they are dropped here.
    valid_resos = {
        "year",
        "month",
        "quarter",
        "day",
        "hour",
        "minute",
        "second",
        "microsecond",
    }
    if reso.attrname not in valid_resos:
        raise KeyError

    # The period covering the parsed string at its resolution gives the
    # half-open-ish [start_time, end_time] bounds.
    grp = reso.freq_group
    per = Period(parsed, freq=grp)
    start, end = per.start_time, per.end_time

    # GH 24076
    # If an incoming date string contained a UTC offset, need to localize
    # the parsed date to this offset first before aligning with the index's
    # timezone
    if parsed.tzinfo is not None:
        if self.tz is None:
            raise ValueError(
                "The index must be timezone aware when indexing "
                "with a date string with a UTC offset"
            )
        start = start.tz_localize(parsed.tzinfo).tz_convert(self.tz)
        end = end.tz_localize(parsed.tzinfo).tz_convert(self.tz)
    elif self.tz is not None:
        start = start.tz_localize(self.tz)
        end = end.tz_localize(self.tz)
    return start, end
def _validate_partial_date_slice(self, reso: Resolution):
    """
    Raise KeyError if a partial-date slice at resolution ``reso`` should
    not be attempted on this index; return None when it is allowed.
    """
    assert isinstance(reso, Resolution), (type(reso), reso)
    if (
        self.is_monotonic
        and reso.attrname in ["day", "hour", "minute", "second"]
        and self._resolution_obj >= reso
    ):
        # These resolution/monotonicity validations came from GH3931,
        # GH3452 and GH2369.
        # See also GH14826
        raise KeyError

    if reso.attrname == "microsecond":
        # BUG FIX: reso is a Resolution enum member, so the original
        # comparison ``reso == "microsecond"`` (enum vs. str) was always
        # False, making this guard dead code.  Compare attrname instead.
        # _partial_date_slice doesn't allow microsecond resolution, but
        # _parsed_string_to_bounds allows it.
        raise KeyError
def get_loc(self, key, method=None, tolerance=None):
    """
    Get integer location for requested label

    Returns
    -------
    loc : int
    """
    if not is_scalar(key):
        raise InvalidIndexError(key)

    orig_key = key
    # NaN/None/NaT-like keys are all treated as NaT for lookup purposes.
    if is_valid_nat_for_dtype(key, self.dtype):
        key = NaT

    if isinstance(key, self._data._recognized_scalars):
        # needed to localize naive datetimes
        key = self._maybe_cast_for_get_loc(key)

    elif isinstance(key, str):
        # First try partial-string indexing (e.g. "2019-01" -> whole month);
        # fall back to parsing as an exact Timestamp.
        try:
            return self._get_string_slice(key)
        except (TypeError, KeyError, ValueError, OverflowError):
            pass

        try:
            key = self._maybe_cast_for_get_loc(key)
        except ValueError as err:
            raise KeyError(key) from err

    elif isinstance(key, timedelta):
        # GH#20464
        raise TypeError(
            f"Cannot index {type(self).__name__} with {type(key).__name__}"
        )

    elif isinstance(key, time):
        if method is not None:
            raise NotImplementedError(
                "cannot yet lookup inexact labels when key is a time object"
            )
        return self.indexer_at_time(key)

    else:
        # unrecognized type
        raise KeyError(key)

    try:
        return Index.get_loc(self, key, method, tolerance)
    except KeyError as err:
        # report the caller's original key, not the cast Timestamp
        raise KeyError(orig_key) from err

def _maybe_cast_for_get_loc(self, key) -> Timestamp:
    # needed to localize naive datetimes
    key = Timestamp(key)
    if key.tzinfo is None:
        key = key.tz_localize(self.tz)
    else:
        key = key.tz_convert(self.tz)
    return key
def _maybe_cast_slice_bound(self, label, side: str, kind):
    """
    If label is a string, cast it to datetime according to resolution.

    Parameters
    ----------
    label : object
    side : {'left', 'right'}
    kind : {'loc', 'getitem'} or None

    Returns
    -------
    label : object

    Notes
    -----
    Value of `side` parameter should be validated in caller.
    """
    assert kind in ["loc", "getitem", None]

    # floats, ints and times are never valid slice bounds for datetimes
    if is_float(label) or isinstance(label, time) or is_integer(label):
        self._invalid_indexer("slice", label)

    if isinstance(label, str):
        freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
        parsed, reso = parsing.parse_time_string(label, freq)
        reso = Resolution.from_attrname(reso)
        lower, upper = self._parsed_string_to_bounds(reso, parsed)
        # lower, upper form the half-open interval:
        #   [parsed, parsed + 1 freq)
        # because label may be passed to searchsorted
        # the bounds need swapped if index is reverse sorted and has a
        # length > 1 (is_monotonic_decreasing gives True for empty
        # and length 1 index)
        if self._is_strictly_monotonic_decreasing and len(self) > 1:
            return upper if side == "left" else lower
        return lower if side == "left" else upper
    else:
        return label

def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):
    # Partial string indexing, e.g. "2019-01" selecting the whole month.
    freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
    parsed, reso = parsing.parse_time_string(key, freq)
    reso = Resolution.from_attrname(reso)
    loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs, use_rhs=use_rhs)
    return loc

def slice_indexer(self, start=None, end=None, step=None, kind=None):
    """
    Return indexer for specified label slice.
    Index.slice_indexer, customized to handle time slicing.

    In addition to functionality provided by Index.slice_indexer, does the
    following:

    - if both `start` and `end` are instances of `datetime.time`, it
      invokes `indexer_between_time`
    - if `start` and `end` are both either string or None perform
      value-based selection in non-monotonic cases.
    """
    # For historical reasons DatetimeIndex supports slices between two
    # instances of datetime.time as if it were applying a slice mask to
    # an array of (self.hour, self.minute, self.seconds, self.microsecond).
    if isinstance(start, time) and isinstance(end, time):
        if step is not None and step != 1:
            raise ValueError("Must have step size of 1 with time slices")
        return self.indexer_between_time(start, end)

    if isinstance(start, time) or isinstance(end, time):
        raise KeyError("Cannot mix time and non-time slice keys")

    # Pandas supports slicing with dates, treated as datetimes at midnight.
    # https://github.com/pandas-dev/pandas/issues/31501
    if isinstance(start, date) and not isinstance(start, datetime):
        start = datetime.combine(start, time(0, 0))
    if isinstance(end, date) and not isinstance(end, datetime):
        end = datetime.combine(end, time(0, 0))

    try:
        return Index.slice_indexer(self, start, end, step, kind=kind)
    except KeyError:
        # For historical reasons DatetimeIndex by default supports
        # value-based partial (aka string) slices on non-monotonic arrays,
        # let's try that.
        if (start is None or isinstance(start, str)) and (
            end is None or isinstance(end, str)
        ):
            mask = True
            if start is not None:
                start_casted = self._maybe_cast_slice_bound(start, "left", kind)
                mask = start_casted <= self
            if end is not None:
                end_casted = self._maybe_cast_slice_bound(end, "right", kind)
                mask = (self <= end_casted) & mask

            indexer = mask.nonzero()[0][::step]
            if len(indexer) == len(self):
                # selecting everything: return a cheap full slice instead
                return slice(None)
            else:
                return indexer
        else:
            raise
# --------------------------------------------------------------------

def is_type_compatible(self, typ) -> bool:
    # Datetime-like indexes can absorb both "datetime64" and "datetime".
    return typ == self.inferred_type or typ == "datetime"

@property
def inferred_type(self) -> str:
    # b/c datetime is represented as microseconds since the epoch, make
    # sure we can't have ambiguous indexing
    return "datetime64"

def indexer_at_time(self, time, asof=False):
    """
    Return index locations of values at particular time of day
    (e.g. 9:30AM).

    Parameters
    ----------
    time : datetime.time or str
        Time passed in either as object (datetime.time) or as string in
        appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
        "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p").

    Returns
    -------
    values_at_time : array of integers

    See Also
    --------
    indexer_between_time : Get index locations of values between particular
        times of day.
    DataFrame.at_time : Select values at particular time of day.
    """
    if asof:
        raise NotImplementedError("'asof' argument is not supported")

    if isinstance(time, str):
        from dateutil.parser import parse

        time = parse(time).time()

    if time.tzinfo:
        if self.tz is None:
            raise ValueError("Index must be timezone aware.")
        # compare in the query time's own timezone
        time_micros = self.tz_convert(time.tzinfo)._get_time_micros()
    else:
        time_micros = self._get_time_micros()
    micros = _time_to_micros(time)
    return (micros == time_micros).nonzero()[0]

def indexer_between_time(
    self, start_time, end_time, include_start=True, include_end=True
):
    """
    Return index locations of values between particular times of day
    (e.g., 9:00-9:30AM).

    Parameters
    ----------
    start_time, end_time : datetime.time, str
        Time passed either as object (datetime.time) or as string in
        appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
        "%H:%M:%S", "%H%M%S", "%I:%M:%S%p","%I%M%S%p").
    include_start : bool, default True
    include_end : bool, default True

    Returns
    -------
    values_between_time : array of integers

    See Also
    --------
    indexer_at_time : Get index locations of values at particular time of day.
    DataFrame.between_time : Select values between particular times of day.
    """
    start_time = to_time(start_time)
    end_time = to_time(end_time)
    time_micros = self._get_time_micros()
    start_micros = _time_to_micros(start_time)
    end_micros = _time_to_micros(end_time)

    # choose strict/non-strict comparators per the include_* flags
    if include_start and include_end:
        lop = rop = operator.le
    elif include_start:
        lop = operator.le
        rop = operator.lt
    elif include_end:
        lop = operator.lt
        rop = operator.le
    else:
        lop = rop = operator.lt

    if start_time <= end_time:
        join_op = operator.and_
    else:
        # interval wraps past midnight (e.g. 23:00-01:00): union of halves
        join_op = operator.or_

    mask = join_op(lop(start_micros, time_micros), rop(time_micros, end_micros))

    return mask.nonzero()[0]
DatetimeIndex._add_logical_methods_disabled()
def date_range(
    start=None,
    end=None,
    periods=None,
    freq=None,
    tz=None,
    normalize=False,
    name=None,
    closed=None,
    **kwargs,
) -> DatetimeIndex:
    """
    Return a fixed frequency DatetimeIndex.

    Of ``start``, ``end``, ``periods`` and ``freq``, exactly three must be
    specified.  When ``freq`` is omitted, the result has ``periods``
    linearly spaced elements between ``start`` and ``end`` (closed on both
    sides).

    Parameters
    ----------
    start, end : str or datetime-like, optional
        Left/right bounds for generating dates.
    periods : int, optional
        Number of periods to generate.
    freq : str or DateOffset, default 'D'
        Frequency string (multiples allowed, e.g. '5H') or offset object.
    tz : str or tzinfo, optional
        Time zone name for a localized DatetimeIndex (e.g.
        'Asia/Hong_Kong'); the result is timezone-naive by default.
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range.
    name : str, default None
        Name of the resulting DatetimeIndex.
    closed : {None, 'left', 'right'}, optional
        Whether boundary points are included; None (default) keeps both.
    **kwargs
        For compatibility. Has no effect on the result.

    Returns
    -------
    rng : DatetimeIndex

    See Also
    --------
    DatetimeIndex : An immutable container for datetimes.
    timedelta_range : Return a fixed frequency TimedeltaIndex.
    period_range : Return a fixed frequency PeriodIndex.
    interval_range : Return a fixed frequency IntervalIndex.
    """
    # Only default to calendar-day frequency when the caller supplied just
    # two of start/end/periods; with all three, spacing is inferred.
    if freq is None and com.any_none(periods, start, end):
        freq = "D"

    arr = DatetimeArray._generate_range(
        start=start,
        end=end,
        periods=periods,
        freq=freq,
        tz=tz,
        normalize=normalize,
        closed=closed,
        **kwargs,
    )
    return DatetimeIndex._simple_new(arr, name=name)
def bdate_range(
    start=None,
    end=None,
    periods=None,
    freq="B",
    tz=None,
    normalize=True,
    name=None,
    weekmask=None,
    holidays=None,
    closed=None,
    **kwargs,
) -> DatetimeIndex:
    """
    Return a fixed frequency DatetimeIndex, with business day as the default
    frequency.

    Exactly three of ``start``, ``end``, ``periods`` and ``freq`` must be
    specified, and ``freq`` is mandatory here — use :func:`date_range` if
    you want ``freq`` inferred.

    Parameters
    ----------
    start, end : str or datetime-like, default None
        Left/right bounds for generating dates.
    periods : int, default None
        Number of periods to generate.
    freq : str or DateOffset, default 'B' (business daily)
        Frequency string; multiples allowed, e.g. '5H'.
    tz : str or None
        Time zone name for a localized DatetimeIndex, e.g. Asia/Beijing.
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range.
    name : str, default None
        Name of the resulting DatetimeIndex.
    weekmask : str or None, default None
        Weekmask of valid business days for ``numpy.busdaycalendar``; only
        used with custom ("C"-prefixed) frequency strings.  None means
        'Mon Tue Wed Thu Fri'.
    holidays : list-like or None, default None
        Dates to exclude from the set of valid business days; only used
        with custom frequency strings.
    closed : str, default None
        Make the interval closed on the 'left', 'right', or both (None).
    **kwargs
        For compatibility. Has no effect on the result.

    Returns
    -------
    DatetimeIndex
    """
    if freq is None:
        msg = "freq must be specified for bdate_range; use date_range instead"
        raise TypeError(msg)

    if isinstance(freq, str) and freq.startswith("C"):
        # Custom business frequency: build the offset from the calendar args.
        try:
            weekmask = weekmask or "Mon Tue Wed Thu Fri"
            freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask)
        except (KeyError, TypeError) as err:
            msg = f"invalid custom frequency string: {freq}"
            raise ValueError(msg) from err
    elif holidays or weekmask:
        # Calendar arguments only make sense with a custom frequency.
        msg = (
            "a custom frequency string is required when holidays or "
            f"weekmask are passed, got frequency {freq}"
        )
        raise ValueError(msg)

    return date_range(
        start=start,
        end=end,
        periods=periods,
        freq=freq,
        tz=tz,
        normalize=normalize,
        name=name,
        closed=closed,
        **kwargs,
    )
def _time_to_micros(time_obj: time) -> int:
seconds = time_obj.hour * 60 * 60 + 60 * time_obj.minute + time_obj.second
return 1_000_000 * seconds + time_obj.microsecond
| 33.395161 | 96 | 0.584937 | from datetime import date, datetime, time, timedelta, tzinfo
import operator
from typing import Optional
import warnings
import numpy as np
from pandas._libs import NaT, Period, Timestamp, index as libindex, lib
from pandas._libs.tslibs import (
Resolution,
ints_to_pydatetime,
parsing,
timezones,
to_offset,
)
from pandas._libs.tslibs.offsets import prefix_mapping
from pandas._typing import DtypeObj, Label
from pandas.errors import InvalidIndexError
from pandas.util._decorators import cache_readonly, doc
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_float,
is_integer,
is_scalar,
)
from pandas.core.dtypes.missing import is_valid_nat_for_dtype
from pandas.core.arrays.datetimes import DatetimeArray, tz_to_dtype
import pandas.core.common as com
from pandas.core.indexes.base import Index, maybe_extract_name
from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
from pandas.core.indexes.extension import inherit_names
from pandas.core.tools.times import to_time
def _new_DatetimeIndex(cls, d):
    """
    Reconstruct a DatetimeIndex from a pickled state dict ``d``.

    Used as the callable returned by ``DatetimeIndex.__reduce__`` so that
    unpickling sets tz/freq directly instead of re-inferring them.
    """
    if "data" in d and not isinstance(d["data"], DatetimeIndex):
        # Rewrap the raw payload into a DatetimeArray without re-inference.
        data = d.pop("data")
        if not isinstance(data, DatetimeArray):
            # Raw ndarray payload: tz/freq travel separately in the dict.
            tz = d.pop("tz")
            freq = d.pop("freq")
            dta = DatetimeArray._simple_new(data, dtype=tz_to_dtype(tz), freq=freq)
        else:
            dta = data
            for key in ["tz", "freq"]:
                if key in d:
                    # tz/freq are already carried by the array; drop the
                    # duplicates after checking they agree.
                    assert d.pop(key) == getattr(dta, key)
        result = cls._simple_new(dta, **d)
    else:
        with warnings.catch_warnings():
            # TODO: If we knew what was going in to **d, we might be able to
            # go through _simple_new instead
            warnings.simplefilter("ignore")
            result = cls.__new__(cls, **d)
    return result
@inherit_names(
    ["to_perioddelta", "to_julian_date", "strftime", "isocalendar"]
    + DatetimeArray._field_ops
    + [
        method
        for method in DatetimeArray._datetimelike_methods
        if method not in ("tz_localize",)
    ],
    DatetimeArray,
    wrap=True,
)
@inherit_names(["is_normalized", "_resolution_obj"], DatetimeArray, cache=True)
@inherit_names(
    [
        "_bool_ops",
        "_object_ops",
        "_field_ops",
        "_datetimelike_ops",
        "_datetimelike_methods",
        "tz",
        "tzinfo",
        "dtype",
        "to_pydatetime",
        "_has_same_tz",
        "_format_native_types",
        "date",
        "time",
        "timetz",
    ]
    + DatetimeArray._bool_ops,
    DatetimeArray,
)
class DatetimeIndex(DatetimeTimedeltaMixin):
    """
    Immutable Index of datetime64 data.

    The actual datetime logic lives on the wrapped ``DatetimeArray``
    (``self._data``); the ``inherit_names`` decorators above re-expose
    its fields and methods on the index, wrapping results back into a
    DatetimeIndex where appropriate.
    """

    _typ = "datetimeindex"
    _engine_type = libindex.DatetimeEngine
    _supports_partial_string_indexing = True
    _comparables = ["name", "freqstr", "tz"]
    _attributes = ["name", "tz", "freq"]
    _is_numeric_dtype = False
    _data: DatetimeArray
    tz: Optional[tzinfo]
    # --------------------------------------------------------------------
    # methods that dispatch to array and wrap result in DatetimeIndex
    @doc(DatetimeArray.tz_localize)
    def tz_localize(
        self, tz, ambiguous="raise", nonexistent="raise"
    ) -> "DatetimeIndex":
        arr = self._data.tz_localize(tz, ambiguous, nonexistent)
        return type(self)._simple_new(arr, name=self.name)
    @doc(DatetimeArray.to_period)
    def to_period(self, freq=None) -> "DatetimeIndex":
        arr = self._data.to_period(freq)
        return type(self)._simple_new(arr, name=self.name)
    # --------------------------------------------------------------------
    # Constructors
    def __new__(
        cls,
        data=None,
        freq=lib.no_default,
        tz=None,
        normalize=False,
        closed=None,
        ambiguous="raise",
        dayfirst=False,
        yearfirst=False,
        dtype=None,
        copy=False,
        name=None,
    ):
        # Reject scalars early: an index must be built from a collection.
        if is_scalar(data):
            raise TypeError(
                f"{cls.__name__}() must be called with a "
                f"collection of some kind, {repr(data)} was passed"
            )
        # - Cases checked above all return/raise before reaching here - #
        name = maybe_extract_name(name, data, cls)
        # All parsing/localization/validation is delegated to the array.
        dtarr = DatetimeArray._from_sequence(
            data,
            dtype=dtype,
            copy=copy,
            tz=tz,
            freq=freq,
            dayfirst=dayfirst,
            yearfirst=yearfirst,
            ambiguous=ambiguous,
        )
        subarr = cls._simple_new(dtarr, name=name)
        return subarr
    @classmethod
    def _simple_new(cls, values: DatetimeArray, name: Label = None):
        """Fastpath constructor from an already-validated DatetimeArray."""
        assert isinstance(values, DatetimeArray), type(values)
        result = object.__new__(cls)
        result._data = values
        result.name = name
        result._cache = {}
        result._no_setting_name = False
        # For groupby perf. See note in indexes/base about _index_data
        result._index_data = values._data
        result._reset_identity()
        return result
    # --------------------------------------------------------------------
    @cache_readonly
    def _is_dates_only(self) -> bool:
        """True when tz-naive and the formatter helper deems all values dates."""
        from pandas.io.formats.format import _is_dates_only
        return self.tz is None and _is_dates_only(self._values)
    def __reduce__(self):
        # we use a special reduce here because we need
        # to simply set the .tz (and not reinterpret it)
        d = dict(data=self._data)
        d.update(self._get_attributes_dict())
        return _new_DatetimeIndex, (type(self), d), None
    def _convert_for_op(self, value):
        """Convert a comparable value to its datetime64 representation."""
        if self._has_same_tz(value):
            return Timestamp(value).asm8
        raise ValueError("Passed item and index have different timezone")
    def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
        """Can we compare values of this dtype to ours (tz-awareness must match)?"""
        if not is_datetime64_any_dtype(dtype):
            return False
        if self.tz is not None:
            # If we have tz, we can compare to tzaware
            return is_datetime64tz_dtype(dtype)
        # if we dont have tz, we can only compare to tznaive
        return is_datetime64_dtype(dtype)
    # --------------------------------------------------------------------
    # Rendering Methods
    def _mpl_repr(self):
        # how to represent ourselves to matplotlib
        return ints_to_pydatetime(self.asi8, self.tz)
    @property
    def _formatter_func(self):
        from pandas.io.formats.format import _get_format_datetime64
        formatter = _get_format_datetime64(is_dates_only=self._is_dates_only)
        return lambda x: f"'{formatter(x, tz=self.tz)}'"
    # --------------------------------------------------------------------
    # Set Operation Methods
    def union_many(self, others):
        """Successively union ``self`` with each index in ``others``,
        taking the fast union path whenever both operands allow it."""
        this = self
        for other in others:
            if not isinstance(this, DatetimeIndex):
                # Accumulated result degenerated to a generic Index; fall
                # back to the generic union from here on.
                this = Index.union(this, other)
                continue
            if not isinstance(other, DatetimeIndex):
                try:
                    other = DatetimeIndex(other)
                except TypeError:
                    pass
            this, other = this._maybe_utc_convert(other)
            if this._can_fast_union(other):
                this = this._fast_union(other)
            else:
                this = Index.union(this, other)
        return this
    # --------------------------------------------------------------------
    def _get_time_micros(self):
        """Microseconds since midnight (wall time) per element; NaT -> -1."""
        values = self.asi8
        if self.tz is not None and not timezones.is_utc(self.tz):
            # Convert i8 values to local wall time before taking time-of-day.
            values = self._data._local_timestamps()
        nanos = values % (24 * 3600 * 1_000_000_000)
        micros = nanos // 1000
        micros[self._isnan] = -1
        return micros
    def to_series(self, keep_tz=lib.no_default, index=None, name=None):
        """Create a Series with index and values both equal to this index."""
        from pandas import Series
        if index is None:
            index = self._shallow_copy()
        if name is None:
            name = self.name
        if keep_tz is not lib.no_default:
            # 'keep_tz' is deprecated either way; warn accordingly.
            if keep_tz:
                warnings.warn(
                    "The 'keep_tz' keyword in DatetimeIndex.to_series "
                    "is deprecated and will be removed in a future version. "
                    "You can stop passing 'keep_tz' to silence this warning.",
                    FutureWarning,
                    stacklevel=2,
                )
            else:
                warnings.warn(
                    "Specifying 'keep_tz=False' is deprecated and this "
                    "option will be removed in a future release. If "
                    "you want to remove the timezone information, you "
                    "can do 'idx.tz_convert(None)' before calling "
                    "'to_series'.",
                    FutureWarning,
                    stacklevel=2,
                )
        else:
            keep_tz = True
        if keep_tz and self.tz is not None:
            # preserve the tz & copy
            values = self.copy(deep=True)
        else:
            values = self._values.view("M8[ns]").copy()
        return Series(values, index=index, name=name)
    def snap(self, freq="S"):
        """Snap each timestamp to the nearest occurrence of ``freq``."""
        # Superdumb, punting on any optimizing
        freq = to_offset(freq)
        snapped = np.empty(len(self), dtype=DT64NS_DTYPE)
        for i, v in enumerate(self):
            s = v
            if not freq.is_on_offset(s):
                t0 = freq.rollback(s)
                t1 = freq.rollforward(s)
                if abs(s - t0) < abs(t1 - s):
                    s = t0
                else:
                    s = t1
            snapped[i] = s
        dta = DatetimeArray(snapped, dtype=self.dtype)
        return DatetimeIndex._simple_new(dta, name=self.name)
    def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
        """Return the (start, end) Timestamp bounds of the period implied by
        a parsed datetime string at resolution ``reso``."""
        assert isinstance(reso, Resolution), (type(reso), reso)
        # NOTE: "minute"/"second" appear twice below; duplicates are
        # harmless in a set literal.
        valid_resos = {
            "year",
            "month",
            "quarter",
            "day",
            "hour",
            "minute",
            "second",
            "minute",
            "second",
            "microsecond",
        }
        if reso.attrname not in valid_resos:
            raise KeyError
        grp = reso.freq_group
        per = Period(parsed, freq=grp)
        start, end = per.start_time, per.end_time
        # GH 24076
        # If an incoming date string contained a UTC offset, need to localize
        # the parsed date to this offset first before aligning with the index's
        if parsed.tzinfo is not None:
            if self.tz is None:
                raise ValueError(
                    "The index must be timezone aware when indexing "
                    "with a date string with a UTC offset"
                )
            start = start.tz_localize(parsed.tzinfo).tz_convert(self.tz)
            end = end.tz_localize(parsed.tzinfo).tz_convert(self.tz)
        elif self.tz is not None:
            start = start.tz_localize(self.tz)
            end = end.tz_localize(self.tz)
        return start, end
    def _validate_partial_date_slice(self, reso: Resolution):
        """Raise KeyError when a partial-date slice at ``reso`` is disallowed."""
        assert isinstance(reso, Resolution), (type(reso), reso)
        if (
            self.is_monotonic
            and reso.attrname in ["day", "hour", "minute", "second"]
            and self._resolution_obj >= reso
        ):
            # A string resolution no finer than the index's own resolution is
            # treated as an exact lookup, not a slice.
            raise KeyError
        # NOTE(review): compares a Resolution to a plain str here -- relies on
        # Resolution's equality semantics accepting attrname strings; confirm.
        if reso == "microsecond":
            # _parsed_string_to_bounds allows it.
            raise KeyError
    def get_loc(self, key, method=None, tolerance=None):
        """Get integer location (or slice/mask for partial strings) for label."""
        if not is_scalar(key):
            raise InvalidIndexError(key)
        orig_key = key
        if is_valid_nat_for_dtype(key, self.dtype):
            key = NaT
        if isinstance(key, self._data._recognized_scalars):
            # needed to localize naive datetimes
            key = self._maybe_cast_for_get_loc(key)
        elif isinstance(key, str):
            # First try a partial date-string slice, then an exact Timestamp.
            try:
                return self._get_string_slice(key)
            except (TypeError, KeyError, ValueError, OverflowError):
                pass
            try:
                key = self._maybe_cast_for_get_loc(key)
            except ValueError as err:
                raise KeyError(key) from err
        elif isinstance(key, timedelta):
            # GH#20464
            raise TypeError(
                f"Cannot index {type(self).__name__} with {type(key).__name__}"
            )
        elif isinstance(key, time):
            if method is not None:
                raise NotImplementedError(
                    "cannot yet lookup inexact labels when key is a time object"
                )
            return self.indexer_at_time(key)
        else:
            # unrecognized type
            raise KeyError(key)
        try:
            return Index.get_loc(self, key, method, tolerance)
        except KeyError as err:
            # Re-raise with the caller's original key for a clearer message.
            raise KeyError(orig_key) from err
    def _maybe_cast_for_get_loc(self, key) -> Timestamp:
        # needed to localize naive datetimes
        key = Timestamp(key)
        if key.tzinfo is None:
            key = key.tz_localize(self.tz)
        else:
            key = key.tz_convert(self.tz)
        return key
    def _maybe_cast_slice_bound(self, label, side: str, kind):
        """Cast a slice bound to a Timestamp, honoring sort order for strings."""
        assert kind in ["loc", "getitem", None]
        if is_float(label) or isinstance(label, time) or is_integer(label):
            self._invalid_indexer("slice", label)
        if isinstance(label, str):
            freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
            parsed, reso = parsing.parse_time_string(label, freq)
            reso = Resolution.from_attrname(reso)
            lower, upper = self._parsed_string_to_bounds(reso, parsed)
            # lower, upper form the half-open interval:
            # [parsed, parsed + 1 freq)
            # because label may be passed to searchsorted
            # the bounds need swapped if index is reverse sorted and has a
            # length > 1 (is_monotonic_decreasing gives True for empty
            # and length 1 index)
            if self._is_strictly_monotonic_decreasing and len(self) > 1:
                return upper if side == "left" else lower
            return lower if side == "left" else upper
        else:
            return label
    def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):
        """Resolve a partial date string to index location(s)."""
        freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
        parsed, reso = parsing.parse_time_string(key, freq)
        reso = Resolution.from_attrname(reso)
        loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs, use_rhs=use_rhs)
        return loc
    def slice_indexer(self, start=None, end=None, step=None, kind=None):
        """Compute the slice indexer for given labels, with time-of-day and
        partial-string fallbacks on top of Index.slice_indexer."""
        # For historical reasons DatetimeIndex supports slices between two
        # instances of datetime.time as if it were applying a slice mask to
        # an array of (self.hour, self.minute, self.seconds, self.microsecond).
        if isinstance(start, time) and isinstance(end, time):
            if step is not None and step != 1:
                raise ValueError("Must have step size of 1 with time slices")
            return self.indexer_between_time(start, end)
        if isinstance(start, time) or isinstance(end, time):
            raise KeyError("Cannot mix time and non-time slice keys")
        # Pandas supports slicing with dates, treated as datetimes at midnight.
        # https://github.com/pandas-dev/pandas/issues/31501
        if isinstance(start, date) and not isinstance(start, datetime):
            start = datetime.combine(start, time(0, 0))
        if isinstance(end, date) and not isinstance(end, datetime):
            end = datetime.combine(end, time(0, 0))
        try:
            return Index.slice_indexer(self, start, end, step, kind=kind)
        except KeyError:
            # For historical reasons DatetimeIndex by default supports
            # value-based partial (aka string) slices on non-monotonic arrays,
            # let's try that.
            if (start is None or isinstance(start, str)) and (
                end is None or isinstance(end, str)
            ):
                mask = True
                if start is not None:
                    start_casted = self._maybe_cast_slice_bound(start, "left", kind)
                    mask = start_casted <= self
                if end is not None:
                    end_casted = self._maybe_cast_slice_bound(end, "right", kind)
                    mask = (self <= end_casted) & mask
                indexer = mask.nonzero()[0][::step]
                if len(indexer) == len(self):
                    return slice(None)
                else:
                    return indexer
            else:
                raise
    def is_type_compatible(self, typ) -> bool:
        return typ == self.inferred_type or typ == "datetime"
    @property
    def inferred_type(self) -> str:
        # b/c datetime is represented as microseconds since the epoch, make
        # sure we can't have ambiguous indexing (GH is visible upstream)
        return "datetime64"
    def indexer_at_time(self, time, asof=False):
        """Return index positions of values at a particular time of day."""
        if asof:
            raise NotImplementedError("'asof' argument is not supported")
        if isinstance(time, str):
            from dateutil.parser import parse
            time = parse(time).time()
        if time.tzinfo:
            if self.tz is None:
                raise ValueError("Index must be timezone aware.")
            # Compare in the query time's timezone.
            time_micros = self.tz_convert(time.tzinfo)._get_time_micros()
        else:
            time_micros = self._get_time_micros()
        micros = _time_to_micros(time)
        return (micros == time_micros).nonzero()[0]
    def indexer_between_time(
        self, start_time, end_time, include_start=True, include_end=True
    ):
        """Return index positions of values whose time of day falls between
        ``start_time`` and ``end_time`` (wrapping midnight if start > end)."""
        start_time = to_time(start_time)
        end_time = to_time(end_time)
        time_micros = self._get_time_micros()
        start_micros = _time_to_micros(start_time)
        end_micros = _time_to_micros(end_time)
        # Select <= / < per the inclusion flags.
        if include_start and include_end:
            lop = rop = operator.le
        elif include_start:
            lop = operator.le
            rop = operator.lt
        elif include_end:
            lop = operator.lt
            rop = operator.le
        else:
            lop = rop = operator.lt
        # If the window wraps past midnight, union the two half-intervals.
        if start_time <= end_time:
            join_op = operator.and_
        else:
            join_op = operator.or_
        mask = join_op(lop(start_micros, time_micros), rop(time_micros, end_micros))
        return mask.nonzero()[0]
# Disable logical reductions (e.g. any/all) on DatetimeIndex.
DatetimeIndex._add_logical_methods_disabled()
def date_range(
    start=None,
    end=None,
    periods=None,
    freq=None,
    tz=None,
    normalize=False,
    name=None,
    closed=None,
    **kwargs,
) -> DatetimeIndex:
    """
    Return a fixed-frequency DatetimeIndex.

    If ``freq`` is None and any of ``periods``/``start``/``end`` is also
    None, a daily frequency ("D") is assumed.  Range generation itself is
    delegated to ``DatetimeArray._generate_range``.
    """
    if freq is None and com.any_none(periods, start, end):
        freq = "D"
    dtarr = DatetimeArray._generate_range(
        start=start,
        end=end,
        periods=periods,
        freq=freq,
        tz=tz,
        normalize=normalize,
        closed=closed,
        **kwargs,
    )
    return DatetimeIndex._simple_new(dtarr, name=name)
def bdate_range(
    start=None,
    end=None,
    periods=None,
    freq="B",
    tz=None,
    normalize=True,
    name=None,
    weekmask=None,
    holidays=None,
    closed=None,
    **kwargs,
) -> DatetimeIndex:
    """
    Return a fixed-frequency DatetimeIndex with business day as the default
    frequency.

    ``freq`` is mandatory here (unlike ``date_range``).  Custom frequency
    strings starting with "C" are resolved via ``prefix_mapping`` and may
    carry ``weekmask``/``holidays``; passing ``weekmask``/``holidays``
    without such a custom frequency raises ValueError.  The actual range is
    built by delegating to ``date_range``.
    """
    if freq is None:
        msg = "freq must be specified for bdate_range; use date_range instead"
        raise TypeError(msg)
    if isinstance(freq, str) and freq.startswith("C"):
        try:
            weekmask = weekmask or "Mon Tue Wed Thu Fri"
            freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask)
        except (KeyError, TypeError) as err:
            msg = f"invalid custom frequency string: {freq}"
            raise ValueError(msg) from err
    elif holidays or weekmask:
        msg = (
            "a custom frequency string is required when holidays or "
            f"weekmask are passed, got frequency {freq}"
        )
        raise ValueError(msg)
    return date_range(
        start=start,
        end=end,
        periods=periods,
        freq=freq,
        tz=tz,
        normalize=normalize,
        name=name,
        closed=closed,
        **kwargs,
    )
def _time_to_micros(time_obj: time) -> int:
seconds = time_obj.hour * 60 * 60 + 60 * time_obj.minute + time_obj.second
return 1_000_000 * seconds + time_obj.microsecond
| true | true |
f71fd14cad832cbba2759947ed66a936ca0786bb | 5,432 | py | Python | src/python/sim_doc.py | csiu/kick | 0ebc9166074b702fc8b5835685ad102957ab349c | [
"MIT"
] | null | null | null | src/python/sim_doc.py | csiu/kick | 0ebc9166074b702fc8b5835685ad102957ab349c | [
"MIT"
] | null | null | null | src/python/sim_doc.py | csiu/kick | 0ebc9166074b702fc8b5835685ad102957ab349c | [
"MIT"
] | null | null | null | import sys
sys.path.append("/Users/csiu/repo/kick/src/python")
import argparse
import custom
import pandas as pd
import numpy as np
import re
import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.utils.extmath import randomized_svd
from sklearn.metrics import pairwise_distances
usage = """
For finding similar documents
"""
def get_args(argv=None):
    """Parse command-line options for the similar-documents script.

    Parameters
    ----------
    argv : list of str, optional
        Argument strings to parse.  Defaults to None, in which case
        argparse reads ``sys.argv[1:]`` (the original behavior); accepting
        an explicit list keeps the parser unit-testable.

    Returns
    -------
    argparse.Namespace
        The parsed options.
    """
    parser = argparse.ArgumentParser(description=usage)
    parser.add_argument('-s', '--num_singular_values', default=100, type=int,
                        help="Number of singular values to use from SVD")
    parser.add_argument('-n', '--num_results', default=None, type=int,
                        help="Number of similar documents to print in the results")
    parser.add_argument('-w', '--term_weight', default="tfidf",
                        choices=["tfidf", "raw"],
                        help="How should terms in document be weighted? 'tfidf' or 'raw' counts")
    parser.add_argument('-d', '--distance', default="cosine",
                        help="Metric for calculating the distance between documents.")
    parser.add_argument('-i', '--document0_id', default=None, type=int,
                        help="Kickstarter ID of query document")
    parser.add_argument('-c', '--cache_dir', default=".",
                        help="Specify cache dir")
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args(argv)
    return(args)
def get_data():
    """
    Fetch Kickstarter project text from the database.

    Returns a DataFrame with 2 columns: "id" and "document", where
    "document" is produced by the SQL ``concat_ws(name, blurb)`` call.
    NOTE(review): ``concat_ws`` normally takes the separator as its first
    argument -- confirm that using ``name`` there is intended.
    """
    # Get data
    dk = custom.DatabaseKick()
    cur = dk.connect()
    cur.execute("SELECT id, concat_ws(name, blurb) FROM info")
    rows = cur.fetchall()

    df = pd.DataFrame(rows, columns=["id", "document"])

    dk.disconnect()

    return(df)
def preprocess_data(df):
    """
    Preprocess 'document' of dataframe by
    - to lowercase
    - remove nonletters
    - tokenize
    - remove stopwords
    - stem

    Dataframe will contain additional 'doc_processed' column
    and df['doc_processed'] will be returned
    """
    def join_output(func):
        """
        Decorator function to join list output to string
        """
        def func_wrapper(text, *arg, **karg):
            return ' '.join(func(text, *arg, **karg))
        return func_wrapper

    def doc_to_string(doc):
        """
        Replace None -> empty string, and
        text newlines (\n, \r) -> whitespace
        """
        # `is None` (identity) instead of `== None`: PEP 8 E711, and it
        # avoids surprising __eq__ overloads.  Behavior is unchanged.
        if doc is None:
            return("")
        else:
            return(re.sub("[\n\r]", "", doc))

    df['document'] = df['document'].apply(
        lambda x: doc_to_string(x))

    # custom.text_processing returns a token list; wrap it so each
    # document comes back as a single whitespace-joined string.
    text_processing = join_output(custom.text_processing)
    df['doc_processed'] = df['document'].apply(
        lambda x: text_processing(x, method="stem"))

    return(df['doc_processed'])
def compute_distance(U, i=None, sort=False, top_n=None, metric='euclidean'):
    """
    Compute distance of document U[i] with all documents in U

    Parameters
    ----------
    U : array-like, shape (n_documents, n_components)
        Document vectors (e.g. the U matrix from the SVD).
    i : int, optional
        Kickstarter ID of the query document.  When given, its row
        position is looked up in the module-level ``df`` ("id" column);
        when None, row 0 is used as the query.
        NOTE(review): this relies on the global ``df`` defined in the
        __main__ block -- confirm before reusing the function elsewhere.
    sort : bool
        If True, sort the result by ascending distance.
    top_n : int, optional
        If given, keep only the ``top_n`` closest documents.
    metric : str
        Distance metric name understood by sklearn's pairwise_distances.

    Returns
    -------
    pandas.DataFrame
        Single "dist" column, indexed by document row position.
    """
    # `is not None` rather than `!= None` (PEP 8 E711); behavior unchanged,
    # including i == 0 which is a valid ID and must not fall to the default.
    if i is not None:
        index_document0 = df[df["id"] == i].index.tolist()
    else:
        index_document0 = 0

    document0 = np.asmatrix(U[index_document0])
    dist = pairwise_distances(document0, U, metric=metric)

    df_dist = pd.DataFrame(np.transpose(dist), columns=["dist"])
    if sort:
        df_dist.sort_values(by="dist", inplace=True)

    if top_n is not None:
        assert type(top_n) is int
        df_dist = df_dist.head(top_n)

    return(df_dist)
# Script entry point: parse options, build (or load cached) preprocessed
# documents, weight terms, reduce with truncated SVD, then print the
# documents closest to the query document.
if __name__ == '__main__':
    args = get_args()
    num_singular_values = args.num_singular_values
    document0_id = args.document0_id
    num_results = args.num_results
    cache_dir = args.cache_dir
    verbose = args.verbose
    term_weight = args.term_weight
    distance_metric = args.distance

    # Preprocessing is expensive, so the processed frame is pickled to disk
    # and reused on subsequent runs.
    preprocess_file = os.path.join(os.path.abspath(cache_dir),
                                   "preprocessed.pkl")

    msg = "# Getting and preprocessing data..."
    if os.path.isfile(preprocess_file):
        if verbose: print(msg, "from cache...")
        df = pd.read_pickle(preprocess_file)
    else:
        if verbose: print(msg)
        df = get_data()
        _ = preprocess_data(df)
        df.to_pickle(preprocess_file)

    # Term weighting: raw counts or TF-IDF (default).
    if term_weight == "raw":
        if verbose: print("# Making count matrix...")
        cv = CountVectorizer()
        X = cv.fit_transform(df['doc_processed'])
    else:
        if verbose: print("# Making TF-IDF matrix...")
        vectorizer = TfidfVectorizer()
        X = vectorizer.fit_transform(df['doc_processed'])

    if verbose: print("# Computing SVD for %s singular values..." %
                      num_singular_values)
    U, s, Vh = randomized_svd(X, n_components=num_singular_values,
                              n_iter=5, random_state=5)

    if verbose: print("# Computing distances (%s)..." % distance_metric)
    top_n = compute_distance(U, i=document0_id,
                             sort=True, top_n=num_results,
                             metric=distance_metric)

    if verbose: print("# Printing results...")
    results = []
    counter = 0
    for index, row in df.iloc[top_n.index].iterrows():
        row["dist"] = top_n.iloc[counter]["dist"]
        results.append(row)
        counter += 1

        print('>> %s | %s' % (row['id'], row['doc_processed']),
              row['document'], "\n", sep="\n")
| 30.01105 | 97 | 0.60475 | import sys
sys.path.append("/Users/csiu/repo/kick/src/python")
import argparse
import custom
import pandas as pd
import numpy as np
import re
import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.utils.extmath import randomized_svd
from sklearn.metrics import pairwise_distances
usage = """
For finding similar documents
"""
def get_args():
    """Parse command-line options (from sys.argv) and return the Namespace."""
    parser = argparse.ArgumentParser(description=usage)
    parser.add_argument('-s', '--num_singular_values', default=100, type=int,
                        help="Number of singular values to use from SVD")
    parser.add_argument('-n', '--num_results', default=None, type=int,
                        help="Number of similar documents to print in the results")
    parser.add_argument('-w', '--term_weight', default="tfidf",
                        choices=["tfidf", "raw"],
                        help="How should terms in document be weighted? 'tfidf' or 'raw' counts")
    parser.add_argument('-d', '--distance', default="cosine",
                        help="Metric for calculating the distance between documents.")
    parser.add_argument('-i', '--document0_id', default=None, type=int,
                        help="Kickstarter ID of query document")
    parser.add_argument('-c', '--cache_dir', default=".",
                        help="Specify cache dir")
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()
    return(args)
def get_data():
    """
    Fetch Kickstarter rows from the database; returns a DataFrame with
    "id" and "document" (name/blurb combined via SQL ``concat_ws``).
    NOTE(review): ``concat_ws`` normally takes the separator first --
    confirm passing ``name`` there is intended.
    """
    dk = custom.DatabaseKick()
    cur = dk.connect()
    cur.execute("SELECT id, concat_ws(name, blurb) FROM info")
    rows = cur.fetchall()
    df = pd.DataFrame(rows, columns=["id", "document"])
    dk.disconnect()
    return(df)
def preprocess_data(df):
    """
    Clean and stem the 'document' column; adds a 'doc_processed' column
    to ``df`` (mutated in place) and returns it.
    """
    def join_output(func):
        # Decorator: join a token-list-returning function's output into
        # a single whitespace-separated string.
        def func_wrapper(text, *arg, **karg):
            return ' '.join(func(text, *arg, **karg))
        return func_wrapper
    def doc_to_string(doc):
        # None -> "", and strip newline characters from the text.
        # NOTE(review): `doc == None` should be `doc is None` (PEP 8 E711).
        if doc == None:
            return("")
        else:
            return(re.sub("[\n\r]", "", doc))
    df['document'] = df['document'].apply(
        lambda x: doc_to_string(x))
    text_processing = join_output(custom.text_processing)
    df['doc_processed'] = df['document'].apply(
        lambda x: text_processing(x, method="stem"))
    return(df['doc_processed'])
def compute_distance(U, i=None, sort=False, top_n=None, metric='euclidean'):
    """
    Distance from document U[i] (looked up by Kickstarter id ``i`` in the
    module-level ``df``; row 0 when ``i`` is None) to every document in U.
    Returns a single-column "dist" DataFrame, optionally sorted and
    truncated to ``top_n`` rows.
    NOTE(review): relies on the global ``df`` from the __main__ block, and
    `!= None` should be `is not None` (PEP 8 E711).
    """
    if i != None:
        index_document0 = df[df["id"] == i].index.tolist()
    else:
        index_document0 = 0
    document0 = np.asmatrix(U[index_document0])
    dist = pairwise_distances(document0, U, metric=metric)
    df_dist = pd.DataFrame(np.transpose(dist), columns=["dist"])
    if sort:
        df_dist.sort_values(by="dist", inplace=True)
    if top_n != None:
        assert type(top_n) is int
        df_dist = df_dist.head(top_n)
    return(df_dist)
# Script entry point: parse options, build (or load cached) preprocessed
# documents, weight terms, reduce with truncated SVD, and print the
# documents closest to the query document.
if __name__ == '__main__':
    args = get_args()
    num_singular_values = args.num_singular_values
    document0_id = args.document0_id
    num_results = args.num_results
    cache_dir = args.cache_dir
    verbose = args.verbose
    term_weight = args.term_weight
    distance_metric = args.distance
    # Cache the preprocessed frame so repeat runs skip the DB + NLP work.
    preprocess_file = os.path.join(os.path.abspath(cache_dir),
                                   "preprocessed.pkl")
    msg = "# Getting and preprocessing data..."
    if os.path.isfile(preprocess_file):
        if verbose: print(msg, "from cache...")
        df = pd.read_pickle(preprocess_file)
    else:
        if verbose: print(msg)
        df = get_data()
        _ = preprocess_data(df)
        df.to_pickle(preprocess_file)
    # Term weighting: raw counts or TF-IDF (default).
    if term_weight == "raw":
        if verbose: print("# Making count matrix...")
        cv = CountVectorizer()
        X = cv.fit_transform(df['doc_processed'])
    else:
        if verbose: print("# Making TF-IDF matrix...")
        vectorizer = TfidfVectorizer()
        X = vectorizer.fit_transform(df['doc_processed'])
    if verbose: print("# Computing SVD for %s singular values..." %
                      num_singular_values)
    U, s, Vh = randomized_svd(X, n_components=num_singular_values,
                              n_iter=5, random_state=5)
    if verbose: print("# Computing distances (%s)..." % distance_metric)
    top_n = compute_distance(U, i=document0_id,
                             sort=True, top_n=num_results,
                             metric=distance_metric)
    if verbose: print("# Printing results...")
    results = []
    counter = 0
    for index, row in df.iloc[top_n.index].iterrows():
        row["dist"] = top_n.iloc[counter]["dist"]
        results.append(row)
        counter += 1
        print('>> %s | %s' % (row['id'], row['doc_processed']),
              row['document'], "\n", sep="\n")
| true | true |
f71fd21c199e5a31cb8e95fea4d6ad447b4eb6cf | 2,082 | py | Python | adsputils/tests/test_init.py | adsabs/ADSPipelineUtils | eb8cc988f57c19a256ebc8802cc2a812d5279d12 | [
"MIT"
] | null | null | null | adsputils/tests/test_init.py | adsabs/ADSPipelineUtils | eb8cc988f57c19a256ebc8802cc2a812d5279d12 | [
"MIT"
] | 36 | 2017-06-23T20:29:22.000Z | 2020-03-18T15:04:27.000Z | adsputils/tests/test_init.py | adsabs/ADSPipelineUtils | eb8cc988f57c19a256ebc8802cc2a812d5279d12 | [
"MIT"
] | 12 | 2017-06-21T18:10:57.000Z | 2021-11-01T19:13:32.000Z | # -*- coding: utf-8 -*-
import adsputils
import unittest
import os
import json
import time
from inspect import currentframe, getframeinfo
from adsputils.exceptions import UnicodeHandlerError
def _read_file(fpath):
with open(fpath, 'r') as fi:
return fi.read()
class TestInit(unittest.TestCase):
    """Tests for adsputils' logging setup and unicode helper functions."""

    def test_logging(self):
        """setup_logging should emit JSON lines into logs/<name>.log."""
        logdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../logs'))
        foo_log = logdir + '/foo.bar.log'
        # Start from a clean log file so assertions see only this test's output.
        if os.path.exists(foo_log):
            os.remove(foo_log)
        logger = adsputils.setup_logging('foo.bar')
        logger.warning('first')
        frameinfo = getframeinfo(currentframe())
        #print foo_log
        self.assertTrue(os.path.exists(foo_log))
        c = _read_file(foo_log)

        j = json.loads(c)
        self.assertEqual(j['message'], 'first')
        self.assertTrue('hostname' in j)

        # verify warning has filename and linenumber
        self.assertEqual(os.path.basename(frameinfo.filename), j['filename'])
        self.assertEqual(j['lineno'], frameinfo.lineno - 1)

        time.sleep(0.01)
        # now multiline message
        logger.warning(u'second\nthird')
        logger.warning('last')
        c = _read_file(foo_log)

        found = False
        msecs = False
        # Each log line must parse as JSON; the multi-line message must be
        # preserved and at least one timestamp must carry microseconds.
        for x in c.strip().split('\n'):
            j = json.loads(x)
            self.assertTrue(j)
            if j['message'] == u'second\nthird':
                found = True
            t = adsputils.get_date(j['asctime'])
            if t.microsecond > 0:
                msecs = True
        self.assertTrue(found)
        self.assertTrue(msecs)

    def test_u2asc(self):
        """u2asc should transliterate accented text and reject bytes input."""
        input1 = 'benìtez, n'
        input2 = u'izzet, sakallı'

        output1 = adsputils.u2asc(input1)
        output2 = adsputils.u2asc(input2)

        self.assertEqual(output1,'benitez, n')
        self.assertEqual(output2,u'izzet, sakalli')

        # Non-unicode (utf-16 bytes) input should raise the custom error.
        input3 = input2.encode('utf16')
        self.assertRaises(UnicodeHandlerError, adsputils.u2asc, input3)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| 28.135135 | 87 | 0.604707 |
import adsputils
import unittest
import os
import json
import time
from inspect import currentframe, getframeinfo
from adsputils.exceptions import UnicodeHandlerError
def _read_file(fpath):
with open(fpath, 'r') as fi:
return fi.read()
class TestInit(unittest.TestCase):
    """Tests for adsputils' logging setup and unicode helper functions."""

    def test_logging(self):
        """setup_logging should emit JSON lines into logs/<name>.log."""
        logdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../logs'))
        foo_log = logdir + '/foo.bar.log'
        # Start from a clean log file so assertions see only this test's output.
        if os.path.exists(foo_log):
            os.remove(foo_log)
        logger = adsputils.setup_logging('foo.bar')
        logger.warning('first')
        frameinfo = getframeinfo(currentframe())
        self.assertTrue(os.path.exists(foo_log))
        c = _read_file(foo_log)
        j = json.loads(c)
        self.assertEqual(j['message'], 'first')
        self.assertTrue('hostname' in j)
        # The warning record should carry this file's name and line number.
        self.assertEqual(os.path.basename(frameinfo.filename), j['filename'])
        self.assertEqual(j['lineno'], frameinfo.lineno - 1)
        time.sleep(0.01)
        # Multi-line message followed by a final record.
        logger.warning(u'second\nthird')
        logger.warning('last')
        c = _read_file(foo_log)
        found = False
        msecs = False
        # Each log line must parse as JSON; the multi-line message must be
        # preserved and at least one timestamp must carry microseconds.
        for x in c.strip().split('\n'):
            j = json.loads(x)
            self.assertTrue(j)
            if j['message'] == u'second\nthird':
                found = True
            t = adsputils.get_date(j['asctime'])
            if t.microsecond > 0:
                msecs = True
        self.assertTrue(found)
        self.assertTrue(msecs)

    def test_u2asc(self):
        """u2asc should transliterate accented text and reject bytes input."""
        input1 = 'benìtez, n'
        input2 = u'izzet, sakallı'
        output1 = adsputils.u2asc(input1)
        output2 = adsputils.u2asc(input2)
        self.assertEqual(output1,'benitez, n')
        self.assertEqual(output2,u'izzet, sakalli')
        # Non-unicode (utf-16 bytes) input should raise the custom error.
        input3 = input2.encode('utf16')
        self.assertRaises(UnicodeHandlerError, adsputils.u2asc, input3)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| true | true |
f71fd223ff855bd602d59319796b96fc483982ca | 20,550 | py | Python | log_complete_bcl2/model_76.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_complete_bcl2/model_76.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_complete_bcl2/model_76.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
# Instantiate the model; PySB's SelfExporter injects every component declared
# below (Monomer/Parameter/Observable/Rule/Initial) into this module's
# namespace, which is why later lines can reference e.g. `Ligand` directly.
Model()
# --- Monomers: molecular species with their binding/state sites -----------
# Naming convention (inferred from suffixes; confirm against the generator):
# *U = uncleaved, *A = active, *C = cytosolic, *M = mitochondrial,
# *pro = pro-form, *ub = ubiquitinated.
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
# BaxA carries two self-binding sites (BaxA_1/BaxA_2) used below to chain
# activated Bax into tetrameric pores.
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
# --- Kinetic rate constants -----------------------------------------------
# All rates are placeholders of 1.0 -- presumably to be replaced/fit during
# parameter inference (TODO: confirm against the calibration pipeline).
# Suffix convention: 2kf = bimolecular forward, 1kr = unimolecular reverse,
# 1kf = unimolecular forward, 1kc = catalytic step.
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
# --- Initial copy numbers (molecules per cell) ----------------------------
# Species absent at t=0 are listed explicitly with 0.0 so every Initial
# below has a matching parameter.
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 19000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 328000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
# --- Observables: one per monomer, counting all states/complexes ----------
# Each pattern is a bare Monomer() call, so the observable sums every
# species containing that monomer regardless of binding state.
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
# --- Rules ----------------------------------------------------------------
# `|` denotes a reversible rule (forward rate, reverse rate); `>>` an
# irreversible one. Catalysis is split into a reversible binding step
# (catalysis_0_*) followed by an irreversible product-release step
# (catalysis_1_*).
# Receptor module: ligand binds receptor, receptor recruits Fadd, which
# converts pro-caspase-8 to active C8A.
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
# C8A truncates Bid (BidU -> BidT).
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
# Apoptosome assembly and its inhibition by XIAP/Smac.
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
# XIAP ubiquitinates active caspase-3; C3A cleaves PARP.
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
# Mitochondrial module: Bid translocation, Bax activation, Bcl2 inhibition.
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
# Stepwise assembly of a 4-mer BaxA pore (dimer -> trimer -> tetramer).
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
# Pore transport: the tetrameric pore releases Smac and cytochrome c
# from the mitochondrion into the cytosol.
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
# Feedback loop: C8A -> C3A -> C6A -> more C8A.
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
# --- Initial conditions ---------------------------------------------------
# One Initial per monomer, fully unbound, using the *_0 copy-number
# parameters declared above (several are 0.0 at t=0).
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| 95.138889 | 798 | 0.804136 |
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 19000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 328000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| true | true |
f71fd3f3075081a6f7b219e41391ee28001ad25c | 2,073 | py | Python | codes/poop/fizzbuzz.py | cassiobotaro/go_slides | 1e9bedff22acc78a39cc054c0de432ef80c2df7b | [
"MIT"
] | 2 | 2019-02-27T14:45:39.000Z | 2021-09-27T03:46:20.000Z | codes/poop/fizzbuzz.py | cassiobotaro/go_slides | 1e9bedff22acc78a39cc054c0de432ef80c2df7b | [
"MIT"
] | null | null | null | codes/poop/fizzbuzz.py | cassiobotaro/go_slides | 1e9bedff22acc78a39cc054c0de432ef80c2df7b | [
"MIT"
] | 2 | 2017-04-13T14:42:31.000Z | 2021-09-27T03:46:22.000Z | # smalltalk infected fizzbuzz version
from forbiddenfruit import curse
from collections import deque
def if_true(self, block):
# simulate blocks using functions
self and block()
# close circuit when object is truthy
return self
def if_false(self, block):
# simulate blocks using functions
not self and block()
# close circuit when object is falsy
return self
def println(self):
"""Prints the values to a stream, or to sys.stdout by default.
>>> "Fizz".print()
Fizz
>>> "FizzBuzz".print()
FizzBuzz
"""
print(self)
def do(self, block):
"""Evaluate the receiver for each element in aBlock.
>>> range(1, 11).do(lambda number: number.print())
"""
deque(map(block, self), maxlen=0)
return self
curse(bool, "if_true", if_true)
curse(bool, "if_false", if_false)
curse(str, "print", println)
curse(int, "print", println)
curse(range, "do", do)
# lambdas are used to simulate blocks
"""Summary
We add a do methd on range objects that evaluates a block
for each element on interval.
This block will receive a number, that evaluated
in the expression "number % 15 == 0", This will result in a boolean object,
to which we will send two messages,
one with a block to be evaluated if the expression is true and
another for if it is false.
If true, we will send a print message to a "FizzBuzz" object.
If it is false, we will use the same numeric object
to evaluate the expression number% 5 == 0.
And so we repeat the cycle, until at last a message
is sent to the number printed.
"""
range(1, 101).do(
lambda number: (number % 15 == 0)
.if_true("FizzBuzz".print)
.if_false(
lambda: (number % 5 == 0)
.if_true("Buzz".print)
.if_false(
lambda: (number % 3 == 0)
.if_true("Fizz".print)
.if_false(number.print)
)
)
)
"""
Notes:
- A message is sent to an object for printing
- Lambdas are used to simulate a block
- Add method do for a range, evaluating a block on each number on interval
- Objects and messages
"""
| 25.280488 | 75 | 0.670043 |
from forbiddenfruit import curse
from collections import deque
def if_true(self, block):
self and block()
return self
def if_false(self, block):
not self and block()
return self
def println(self):
print(self)
def do(self, block):
deque(map(block, self), maxlen=0)
return self
curse(bool, "if_true", if_true)
curse(bool, "if_false", if_false)
curse(str, "print", println)
curse(int, "print", println)
curse(range, "do", do)
range(1, 101).do(
lambda number: (number % 15 == 0)
.if_true("FizzBuzz".print)
.if_false(
lambda: (number % 5 == 0)
.if_true("Buzz".print)
.if_false(
lambda: (number % 3 == 0)
.if_true("Fizz".print)
.if_false(number.print)
)
)
)
| true | true |
f71fd4a1f731db04f57e93f442976ca80b5b4b5d | 8,349 | py | Python | s2e_env/tui/tui.py | michaelbrownuc/s2e-env | 4bd6a45bf1ec9456ed5acf5047b6aac3fcd19683 | [
"BSD-3-Clause"
] | null | null | null | s2e_env/tui/tui.py | michaelbrownuc/s2e-env | 4bd6a45bf1ec9456ed5acf5047b6aac3fcd19683 | [
"BSD-3-Clause"
] | null | null | null | s2e_env/tui/tui.py | michaelbrownuc/s2e-env | 4bd6a45bf1ec9456ed5acf5047b6aac3fcd19683 | [
"BSD-3-Clause"
] | null | null | null | """
Copyright (c) 2017 Cyberhaven
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import curses
import time
_s_screen = None
# TODO: this module requires clean up
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
# pylint: disable=no-self-use
class Form:
def __init__(self, parent, x, y, w=None, h=None):
self._children = []
self._parent = parent
self._x = x
self._y = y
self._h = h
self._w = w
self._vcenter, self._hcenter = False, False
self.set_size(w, h)
ax, ay = self.get_screen_coords(0, 0)
self._wnd = curses.newwin(self._h, self._w, ay, ax)
if parent is not None:
parent._children.append(self)
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def width(self):
return self._w
@width.setter
def width(self, value):
self._w = value
@property
def height(self):
return self._h
@height.setter
def height(self, value):
self._h = value
@property
def window(self):
return self._wnd
def get_screen_coords(self, x, y):
form = self
ax, ay = x, y
while form is not None:
ax, ay = ax + form.x, ay + form.y
form = form.get_parent()
return ax, ay
def set_pos(self, x, y):
self._x, self._y = x, y
def set_centering(self, hcenter, vcenter):
if self._parent is None:
raise Exception('Form must have a parent')
self._vcenter = vcenter
self._hcenter = hcenter
def set_size(self, w=None, h=None):
"""
Width and Height can be set to None to expand the window
to the size of the parent container.
"""
if w is None or h is None:
form = self.get_parent()
if form is None:
mh, mw = _s_screen.getmaxyx()
else:
mh, mw = form.height, form.width
if w is None:
w = mw
if h is None:
h = mh
self._w, self._h = w, h
def get_parent(self):
return self._parent
def get_draw_coords(self, ax, ay):
x, y = self.x, self.y
# Center the form in the parent window if needed
if self._hcenter:
x = (self._parent._w - self._w) // 2
if self._vcenter:
y = (self._parent._h - self._h) // 2
x += ax
y += ay
return x, y
def draw(self, ax, ay):
x, y = self.get_draw_coords(ax, ay)
# TODO: clipping
self.do_draw(x, y)
for child in self._children:
child.draw(x, y)
def do_draw(self, ax, ay):
self._wnd.mvwin(ay, ax)
self._wnd.resize(self._h, self._w)
self._wnd.border()
self._wnd.refresh()
class Label(Form):
def __init__(self, parent, x, y, text):
super().__init__(parent, x, y, len(text) + 2, 1)
self._text = f' {text}'
def do_draw(self, ax, ay):
self._wnd.mvwin(ay, ax)
self._wnd.resize(self._h, self._w)
self._wnd.addstr(0, 0, self._text)
self._wnd.refresh()
class Table(Form):
def __init__(self, parent, x, y, data, legend, layout):
self._data = data
self._legend = legend
self._layout = layout
self.set_data(data, legend, layout)
w, h = self._get_dimensions()
super().__init__(parent, x, y, w, h)
def _get_dimensions(self):
lw, dw, h = self._compute_data_size()
w, h = self._compute_bounding_box(lw, dw, h)
return w, h
def _update_dimensions(self):
w, h = self._get_dimensions()
self.width = w
self.height = h
def set_data(self, data, legend, layout):
self._data = data
self._legend = legend
self._layout = layout
self._update_dimensions()
def _compute_bounding_box(self, lw, dw, h):
return lw + dw + 5, h
def _compute_data_size(self):
max_legend_width = 0
max_data_width = 0
max_height = len(self._layout)
for k, v in self._data.items():
l = self._legend[k]
max_legend_width = max(max_legend_width, len(l))
max_data_width = max(max_data_width, len(str(v)))
return max_legend_width, max_data_width, max_height
def do_draw(self, ax, ay):
y = 0
lw, dw, h = self._compute_data_size()
w, h = self._compute_bounding_box(lw, dw, h)
self._wnd.clear()
self._wnd.resize(h, w)
self._wnd.mvwin(ay, ax)
# self._wnd.border()
for k in self._layout:
l = self._legend[k]
if not k in self._data:
continue
v = self._data[k]
self._wnd.addstr(y, 0, l + ':')
self._wnd.addstr(y, lw + 3, str(v))
y += 1
self._wnd.refresh()
class Tui:
def __init__(self):
self._updated = True
self._data = {}
self._legend = {}
self._layout = {}
self._desktop = None
self._stats = None
self._title = None
self._exitmsg = None
self._table = None
def _create_desktop(self):
global _s_screen
_s_screen = curses.initscr()
curses.noecho()
curses.curs_set(0)
curses.start_color()
self._desktop = Form(None, 0, 0)
self._stats = Form(self._desktop, 0, 0, 70, 20)
self._stats.set_centering(True, True)
self._title = Label(self._stats, 0, 0, 'S2E')
self._title.set_centering(True, False)
self._exitmsg = Label(self._stats, 0, 17, 'Press q to exit')
self._exitmsg.set_centering(True, False)
self._table = Table(self._stats, 2, 2, self._data, self._legend,
self._layout)
self._table.set_centering(True, True)
def _cleanup(self):
curses.nocbreak()
_s_screen.keypad(0)
curses.echo()
curses.endwin()
def _redraw(self):
self._desktop.window.clear()
self._desktop.set_size()
self._desktop.draw(0, 0)
def set_content(self, data, legend, layout):
self._data = data
self._legend = legend
self._layout = layout
self._updated = True
self._table.set_data(data, legend, layout)
def _run(self, callback):
self._create_desktop()
if not callback(self):
return
self._redraw()
self._desktop.window.nodelay(True)
while True:
c = self._desktop.window.getch()
if c == curses.ERR:
if not callback(self):
return
time.sleep(1)
elif c == ord('q'):
break
elif c == curses.KEY_RESIZE:
self._updated = True
if self._updated:
self._redraw()
self._updated = False
def run(self, callback):
try:
self._run(callback)
except Exception:
self._cleanup()
# Print message only after screen is restored, otherwise we might
# get unreadable garbage.
raise
finally:
self._cleanup()
| 26.674121 | 78 | 0.573602 |
import curses
import time
_s_screen = None
class Form:
def __init__(self, parent, x, y, w=None, h=None):
self._children = []
self._parent = parent
self._x = x
self._y = y
self._h = h
self._w = w
self._vcenter, self._hcenter = False, False
self.set_size(w, h)
ax, ay = self.get_screen_coords(0, 0)
self._wnd = curses.newwin(self._h, self._w, ay, ax)
if parent is not None:
parent._children.append(self)
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def width(self):
return self._w
@width.setter
def width(self, value):
self._w = value
@property
def height(self):
return self._h
@height.setter
def height(self, value):
self._h = value
@property
def window(self):
return self._wnd
def get_screen_coords(self, x, y):
form = self
ax, ay = x, y
while form is not None:
ax, ay = ax + form.x, ay + form.y
form = form.get_parent()
return ax, ay
def set_pos(self, x, y):
self._x, self._y = x, y
def set_centering(self, hcenter, vcenter):
if self._parent is None:
raise Exception('Form must have a parent')
self._vcenter = vcenter
self._hcenter = hcenter
def set_size(self, w=None, h=None):
if w is None or h is None:
form = self.get_parent()
if form is None:
mh, mw = _s_screen.getmaxyx()
else:
mh, mw = form.height, form.width
if w is None:
w = mw
if h is None:
h = mh
self._w, self._h = w, h
def get_parent(self):
return self._parent
def get_draw_coords(self, ax, ay):
x, y = self.x, self.y
if self._hcenter:
x = (self._parent._w - self._w) // 2
if self._vcenter:
y = (self._parent._h - self._h) // 2
x += ax
y += ay
return x, y
def draw(self, ax, ay):
x, y = self.get_draw_coords(ax, ay)
self.do_draw(x, y)
for child in self._children:
child.draw(x, y)
def do_draw(self, ax, ay):
self._wnd.mvwin(ay, ax)
self._wnd.resize(self._h, self._w)
self._wnd.border()
self._wnd.refresh()
class Label(Form):
def __init__(self, parent, x, y, text):
super().__init__(parent, x, y, len(text) + 2, 1)
self._text = f' {text}'
def do_draw(self, ax, ay):
self._wnd.mvwin(ay, ax)
self._wnd.resize(self._h, self._w)
self._wnd.addstr(0, 0, self._text)
self._wnd.refresh()
class Table(Form):
def __init__(self, parent, x, y, data, legend, layout):
self._data = data
self._legend = legend
self._layout = layout
self.set_data(data, legend, layout)
w, h = self._get_dimensions()
super().__init__(parent, x, y, w, h)
def _get_dimensions(self):
lw, dw, h = self._compute_data_size()
w, h = self._compute_bounding_box(lw, dw, h)
return w, h
def _update_dimensions(self):
w, h = self._get_dimensions()
self.width = w
self.height = h
def set_data(self, data, legend, layout):
self._data = data
self._legend = legend
self._layout = layout
self._update_dimensions()
def _compute_bounding_box(self, lw, dw, h):
return lw + dw + 5, h
def _compute_data_size(self):
max_legend_width = 0
max_data_width = 0
max_height = len(self._layout)
for k, v in self._data.items():
l = self._legend[k]
max_legend_width = max(max_legend_width, len(l))
max_data_width = max(max_data_width, len(str(v)))
return max_legend_width, max_data_width, max_height
def do_draw(self, ax, ay):
y = 0
lw, dw, h = self._compute_data_size()
w, h = self._compute_bounding_box(lw, dw, h)
self._wnd.clear()
self._wnd.resize(h, w)
self._wnd.mvwin(ay, ax)
for k in self._layout:
l = self._legend[k]
if not k in self._data:
continue
v = self._data[k]
self._wnd.addstr(y, 0, l + ':')
self._wnd.addstr(y, lw + 3, str(v))
y += 1
self._wnd.refresh()
class Tui:
def __init__(self):
self._updated = True
self._data = {}
self._legend = {}
self._layout = {}
self._desktop = None
self._stats = None
self._title = None
self._exitmsg = None
self._table = None
def _create_desktop(self):
global _s_screen
_s_screen = curses.initscr()
curses.noecho()
curses.curs_set(0)
curses.start_color()
self._desktop = Form(None, 0, 0)
self._stats = Form(self._desktop, 0, 0, 70, 20)
self._stats.set_centering(True, True)
self._title = Label(self._stats, 0, 0, 'S2E')
self._title.set_centering(True, False)
self._exitmsg = Label(self._stats, 0, 17, 'Press q to exit')
self._exitmsg.set_centering(True, False)
self._table = Table(self._stats, 2, 2, self._data, self._legend,
self._layout)
self._table.set_centering(True, True)
def _cleanup(self):
curses.nocbreak()
_s_screen.keypad(0)
curses.echo()
curses.endwin()
def _redraw(self):
self._desktop.window.clear()
self._desktop.set_size()
self._desktop.draw(0, 0)
def set_content(self, data, legend, layout):
self._data = data
self._legend = legend
self._layout = layout
self._updated = True
self._table.set_data(data, legend, layout)
def _run(self, callback):
self._create_desktop()
if not callback(self):
return
self._redraw()
self._desktop.window.nodelay(True)
while True:
c = self._desktop.window.getch()
if c == curses.ERR:
if not callback(self):
return
time.sleep(1)
elif c == ord('q'):
break
elif c == curses.KEY_RESIZE:
self._updated = True
if self._updated:
self._redraw()
self._updated = False
def run(self, callback):
try:
self._run(callback)
except Exception:
self._cleanup()
raise
finally:
self._cleanup()
| true | true |
f71fd745dc747e5cd621bba11088fb4afbc2acb3 | 1,092 | py | Python | jmatcher/users/migrations/0004_auto_20170303_2141.py | jamesaud/se1-group4 | 5280b13dff33e72ce717318a8dd78a06cd6effb3 | [
"MIT"
] | 1 | 2021-09-09T15:43:09.000Z | 2021-09-09T15:43:09.000Z | jmatcher/users/migrations/0004_auto_20170303_2141.py | jamesaud/se1-group4 | 5280b13dff33e72ce717318a8dd78a06cd6effb3 | [
"MIT"
] | null | null | null | jmatcher/users/migrations/0004_auto_20170303_2141.py | jamesaud/se1-group4 | 5280b13dff33e72ce717318a8dd78a06cd6effb3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-03 21:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_remove_student_student_name'),
]
operations = [
migrations.CreateModel(
name='Skill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('skill', models.CharField(choices=[('Django', 'Django'), ('Python', 'Python'), ('Java', 'Java'), ('Ruby', 'Ruby')], max_length=255)),
],
),
migrations.RemoveField(
model_name='student',
name='user',
),
migrations.AddField(
model_name='user',
name='connections',
field=models.ManyToManyField(related_name='_user_connections_+', to=settings.AUTH_USER_MODEL),
),
migrations.DeleteModel(
name='Student',
),
]
| 30.333333 | 150 | 0.57967 |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_remove_student_student_name'),
]
operations = [
migrations.CreateModel(
name='Skill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('skill', models.CharField(choices=[('Django', 'Django'), ('Python', 'Python'), ('Java', 'Java'), ('Ruby', 'Ruby')], max_length=255)),
],
),
migrations.RemoveField(
model_name='student',
name='user',
),
migrations.AddField(
model_name='user',
name='connections',
field=models.ManyToManyField(related_name='_user_connections_+', to=settings.AUTH_USER_MODEL),
),
migrations.DeleteModel(
name='Student',
),
]
| true | true |
f71fd8d00f312300adb54ea7c3dae8e0ed739a61 | 9,839 | py | Python | datasets/preprocess/mpi_inf_3dhp.py | virgile-hernicot/SPIN | 21871e3d333ef37866402ae21498b331aa771b2d | [
"BSD-3-Clause"
] | 555 | 2019-09-30T01:03:23.000Z | 2022-03-30T03:56:09.000Z | datasets/preprocess/mpi_inf_3dhp.py | virgile-hernicot/SPIN | 21871e3d333ef37866402ae21498b331aa771b2d | [
"BSD-3-Clause"
] | 110 | 2019-10-01T05:51:07.000Z | 2022-03-23T13:51:05.000Z | datasets/preprocess/mpi_inf_3dhp.py | virgile-hernicot/SPIN | 21871e3d333ef37866402ae21498b331aa771b2d | [
"BSD-3-Clause"
] | 158 | 2019-09-30T07:06:48.000Z | 2022-03-22T02:32:03.000Z | import os
import sys
import cv2
import glob
import h5py
import json
import numpy as np
import scipy.io as sio
import scipy.misc
from .read_openpose import read_openpose
def read_calibration(calib_file, vid_list):
    """Parse camera parameters for the requested MPI-INF-3DHP video ids.

    The calibration file stores one 7-line block per camera. Within each
    block, line 5 holds the flattened 4x4 intrinsic matrix and line 6 the
    flattened 4x4 extrinsic [R|T] matrix; the ``[11:-2]`` slice strips the
    textual field label and trailing characters around the numbers.

    Args:
        calib_file: path to the 'camera.calibration' text file.
        vid_list: iterable of camera/video indices to read.

    Returns:
        Tuple (Ks, Rs, Ts) of per-camera lists: 4x4 intrinsics, 3x3
        rotations and 3-vector translations (converted from mm to metres).
    """
    Ks, Rs, Ts = [], [], []
    # Use a context manager so the file handle is always closed
    # (the original left the file open for the process lifetime).
    with open(calib_file, 'r') as f:
        content = f.readlines()
    for vid_i in vid_list:
        K = np.array([float(s) for s in content[vid_i*7+5][11:-2].split()])
        K = np.reshape(K, (4, 4))
        RT = np.array([float(s) for s in content[vid_i*7+6][11:-2].split()])
        RT = np.reshape(RT, (4, 4))
        R = RT[:3, :3]
        # translations are stored in millimetres; convert to metres
        T = RT[:3, 3]/1000
        Ks.append(K)
        Rs.append(R)
        Ts.append(T)
    return Ks, Rs, Ts
def train_data(dataset_path, openpose_path, out_path, joints_idx, scaleFactor, extract_img=False, fits_3d=None):
    """Preprocess the MPI-INF-3DHP training set into 'mpi_inf_3dhp_train.npz'.

    Walks subjects S1-S8, sequences Seq1-Seq2 and camera views {0,1,2,4..8},
    optionally decoding frames from the .avi videos first, and stores
    per-image bounding-box center/scale, 2D keypoints, root-centered 3D
    joints and openpose detections. Frames with any of the 17 joints
    outside the image are skipped, and only every 10th surviving frame is
    kept to limit the dataset size.

    Args:
        dataset_path: root directory of the MPI-INF-3DHP release.
        openpose_path: root directory with precomputed openpose .json files.
        out_path: directory where 'mpi_inf_3dhp_train.npz' is written.
        joints_idx: indices in the 24-joint layout to fill with the 17 joints.
        scaleFactor: multiplier applied to the bbox size when computing scale.
        extract_img: if True, extract .jpg frames from the .avi videos first.
        fits_3d: optional path to an .npz file with SMPL fits
            (keys 'pose', 'shape', 'has_smpl') saved alongside the annotations.
    """
    # mapping from the 28-joint MPI-INF-3DHP layout to the 17-joint subset
    joints17_idx = [4, 18, 19, 20, 23, 24, 25, 3, 5, 6, 7, 9, 10, 11, 14, 15, 16]
    # all training images are 2048x2048 (used for the visibility check below)
    h, w = 2048, 2048
    # accumulators for the per-image annotations
    imgnames_, scales_, centers_ = [], [], []
    parts_, Ss_, openposes_ = [], [], []
    # training data: subjects S1-S8, sequences Seq1-Seq2; camera 3 is skipped
    user_list = range(1,9)
    seq_list = range(1,3)
    vid_list = list(range(3)) + list(range(4,9))
    # global frame counter used to keep only every 10th frame
    counter = 0
    for user_i in user_list:
        for seq_i in seq_list:
            seq_path = os.path.join(dataset_path,
                                    'S' + str(user_i),
                                    'Seq' + str(seq_i))
            # mat file with the 2D and 3D annotations
            annot_file = os.path.join(seq_path, 'annot.mat')
            annot2 = sio.loadmat(annot_file)['annot2']
            annot3 = sio.loadmat(annot_file)['annot3']
            # calibration file and camera parameters
            calib_file = os.path.join(seq_path, 'camera.calibration')
            Ks, Rs, Ts = read_calibration(calib_file, vid_list)
            for j, vid_i in enumerate(vid_list):
                # folder with the extracted frames for this camera view
                imgs_path = os.path.join(seq_path,
                                         'imageFrames',
                                         'video_' + str(vid_i))
                # extract frames from the video file if requested
                if extract_img:
                    # create the frame folder if it doesn't exist yet
                    if not os.path.isdir(imgs_path):
                        os.makedirs(imgs_path)
                    # video file for this camera view
                    vid_file = os.path.join(seq_path,
                                            'imageSequence',
                                            'video_' + str(vid_i) + '.avi')
                    vidcap = cv2.VideoCapture(vid_file)
                    # decode the video frame by frame
                    frame = 0
                    while 1:
                        # extract all frames until the stream ends
                        success, image = vidcap.read()
                        if not success:
                            break
                        frame += 1
                        # frames are named frame_000001.jpg, frame_000002.jpg, ...
                        imgname = os.path.join(imgs_path,
                                               'frame_%06d.jpg' % frame)
                        # save image to disk
                        cv2.imwrite(imgname, image)
                # per frame processing
                # NOTE(review): cam_aa (camera axis-angle) is computed but
                # never used in this function
                cam_aa = cv2.Rodrigues(Rs[j])[0].T[0]
                pattern = os.path.join(imgs_path, '*.jpg')
                img_list = glob.glob(pattern)
                for i, img_i in enumerate(img_list):
                    # for each image we store the relevant annotations
                    img_name = img_i.split('/')[-1]
                    # image path relative to the dataset root
                    img_view = os.path.join('S' + str(user_i),
                                            'Seq' + str(seq_i),
                                            'imageFrames',
                                            'video_' + str(vid_i),
                                            img_name)
                    # 2D joints in the 17-joint subset
                    joints = np.reshape(annot2[vid_i][0][i], (28, 2))[joints17_idx]
                    # 3D joints: millimetres -> metres, centered at the root
                    S17 = np.reshape(annot3[vid_i][0][i], (28, 3))/1000
                    S17 = S17[joints17_idx] - S17[4] # 4 is the root
                    # tight bounding box around the 2D joints
                    bbox = [min(joints[:,0]), min(joints[:,1]),
                            max(joints[:,0]), max(joints[:,1])]
                    center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]
                    scale = scaleFactor*max(bbox[2]-bbox[0], bbox[3]-bbox[1])/200
                    # check that all joints are visible inside the image
                    x_in = np.logical_and(joints[:, 0] < w, joints[:, 0] >= 0)
                    y_in = np.logical_and(joints[:, 1] < h, joints[:, 1] >= 0)
                    ok_pts = np.logical_and(x_in, y_in)
                    if np.sum(ok_pts) < len(joints_idx):
                        continue
                    # 2D keypoints in the 24-joint layout, with confidence flag
                    part = np.zeros([24,3])
                    part[joints_idx] = np.hstack([joints, np.ones([17,1])])
                    json_file = os.path.join(openpose_path, 'mpi_inf_3dhp',
                        img_view.replace('.jpg', '_keypoints.json'))
                    openpose = read_openpose(json_file, part, 'mpi_inf_3dhp')
                    # 3D joints in the 24-joint layout, with validity flag
                    S = np.zeros([24,4])
                    S[joints_idx] = np.hstack([S17, np.ones([17,1])])
                    # because of the dataset size, we only keep every 10th frame
                    counter += 1
                    if counter % 10 != 1:
                        continue
                    # store the data for this frame
                    imgnames_.append(img_view)
                    centers_.append(center)
                    scales_.append(scale)
                    parts_.append(part)
                    Ss_.append(S)
                    openposes_.append(openpose)
    # store the data struct, optionally with the precomputed SMPL fits
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path, 'mpi_inf_3dhp_train.npz')
    if fits_3d is not None:
        fits_3d = np.load(fits_3d)
        np.savez(out_file, imgname=imgnames_,
                           center=centers_,
                           scale=scales_,
                           part=parts_,
                           pose=fits_3d['pose'],
                           shape=fits_3d['shape'],
                           has_smpl=fits_3d['has_smpl'],
                           S=Ss_,
                           openpose=openposes_)
    else:
        np.savez(out_file, imgname=imgnames_,
                           center=centers_,
                           scale=scales_,
                           part=parts_,
                           S=Ss_,
                           openpose=openposes_)
def test_data(dataset_path, out_path, joints_idx, scaleFactor):
    '''Preprocess the MPI-INF-3DHP *test* split into a single .npz archive.

    For every valid frame of test subjects TS1..TS6 this collects the image
    path, a square person bounding box (center + scale), 2D keypoints and
    root-centered 3D joints, then writes them to
    ``<out_path>/mpi_inf_3dhp_test.npz``.

    Args:
        dataset_path: Root directory containing ``mpi_inf_3dhp_test_set``.
        out_path: Directory where the output .npz is written (created if
            missing).
        joints_idx: Indices into the 24-joint target layout that the 17
            dataset joints map onto.
        scaleFactor: Padding factor applied to the tight bounding box.
    '''
    # Mapping from the dataset's native joint order to the canonical
    # 17-joint order used downstream.
    joints17_idx = [14, 11, 12, 13, 8, 9, 10, 15, 1, 16, 0, 5, 6, 7, 2, 3, 4]
    imgnames_, scales_, centers_, parts_, Ss_ = [], [], [], [], []
    # test-set subjects TS1..TS6 (range end is exclusive)
    user_list = range(1,7)
    for user_i in user_list:
        seq_path = os.path.join(dataset_path,
                                'mpi_inf_3dhp_test_set',
                                'TS' + str(user_i))
        # mat file with annotations (HDF5-backed .mat, hence h5py)
        annot_file = os.path.join(seq_path, 'annot_data.mat')
        mat_as_h5 = h5py.File(annot_file, 'r')
        annot2 = np.array(mat_as_h5['annot2'])
        annot3 = np.array(mat_as_h5['univ_annot3'])
        valid = np.array(mat_as_h5['valid_frame'])
        for frame_i, valid_i in enumerate(valid):
            # skip frames the dataset marks as invalid
            if valid_i == 0:
                continue
            img_name = os.path.join('mpi_inf_3dhp_test_set',
                                    'TS' + str(user_i),
                                    'imageSequence',
                                    'img_' + str(frame_i+1).zfill(6) + '.jpg')
            joints = annot2[frame_i,0,joints17_idx,:]
            # 3D joints: mm -> m, then center on the root joint (index 0)
            S17 = annot3[frame_i,0,joints17_idx,:]/1000
            S17 = S17 - S17[0]
            bbox = [min(joints[:,0]), min(joints[:,1]),
                    max(joints[:,0]), max(joints[:,1])]
            center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]
            scale = scaleFactor*max(bbox[2]-bbox[0], bbox[3]-bbox[1])/200
            # check that all joints are visible (needs the real image size,
            # so the image is loaded here)
            # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2;
            # this only runs on older SciPy versions — confirm pinned deps.
            img_file = os.path.join(dataset_path, img_name)
            I = scipy.misc.imread(img_file)
            h, w, _ = I.shape
            x_in = np.logical_and(joints[:, 0] < w, joints[:, 0] >= 0)
            y_in = np.logical_and(joints[:, 1] < h, joints[:, 1] >= 0)
            ok_pts = np.logical_and(x_in, y_in)
            if np.sum(ok_pts) < len(joints_idx):
                continue
            # scatter the 17 joints into the 24-joint layout; 3rd/4th column
            # is a visibility/validity flag
            part = np.zeros([24,3])
            part[joints_idx] = np.hstack([joints, np.ones([17,1])])
            S = np.zeros([24,4])
            S[joints_idx] = np.hstack([S17, np.ones([17,1])])
            # store the data
            imgnames_.append(img_name)
            centers_.append(center)
            scales_.append(scale)
            parts_.append(part)
            Ss_.append(S)
    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path, 'mpi_inf_3dhp_test.npz')
    np.savez(out_file, imgname=imgnames_,
                       center=centers_,
                       scale=scales_,
                       part=parts_,
                       S=Ss_)
def mpi_inf_3dhp_extract(dataset_path, openpose_path, out_path, mode, extract_img=False, static_fits=None):
    '''Entry point: run MPI-INF-3DHP preprocessing for one dataset split.

    Args:
        dataset_path: Root directory of the dataset.
        openpose_path: Root directory of precomputed OpenPose keypoints
            (used by the training pipeline only).
        out_path: Output directory for the resulting .npz file.
        mode: 'train' or 'test'; any other value is a no-op.
        extract_img: If True, the training pipeline decodes frames from the
            source videos to disk first.
        static_fits: Optional directory holding precomputed multi-view SMPL
            fits to merge into the training archive.
    '''
    # Fixed preprocessing hyper-parameters shared by both splits.
    scaleFactor = 1.2
    joints_idx = [14, 3, 4, 5, 2, 1, 0, 16, 12, 17, 18, 9, 10, 11, 8, 7, 6]
    fits_3d = (os.path.join(static_fits, 'mpi-inf-3dhp_mview_fits.npz')
               if static_fits is not None else None)
    if mode == 'train':
        train_data(dataset_path, openpose_path, out_path, joints_idx,
                   scaleFactor, extract_img=extract_img, fits_3d=fits_3d)
    elif mode == 'test':
        test_data(dataset_path, out_path, joints_idx, scaleFactor)
| 39.514056 | 112 | 0.468035 | import os
import sys
import cv2
import glob
import h5py
import json
import numpy as np
import scipy.io as sio
import scipy.misc
from .read_openpose import read_openpose
def read_calibration(calib_file, vid_list):
    '''Parse camera intrinsics and extrinsics for the requested camera ids.

    The MPI-INF-3DHP ``camera.calibration`` file devotes 7 lines to each
    camera; line offsets 5 and 6 within each block hold the flattened 4x4
    intrinsic and extrinsic matrices, prefixed by an 11-character field
    name and followed by 2 trailing characters.

    Args:
        calib_file: Path to the calibration text file.
        vid_list: Iterable of integer camera/video indices to read.

    Returns:
        Tuple ``(Ks, Rs, Ts)`` of parallel lists: 4x4 intrinsic matrices,
        3x3 rotation matrices and 3-vector translations. Translations are
        converted from millimetres to metres.
    '''
    Ks, Rs, Ts = [], [], []
    # 'with' guarantees the handle is closed (the original leaked it and
    # shadowed the name 'file').
    with open(calib_file, 'r') as fh:
        content = fh.readlines()
    for vid_i in vid_list:
        # [11:-2] strips the field-name prefix and the line terminator.
        K = np.array([float(s) for s in content[vid_i*7+5][11:-2].split()])
        K = np.reshape(K, (4, 4))
        RT = np.array([float(s) for s in content[vid_i*7+6][11:-2].split()])
        RT = np.reshape(RT, (4, 4))
        R = RT[:3,:3]
        T = RT[:3,3]/1000  # mm -> m
        Ks.append(K)
        Rs.append(R)
        Ts.append(T)
    return Ks, Rs, Ts
def train_data(dataset_path, openpose_path, out_path, joints_idx, scaleFactor, extract_img=False, fits_3d=None):
    '''Preprocess the MPI-INF-3DHP *training* split into a single .npz archive.

    Iterates over subjects S1..S8, sequences 1-2 and cameras {0,1,2,4..8},
    optionally extracting JPEG frames from the source videos, and collects
    image path, bounding box (center + scale), 2D keypoints, root-centered
    3D joints and OpenPose detections. Every 10th kept sample is written to
    ``<out_path>/mpi_inf_3dhp_train.npz``.

    Args:
        dataset_path: Root directory of the dataset.
        openpose_path: Root directory of precomputed OpenPose .json files.
        out_path: Output directory (created if missing).
        joints_idx: Indices into the 24-joint target layout for the 17
            dataset joints.
        scaleFactor: Padding factor applied to the tight bounding box.
        extract_img: If True, decode all frames from the .avi videos first.
        fits_3d: Optional path to a .npz with precomputed SMPL fits
            (``pose``/``shape``/``has_smpl``) to merge into the output.
    '''
    # Dataset-native joint order -> canonical 17-joint order.
    joints17_idx = [4, 18, 19, 20, 23, 24, 25, 3, 5, 6, 7, 9, 10, 11, 14, 15, 16]
    # All training images share this resolution.
    h, w = 2048, 2048
    imgnames_, scales_, centers_ = [], [], []
    parts_, Ss_, openposes_ = [], [], []
    # subjects S1..S8, sequences 1-2; camera 3 is skipped
    user_list = range(1,9)
    seq_list = range(1,3)
    vid_list = list(range(3)) + list(range(4,9))
    counter = 0
    for user_i in user_list:
        for seq_i in seq_list:
            seq_path = os.path.join(dataset_path,
                                    'S' + str(user_i),
                                    'Seq' + str(seq_i))
            # annotations for all cameras of this subject/sequence
            annot_file = os.path.join(seq_path, 'annot.mat')
            annot2 = sio.loadmat(annot_file)['annot2']
            annot3 = sio.loadmat(annot_file)['annot3']
            # camera calibration (per-camera K, R, T)
            calib_file = os.path.join(seq_path, 'camera.calibration')
            Ks, Rs, Ts = read_calibration(calib_file, vid_list)
            for j, vid_i in enumerate(vid_list):
                imgs_path = os.path.join(seq_path,
                                         'imageFrames',
                                         'video_' + str(vid_i))
                if extract_img:
                    if not os.path.isdir(imgs_path):
                        os.makedirs(imgs_path)
                    # video file
                    vid_file = os.path.join(seq_path,
                                            'imageSequence',
                                            'video_' + str(vid_i) + '.avi')
                    vidcap = cv2.VideoCapture(vid_file)
                    # process video
                    frame = 0
                    while 1:
                        # extract all frames
                        success, image = vidcap.read()
                        if not success:
                            break
                        frame += 1
                        # image name
                        imgname = os.path.join(imgs_path,
                                               'frame_%06d.jpg' % frame)
                        # save image
                        cv2.imwrite(imgname, image)
                # per frame
                # NOTE(review): cam_aa (camera axis-angle) is computed but
                # never used below — confirm whether it can be removed.
                cam_aa = cv2.Rodrigues(Rs[j])[0].T[0]
                pattern = os.path.join(imgs_path, '*.jpg')
                img_list = glob.glob(pattern)
                for i, img_i in enumerate(img_list):
                    # for each image we store the relevant annotations
                    img_name = img_i.split('/')[-1]
                    img_view = os.path.join('S' + str(user_i),
                                            'Seq' + str(seq_i),
                                            'imageFrames',
                                            'video_' + str(vid_i),
                                            img_name)
                    joints = np.reshape(annot2[vid_i][0][i], (28, 2))[joints17_idx]
                    # 3D joints: mm -> m, centered on the root
                    S17 = np.reshape(annot3[vid_i][0][i], (28, 3))/1000
                    S17 = S17[joints17_idx] - S17[4] # 4 is the root
                    bbox = [min(joints[:,0]), min(joints[:,1]),
                            max(joints[:,0]), max(joints[:,1])]
                    center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]
                    scale = scaleFactor*max(bbox[2]-bbox[0], bbox[3]-bbox[1])/200
                    # check that all joints are visible
                    x_in = np.logical_and(joints[:, 0] < w, joints[:, 0] >= 0)
                    y_in = np.logical_and(joints[:, 1] < h, joints[:, 1] >= 0)
                    ok_pts = np.logical_and(x_in, y_in)
                    if np.sum(ok_pts) < len(joints_idx):
                        continue
                    # scatter into the 24-joint layout; last column is a
                    # validity flag
                    part = np.zeros([24,3])
                    part[joints_idx] = np.hstack([joints, np.ones([17,1])])
                    json_file = os.path.join(openpose_path, 'mpi_inf_3dhp',
                        img_view.replace('.jpg', '_keypoints.json'))
                    openpose = read_openpose(json_file, part, 'mpi_inf_3dhp')
                    S = np.zeros([24,4])
                    S[joints_idx] = np.hstack([S17, np.ones([17,1])])
                    # because of the dataset size, we only keep every 10th frame
                    counter += 1
                    if counter % 10 != 1:
                        continue
                    # store the data
                    imgnames_.append(img_view)
                    centers_.append(center)
                    scales_.append(scale)
                    parts_.append(part)
                    Ss_.append(S)
                    openposes_.append(openpose)
    # store the data struct
    if not os.path.isdir(out_path):
        os.makedirs(out_path)
    out_file = os.path.join(out_path, 'mpi_inf_3dhp_train.npz')
    if fits_3d is not None:
        fits_3d = np.load(fits_3d)
        np.savez(out_file, imgname=imgnames_,
                           center=centers_,
                           scale=scales_,
                           part=parts_,
                           pose=fits_3d['pose'],
                           shape=fits_3d['shape'],
                           has_smpl=fits_3d['has_smpl'],
                           S=Ss_,
                           openpose=openposes_)
    else:
        np.savez(out_file, imgname=imgnames_,
                           center=centers_,
                           scale=scales_,
                           part=parts_,
                           S=Ss_,
                           openpose=openposes_)
def test_data(dataset_path, out_path, joints_idx, scaleFactor):
joints17_idx = [14, 11, 12, 13, 8, 9, 10, 15, 1, 16, 0, 5, 6, 7, 2, 3, 4]
imgnames_, scales_, centers_, parts_, Ss_ = [], [], [], [], []
# training data
user_list = range(1,7)
for user_i in user_list:
seq_path = os.path.join(dataset_path,
'mpi_inf_3dhp_test_set',
'TS' + str(user_i))
# mat file with annotations
annot_file = os.path.join(seq_path, 'annot_data.mat')
mat_as_h5 = h5py.File(annot_file, 'r')
annot2 = np.array(mat_as_h5['annot2'])
annot3 = np.array(mat_as_h5['univ_annot3'])
valid = np.array(mat_as_h5['valid_frame'])
for frame_i, valid_i in enumerate(valid):
if valid_i == 0:
continue
img_name = os.path.join('mpi_inf_3dhp_test_set',
'TS' + str(user_i),
'imageSequence',
'img_' + str(frame_i+1).zfill(6) + '.jpg')
joints = annot2[frame_i,0,joints17_idx,:]
S17 = annot3[frame_i,0,joints17_idx,:]/1000
S17 = S17 - S17[0]
bbox = [min(joints[:,0]), min(joints[:,1]),
max(joints[:,0]), max(joints[:,1])]
center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]
scale = scaleFactor*max(bbox[2]-bbox[0], bbox[3]-bbox[1])/200
# check that all joints are visible
img_file = os.path.join(dataset_path, img_name)
I = scipy.misc.imread(img_file)
h, w, _ = I.shape
x_in = np.logical_and(joints[:, 0] < w, joints[:, 0] >= 0)
y_in = np.logical_and(joints[:, 1] < h, joints[:, 1] >= 0)
ok_pts = np.logical_and(x_in, y_in)
if np.sum(ok_pts) < len(joints_idx):
continue
part = np.zeros([24,3])
part[joints_idx] = np.hstack([joints, np.ones([17,1])])
S = np.zeros([24,4])
S[joints_idx] = np.hstack([S17, np.ones([17,1])])
# store the data
imgnames_.append(img_name)
centers_.append(center)
scales_.append(scale)
parts_.append(part)
Ss_.append(S)
# store the data struct
if not os.path.isdir(out_path):
os.makedirs(out_path)
out_file = os.path.join(out_path, 'mpi_inf_3dhp_test.npz')
np.savez(out_file, imgname=imgnames_,
center=centers_,
scale=scales_,
part=parts_,
S=Ss_)
def mpi_inf_3dhp_extract(dataset_path, openpose_path, out_path, mode, extract_img=False, static_fits=None):
scaleFactor = 1.2
joints_idx = [14, 3, 4, 5, 2, 1, 0, 16, 12, 17, 18, 9, 10, 11, 8, 7, 6]
if static_fits is not None:
fits_3d = os.path.join(static_fits,
'mpi-inf-3dhp_mview_fits.npz')
else:
fits_3d = None
if mode == 'train':
train_data(dataset_path, openpose_path, out_path,
joints_idx, scaleFactor, extract_img=extract_img, fits_3d=fits_3d)
elif mode == 'test':
test_data(dataset_path, out_path, joints_idx, scaleFactor)
| true | true |
f71fd8ed4a60f1f0cb0713800b0c028bb7bc4489 | 16,140 | py | Python | desktop/core/ext-py/pysaml2-4.4.0/src/saml2/cert.py | kokosing/hue | 2307f5379a35aae9be871e836432e6f45138b3d9 | [
"Apache-2.0"
] | 3 | 2018-01-29T14:16:02.000Z | 2019-02-05T21:33:05.000Z | desktop/core/ext-py/pysaml2-4.4.0/src/saml2/cert.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 4 | 2021-03-11T04:02:00.000Z | 2022-03-27T08:31:56.000Z | desktop/core/ext-py/pysaml2-4.4.0/src/saml2/cert.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 2 | 2019-06-17T11:51:56.000Z | 2020-07-25T08:29:56.000Z | __author__ = 'haho0032'
import base64
import datetime
import dateutil.parser
import pytz
import six
from OpenSSL import crypto
from os.path import join
from os import remove
from Cryptodome.Util import asn1
class WrongInput(Exception):
    '''Raised when caller-supplied data is malformed (e.g. a country code
    that is not exactly two letters).'''
    pass
class CertificateError(Exception):
    '''Raised when certificate generation fails; wraps the original error.'''
    pass
class PayloadError(Exception):
    '''Defined for API completeness; not raised anywhere in this module.'''
    pass
class OpenSSLWrapper(object):
    '''Convenience wrapper around pyOpenSSL for creating, signing and
    verifying X509 certificates and certificate requests.'''

    def __init__(self):
        pass

    def create_certificate(self, cert_info, request=False, valid_from=0,
                           valid_to=315360000, sn=1, key_length=1024,
                           hash_alg="sha256", write_to_file=False, cert_dir="",
                           cipher_passphrase=None):
        """
        Can create certificate requests, to be signed later by another
        certificate with the method create_cert_signed_certificate, when
        request is True. Can also create self signed root certificates if
        request is False (the default).

        :param cert_info: Dict describing the certificate subject. Must
                          contain the keys:
                          cn = Common name (must match the host
                          being authenticated)
                          country_code = Two letter country code
                          state = State
                          city = City
                          organization = Organization / company name
                          organization_unit = Unit, e.g. a department
        :param request: True to create a certificate request to be
                        signed; False for a self signed root
                        certificate.
        :param valid_from: Seconds from now when the certificate
                           becomes valid.
        :param valid_to: Seconds from now when the certificate
                         expires. Default 315360000 (~10 years).
        :param sn: Serial number. Default 1.
        :param key_length: RSA key length. Default 1024.
        :param hash_alg: Hash algorithm for signing. Default sha256.
        :param write_to_file: If True, write certificate and key to files
                              and return their paths; if False return
                              them as strings.
                              WILL OVERWRITE EXISTING FILES WITHOUT ASKING!
        :param cert_dir: Directory for the files if write_to_file is True.
        :param cipher_passphrase: Optional dict with "cipher" and
                                  "passphrase" to encrypt the private key.
        :return: (certificate, private key) as strings, or as file paths
                 when write_to_file is True.
        :raises WrongInput: if the country code is not two letters.
        :raises CertificateError: if generation or serialization fails.
        """
        cn = cert_info["cn"]
        c_f = None
        k_f = None
        if write_to_file:
            cert_file = "%s.crt" % cn
            key_file = "%s.key" % cn
            # Remove stale files; a missing file is not an error.
            try:
                remove(cert_file)
            except OSError:
                pass
            try:
                remove(key_file)
            except OSError:
                pass
            c_f = join(cert_dir, cert_file)
            k_f = join(cert_dir, key_file)

        # create a key pair
        k = crypto.PKey()
        k.generate_key(crypto.TYPE_RSA, key_length)

        # create a self-signed cert, or a request when requested
        cert = crypto.X509()
        if request:
            cert = crypto.X509Req()

        if (len(cert_info["country_code"]) != 2):
            raise WrongInput("Country code must be two letters!")
        cert.get_subject().C = cert_info["country_code"]
        cert.get_subject().ST = cert_info["state"]
        cert.get_subject().L = cert_info["city"]
        cert.get_subject().O = cert_info["organization"]
        cert.get_subject().OU = cert_info["organization_unit"]
        cert.get_subject().CN = cn
        if not request:
            cert.set_serial_number(sn)
            cert.gmtime_adj_notBefore(valid_from)  # valid from present + offset
            cert.gmtime_adj_notAfter(valid_to)  # default ~3650 days
            cert.set_issuer(cert.get_subject())  # self-signed: issuer == subject
        cert.set_pubkey(k)
        cert.sign(k, hash_alg)

        try:
            if request:
                tmp_cert = crypto.dump_certificate_request(crypto.FILETYPE_PEM,
                                                           cert)
            else:
                tmp_cert = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
            if cipher_passphrase is not None:
                passphrase = cipher_passphrase["passphrase"]
                if isinstance(passphrase, six.string_types):
                    # pyOpenSSL expects the passphrase as bytes
                    passphrase = passphrase.encode('utf-8')
                tmp_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, k,
                                                 cipher_passphrase["cipher"],
                                                 passphrase)
            else:
                tmp_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, k)
            if write_to_file:
                # 'with' closes the handles even when a write fails
                # (the original left them open on error)
                with open(c_f, "wt") as fc:
                    fc.write(tmp_cert.decode('utf-8'))
                with open(k_f, "wt") as fk:
                    fk.write(tmp_key.decode('utf-8'))
                return c_f, k_f
            return tmp_cert, tmp_key
        except Exception as ex:
            raise CertificateError("Certificate cannot be generated.", ex)

    def write_str_to_file(self, file, str_data):
        """Write str_data to the given path, overwriting any existing file."""
        with open(file, "wt") as f:
            f.write(str_data)

    def read_str_from_file(self, file, type="pem"):
        """Read a certificate/key file as a string.

        :param file: Path to read.
        :param type: "pem" returns the file text unchanged; "der", "cer"
                     or "crt" return the raw bytes base64-encoded.
                     Unknown types return None.
        """
        if type == "pem":
            with open(file, 'rt') as f:
                return f.read()
        if type in ["der", "cer", "crt"]:
            # Binary formats must be read as bytes; the original passed a
            # str to b64encode, which raises TypeError on Python 3.
            with open(file, 'rb') as f:
                return base64.b64encode(f.read())
        return None

    def create_cert_signed_certificate(self, sign_cert_str, sign_key_str,
                                       request_cert_str, hash_alg="sha256",
                                       valid_from=0, valid_to=315360000, sn=1,
                                       passphrase=None):
        """
        Sign a certificate request with a given CA certificate.

        :param sign_cert_str: CA certificate to sign with, as a PEM
                              string (use read_str_from_file for files).
        :param sign_key_str: Private key for sign_cert_str, as a PEM
                             string.
        :param request_cert_str: The certificate request to be signed, as
                                 a PEM string.
        :param hash_alg: Hash algorithm for signing. Default sha256.
        :param valid_from: Seconds from now when the certificate
                           becomes valid.
        :param valid_to: Seconds from now when the certificate
                         expires. Default 315360000 (~10 years).
        :param sn: Serial number. Default 1.
        :param passphrase: Optional password for the private key.
        :return: The signed certificate as a PEM string.
        """
        ca_cert = crypto.load_certificate(crypto.FILETYPE_PEM, sign_cert_str)
        if passphrase is not None:
            if isinstance(passphrase, six.string_types):
                # pyOpenSSL expects the passphrase as bytes on Python 3
                passphrase = passphrase.encode('utf-8')
            ca_key = crypto.load_privatekey(crypto.FILETYPE_PEM, sign_key_str,
                                            passphrase)
        else:
            ca_key = crypto.load_privatekey(crypto.FILETYPE_PEM, sign_key_str)
        req_cert = crypto.load_certificate_request(crypto.FILETYPE_PEM,
                                                   request_cert_str)
        cert = crypto.X509()
        cert.set_subject(req_cert.get_subject())
        cert.set_serial_number(sn)
        cert.gmtime_adj_notBefore(valid_from)
        cert.gmtime_adj_notAfter(valid_to)
        cert.set_issuer(ca_cert.get_subject())
        cert.set_pubkey(req_cert.get_pubkey())
        cert.sign(ca_key, hash_alg)
        cert_dump = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
        if isinstance(cert_dump, six.string_types):
            return cert_dump
        return cert_dump.decode('utf-8')

    def verify_chain(self, cert_chain_str_list, cert_str):
        """
        Verify a certificate against a chain of CA certificates.

        :param cert_chain_str_list: List of certificate strings ordered
               from the immediate signer first to the root certificate last.
        :param cert_str: The certificate to be validated.
        :return: (valid, message) tuple; message explains a failure.
        """
        for tmp_cert_str in cert_chain_str_list:
            valid, message = self.verify(tmp_cert_str, cert_str)
            if not valid:
                return False, message
            else:
                cert_str = tmp_cert_str
        return (True,
                "Signed certificate is valid and correctly signed by CA "
                "certificate.")

    def certificate_not_valid_yet(self, cert):
        """Return True if the certificate's notBefore time is in the future."""
        not_before = cert.get_notBefore()
        if isinstance(not_before, bytes):
            # pyOpenSSL returns bytes on Python 3; dateutil needs str
            not_before = not_before.decode('ascii')
        starts_to_be_valid = dateutil.parser.parse(not_before)
        now = pytz.UTC.localize(datetime.datetime.utcnow())
        return starts_to_be_valid >= now

    def verify(self, signing_cert_str, cert_str):
        """
        Verify that a certificate is valid and signed by a given certificate.

        :param signing_cert_str: Certificate used to verify the signature,
                                 as a PEM string (use read_str_from_file
                                 for files).
        :param cert_str: Certificate to be verified, as a PEM string.
        :return: (valid, message) tuple; message explains a failure.
        """
        try:
            ca_cert = crypto.load_certificate(crypto.FILETYPE_PEM,
                                              signing_cert_str)
            cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)

            if self.certificate_not_valid_yet(ca_cert):
                return False, "CA certificate is not valid yet."
            if ca_cert.has_expired() == 1:
                return False, "CA certificate is expired."
            if cert.has_expired() == 1:
                return False, "The signed certificate is expired."
            if self.certificate_not_valid_yet(cert):
                return False, "The signed certificate is not valid yet."
            if ca_cert.get_subject().CN == cert.get_subject().CN:
                return False, ("CN may not be equal for CA certificate and the "
                               "signed certificate.")

            cert_algorithm = cert.get_signature_algorithm()
            if six.PY3:
                cert_algorithm = cert_algorithm.decode('ascii')

            # Split the DER encoding into (tbsCertificate,
            # signatureAlgorithm, signatureValue) per RFC 5280.
            cert_asn1 = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
            der_seq = asn1.DerSequence()
            der_seq.decode(cert_asn1)
            cert_certificate = der_seq[0]
            cert_signature = der_seq[2]
            cert_signature_decoded = asn1.DerObject()
            cert_signature_decoded.decode(cert_signature)
            signature_payload = cert_signature_decoded.payload
            # First byte of a BIT STRING payload is the unused-bit count;
            # it must be zero for a well-formed signature.
            sig_pay0 = signature_payload[0]
            if ((isinstance(sig_pay0, int) and sig_pay0 != 0) or
                    (isinstance(sig_pay0, str) and sig_pay0 != '\x00')):
                return (False,
                        "The certificate should not contain any unused bits.")
            signature = signature_payload[1:]
            try:
                crypto.verify(ca_cert, signature, cert_certificate,
                              cert_algorithm)
                return True, "Signed certificate is valid and correctly signed by CA certificate."
            except crypto.Error:
                return False, "Certificate is incorrectly signed."
        except Exception as e:
            return False, "Certificate is not valid for an unknown reason. %s" % str(e)
| 43.621622 | 98 | 0.495291 | __author__ = 'haho0032'
import base64
import datetime
import dateutil.parser
import pytz
import six
from OpenSSL import crypto
from os.path import join
from os import remove
from Cryptodome.Util import asn1
class WrongInput(Exception):
pass
class CertificateError(Exception):
pass
class PayloadError(Exception):
pass
class OpenSSLWrapper(object):
def __init__(self):
pass
def create_certificate(self, cert_info, request=False, valid_from=0,
valid_to=315360000, sn=1, key_length=1024,
hash_alg="sha256", write_to_file=False, cert_dir="",
cipher_passphrase=None):
cn = cert_info["cn"]
c_f = None
k_f = None
if write_to_file:
cert_file = "%s.crt" % cn
key_file = "%s.key" % cn
try:
remove(cert_file)
except:
pass
try:
remove(key_file)
except:
pass
c_f = join(cert_dir, cert_file)
k_f = join(cert_dir, key_file)
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, key_length)
cert = crypto.X509()
if request:
cert = crypto.X509Req()
if (len(cert_info["country_code"]) != 2):
raise WrongInput("Country code must be two letters!")
cert.get_subject().C = cert_info["country_code"]
cert.get_subject().ST = cert_info["state"]
cert.get_subject().L = cert_info["city"]
cert.get_subject().O = cert_info["organization"]
cert.get_subject().OU = cert_info["organization_unit"]
cert.get_subject().CN = cn
if not request:
cert.set_serial_number(sn)
cert.gmtime_adj_notBefore(valid_from)
cert.gmtime_adj_notAfter(valid_to)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, hash_alg)
filesCreated = False
try:
if request:
tmp_cert = crypto.dump_certificate_request(crypto.FILETYPE_PEM,
cert)
else:
tmp_cert = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
tmp_key = None
if cipher_passphrase is not None:
passphrase = cipher_passphrase["passphrase"]
if isinstance(cipher_passphrase["passphrase"],
six.string_types):
passphrase = passphrase.encode('utf-8')
tmp_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, k,
cipher_passphrase["cipher"],
passphrase)
else:
tmp_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, k)
if write_to_file:
fc = open(c_f, "wt")
fk = open(k_f, "wt")
if request:
fc.write(tmp_cert.decode('utf-8'))
else:
fc.write(tmp_cert.decode('utf-8'))
fk.write(tmp_key.decode('utf-8'))
filesCreated = True
try:
fc.close()
except:
pass
try:
fk.close()
except:
pass
return c_f, k_f
return tmp_cert, tmp_key
except Exception as ex:
raise CertificateError("Certificate cannot be generated.", ex)
def write_str_to_file(self, file, str_data):
f = open(file, "wt")
f.write(str_data)
f.close()
def read_str_from_file(self, file, type="pem"):
f = open(file, 'rt')
str_data = f.read()
f.close()
if type == "pem":
return str_data
if type in ["der", "cer", "crt"]:
return base64.b64encode(str(str_data))
def create_cert_signed_certificate(self, sign_cert_str, sign_key_str,
request_cert_str, hash_alg="sha256",
valid_from=0, valid_to=315360000, sn=1,
passphrase=None):
ca_cert = crypto.load_certificate(crypto.FILETYPE_PEM, sign_cert_str)
ca_key = None
if passphrase is not None:
ca_key = crypto.load_privatekey(crypto.FILETYPE_PEM, sign_key_str,
passphrase)
else:
ca_key = crypto.load_privatekey(crypto.FILETYPE_PEM, sign_key_str)
req_cert = crypto.load_certificate_request(crypto.FILETYPE_PEM,
request_cert_str)
cert = crypto.X509()
cert.set_subject(req_cert.get_subject())
cert.set_serial_number(sn)
cert.gmtime_adj_notBefore(valid_from)
cert.gmtime_adj_notAfter(valid_to)
cert.set_issuer(ca_cert.get_subject())
cert.set_pubkey(req_cert.get_pubkey())
cert.sign(ca_key, hash_alg)
cert_dump = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
if isinstance(cert_dump, six.string_types):
return cert_dump
return cert_dump.decode('utf-8')
def verify_chain(self, cert_chain_str_list, cert_str):
for tmp_cert_str in cert_chain_str_list:
valid, message = self.verify(tmp_cert_str, cert_str)
if not valid:
return False, message
else:
cert_str = tmp_cert_str
return (True,
"Signed certificate is valid and correctly signed by CA "
"certificate.")
def certificate_not_valid_yet(self, cert):
starts_to_be_valid = dateutil.parser.parse(cert.get_notBefore())
now = pytz.UTC.localize(datetime.datetime.utcnow())
if starts_to_be_valid < now:
return False
return True
def verify(self, signing_cert_str, cert_str):
try:
ca_cert = crypto.load_certificate(crypto.FILETYPE_PEM,
signing_cert_str)
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
if self.certificate_not_valid_yet(ca_cert):
return False, "CA certificate is not valid yet."
if ca_cert.has_expired() == 1:
return False, "CA certificate is expired."
if cert.has_expired() == 1:
return False, "The signed certificate is expired."
if self.certificate_not_valid_yet(cert):
return False, "The signed certificate is not valid yet."
if ca_cert.get_subject().CN == cert.get_subject().CN:
return False, ("CN may not be equal for CA certificate and the "
"signed certificate.")
cert_algorithm = cert.get_signature_algorithm()
if six.PY3:
cert_algorithm = cert_algorithm.decode('ascii')
cert_asn1 = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
der_seq = asn1.DerSequence()
der_seq.decode(cert_asn1)
cert_certificate = der_seq[0]
cert_signature = der_seq[2]
cert_signature_decoded = asn1.DerObject()
cert_signature_decoded.decode(cert_signature)
signature_payload = cert_signature_decoded.payload
sig_pay0 = signature_payload[0]
if ((isinstance(sig_pay0, int) and sig_pay0 != 0) or
(isinstance(sig_pay0, str) and sig_pay0 != '\x00')):
return (False,
"The certificate should not contain any unused bits.")
signature = signature_payload[1:]
try:
crypto.verify(ca_cert, signature, cert_certificate,
cert_algorithm)
return True, "Signed certificate is valid and correctly signed by CA certificate."
except crypto.Error as e:
return False, "Certificate is incorrectly signed."
except Exception as e:
return False, "Certificate is not valid for an unknown reason. %s" % str(e)
| true | true |
f71fd916fcc0b6fd5407ff0c3b8ac492320273d7 | 26,028 | py | Python | tf_rl_tutorial/models.py | 4k4xs4pH1r3/tf_rl_tutorial | c58d10c60cfd79b2e0661b4a49cccae8d4584c57 | [
"Apache-2.0"
] | 40 | 2016-03-09T03:03:08.000Z | 2021-09-11T21:44:12.000Z | tf_rl_tutorial/models.py | 4k4xs4pH1r3/tf_rl_tutorial | c58d10c60cfd79b2e0661b4a49cccae8d4584c57 | [
"Apache-2.0"
] | 1 | 2018-05-08T13:45:43.000Z | 2018-05-08T13:45:43.000Z | tf_rl_tutorial/models.py | 4k4xs4pH1r3/tf_rl_tutorial | c58d10c60cfd79b2e0661b4a49cccae8d4584c57 | [
"Apache-2.0"
] | 17 | 2016-03-17T14:57:11.000Z | 2021-06-04T16:24:48.000Z | # Copyright 2016 Mandiant, A FireEye Company
# Authors: Brian Jones
# License: Apache 2.0
''' Model classes for "Relational Learning with TensorFlow" tutorial '''
import numpy as np
import tensorflow as tf
from .util import ContrastiveTrainingProvider
def least_squares_objective(output, target, add_bias=True):
    ''' Builds the final model output and a squared-error loss

    Args:
        output: Model output
        target: Training target placeholder
        add_bias: If True, a scalar bias Variable is added to the output

    Returns:
        tuple (final output, loss)
    '''
    if add_bias:
        prediction = output + tf.Variable([0.0])
    else:
        prediction = output
    residual = prediction - target
    loss = tf.reduce_sum(tf.square(residual))
    return prediction, loss
def logistic_objective(output, target, add_bias=True):
    ''' Builds the final model output and a cross-entropy loss with a
    sigmoid link function

    Args:
        output: Model output
        target: Training target placeholder
        add_bias: If True, a scalar bias Variable is added to the output

    Returns:
        tuple (final output, loss)
    '''
    logits = output + tf.Variable([0.0]) if add_bias else output
    # clip probabilities away from 0/1 so the logs below cannot produce NaNs
    probs = tf.clip_by_value(tf.sigmoid(logits), 0.001, 0.999)
    crossent = target * tf.log(probs) + (1 - target) * tf.log(1 - probs)
    loss = -tf.reduce_sum(crossent)
    return probs, loss
def ranking_margin_objective(output, margin=1.0):
    ''' Builds the final model output and a pairwise ranking margin loss

    Loss for one pair (f(p), f(n)) = [margin - f(p) + f(n)]+

    Requires model output on alternating positive/negative pairs:
    [pos, neg, pos, neg, ...]. TODO: check target placeholder
    at runtime to make sure this is the case?

    Args:
        output: Model output
        margin: Margin value for the pairwise hinge loss

    Returns:
        tuple (final output, loss)
    '''
    # fold the flat score vector into (pos, neg) rows: 1 x n -> [n/2 x 2]
    pairs = tf.reshape(output, [-1, 2])
    pos, neg = tf.split(1, 2, pairs)  # TF 0.x signature: split(axis, num, value)
    per_pair_loss = tf.nn.relu(margin - pos + neg)
    return output, tf.reduce_sum(per_pair_loss)
def sparse_maxnorm_update(var_matrix, indices, maxnorm=1.0):
    '''Builds a sparse update op that rescales the selected rows of
    var_matrix so that none has a Euclidean norm greater than maxnorm.

    Args:
        var_matrix: 2D mutable tensor (Variable) to operate on
        indices: 1D tensor with the row indices to constrain
        maxnorm: the maximum Euclidean norm

    Returns:
        An operation that will update var_matrix when run in a Session
    '''
    rows = tf.nn.embedding_lookup(var_matrix, indices)
    norms = tf.sqrt(tf.reduce_sum(tf.square(rows), 1))
    # rows already within the limit get a factor of exactly 1
    factors = maxnorm / tf.maximum(norms, maxnorm)
    clipped = rows * tf.expand_dims(factors, 1)
    return tf.scatter_update(var_matrix, indices, clipped)
def dense_maxnorm_update(var_matrix, maxnorm=1.0):
    '''Builds a dense update op that rescales every row of var_matrix so
    that none has a Euclidean norm greater than maxnorm.

    Args:
        var_matrix: 2D mutable tensor (Variable) to operate on
        maxnorm: the maximum Euclidean norm

    Returns:
        An operation that will update var_matrix when run in a Session
    '''
    norms = tf.sqrt(tf.reduce_sum(tf.square(var_matrix), 1))
    # rows already within the limit get a factor of exactly 1
    factors = maxnorm / tf.maximum(norms, maxnorm)
    clipped = var_matrix * tf.expand_dims(factors, 1)
    return tf.assign(var_matrix, clipped)
def dense_maxnorm(var_matrix, maxnorm=1.0):
    '''Like dense_maxnorm_update(), but returns a new Tensor instead of an
    operation that modifies var_matrix.

    Args:
        var_matrix: 2D tensor (Variable)
        maxnorm: the maximum Euclidean norm

    Returns:
        A new tensor where all rows have been scaled as necessary
    '''
    norms = tf.sqrt(tf.reduce_sum(tf.square(var_matrix), 1))
    # rows already within the limit get a factor of exactly 1
    factors = maxnorm / tf.maximum(norms, maxnorm)
    return var_matrix * tf.expand_dims(factors, 1)
class BaseModel(object):
''' Base class for embedding-based relational learning models that use
maxnorm regularization. Subclasses must implement _create_model() and
populate self.train_step, and can optionally populate self.post_step for
post-processing.
Note: When model_type is 'ranking_margin', the mini-batch provider returned
by _create_batch_provider() must provide instances in alternating
pos/neg pairs: [pos, neg, pos, neg, ...]. This is satisfied when using
ContrastiveTrainingProvider; be careful if you use a different one.
Args:
embedding_size: Embedding vector length
maxnorm: Maximum Euclidean norm for embedding vectors
batch_pos_cnt: Number of positive examples to use in each mini-batch
max_iter: Maximum number of optimization iterations to perform
model_type: Possible values:
'least_squares': squared loss on 0/1 targets
'logistic': sigmoid link function, crossent loss on 0/1 targets
'ranking_margin': ranking margin on pos/neg pairs
add_bias: If True, a bias Variable will be added to the output for
least_squares and logistic models.
opt: An optimizer object to use. If None, the default optimizer is
tf.train.AdagradOptimizer(1.0)
TODO: add support for other regularizers like L2
'''
def __init__(self, embedding_size, maxnorm=1.0,
batch_pos_cnt=100, max_iter=1000,
model_type='least_squares', add_bias=True,
opt=None):
self.embedding_size = embedding_size
self.maxnorm = maxnorm
self.batch_pos_cnt = batch_pos_cnt
self.max_iter = max_iter
self.model_type = model_type
self.add_bias = add_bias
if opt is None:
opt = tf.train.AdagradOptimizer(1.0)
self.opt = opt
self.sess = None
self.train_step = None
self.post_step = None
self.graph = tf.Graph()
with self.graph.as_default():
self.head_input = tf.placeholder(tf.int32, shape=[None])
self.rel_input = tf.placeholder(tf.int32, shape=[None])
self.tail_input = tf.placeholder(tf.int32, shape=[None])
self.target = tf.placeholder(tf.float32, shape=[None])
def _create_model(self, train_triples):
''' Subclasses must build Graph and set self.train_step '''
raise Exception('subclass must implement')
def _create_batch_provider(self, train_triples):
''' Default implementation '''
return ContrastiveTrainingProvider(train_triples, self.batch_pos_cnt)
def _create_output_and_loss(self, raw_output):
if self.model_type == 'least_squares':
return least_squares_objective(raw_output, self.target, self.add_bias)
elif self.model_type == 'logistic':
return logistic_objective(raw_output, self.target, self.add_bias)
elif self.model_type == 'ranking_margin':
return ranking_margin_objective(raw_output, 1.0)
else:
raise Exception('Unknown model_type')
    def _norm_constraint_op(self, var_matrix, row_indices, maxnorm):
        '''
        Args:
            var_matrix: A 2D Tensor holding the vectors to constrain (in rows)
            row_indices: The rows in var_tensor that are being considered for
                constraint application (typically embedding vectors for
                entities observed for a minibatch of training data). These
                will be used for a sparse variable update operation if the
                chosen optimizer only modified these entries. Otherwise
                a dense operation is used and row_indices are ignored.
            maxnorm: The maximum Euclidean norm for the rows in var_tensor
        Returns:
            An operation which will apply the constraints when run in a Session
        '''
        # Currently, TF optimizers do not update variables with zero gradient
        # except AdamOptimizer
        # Because Adam touches every row, constraining only the minibatch rows
        # would leave the rest unconstrained — so use the dense update there.
        if isinstance(self.opt, tf.train.AdamOptimizer):
            return dense_maxnorm_update(var_matrix, maxnorm)
        else:
            return sparse_maxnorm_update(var_matrix, row_indices, maxnorm)
def embeddings(self):
''' Subclass should override this if it uses different embedding
variables
Returns:
A list of pairs: [(embedding name, embedding 2D Tensor)]
'''
return [('entity', self.entity_embedding_vars),
('rel', self.rel_embedding_vars)]
def create_feed_dict(self, triples, labels=None, training=False):
''' Create a TensorFlow feed dict for relationship triples
Args:
triples: A numpy integer array of relationship triples, where each
row contains [head idx, relationship idx, tail idx]
labels: (optional) A label array for triples
training: (optional) A flag indicating whether the feed dict is
for training or test purposes. Useful for things like
dropout where a dropout_probability variable is set differently
in the two contexts.
'''
feed_dict = {self.head_input: triples[:, 0],
self.rel_input: triples[:, 1],
self.tail_input: triples[:, 2]}
if labels is not None:
feed_dict[self.target] = labels
return feed_dict
def close(self):
''' Closes the TensorFlow Session object '''
self.sess.close();
    def fit(self, train_triples, step_callback=None):
        ''' Trains the model on relationship triples
        Args:
            train_triples: A numpy integer array of relationship triples, where
                each row of contains [head idx, relationship idx, tail idx]
            step_callback: (optional) A function that will be called before each
                optimization step, step_callback(iteration, feed_dict).
                Returning a falsy value stops training early.
        '''
        # Discard any session from a previous fit() so we start fresh.
        if self.sess is not None:
            self.sess.close()
        self.sess = tf.Session(graph=self.graph)
        with self.graph.as_default():
            # Subclass builds the graph and sets self.train_step here.
            self._create_model(train_triples)
            self.sess.run(tf.initialize_all_variables())
        batch_provider = self._create_batch_provider(train_triples)
        for i in range(self.max_iter):
            batch_triples, batch_labels = batch_provider.next_batch()
            feed_dict = self.create_feed_dict(batch_triples, batch_labels, training=True)
            if step_callback:
                keep_going = step_callback(i, feed_dict)
                if not keep_going:
                    break
            self.sess.run(self.train_step, feed_dict)
            if self.post_step is not None:
                # e.g. maxnorm projection ops registered by _create_model().
                self.sess.run(self.post_step, feed_dict)
    def predict(self, triples):
        ''' Runs a trained model on the supplied relationship triples. fit()
        must be called before calling this function.
        Args:
            triples: A numpy integer array of relationship triples, where each
                row of contains [head idx, relationship idx, tail idx]
        Returns:
            The model's output scores for each triple (shape follows
            self.output, which is set by _create_model() during fit()).
        '''
        # Relies on self.sess and self.output, both established by fit().
        feed_dict = self.create_feed_dict(triples, training=False)
        return self.sess.run(self.output, feed_dict=feed_dict)
class Contrastive_CP(BaseModel):
    ''' Model with a scoring function based on CANDECOMP/PARAFAC tensor
    decomposition. Optimization differs, however, in the use of maxnorm
    regularization and contrastive negative sampling.
    Score for (head i, rel k, tail j) triple is: h_i^T * diag(r_k) * t_j,
    where h_i and t_j are embedding vectors for the head and tail entities,
    and r_k is an embedding vector for the relationship type.
    Args:
        embedding_size: Embedding vector length
        maxnorm: Maximum Euclidean norm for embedding vectors
        batch_pos_cnt: Number of positive examples to use in each mini-batch
        max_iter: Maximum number of optimization iterations to perform
        model_type: Possible values:
            'least_squares': squared loss on 0/1 targets
            'logistic': sigmoid link function, crossent loss on 0/1 targets
            'ranking_margin': ranking margin on pos/neg pairs
        add_bias: If True, a bias Variable will be added to the output for
            least_squares and logistic models.
        opt: An optimizer object to use. If None, the default optimizer is
            tf.train.AdagradOptimizer(1.0)
    References:
        Kolda, Tamara G., and Brett W. Bader. "Tensor decompositions and
        applications." SIAM review 51.3 (2009): 455-500.
    '''
    def _create_model(self, train_triples):
        # Count unique items to determine embedding matrix sizes
        head_cnt = len(set(train_triples[:,0]))
        rel_cnt = len(set(train_triples[:,1]))
        tail_cnt = len(set(train_triples[:,2]))
        init_sd = 1.0 / np.sqrt(self.embedding_size)
        # Embedding matrices for entities and relationship types
        head_init = tf.truncated_normal([head_cnt, self.embedding_size], stddev=init_sd)
        rel_init = tf.truncated_normal([rel_cnt, self.embedding_size], stddev=init_sd)
        tail_init = tf.truncated_normal([tail_cnt, self.embedding_size], stddev=init_sd)
        if self.maxnorm is not None:
            # Ensure maxnorm constraints are initially satisfied
            head_init = dense_maxnorm(head_init, self.maxnorm)
            rel_init = dense_maxnorm(rel_init, self.maxnorm)
            tail_init = dense_maxnorm(tail_init, self.maxnorm)
        self.head_embedding_vars = tf.Variable(head_init)
        self.rel_embedding_vars = tf.Variable(rel_init)
        self.tail_embedding_vars = tf.Variable(tail_init)
        # Embedding layer for each (head, rel, tail) triple being fed in as input
        head_embed = tf.nn.embedding_lookup(self.head_embedding_vars, self.head_input)
        rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
        tail_embed = tf.nn.embedding_lookup(self.tail_embedding_vars, self.tail_input)
        # Model output: trilinear product summed over the embedding dimension
        raw_output = tf.reduce_sum(tf.mul(tf.mul(head_embed, rel_embed), tail_embed), 1)
        self.output, self.loss = self._create_output_and_loss(raw_output)
        # Optimization
        self.train_step = self.opt.minimize(self.loss)
        if self.maxnorm is not None:
            # Post-processing to limit embedding vars to L2 ball
            head_constraint = self._norm_constraint_op(self.head_embedding_vars,
                                                       tf.unique(self.head_input)[0],
                                                       self.maxnorm)
            rel_constraint = self._norm_constraint_op(self.rel_embedding_vars,
                                                      tf.unique(self.rel_input)[0],
                                                      self.maxnorm)
            tail_constraint = self._norm_constraint_op(self.tail_embedding_vars,
                                                       tf.unique(self.tail_input)[0],
                                                       self.maxnorm)
            self.post_step = [head_constraint, rel_constraint, tail_constraint]
    def _create_batch_provider(self, train):
        # CP treats head and tail entities separately
        return ContrastiveTrainingProvider(train,
                                           self.batch_pos_cnt,
                                           separate_head_tail=True)
    def embeddings(self):
        '''
        Returns:
            A list of pairs: [(embedding name, embedding 2D Tensor)]
        '''
        # Bug fix: the 'tail' entry previously returned head_embedding_vars.
        return [('head', self.head_embedding_vars),
                ('tail', self.tail_embedding_vars),
                ('rel', self.rel_embedding_vars)]
class Bilinear(BaseModel):
    ''' Model with a scoring function based on the bilinear formulation of
    RESCAL. Optimization differs, however, in the use of maxnorm
    regularization and contrastive negative sampling.
    Score for (head i, rel k, tail j) triple is: e_i^T * R_k * e_j
    where e_i and e_j are D-dimensional embedding vectors for the head and tail
    entities, and R_k is a (D x D) matrix for the relationship type
    acting as a bilinear operator.
    Args:
        embedding_size: Embedding vector length
        maxnorm: Maximum Euclidean norm for embedding vectors
        rel_maxnorm_mult: Multiplier for the maxnorm threshold used for
            relationship embeddings. Example: If maxnorm=2.0 and
            rel_maxnorm_mult=4.0, then the maxnorm constrain for relationships
            will be 2.0 * 4.0 = 8.0.
        batch_pos_cnt: Number of positive examples to use in each mini-batch
        max_iter: Maximum number of optimization iterations to perform
        model_type: Possible values:
            'least_squares': squared loss on 0/1 targets
            'logistic': sigmoid link function, crossent loss on 0/1 targets
            'ranking_margin': ranking margin on pos/neg pairs
        add_bias: If True, a bias Variable will be added to the output for
            least_squares and logistic models.
        opt: An optimizer object to use. If None, the default optimizer is
            tf.train.AdagradOptimizer(1.0)
    References:
        Nickel, Maximilian, Volker Tresp, and Hans-Peter Kriegel. "A three-way
        model for collective learning on multi-relational data." Proceedings of
        the 28th international conference on machine learning (ICML-11). 2011.
    '''
    def __init__(self, embedding_size, maxnorm=1.0, rel_maxnorm_mult=3.0,
                 batch_pos_cnt=100, max_iter=1000,
                 model_type='least_squares', add_bias=True, opt=None):
        super(Bilinear, self).__init__(
            embedding_size=embedding_size,
            maxnorm=maxnorm,
            batch_pos_cnt=batch_pos_cnt,
            max_iter=max_iter,
            model_type=model_type,
            # Bug fix: add_bias was accepted but never forwarded, so callers
            # passing add_bias=False were silently ignored.
            add_bias=add_bias,
            opt=opt)
        self.rel_maxnorm_mult = rel_maxnorm_mult
    def _create_model(self, train_triples):
        # Count unique items to determine embedding matrix sizes
        entity_cnt = len(set(train_triples[:,0]).union(train_triples[:,2]))
        rel_cnt = len(set(train_triples[:,1]))
        init_sd = 1.0 / np.sqrt(self.embedding_size)
        # Embedding variables for all entities and relationship types
        entity_embedding_shape = [entity_cnt, self.embedding_size]
        # Relationship embeddings will be stored in flattened format to make
        # applying maxnorm constraints easier
        rel_embedding_shape = [rel_cnt, self.embedding_size * self.embedding_size]
        entity_init = tf.truncated_normal(entity_embedding_shape, stddev=init_sd)
        rel_init = tf.truncated_normal(rel_embedding_shape, stddev=init_sd)
        if self.maxnorm is not None:
            # Ensure maxnorm constraints are initially satisfied
            entity_init = dense_maxnorm(entity_init, self.maxnorm)
            rel_init = dense_maxnorm(rel_init, self.maxnorm)
        self.entity_embedding_vars = tf.Variable(entity_init)
        self.rel_embedding_vars = tf.Variable(rel_init)
        # Embedding layer for each (head, rel, tail) triple being fed in as input
        head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)
        tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
        rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
        # Reshape rel_embed into square D x D matrices
        rel_embed_square = tf.reshape(rel_embed, (-1, self.embedding_size, self.embedding_size))
        # Reshape head_embed and tail_embed to be suitable for the matrix multiplication
        head_embed_row = tf.expand_dims(head_embed, 1)    # embeddings as row vectors
        tail_embed_col = tf.expand_dims(tail_embed, 2)    # embeddings as column vectors
        head_rel_mult = tf.batch_matmul(head_embed_row, rel_embed_square)
        # Output needs a squeeze into a 1d vector
        raw_output = tf.squeeze(tf.batch_matmul(head_rel_mult, tail_embed_col))
        self.output, self.loss = self._create_output_and_loss(raw_output)
        # Optimization
        self.train_step = self.opt.minimize(self.loss)
        if self.maxnorm is not None:
            # Post-processing to limit embedding vars to L2 ball
            rel_maxnorm = self.maxnorm * self.rel_maxnorm_mult
            unique_ent_indices = tf.unique(tf.concat(0, [self.head_input, self.tail_input]))[0]
            unique_rel_indices = tf.unique(self.rel_input)[0]
            entity_constraint = self._norm_constraint_op(self.entity_embedding_vars,
                                                         unique_ent_indices,
                                                         self.maxnorm)
            rel_constraint = self._norm_constraint_op(self.rel_embedding_vars,
                                                      unique_rel_indices,
                                                      rel_maxnorm)
            self.post_step = [entity_constraint, rel_constraint]
class TransE(BaseModel):
''' TransE: Translational Embeddings Model
Score for (head i, rel k, tail j) triple is: d(e_i + t_k, e_i)
where e_i and e_j are D-dimensional embedding vectors for the head and
tail entities, t_k is a another D-dimensional vector acting as a
translation, and d() is a dissimilarity function like Euclidean distance.
Optimization is performed uing SGD on ranking margin loss between
contrastive training pairs. Entity embeddings are contrained to lie within
the unit L2 ball, relationship vectors are left unconstrained.
Args:
embedding_size: Embedding vector length
batch_pos_cnt: Number of positive examples to use in each mini-batch
max_iter: Maximum number of optimization iterations to perform
dist: Distance function used in loss:
'euclidean': sqrt(sum((x - y)^2))
'sqeuclidean': squared Euclidean, sum((x - y)^2)
'manhattan': sum of absolute differences, sum(|x - y|)
margin: Margin parameter for parwise ranking hinge loss
opt: An optimizer object to use. If None, the default optimizer is
tf.train.AdagradOptimizer(1.0)
References:
Bordes, Antoine, et al. "Translating embeddings for modeling multi-relational
data." Advances in Neural Information Processing Systems. 2013.
'''
def __init__(self, embedding_size, batch_pos_cnt=100,
max_iter=1000, dist='euclidean',
margin=1.0, opt=None):
super(TransE, self).__init__(embedding_size=embedding_size,
maxnorm=1.0,
batch_pos_cnt=batch_pos_cnt,
max_iter=max_iter,
model_type='ranking_margin',
opt=opt)
self.dist = dist
self.margin = margin
self.EPS = 1e-3 # for sqrt gradient when dist='euclidean'
def _create_model(self, train_triples):
# Count unique items to determine embedding matrix sizes
entity_cnt = len(set(train_triples[:,0]).union(train_triples[:,2]))
rel_cnt = len(set(train_triples[:,1]))
init_sd = 1.0 / np.sqrt(self.embedding_size)
# Embedding variables
entity_var_shape = [entity_cnt, self.embedding_size]
rel_var_shape = [rel_cnt, self.embedding_size]
entity_init = tf.truncated_normal(entity_var_shape, stddev=init_sd)
rel_init = tf.truncated_normal(rel_var_shape, stddev=init_sd)
# Ensure maxnorm constraints are initially satisfied
entity_init = dense_maxnorm(entity_init, self.maxnorm)
self.entity_embedding_vars = tf.Variable(entity_init)
self.rel_embedding_vars = tf.Variable(rel_init)
# Embedding layer for each (head, rel, tail) triple being fed in as input
head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)
tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
# Relationship vector acts as a translation in entity embedding space
diff_vec = tail_embed - (head_embed + rel_embed)
# negative dist so higher scores are better (important for pairwise loss)
if self.dist == 'manhattan':
raw_output = -tf.reduce_sum(tf.abs(diff_vec), 1)
elif self.dist == 'euclidean':
# +eps because gradients can misbehave for small values in sqrt
raw_output = -tf.sqrt(tf.reduce_sum(tf.square(diff_vec), 1) + self.EPS)
elif self.dist == 'sqeuclidean':
raw_output = -tf.reduce_sum(tf.square(diff_vec), 1)
else:
raise Exception('Unknown distance type')
# Model output
self.output, self.loss = ranking_margin_objective(raw_output, self.margin)
# Optimization with postprocessing to limit embedding vars to L2 ball
self.train_step = self.opt.minimize(self.loss)
unique_ent_indices = tf.unique(tf.concat(0, [self.head_input, self.tail_input]))[0]
self.post_step = self._norm_constraint_op(self.entity_embedding_vars,
unique_ent_indices,
self.maxnorm) | 47.67033 | 96 | 0.643807 |
import numpy as np
import tensorflow as tf
from .util import ContrastiveTrainingProvider
def least_squares_objective(output, target, add_bias=True):
    '''Squared-error loss on raw scores.

    Args:
        output: 1-D Tensor of raw model scores.
        target: 1-D Tensor of regression targets (typically 0/1).
        add_bias: If True, add a learned scalar bias to the output.

    Returns:
        (prediction Tensor, scalar loss Tensor)
    '''
    y = output
    if add_bias:
        bias = tf.Variable([0.0])
        y = output + bias
    loss = tf.reduce_sum(tf.square(y - target))
    return y, loss
def logistic_objective(output, target, add_bias=True):
    '''Sigmoid link + cross-entropy loss on 0/1 targets.

    Args:
        output: 1-D Tensor of raw model scores (logits).
        target: 1-D Tensor of 0/1 targets.
        add_bias: If True, add a learned scalar bias before the sigmoid.

    Returns:
        (sigmoid probability Tensor, scalar cross-entropy loss Tensor)
    '''
    y = output
    if add_bias:
        bias = tf.Variable([0.0])
        y = output + bias
    # Clip probabilities away from 0/1 to keep the log terms finite.
    sig_y = tf.clip_by_value(tf.sigmoid(y), 0.001, 0.999)
    loss = -tf.reduce_sum(target*tf.log(sig_y) + (1-target)*tf.log(1-sig_y))
    return sig_y, loss
def ranking_margin_objective(output, margin=1.0):
    '''Pairwise ranking hinge loss over interleaved pos/neg scores.

    Assumes output holds scores ordered [pos, neg, pos, neg, ...], so
    reshaping to [-1, 2] pairs each positive with its negative.

    Args:
        output: 1-D Tensor of scores in alternating pos/neg order.
        margin: Hinge margin between positive and negative scores.

    Returns:
        (original output Tensor, scalar total hinge loss Tensor)
    '''
    y_pairs = tf.reshape(output, [-1,2])
    pos_scores, neg_scores = tf.split(1, 2, y_pairs)
    hinge_losses = tf.nn.relu(margin - pos_scores + neg_scores)
    total_hinge_loss = tf.reduce_sum(hinge_losses)
    return output, total_hinge_loss
def sparse_maxnorm_update(var_matrix, indices, maxnorm=1.0):
    '''Project only the selected rows of var_matrix onto the L2 ball.

    Rows whose Euclidean norm exceeds maxnorm are rescaled to maxnorm;
    others are left unchanged (scaling factor is 1 for them).

    Args:
        var_matrix: 2-D Variable whose rows are embedding vectors.
        indices: 1-D Tensor of row indices to constrain.
        maxnorm: Maximum allowed row norm.

    Returns:
        A scatter_update op applying the projection when run.
    '''
    selected_rows = tf.nn.embedding_lookup(var_matrix, indices)
    row_norms = tf.sqrt(tf.reduce_sum(tf.square(selected_rows), 1))
    scaling = maxnorm / tf.maximum(row_norms, maxnorm)
    scaled = selected_rows * tf.expand_dims(scaling, 1)
    return tf.scatter_update(var_matrix, indices, scaled)
def dense_maxnorm_update(var_matrix, maxnorm=1.0):
    '''Project every row of var_matrix onto the L2 ball of radius maxnorm.

    Args:
        var_matrix: 2-D Variable whose rows are embedding vectors.
        maxnorm: Maximum allowed row norm.

    Returns:
        An assign op writing the projected matrix back when run.
    '''
    row_norms = tf.sqrt(tf.reduce_sum(tf.square(var_matrix), 1))
    scaling = maxnorm / tf.maximum(row_norms, maxnorm)
    scaled = var_matrix * tf.expand_dims(scaling, 1)
    return tf.assign(var_matrix, scaled)
def dense_maxnorm(var_matrix, maxnorm=1.0):
    '''Functional row-wise maxnorm projection (no variable assignment).

    Args:
        var_matrix: 2-D Tensor whose rows are vectors.
        maxnorm: Maximum allowed row norm.

    Returns:
        A new Tensor with rows rescaled so no row norm exceeds maxnorm.
    '''
    axis_norms = tf.sqrt(tf.reduce_sum(tf.square(var_matrix), 1))
    scaling = maxnorm / tf.maximum(axis_norms, maxnorm)
    return var_matrix * tf.expand_dims(scaling, 1)
class BaseModel(object):
    '''Base class for knowledge-graph embedding models.

    Subclasses implement _create_model() to build the TF graph and set
    self.train_step (and optionally self.post_step for post-processing
    such as maxnorm projection).
    '''
    def __init__(self, embedding_size, maxnorm=1.0,
                 batch_pos_cnt=100, max_iter=1000,
                 model_type='least_squares', add_bias=True,
                 opt=None):
        self.embedding_size = embedding_size
        self.maxnorm = maxnorm
        self.batch_pos_cnt = batch_pos_cnt
        self.max_iter = max_iter
        self.model_type = model_type
        self.add_bias = add_bias
        if opt is None:
            opt = tf.train.AdagradOptimizer(1.0)
        self.opt = opt
        self.sess = None        # created lazily in fit()
        self.train_step = None  # set by _create_model()
        self.post_step = None   # optional post-optimization ops
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.head_input = tf.placeholder(tf.int32, shape=[None])
            self.rel_input = tf.placeholder(tf.int32, shape=[None])
            self.tail_input = tf.placeholder(tf.int32, shape=[None])
            self.target = tf.placeholder(tf.float32, shape=[None])
    def _create_model(self, train_triples):
        '''Abstract hook: build the graph and set self.train_step.'''
        # NotImplementedError subclasses Exception, so broad handlers still work.
        raise NotImplementedError('subclass must implement')
    def _create_batch_provider(self, train_triples):
        '''Default contrastive pos/neg mini-batch sampler.'''
        return ContrastiveTrainingProvider(train_triples, self.batch_pos_cnt)
    def _create_output_and_loss(self, raw_output):
        '''Return (output, loss) graph nodes chosen by self.model_type.'''
        if self.model_type == 'least_squares':
            return least_squares_objective(raw_output, self.target, self.add_bias)
        elif self.model_type == 'logistic':
            return logistic_objective(raw_output, self.target, self.add_bias)
        elif self.model_type == 'ranking_margin':
            return ranking_margin_objective(raw_output, 1.0)
        else:
            raise Exception('Unknown model_type')
    def _norm_constraint_op(self, var_matrix, row_indices, maxnorm):
        '''Op constraining embedding row norms to maxnorm.'''
        # Adam updates every row, so a sparse projection of just the batch
        # rows would be insufficient; fall back to a dense update there.
        if isinstance(self.opt, tf.train.AdamOptimizer):
            return dense_maxnorm_update(var_matrix, maxnorm)
        else:
            return sparse_maxnorm_update(var_matrix, row_indices, maxnorm)
    def embeddings(self):
        '''List of (name, embedding 2D Tensor) pairs; subclasses may override.'''
        return [('entity', self.entity_embedding_vars),
                ('rel', self.rel_embedding_vars)]
    def create_feed_dict(self, triples, labels=None, training=False):
        '''Map input placeholders to the triple columns (+ optional labels).'''
        feed_dict = {self.head_input: triples[:, 0],
                     self.rel_input: triples[:, 1],
                     self.tail_input: triples[:, 2]}
        if labels is not None:
            feed_dict[self.target] = labels
        return feed_dict
    def close(self):
        '''Close the Session; safe before fit() and safe to call twice.'''
        # Bug fix: previously crashed with AttributeError when sess was None.
        if self.sess is not None:
            self.sess.close()
            self.sess = None
    def fit(self, train_triples, step_callback=None):
        '''Train on [head, rel, tail] triples; optional per-step callback.'''
        if self.sess is not None:
            self.sess.close()
        self.sess = tf.Session(graph=self.graph)
        with self.graph.as_default():
            self._create_model(train_triples)
            self.sess.run(tf.initialize_all_variables())
        batch_provider = self._create_batch_provider(train_triples)
        for i in range(self.max_iter):
            batch_triples, batch_labels = batch_provider.next_batch()
            feed_dict = self.create_feed_dict(batch_triples, batch_labels, training=True)
            if step_callback:
                keep_going = step_callback(i, feed_dict)
                if not keep_going:
                    break
            self.sess.run(self.train_step, feed_dict)
            if self.post_step is not None:
                self.sess.run(self.post_step, feed_dict)
    def predict(self, triples):
        '''Score triples with the trained model; fit() must run first.'''
        feed_dict = self.create_feed_dict(triples, training=False)
        return self.sess.run(self.output, feed_dict=feed_dict)
class Contrastive_CP(BaseModel):
    '''CANDECOMP/PARAFAC-style model: score(h, r, t) = sum(h_i * r_k * t_j),
    with separate head and tail entity embedding matrices.
    '''
    def _create_model(self, train_triples):
        # Matrix sizes are inferred from the distinct indices in training data.
        head_cnt = len(set(train_triples[:,0]))
        rel_cnt = len(set(train_triples[:,1]))
        tail_cnt = len(set(train_triples[:,2]))
        init_sd = 1.0 / np.sqrt(self.embedding_size)
        head_init = tf.truncated_normal([head_cnt, self.embedding_size], stddev=init_sd)
        rel_init = tf.truncated_normal([rel_cnt, self.embedding_size], stddev=init_sd)
        tail_init = tf.truncated_normal([tail_cnt, self.embedding_size], stddev=init_sd)
        if self.maxnorm is not None:
            # Project initial values so the maxnorm constraint holds from step 0.
            head_init = dense_maxnorm(head_init, self.maxnorm)
            rel_init = dense_maxnorm(rel_init, self.maxnorm)
            tail_init = dense_maxnorm(tail_init, self.maxnorm)
        self.head_embedding_vars = tf.Variable(head_init)
        self.rel_embedding_vars = tf.Variable(rel_init)
        self.tail_embedding_vars = tf.Variable(tail_init)
        head_embed = tf.nn.embedding_lookup(self.head_embedding_vars, self.head_input)
        rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
        tail_embed = tf.nn.embedding_lookup(self.tail_embedding_vars, self.tail_input)
        # Trilinear score summed over the embedding dimension.
        raw_output = tf.reduce_sum(tf.mul(tf.mul(head_embed, rel_embed), tail_embed), 1)
        self.output, self.loss = self._create_output_and_loss(raw_output)
        self.train_step = self.opt.minimize(self.loss)
        if self.maxnorm is not None:
            # Post-step: keep embedding rows inside the L2 ball.
            head_constraint = self._norm_constraint_op(self.head_embedding_vars,
                                                       tf.unique(self.head_input)[0],
                                                       self.maxnorm)
            rel_constraint = self._norm_constraint_op(self.rel_embedding_vars,
                                                      tf.unique(self.rel_input)[0],
                                                      self.maxnorm)
            tail_constraint = self._norm_constraint_op(self.tail_embedding_vars,
                                                       tf.unique(self.tail_input)[0],
                                                       self.maxnorm)
            self.post_step = [head_constraint, rel_constraint, tail_constraint]
    def _create_batch_provider(self, train):
        # CP keeps head and tail entity vocabularies separate.
        return ContrastiveTrainingProvider(train,
                                           self.batch_pos_cnt,
                                           separate_head_tail=True)
    def embeddings(self):
        # Bug fix: the 'tail' entry previously returned head_embedding_vars.
        return [('head', self.head_embedding_vars),
                ('tail', self.tail_embedding_vars),
                ('rel', self.rel_embedding_vars)]
class Bilinear(BaseModel):
    '''RESCAL-style bilinear model: score(h, r, t) = e_h^T * R_r * e_t,
    with shared entity embeddings and a flattened (D*D) matrix per relation.
    '''
    def __init__(self, embedding_size, maxnorm=1.0, rel_maxnorm_mult=3.0,
                 batch_pos_cnt=100, max_iter=1000,
                 model_type='least_squares', add_bias=True, opt=None):
        super(Bilinear, self).__init__(
            embedding_size=embedding_size,
            maxnorm=maxnorm,
            batch_pos_cnt=batch_pos_cnt,
            max_iter=max_iter,
            model_type=model_type,
            # Bug fix: add_bias was accepted but never forwarded to the base
            # class, so callers passing add_bias=False were silently ignored.
            add_bias=add_bias,
            opt=opt)
        self.rel_maxnorm_mult = rel_maxnorm_mult
    def _create_model(self, train_triples):
        entity_cnt = len(set(train_triples[:,0]).union(train_triples[:,2]))
        rel_cnt = len(set(train_triples[:,1]))
        init_sd = 1.0 / np.sqrt(self.embedding_size)
        entity_embedding_shape = [entity_cnt, self.embedding_size]
        # Relations are stored flattened (D*D per row) so row-wise maxnorm
        # projection applies directly.
        rel_embedding_shape = [rel_cnt, self.embedding_size * self.embedding_size]
        entity_init = tf.truncated_normal(entity_embedding_shape, stddev=init_sd)
        rel_init = tf.truncated_normal(rel_embedding_shape, stddev=init_sd)
        if self.maxnorm is not None:
            entity_init = dense_maxnorm(entity_init, self.maxnorm)
            rel_init = dense_maxnorm(rel_init, self.maxnorm)
        self.entity_embedding_vars = tf.Variable(entity_init)
        self.rel_embedding_vars = tf.Variable(rel_init)
        head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)
        tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
        rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
        # Un-flatten each relation row into a D x D bilinear operator.
        rel_embed_square = tf.reshape(rel_embed, (-1, self.embedding_size, self.embedding_size))
        head_embed_row = tf.expand_dims(head_embed, 1)   # row vectors
        tail_embed_col = tf.expand_dims(tail_embed, 2)   # column vectors
        head_rel_mult = tf.batch_matmul(head_embed_row, rel_embed_square)
        # Squeeze the (batch, 1, 1) product down to a 1-D score vector.
        raw_output = tf.squeeze(tf.batch_matmul(head_rel_mult, tail_embed_col))
        self.output, self.loss = self._create_output_and_loss(raw_output)
        self.train_step = self.opt.minimize(self.loss)
        if self.maxnorm is not None:
            # Relations get a looser norm bound than entities.
            rel_maxnorm = self.maxnorm * self.rel_maxnorm_mult
            unique_ent_indices = tf.unique(tf.concat(0, [self.head_input, self.tail_input]))[0]
            unique_rel_indices = tf.unique(self.rel_input)[0]
            entity_constraint = self._norm_constraint_op(self.entity_embedding_vars,
                                                         unique_ent_indices,
                                                         self.maxnorm)
            rel_constraint = self._norm_constraint_op(self.rel_embedding_vars,
                                                      unique_rel_indices,
                                                      rel_maxnorm)
            self.post_step = [entity_constraint, rel_constraint]
class TransE(BaseModel):
def __init__(self, embedding_size, batch_pos_cnt=100,
max_iter=1000, dist='euclidean',
margin=1.0, opt=None):
super(TransE, self).__init__(embedding_size=embedding_size,
maxnorm=1.0,
batch_pos_cnt=batch_pos_cnt,
max_iter=max_iter,
model_type='ranking_margin',
opt=opt)
self.dist = dist
self.margin = margin
self.EPS = 1e-3
def _create_model(self, train_triples):
entity_cnt = len(set(train_triples[:,0]).union(train_triples[:,2]))
rel_cnt = len(set(train_triples[:,1]))
init_sd = 1.0 / np.sqrt(self.embedding_size)
entity_var_shape = [entity_cnt, self.embedding_size]
rel_var_shape = [rel_cnt, self.embedding_size]
entity_init = tf.truncated_normal(entity_var_shape, stddev=init_sd)
rel_init = tf.truncated_normal(rel_var_shape, stddev=init_sd)
entity_init = dense_maxnorm(entity_init, self.maxnorm)
self.entity_embedding_vars = tf.Variable(entity_init)
self.rel_embedding_vars = tf.Variable(rel_init)
head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)
tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
diff_vec = tail_embed - (head_embed + rel_embed)
if self.dist == 'manhattan':
raw_output = -tf.reduce_sum(tf.abs(diff_vec), 1)
elif self.dist == 'euclidean':
raw_output = -tf.sqrt(tf.reduce_sum(tf.square(diff_vec), 1) + self.EPS)
elif self.dist == 'sqeuclidean':
raw_output = -tf.reduce_sum(tf.square(diff_vec), 1)
else:
raise Exception('Unknown distance type')
self.output, self.loss = ranking_margin_objective(raw_output, self.margin)
self.train_step = self.opt.minimize(self.loss)
unique_ent_indices = tf.unique(tf.concat(0, [self.head_input, self.tail_input]))[0]
self.post_step = self._norm_constraint_op(self.entity_embedding_vars,
unique_ent_indices,
self.maxnorm) | true | true |
f71fd969d3ac6dc91ff8442595549e245b3a9430 | 1,059 | py | Python | salvia/wallet/payment.py | Salvia-Network/salvia-blockchain | b0ce4b9f75c2fc354941b45eb468ffcf917ead30 | [
"Apache-2.0"
] | 6 | 2021-09-13T17:20:49.000Z | 2022-02-09T04:31:47.000Z | salvia/wallet/payment.py | Salvia-Network/salvia-blockchain | b0ce4b9f75c2fc354941b45eb468ffcf917ead30 | [
"Apache-2.0"
] | 21 | 2021-09-20T00:56:54.000Z | 2022-03-22T01:12:12.000Z | salvia/wallet/payment.py | Salvia-Network/salvia-blockchain | b0ce4b9f75c2fc354941b45eb468ffcf917ead30 | [
"Apache-2.0"
] | 9 | 2021-09-13T17:54:04.000Z | 2022-03-15T08:38:35.000Z | from dataclasses import dataclass
from typing import List
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.program import Program
from salvia.util.ints import uint64
# This class is supposed to correspond to a CREATE_COIN condition
@dataclass(frozen=True)
class Payment:
    '''An intended coin creation; corresponds to a CREATE_COIN condition.'''
    # Hash of the puzzle that will lock the created coin.
    puzzle_hash: bytes32
    # Coin amount (unsigned 64-bit integer).
    amount: uint64
    # Optional memo payloads attached to the condition.
    memos: List[bytes]
    def as_condition_args(self) -> List:
        '''Return the CREATE_COIN argument list (without the opcode).'''
        return [self.puzzle_hash, self.amount, self.memos]
    def as_condition(self) -> Program:
        '''Serialize as a full condition; 51 is the CREATE_COIN opcode.'''
        return Program.to([51, *self.as_condition_args()])
    def name(self) -> bytes32:
        '''Tree hash of the serialized condition, usable as an identifier.'''
        return self.as_condition().get_tree_hash()
    @classmethod
    def from_condition(cls, condition: Program) -> "Payment":
        '''Parse a CREATE_COIN condition back into a Payment.

        Expects (opcode, puzzle_hash, amount[, memos]); the opcode itself
        is not validated here.
        '''
        python_condition: List = condition.as_python()
        puzzle_hash, amount = python_condition[1:3]
        memos: List[bytes] = []
        if len(python_condition) > 3:
            memos = python_condition[3]
        # Amount arrives as big-endian bytes from the serialized program.
        return cls(bytes32(puzzle_hash), uint64(int.from_bytes(amount, "big")), memos)
| 31.147059 | 86 | 0.705382 | from dataclasses import dataclass
from typing import List
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.program import Program
from salvia.util.ints import uint64
@dataclass(frozen=True)
class Payment:
    '''An intended coin creation; corresponds to a CREATE_COIN condition.'''
    puzzle_hash: bytes32
    amount: uint64
    memos: List[bytes]
    def as_condition_args(self) -> List:
        '''CREATE_COIN argument list (without the opcode).'''
        return [self.puzzle_hash, self.amount, self.memos]
    def as_condition(self) -> Program:
        '''Serialize as a full condition; 51 is the CREATE_COIN opcode.'''
        return Program.to([51, *self.as_condition_args()])
    def name(self) -> bytes32:
        '''Tree hash of the serialized condition.'''
        return self.as_condition().get_tree_hash()
    @classmethod
    def from_condition(cls, condition: Program) -> "Payment":
        '''Parse (opcode, puzzle_hash, amount[, memos]) back into a Payment.'''
        python_condition: List = condition.as_python()
        puzzle_hash, amount = python_condition[1:3]
        memos: List[bytes] = []
        if len(python_condition) > 3:
            memos = python_condition[3]
        # Amount arrives as big-endian bytes from the serialized program.
        return cls(bytes32(puzzle_hash), uint64(int.from_bytes(amount, "big")), memos)
| true | true |
f71fd999a54a3748a94533f9d632879d0495dbcd | 1,271 | py | Python | checklink/parse/__init__.py | zombie110year/find_dead_link | 565ec99c0fcbecaa4f7d82006bc9d58d0c05fa06 | [
"MIT"
] | null | null | null | checklink/parse/__init__.py | zombie110year/find_dead_link | 565ec99c0fcbecaa4f7d82006bc9d58d0c05fa06 | [
"MIT"
] | null | null | null | checklink/parse/__init__.py | zombie110year/find_dead_link | 565ec99c0fcbecaa4f7d82006bc9d58d0c05fa06 | [
"MIT"
] | null | null | null | """
Text parsers that find URLs in content.
Every URL item should contain:
- url
- location (`filepath:row:column`)
"""
from abc import ABC, abstractmethod
from typing import List
class Link:
    '''A hyperlink found in a file, together with its source location.'''

    def __init__(self, url: str, path: str, row: int, column: int):
        """init link object

        :param str url: link's href
        :param str path: where found this link, file path
        :param int row: where found this link, line number
        :param int column: where found this link, chars after line beginning
        """
        self.__url = url
        self.__path = path
        self.__row = row
        self.__column = column

    @property
    def url(self) -> str:
        return self.__url

    @property
    def path(self) -> str:
        return self.__path

    @property
    def row(self) -> int:
        return self.__row

    @property
    def column(self) -> int:
        return self.__column

    @property
    def location(self) -> str:
        '''Location rendered as ``filepath:row:column``.'''
        return f"{self.path}:{self.row}:{self.column}"

    @path.setter
    def path(self, other: str):
        # path is mutable so a parser can rewrite relative paths later.
        self.__path = other

    def __repr__(self) -> str:
        # Added for debuggability; not part of any serialized output.
        return f"Link({self.url!r} @ {self.location})"
class Parser(ABC):
    '''Abstract base for text parsers that extract links from content.

    Bug fix: the class used @abstractmethod without inheriting abc.ABC, so
    abstractness was never enforced; Parser (and incomplete subclasses)
    could be instantiated. Inheriting ABC makes instantiation of a class
    with unimplemented abstract methods raise TypeError, as intended.
    '''

    @abstractmethod
    def parse(self, text: str) -> List[Link]:
        '''Parse raw text and return the links found in it.'''

    @abstractmethod
    def parse_file(self, path: str) -> List[Link]:
        '''Read the file at ``path`` and return the links found in it.'''
| 21.183333 | 76 | 0.601888 | from abc import abstractmethod
from typing import List
class Link:
    '''A hyperlink found in a file, together with its source location.'''
    def __init__(self, url: str, path: str, row: int, column: int):
        # url: the link href; path/row/column: where it was found.
        self.__url = url
        self.__path = path
        self.__row = row
        self.__column = column
    @property
    def url(self) -> str:
        return self.__url
    @property
    def path(self) -> str:
        return self.__path
    @property
    def row(self) -> int:
        return self.__row
    @property
    def column(self) -> int:
        return self.__column
    @property
    def location(self) -> str:
        # Rendered as filepath:row:column.
        return f"{self.path}:{self.row}:{self.column}"
    @path.setter
    def path(self, other: str):
        # path is the only mutable attribute.
        self.__path = other
class Parser:
    '''Abstract interface for parsers that extract links from text.

    NOTE(review): decorated with @abstractmethod but does not inherit
    abc.ABC, so instantiation is not actually blocked — confirm intent.
    '''
    @abstractmethod
    def parse(self, text: str) -> List[Link]:
        '''Parse raw text and return the links found in it.'''
        pass
    @abstractmethod
    def parse_file(self, path: str) -> List[Link]:
        '''Read the file at ``path`` and return the links found in it.'''
        pass
| true | true |
f71fd9b490090c7c03e0f828032d2a989edaca88 | 3,415 | py | Python | components/studio/projects/models.py | MuhammadNaumanAbid/stackn | 484501efda19f8f9c9c088bcf6095060c925d3b1 | [
"Apache-2.0"
] | null | null | null | components/studio/projects/models.py | MuhammadNaumanAbid/stackn | 484501efda19f8f9c9c088bcf6095060c925d3b1 | [
"Apache-2.0"
] | null | null | null | components/studio/projects/models.py | MuhammadNaumanAbid/stackn | 484501efda19f8f9c9c088bcf6095060c925d3b1 | [
"Apache-2.0"
] | null | null | null | import base64
from django.db import models
from django.contrib.auth.models import User
from django.utils.text import slugify
import string
import random
DEFAULT_ENVIRONMENT_ID = 1
class Flavor(models.Model):
    '''A named compute-resource flavor (CPU/memory/GPU sizing).'''
    name = models.CharField(max_length=512)
    slug = models.CharField(max_length=512)
    # Resource quantities stored as free-form text — assumed to be
    # resource-spec strings; TODO confirm expected format with consumers.
    cpu = models.TextField(blank=True, null=True)
    mem = models.TextField(blank=True, null=True)
    gpu = models.TextField(blank=True, null=True)
    updated_at = models.DateTimeField(auto_now=True)
    created_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return str(self.name)
class Environment(models.Model):
    '''A runtime environment: container image plus lifecycle scripts.'''
    name = models.CharField(max_length=512)
    slug = models.CharField(max_length=512, blank=True, null=True)
    image = models.CharField(max_length=512)
    # Dockerfile contents used to build the environment image.
    dockerfile = models.TextField(default='FROM jupyter/base-notebook')
    # Optional startup/teardown scripts — where they run is not visible
    # here; presumably executed by the deployment layer (TODO confirm).
    startup = models.TextField(null=True, blank=True)
    teardown = models.TextField(null=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True)
    created_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return str(self.name)
class ProjectManager(models.Manager):
    '''Custom manager that creates projects with generated keys and slugs.'''
    def generate_passkey(self, length=20):
        '''Return a random alphanumeric key of `length` chars, base64-encoded.'''
        import secrets
        import string
        alphabet = string.ascii_letters + string.digits
        password = ''.join(secrets.choice(alphabet) for _ in range(length))
        # Base64-encode the key (this is encoding, NOT encryption — the
        # original value is trivially recoverable with base64.b64decode).
        password = password.encode('ascii')
        base64_bytes = base64.b64encode(password)
        password = base64_bytes.decode('ascii')
        return password
    def create_project(self, name, owner, description, repository):
        '''Create and save a Project with a slugified name and fresh keys.'''
        letters = string.ascii_lowercase
        slug = name.replace(" ","-").replace("_","-")
        from .helpers import urlify
        slug = urlify(slug)
        # Three random letters reduce (but do not eliminate) slug collisions;
        # the slug column is unique, so a collision would raise on save.
        slug_extension = ''.join(random.choice(letters) for i in range(3))
        slug = '{}-{}'.format(slugify(slug), slug_extension)
        key = self.generate_passkey()
        secret = self.generate_passkey(40)
        project = self.create(name=name, owner=owner, slug=slug, project_key=key, project_secret=secret,
                              description=description, repository=repository,
                              repository_imported=False)
        return project
class Project(models.Model):
    """A user project with credentials, repository link and environment."""
    objects = ProjectManager()
    name = models.CharField(max_length=512, unique=True)
    description = models.TextField(null=True, blank=True)
    slug = models.CharField(max_length=512, unique=True)
    owner = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='owner')
    # Additional users granted access to the project.
    authorized = models.ManyToManyField(User, blank=True)
    image = models.CharField(max_length=2048, blank=True, null=True)
    # Credentials generated by ProjectManager.generate_passkey().
    project_key = models.CharField(max_length=512)
    project_secret = models.CharField(max_length=512)
    updated_at = models.DateTimeField(auto_now=True)
    created_at = models.DateTimeField(auto_now_add=True)
    repository = models.CharField(max_length=512, null=True, blank=True)
    repository_imported = models.BooleanField(default=False)

    def __str__(self):
        """Summarize the project by name and description."""
        return "Name: {} Description: {}".format(self.name, self.description)

    # NOTE(review): these fields are declared after __str__; harmless to
    # Django, but consider grouping them with the fields above.
    environment = models.ForeignKey('projects.Environment', on_delete=models.DO_NOTHING, default=DEFAULT_ENVIRONMENT_ID)
    clone_url = models.CharField(max_length=512, null=True, blank=True)
| 34.846939 | 120 | 0.703075 | import base64
from django.db import models
from django.contrib.auth.models import User
from django.utils.text import slugify
import string
import random
DEFAULT_ENVIRONMENT_ID = 1
class Flavor(models.Model):
    """Compute-resource profile (CPU / memory / GPU) selectable for workloads."""
    name = models.CharField(max_length=512)
    slug = models.CharField(max_length=512)
    cpu = models.TextField(blank=True, null=True)
    mem = models.TextField(blank=True, null=True)
    gpu = models.TextField(blank=True, null=True)
    updated_at = models.DateTimeField(auto_now=True)
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        """Display the flavor by its name."""
        return str(self.name)
class Environment(models.Model):
    """Container environment (image, Dockerfile and lifecycle scripts)."""
    name = models.CharField(max_length=512)
    slug = models.CharField(max_length=512, blank=True, null=True)
    image = models.CharField(max_length=512)
    dockerfile = models.TextField(default='FROM jupyter/base-notebook')
    startup = models.TextField(null=True, blank=True)
    teardown = models.TextField(null=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True)
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        """Display the environment by its name."""
        return str(self.name)
class ProjectManager(models.Manager):
    """Manager that creates projects with generated credentials and slugs."""

    def generate_passkey(self, length=20):
        """Return a base64-encoded random alphanumeric key.

        Note: because of the base64 step the returned string is longer
        than *length*.
        """
        import secrets
        import string
        alphabet = string.ascii_letters + string.digits
        password = ''.join(secrets.choice(alphabet) for _ in range(length))
        # Base64-encode the key (encoding, not encryption).
        password = password.encode('ascii')
        base64_bytes = base64.b64encode(password)
        password = base64_bytes.decode('ascii')
        return password

    def create_project(self, name, owner, description, repository):
        """Create a Project with a unique random-suffixed slug and key pair."""
        letters = string.ascii_lowercase
        slug = name.replace(" ","-").replace("_","-")
        from .helpers import urlify
        slug = urlify(slug)
        # Random 3-letter suffix avoids slug collisions.
        slug_extension = ''.join(random.choice(letters) for i in range(3))
        slug = '{}-{}'.format(slugify(slug), slug_extension)
        key = self.generate_passkey()
        secret = self.generate_passkey(40)
        project = self.create(name=name, owner=owner, slug=slug, project_key=key, project_secret=secret,
                              description=description, repository=repository,
                              repository_imported=False)
        return project
class Project(models.Model):
    """A user project with credentials, repository link and environment."""
    objects = ProjectManager()
    name = models.CharField(max_length=512, unique=True)
    description = models.TextField(null=True, blank=True)
    slug = models.CharField(max_length=512, unique=True)
    owner = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='owner')
    authorized = models.ManyToManyField(User, blank=True)
    image = models.CharField(max_length=2048, blank=True, null=True)
    # Credentials generated by ProjectManager.generate_passkey().
    project_key = models.CharField(max_length=512)
    project_secret = models.CharField(max_length=512)
    updated_at = models.DateTimeField(auto_now=True)
    created_at = models.DateTimeField(auto_now_add=True)
    repository = models.CharField(max_length=512, null=True, blank=True)
    repository_imported = models.BooleanField(default=False)

    def __str__(self):
        """Summarize the project by name and description."""
        return "Name: {} Description: {}".format(self.name, self.description)

    # NOTE(review): declared after __str__; harmless, but consider grouping
    # with the other fields above.
    environment = models.ForeignKey('projects.Environment', on_delete=models.DO_NOTHING, default=DEFAULT_ENVIRONMENT_ID)
    clone_url = models.CharField(max_length=512, null=True, blank=True)
| true | true |
f71fdbd179d815f56f9c409701685cd66a7005c3 | 23,154 | py | Python | yuu/ext/abematv.py | soltia48/yuu | 30d2fcf9427cbbea930d01baef337b64ad7fb05b | [
"BSD-3-Clause"
] | null | null | null | yuu/ext/abematv.py | soltia48/yuu | 30d2fcf9427cbbea930d01baef337b64ad7fb05b | [
"BSD-3-Clause"
] | null | null | null | yuu/ext/abematv.py | soltia48/yuu | 30d2fcf9427cbbea930d01baef337b64ad7fb05b | [
"BSD-3-Clause"
] | null | null | null | import hashlib
import hmac
import json
import logging
import os
import re
import struct
import tempfile
import time
import uuid
from base64 import urlsafe_b64encode
from binascii import unhexlify
import m3u8
from Crypto.Cipher import AES
from tqdm import tqdm
def is_channel(url):
    """Return True when *url* points at a slot (channel/timeshift) link.

    The original ran ``re.findall`` for the literal string 'slot' and
    reassigned its own parameter; a plain substring test is equivalent
    and clearer.
    """
    return 'slot' in url
yuu_log = logging.getLogger('yuu.abematv')
class AbemaTVDownloader:
    """Downloads and AES-decrypts AbemaTV HLS segments into a temp folder."""

    def __init__(self, url, session):
        # AES key/IV are supplied later via download_chunk().
        self.key = None
        self.iv = None
        self.url = url
        self.session = session
        self.merge = True
        # Per-OS data folder: %LOCALAPPDATA%\yuu_data on Windows,
        # ~/.yuu_data everywhere else.
        if os.name == "nt":
            self.yuu_folder = os.path.join(os.getenv('LOCALAPPDATA'), 'yuu_data')
            sffx = '\\'
        else:
            self.yuu_folder = os.path.join(os.getenv('HOME'), '.yuu_data')
            sffx = '/'
        if not os.path.isdir(self.yuu_folder):
            os.mkdir(self.yuu_folder)
        # Fresh temporary working directory for this download session.
        self.temporary_folder = tempfile.mkdtemp(dir=self.yuu_folder)
        self.temporary_folder = self.temporary_folder + sffx
        self._aes = None

    def setup_decryptor(self):
        """Create the AES-CBC decryptor from the current key and hex IV.

        Expects ``self.iv`` to be a hex string without the '0x' prefix and
        ``self.key`` to be the raw key bytes.
        """
        self.iv = unhexlify(self.iv)
        self._aes = AES.new(self.key, AES.MODE_CBC, IV=self.iv)

    def download_chunk(self, files, key, iv):
        """Download every segment URL in *files*, decrypting with key/iv.

        Returns the list of written file paths, or None on error or
        KeyboardInterrupt (callers treat None as "aborted").
        """
        if iv.startswith('0x'):
            self.iv = iv[2:]
        else:
            self.iv = iv
        self.key = key
        self.downloaded_files = []
        self.setup_decryptor() # Initialize a new decryptor
        try:
            with tqdm(total=len(files), desc='Downloading', ascii=True, unit='file') as pbar:
                for tsf in files:
                    outputtemp = self.temporary_folder + os.path.basename(tsf)
                    # Strip the '?tver...' query string from the filename.
                    if outputtemp.find('?tver') != -1:
                        outputtemp = outputtemp[:outputtemp.find('?tver')]
                    with open(outputtemp, 'wb') as outf:
                        try:
                            vid = self.session.get(tsf)
                            vid = self._aes.decrypt(vid.content)
                            outf.write(vid)
                        except Exception as err:
                            yuu_log.error('Problem occured\nreason: {}'.format(err))
                            return None
                    pbar.update()
                    self.downloaded_files.append(outputtemp)
        except KeyboardInterrupt:
            yuu_log.warn('User pressed CTRL+C, cleaning up...')
            return None
        return self.downloaded_files
class AbemaTV:
def __init__(self, url, session):
    """Bind a video/series *url* to an authenticated requests *session*."""
    self.session = session
    self.type = 'AbemaTV'
    self.yuu_logger = logging.getLogger('yuu.abematv.AbemaTV')
    self.url = url
    # Parsing state filled in by parse()/get_token().
    self.m3u8_url = None
    self.resolution = None
    self.resolution_o = None
    self.device_id = None
    self.is_m3u8 = False
    self.est_filesize = None # In MiB
    # Advertised bandwidth / audio codec per resolution (informational).
    self.resolution_data = {
        "1080p": ["4000kb/s", "AAC 192kb/s 2ch"],
        "720p": ["2000kb/s", "AAC 160kb/s 2ch"],
        "480p": ["900kb/s", "AAC 128kb/s 2ch"],
        "360p": ["550kb/s", "AAC 128kb/s 2ch"],
        "240p": ["240kb/s", "AAC 64kb/s 1ch"],
        "180p": ["120kb/s", "AAC 64kb/s 1ch"]
    }
    # Observed bitrates (kb/s) used for file-size estimation in parse_m3u8().
    self.bitrate_calculation = {
        "1080p": 5175,
        "720p": 2373,
        "480p": 1367,
        "360p": 878,
        "240p": 292,
        "180p": 179
    }
    self.authorization_required = False
    self.authorized = False # Ignore for now
    #self.authorize = True # Ignore for now
    self.resumable = True
    # Key material and endpoints used by get_token()/get_video_key().
    self._STRTABLE = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
    self._HKEY = b"3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E"
    self._KEYPARAMS = {
        "osName": "android",
        "osVersion": "6.0.1",
        "osLand": "ja_JP",
        "osTimezone": "Asia/Tokyo",
        "appId": "tv.abema",
        "appVersion": "3.27.1"
    }
    self._MEDIATOKEN_API = "https://api.abema.io/v1/media/token"
    self._LICENSE_API = "https://license.abema.io/abematv-hls"
    self._USERAPI = "https://api.abema.io/v1/users"
    self._PROGRAMAPI = 'https://api.abema.io/v1/video/programs/'
    self._CHANNELAPI = 'https://api.abema.io/v1/media/slots/'
    self._SERIESAPI = "https://api.abema.io/v1/video/series/"
    # Use Chrome UA
    self.session.headers.update({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'})
def __repr__(self):
return '<yuu.AbemaTV: URL={}, Resolution={}, Device ID={}, m3u8 URL={}>'.format(self.url, self.resolution, self.device_id, self.m3u8_url)
def get_downloader(self):
    """
    Return a :class: of the Downloader
    """
    # Downloader shares this scraper's URL and authenticated session.
    return AbemaTVDownloader(self.url, self.session)
def resume_prepare(self):
    """
    Add support for resuming files, this function will prepare everything to start resuming download.
    """
    # No preparation is currently required for AbemaTV downloads.
    return None
def authorize(self, username, password):
    """Log in with an email address or user id plus password.

    Requires a device token first (fetched via get_token() if missing);
    on success installs the account bearer token into the session.
    Returns (bool, reason).
    """
    if not self.device_id:
        self.yuu_logger.info('{}: Fetching temporary token'.format(self.type))
        res, reas = self.get_token() # Abema needs authorization header before authenticating
        if not res:
            return res, reas
    _ENDPOINT_MAIL = 'https://api.abema.io/v1/auth/user/email'
    _ENDPOINT_OTP = 'https://api.abema.io/v1/auth/oneTimePassword'
    # Email addresses go to the mail endpoint; anything else is treated
    # as a user id and sent to the one-time-password endpoint.
    mail_regex = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
    if re.search(mail_regex, username):
        _ENDPOINT_USE = _ENDPOINT_MAIL
        _USERNAME_METHOD = 'email'
    else:
        _ENDPOINT_USE = _ENDPOINT_OTP
        _USERNAME_METHOD = 'userId'
    auth_ = {
        _USERNAME_METHOD: username,
        "password": password
    }
    res = self.session.post(_ENDPOINT_USE, json=auth_)
    if res.status_code > 299:
        res_j = res.json()
        self.yuu_logger.debug('Abema Response: {}'.format(res_j['message']))
        return False, 'Wrong {} and password combination'.format(_USERNAME_METHOD)
    res_j = res.json()
    self.yuu_logger.debug('Authentication Token: {}'.format(res_j['token']))
    self.session.headers.update({'Authorization': 'bearer ' + res_j['token']})
    self.authorized = True
    return True, 'Authorized'
def get_token(self):
    """Register an anonymous device with Abema and install its bearer token.

    Generates a random device UUID, derives the applicationKeySecret from
    it, and stores the returned user token in the session headers.
    Returns a (truthy, reason) tuple; note the success value is the
    string 'Success', not True — callers only test truthiness.
    """
    def key_secret(devid):
        # Reproduces the Abema client's HMAC-SHA256 key-derivation chain:
        # the digest is repeatedly re-hashed a number of times derived
        # from the current UTC month/day/hour, mixed with the device id
        # and an hourly timestamp.
        SECRETKEY = (b"v+Gjs=25Aw5erR!J8ZuvRrCx*rGswhB&qdHd_SYerEWdU&a?3DzN9B"
                     b"Rbp5KwY4hEmcj5#fykMjJ=AuWz5GSMY-d@H7DMEh3M@9n2G552Us$$"
                     b"k9cD=3TxwWe86!x#Zyhe")
        deviceid = devid.encode("utf-8")
        # Round up to the next whole hour (UTC).
        ts_1hour = (int(time.time()) + 60 * 60) // 3600 * 3600
        time_struct = time.gmtime(ts_1hour)
        ts_1hour_str = str(ts_1hour).encode("utf-8")
        h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
        h.update(SECRETKEY)
        tmp = h.digest()
        for _ in range(time_struct.tm_mon):
            h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
            h.update(tmp)
            tmp = h.digest()
        h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
        h.update(urlsafe_b64encode(tmp).rstrip(b"=") + deviceid)
        tmp = h.digest()
        for _ in range(time_struct.tm_mday % 5):
            h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
            h.update(tmp)
            tmp = h.digest()
        h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
        h.update(urlsafe_b64encode(tmp).rstrip(b"=") + ts_1hour_str)
        tmp = h.digest()
        for _ in range(time_struct.tm_hour % 5): # utc hour
            h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
            h.update(tmp)
            tmp = h.digest()
        finalize = urlsafe_b64encode(tmp).rstrip(b"=").decode("utf-8")
        self.yuu_logger.debug('Secret Key: {}'.format(finalize))
        return finalize
    if self.authorized: # Ignore this if already login
        return True, 'Success'
    deviceid = str(uuid.uuid4())
    self.yuu_logger.debug('Generated Device UUID: {}'.format(deviceid))
    json_data = {"deviceId": deviceid, "applicationKeySecret": key_secret(deviceid)}
    self.yuu_logger.debug('Generated applicationKeySecret: {}'.format(json_data['applicationKeySecret']))
    self.yuu_logger.debug('Sending json data')
    res = self.session.post(self._USERAPI, json=json_data).json()
    try:
        self.yuu_logger.debug('Data sent, getting token')
        token = res['token']
        self.yuu_logger.debug('User token: {}'.format(token))
    except:
        # NOTE(review): bare except hides the real error; narrowing to
        # KeyError would be safer.
        return None, 'Failed to get user token.'
    self.device_id = deviceid
    self.session.headers.update({'Authorization': 'bearer ' + token})
    return 'Success', 'Success'
def parse(self, resolution=None, check_only=False):
    """Resolve self.url into output name(s) and m3u8 playlist URL(s).

    Handles three URL shapes: series pages (…/title/<id>), slot/channel
    links, and single program links (optionally given directly as an
    .m3u8 URL).  Returns (output_name_or_list, reason); on failure the
    first element is None.

    BUGFIXES vs the original:
    - the direct-.m3u8 branch searched the imported ``m3u8`` module
      object instead of the URL string (TypeError at runtime);
    - the series episode-name fallback indexed the empty title string /
      an unbound name instead of the episode record.
    """
    res_list = [
        '180p', '240p', '360p', '480p', '720p', '1080p', 'best', 'worst'
    ]
    if resolution not in res_list:
        if not check_only:
            return None, 'Unknown resolution: {}. (Check it with `-R`)'.format(resolution)
    # 'best'/'worst' are aliases for the extreme fixed resolutions.
    if resolution == 'best':
        resolution = '1080p'
        self.resolution_o = 'best'
    if resolution == 'worst':
        resolution = '180p'
    # https://abema.tv/video/title/26-55 (series/playlists)
    # https://api.abema.io/v1/video/series/26-55
    # https://api.abema.io/v1/video/series/26-55/programs?seriesVersion=...&seasonId=...&offset=0&order=seq&limit=40
    series = re.search(r"(?P<series>title)/(?P<video_id>.*[^-_])", self.url)
    if series:
        video_id = series.group(2)
        self.yuu_logger.info('Series url format detected, fetching all links...')
        self.yuu_logger.debug('Requesting data to Abema API.')
        req = self.session.get(self._SERIESAPI + video_id)
        if req.status_code != 200:
            self.yuu_logger.log(40, 'Abema Response: ' + req.text)
            return None, 'Error occured when communicating with Abema (Response: {})'.format(req.status_code)
        self.yuu_logger.debug('Data requested')
        self.yuu_logger.debug('Parsing json results...')
        m3u8_url_list = []
        output_list = []
        jsdata = req.json()
        to_be_requested = "{api}{vid}/programs?seriesVersion={sv}&seasonId={si}&offset=0&order={od}"
        season_data = jsdata['seasons']
        if not season_data:
            season_data = [{'id': ''}]  # film / single-season fallback
        version = jsdata['version']
        prog_order = jsdata['programOrder']
        for ns, season in enumerate(season_data, 1):
            self.yuu_logger.info('Processing season ' + str(ns))
            self.yuu_logger.debug('Requesting data to Abema API.')
            req_season = self.session.get(to_be_requested.format(api=self._SERIESAPI, vid=video_id, sv=version, si=season['id'], od=prog_order))
            if req_season.status_code != 200:
                self.yuu_logger.log(40, 'Abema Response: ' + req_season.text)
                return None, 'Error occured when communicating with Abema (Response: {})'.format(req_season.status_code)
            self.yuu_logger.debug('Data requested')
            self.yuu_logger.debug('Parsing json results...')
            season_jsdata = req_season.json()
            self.yuu_logger.debug('Processing total of {ep} episode for season {se}'.format(ep=len(season_jsdata['programs']), se=ns))
            for nep, episode in enumerate(season_jsdata['programs'], 1):
                # An episode is free when flagged so, or when it has a
                # free-period end timestamp.
                free_episode = False
                if 'label' in episode:
                    if 'free' in episode['label']:
                        free_episode = True
                elif 'freeEndAt' in episode:
                    free_episode = True
                if 'episode' in episode:
                    try:
                        episode_name = episode['episode']['title']
                        if not episode_name:
                            # BUGFIX: original indexed the empty title string;
                            # fall back to the episode number (assumed API key
                            # -- confirm), then to the 1-based position.
                            episode_name = episode['episode'].get('number', nep)
                    except KeyError:
                        # BUGFIX: original referenced an unbound name here.
                        episode_name = episode['episode'].get('number', nep)
                else:
                    episode_name = nep
                if not free_episode and not self.authorized:
                    self.yuu_logger.warn('Skipping episode {} (Not authorized and premium video)'.format(episode_name))
                    continue
                self.yuu_logger.info('Processing episode {}'.format(episode_name))
                req_ep = self.session.get(self._PROGRAMAPI + episode['id'])
                if req_ep.status_code != 200:
                    self.yuu_logger.log(40, 'Abema Response: ' + req_ep.text)
                    return None, 'Error occured when communicating with Abema (Response: {})'.format(req_ep.status_code)
                self.yuu_logger.debug('Data requested')
                self.yuu_logger.debug('Parsing json API')
                ep_json = req_ep.json()
                title = ep_json['series']['title']
                epnum = ep_json['episode']['title']
                hls = ep_json['playback']['hls']
                output_name = title + ' - ' + epnum
                m3u8_url = '{x}/{r}/playlist.m3u8'.format(x=hls[:hls.rfind('/')], r=resolution[:-1])
                self.yuu_logger.debug('M3U8 Link: {}'.format(m3u8_url))
                self.yuu_logger.debug('Video title: {}'.format(title))
                m3u8_url_list.append(m3u8_url)
                output_list.append(output_name)
        self.resolution = resolution
        self.m3u8_url = m3u8_url_list
        if not output_list:
            err_msg = "All video are for premium only, please provide login details."
        else:
            err_msg = "Success"
        return output_list, err_msg
    if '.m3u8' in self.url[-5:]:
        reg = re.compile(r'(program|slot)\/[\w+-]+')
        # BUGFIX: the original called re.search(reg, m3u8) -- searching
        # the imported module object instead of the URL string.
        self.url = re.search(reg, self.url)[0]
        self.is_m3u8 = True
    ep_link = self.url[self.url.rfind('/')+1:]
    self.yuu_logger.debug('Requesting data to Abema API')
    if is_channel(self.url):
        req = self.session.get(self._CHANNELAPI + ep_link)
        if req.status_code != 200:
            self.yuu_logger.log(40, 'Abema Response: ' + req.text)
            return None, 'Error occured when communicating with Abema (Response: {})'.format(req.status_code)
        self.yuu_logger.debug('Data requested')
        self.yuu_logger.debug('Parsing json API')
        jsdata = req.json()
        output_name = jsdata['slot']['title']
        if 'playback' in jsdata['slot']:
            hls = jsdata['slot']['playback']['hls']
        else:
            hls = jsdata['slot']['chasePlayback']['hls']  # Compat
        m3u8_url = '{x}/{r}/playlist.m3u8'.format(x=hls[:hls.rfind('/')], r=resolution[:-1])
        if self.is_m3u8:
            m3u8_url = self.url
        self.yuu_logger.debug('M3U8 Link: {}'.format(m3u8_url))
        self.yuu_logger.debug('Title: {}'.format(output_name))
    else:
        req = self.session.get(self._PROGRAMAPI + ep_link)
        if req.status_code != 200:
            self.yuu_logger.log(40, 'Abema Response: ' + req.text)
            return None, 'Error occured when communicating with Abema (Response: {})'.format(req.status_code)
        self.yuu_logger.debug('Data requested')
        self.yuu_logger.debug('Parsing json API')
        jsdata = req.json()
        if jsdata['mediaStatus']:
            if 'drm' in jsdata['mediaStatus']:
                if jsdata['mediaStatus']['drm']:
                    return None, 'This video has a different DRM method and cannot be decrypted by yuu for now'
        title = jsdata['series']['title']
        epnum = jsdata['episode']['title']
        hls = jsdata['playback']['hls']
        output_name = title + ' - ' + epnum
        m3u8_url = '{x}/{r}/playlist.m3u8'.format(x=hls[:hls.rfind('/')], r=resolution[:-1])
        if self.is_m3u8:
            m3u8_url = self.url
        self.yuu_logger.debug('M3U8 Link: {}'.format(m3u8_url))
        self.yuu_logger.debug('Video title: {}'.format(title))
        self.yuu_logger.debug('Episode number: {}'.format(epnum))
    self.resolution = resolution
    self.m3u8_url = m3u8_url
    return output_name, 'Success'
def parse_m3u8(self, m3u8_url):
    """Fetch a playlist and return (segment_urls, iv_hex, ticket, reason).

    Also corrects self.resolution to what the playlist actually serves
    and estimates the final file size (MiB) from the segment durations.
    Removed an unused ``keys_data`` local from the original.
    """
    self.yuu_logger.debug('Requesting m3u8')
    r = self.session.get(m3u8_url)
    self.yuu_logger.debug('Data requested')
    if 'timeshift forbidden' in r.text:
        return None, None, None, 'This video can\'t be downloaded for now.'
    if r.status_code == 403:
        return None, None, None, 'This video is geo-locked for Japan only.'
    self.yuu_logger.debug('Parsing m3u8')
    x = m3u8.loads(r.text)
    files = x.files[1:]
    if not files[0]:
        files = files[1:]
    if 'tsda' in files[5]:
        # Assume DRMed
        return None, None, None, 'This video has a different DRM method and cannot be decrypted by yuu for now'
    # Actual resolution served, taken from the segment path (e.g. .../1080/seg.ts).
    resgex = re.findall(r'(\d*)(?:\/\w+.ts)', files[0])[0]
    iv = x.keys[0].iv
    # Key URI carries the license ticket after an 18-char scheme prefix.
    ticket = x.keys[0].uri[18:]
    parsed_files = []
    for f in files:
        # Timeshift/program segments come as absolute paths on the CDN.
        if f.startswith('/tsvpg') or f.startswith('/tspg'):
            f = 'https://ds-vod-abematv.akamaized.net' + f
        parsed_files.append(f)
    if self.resolution[:-1] != resgex:
        if not self.resolution_o:
            self.yuu_logger.warn('Changing resolution, from {} to {}p'.format(self.resolution, resgex))
        self.resolution = resgex + 'p'
    self.yuu_logger.debug('Total files: {}'.format(len(files)))
    self.yuu_logger.debug('IV: {}'.format(iv))
    self.yuu_logger.debug('Ticket key: {}'.format(ticket))
    # Estimate size from total duration * bitrate for this resolution.
    n = 0.0
    for seg in x.segments:
        n += seg.duration
    self.est_filesize = round((round(n) * self.bitrate_calculation[self.resolution]) / 1024 / 6, 2)
    return parsed_files, iv[2:], ticket, 'Success'
def get_video_key(self, ticket):
    """Exchange an HLS key *ticket* for the decrypted AES video key.

    Flow: fetch a media token, post it with the ticket to the license
    API, decode the returned 'k' value (base-58 over _STRTABLE) into a
    128-bit block, then AES-ECB-decrypt it with an HMAC of
    (cid + device_id).  Returns (key_bytes_or_None, reason).
    """
    self.yuu_logger.debug('Sending parameter to API')
    restoken = self.session.get(self._MEDIATOKEN_API, params=self._KEYPARAMS).json()
    mediatoken = restoken['token']
    self.yuu_logger.debug('Media token: {}'.format(mediatoken))
    self.yuu_logger.debug('Sending ticket and media token to License API')
    rgl = self.session.post(self._LICENSE_API, params={"t": mediatoken}, json={"kv": "a", "lt": ticket})
    if rgl.status_code == 403:
        return None, 'Access to this video are not allowed\nProbably a premium video or geo-locked.'
    gl = rgl.json()
    cid = gl['cid']
    k = gl['k']
    self.yuu_logger.debug('CID: {}'.format(cid))
    self.yuu_logger.debug('K: {}'.format(k))
    self.yuu_logger.debug('Summing up data with STRTABLE')
    # Interpret 'k' as a base-58 number using _STRTABLE as the digit set.
    res = sum([self._STRTABLE.find(k[i]) * (58 ** (len(k) - 1 - i)) for i in range(len(k))])
    self.yuu_logger.debug('Result: {}'.format(res))
    self.yuu_logger.debug('Intepreting data')
    # Pack the number into 16 big-endian bytes (the encrypted key block).
    encvk = struct.pack('>QQ', res >> 64, res & 0xffffffffffffffff)
    self.yuu_logger.debug('Encoded video key: {}'.format(encvk))
    self.yuu_logger.debug('Hashing data')
    h = hmac.new(unhexlify(self._HKEY), (cid + self.device_id).encode("utf-8"), digestmod=hashlib.sha256)
    enckey = h.digest()
    self.yuu_logger.debug('Second Encoded video key: {}'.format(enckey))
    self.yuu_logger.debug('Decrypting result')
    aes = AES.new(enckey, AES.MODE_ECB)
    vkey = aes.decrypt(encvk)
    self.yuu_logger.debug('Decrypted, Result: {}'.format(vkey))
    return vkey, 'Success getting video key'
def resolutions(self, m3u8_uri):
    """Probe which resolutions the master playlist actually serves.

    Returns ([['720p', '1280x720'], ...], reason) or (None, reason) on
    failure.  BUGFIX vs the original: the DRM check referenced the
    undefined name ``files`` (NameError) instead of this playlist's own
    segment list; also removed six dead ``m3u8_XXX`` locals and renamed
    the loop variable that shadowed the parameter.
    """
    self.yuu_logger.debug('Requesting data to API')
    m3u8_ = m3u8_uri[:m3u8_uri.rfind('/')]
    base_url = m3u8_[:m3u8_.rfind('/')] + '/'
    rr_all = self.session.get(base_url + 'playlist.m3u8')
    if 'timeshift forbidden' in rr_all.text:
        return None, 'This video can\'t be downloaded for now.'
    r_all = m3u8.loads(rr_all.text)
    # Collect (width_height, child-playlist URL) for every variant stream.
    play_res = []
    for r_p in r_all.playlists:
        play_res.append([r_p.stream_info.resolution, base_url + r_p.uri])
    resgex = re.compile(r'(\d*)(?:\/\w+.ts)')
    ava_reso = []
    for reswh, child_uri in play_res:
        resw, resh = reswh
        self.yuu_logger.debug('Validating {}p resolution'.format(resh))
        rres = m3u8.loads(self.session.get(child_uri).text)
        m3f = rres.files[1:]
        if not m3f:
            return None, 'This video can\'t be downloaded for now.'
        self.yuu_logger.debug('Sample link: ' + m3f[5])
        # BUGFIX: original tested the undefined name `files` here.
        if 'tsda' in m3f[5]:
            # Assume DRMed
            return None, 'This video has a different DRM method and cannot be decrypted by yuu for now'
        # Only report the resolution when the segment path confirms it.
        if str(resh) in re.findall(resgex, m3f[5]):
            ava_reso.append(
                [
                    '{h}p'.format(h=resh),
                    '{w}x{h}'.format(w=resw, h=resh)
                ]
            )
    if ava_reso:
        reso = [r[0] for r in ava_reso]
        self.yuu_logger.debug('Resolution list: {}'.format(', '.join(reso)))
    return ava_reso, 'Success'
def check_output(self, output=None, output_name=None):
if output:
fn_, ext_ = os.path.splitext(output)
if ext_ != 'ts':
output = fn_ + '.ts'
else:
output = '{x} ({m} {r}).ts'.format(x=output_name, m=self.type, r=self.resolution)
return output
| 38.914286 | 170 | 0.556319 | import hashlib
import hmac
import json
import logging
import os
import re
import struct
import tempfile
import time
import uuid
from base64 import urlsafe_b64encode
from binascii import unhexlify
import m3u8
from Crypto.Cipher import AES
from tqdm import tqdm
def is_channel(url):
    """Return True when *url* points at a slot (channel/timeshift) link.

    The original ran ``re.findall`` for the literal string 'slot' and
    reassigned its own parameter; a plain substring test is equivalent.
    """
    return 'slot' in url
yuu_log = logging.getLogger('yuu.abematv')
class AbemaTVDownloader:
    """Downloads and AES-decrypts AbemaTV HLS segments into a temp folder."""

    def __init__(self, url, session):
        # Key/IV are provided later through download_chunk().
        self.key = None
        self.iv = None
        self.url = url
        self.session = session
        self.merge = True
        # OS-specific data directory for temporary segment files.
        if os.name == "nt":
            self.yuu_folder = os.path.join(os.getenv('LOCALAPPDATA'), 'yuu_data')
            sffx = '\\'
        else:
            self.yuu_folder = os.path.join(os.getenv('HOME'), '.yuu_data')
            sffx = '/'
        if not os.path.isdir(self.yuu_folder):
            os.mkdir(self.yuu_folder)
        self.temporary_folder = tempfile.mkdtemp(dir=self.yuu_folder)
        self.temporary_folder = self.temporary_folder + sffx
        self._aes = None

    def setup_decryptor(self):
        """Build the AES-CBC decryptor from self.key and hex self.iv."""
        self.iv = unhexlify(self.iv)
        self._aes = AES.new(self.key, AES.MODE_CBC, IV=self.iv)

    def download_chunk(self, files, key, iv):
        """Fetch, decrypt and store each segment; None on error/interrupt."""
        if iv.startswith('0x'):
            self.iv = iv[2:]
        else:
            self.iv = iv
        self.key = key
        self.downloaded_files = []
        self.setup_decryptor()
        try:
            with tqdm(total=len(files), desc='Downloading', ascii=True, unit='file') as pbar:
                for tsf in files:
                    outputtemp = self.temporary_folder + os.path.basename(tsf)
                    # Strip the '?tver...' query string from the filename.
                    if outputtemp.find('?tver') != -1:
                        outputtemp = outputtemp[:outputtemp.find('?tver')]
                    with open(outputtemp, 'wb') as outf:
                        try:
                            vid = self.session.get(tsf)
                            vid = self._aes.decrypt(vid.content)
                            outf.write(vid)
                        except Exception as err:
                            yuu_log.error('Problem occured\nreason: {}'.format(err))
                            return None
                    pbar.update()
                    self.downloaded_files.append(outputtemp)
        except KeyboardInterrupt:
            yuu_log.warn('User pressed CTRL+C, cleaning up...')
            return None
        return self.downloaded_files
class AbemaTV:
def __init__(self, url, session):
self.session = session
self.type = 'AbemaTV'
self.yuu_logger = logging.getLogger('yuu.abematv.AbemaTV')
self.url = url
self.m3u8_url = None
self.resolution = None
self.resolution_o = None
self.device_id = None
self.is_m3u8 = False
self.est_filesize = None
self.resolution_data = {
"1080p": ["4000kb/s", "AAC 192kb/s 2ch"],
"720p": ["2000kb/s", "AAC 160kb/s 2ch"],
"480p": ["900kb/s", "AAC 128kb/s 2ch"],
"360p": ["550kb/s", "AAC 128kb/s 2ch"],
"240p": ["240kb/s", "AAC 64kb/s 1ch"],
"180p": ["120kb/s", "AAC 64kb/s 1ch"]
}
self.bitrate_calculation = {
"1080p": 5175,
"720p": 2373,
"480p": 1367,
"360p": 878,
"240p": 292,
"180p": 179
}
self.authorization_required = False
self.authorized = False
esumable = True
self._STRTABLE = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
self._HKEY = b"3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E"
self._KEYPARAMS = {
"osName": "android",
"osVersion": "6.0.1",
"osLand": "ja_JP",
"osTimezone": "Asia/Tokyo",
"appId": "tv.abema",
"appVersion": "3.27.1"
}
self._MEDIATOKEN_API = "https://api.abema.io/v1/media/token"
self._LICENSE_API = "https://license.abema.io/abematv-hls"
self._USERAPI = "https://api.abema.io/v1/users"
self._PROGRAMAPI = 'https://api.abema.io/v1/video/programs/'
self._CHANNELAPI = 'https://api.abema.io/v1/media/slots/'
self._SERIESAPI = "https://api.abema.io/v1/video/series/"
self.session.headers.update({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'})
def __repr__(self):
    # Debug representation showing the scraper's main parsing state.
    return '<yuu.AbemaTV: URL={}, Resolution={}, Device ID={}, m3u8 URL={}>'.format(self.url, self.resolution, self.device_id, self.m3u8_url)
def get_downloader(self):
    """Return an AbemaTVDownloader bound to this URL and session."""
    return AbemaTVDownloader(self.url, self.session)
def resume_prepare(self):
    """Prepare resume support; currently a no-op for AbemaTV."""
    return None
def authorize(self, username, password):
    """Log in with an email address or user id plus password.

    Requires a device token first (fetched via get_token() if missing);
    on success installs the account bearer token into the session.
    Returns (bool, reason).
    """
    if not self.device_id:
        self.yuu_logger.info('{}: Fetching temporary token'.format(self.type))
        res, reas = self.get_token()
        if not res:
            return res, reas
    _ENDPOINT_MAIL = 'https://api.abema.io/v1/auth/user/email'
    _ENDPOINT_OTP = 'https://api.abema.io/v1/auth/oneTimePassword'
    # Email addresses go to the mail endpoint; anything else is treated
    # as a user id and sent to the one-time-password endpoint.
    mail_regex = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
    if re.search(mail_regex, username):
        _ENDPOINT_USE = _ENDPOINT_MAIL
        _USERNAME_METHOD = 'email'
    else:
        _ENDPOINT_USE = _ENDPOINT_OTP
        _USERNAME_METHOD = 'userId'
    auth_ = {
        _USERNAME_METHOD: username,
        "password": password
    }
    res = self.session.post(_ENDPOINT_USE, json=auth_)
    if res.status_code > 299:
        res_j = res.json()
        self.yuu_logger.debug('Abema Response: {}'.format(res_j['message']))
        return False, 'Wrong {} and password combination'.format(_USERNAME_METHOD)
    res_j = res.json()
    self.yuu_logger.debug('Authentication Token: {}'.format(res_j['token']))
    self.session.headers.update({'Authorization': 'bearer ' + res_j['token']})
    self.authorized = True
    return True, 'Authorized'
def get_token(self):
    """Register an anonymous device with Abema and install its bearer token.

    Returns a (truthy, reason) tuple; note the success value is the
    string 'Success', not True — callers only test truthiness.
    """
    def key_secret(devid):
        # Reproduces the Abema client's HMAC-SHA256 key-derivation chain,
        # mixing the device id with month/day/hour-derived iteration counts.
        SECRETKEY = (b"v+Gjs=25Aw5erR!J8ZuvRrCx*rGswhB&qdHd_SYerEWdU&a?3DzN9B"
                     b"Rbp5KwY4hEmcj5#fykMjJ=AuWz5GSMY-d@H7DMEh3M@9n2G552Us$$"
                     b"k9cD=3TxwWe86!x#Zyhe")
        deviceid = devid.encode("utf-8")
        # Round up to the next whole hour (UTC).
        ts_1hour = (int(time.time()) + 60 * 60) // 3600 * 3600
        time_struct = time.gmtime(ts_1hour)
        ts_1hour_str = str(ts_1hour).encode("utf-8")
        h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
        h.update(SECRETKEY)
        tmp = h.digest()
        for _ in range(time_struct.tm_mon):
            h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
            h.update(tmp)
            tmp = h.digest()
        h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
        h.update(urlsafe_b64encode(tmp).rstrip(b"=") + deviceid)
        tmp = h.digest()
        for _ in range(time_struct.tm_mday % 5):
            h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
            h.update(tmp)
            tmp = h.digest()
        h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
        h.update(urlsafe_b64encode(tmp).rstrip(b"=") + ts_1hour_str)
        tmp = h.digest()
        for _ in range(time_struct.tm_hour % 5):
            h = hmac.new(SECRETKEY, digestmod=hashlib.sha256)
            h.update(tmp)
            tmp = h.digest()
        finalize = urlsafe_b64encode(tmp).rstrip(b"=").decode("utf-8")
        self.yuu_logger.debug('Secret Key: {}'.format(finalize))
        return finalize
    if self.authorized:
        # Already logged in; nothing to do.
        return True, 'Success'
    deviceid = str(uuid.uuid4())
    self.yuu_logger.debug('Generated Device UUID: {}'.format(deviceid))
    json_data = {"deviceId": deviceid, "applicationKeySecret": key_secret(deviceid)}
    self.yuu_logger.debug('Generated applicationKeySecret: {}'.format(json_data['applicationKeySecret']))
    self.yuu_logger.debug('Sending json data')
    res = self.session.post(self._USERAPI, json=json_data).json()
    try:
        self.yuu_logger.debug('Data sent, getting token')
        token = res['token']
        self.yuu_logger.debug('User token: {}'.format(token))
    except:
        # NOTE(review): bare except hides the real error; KeyError is safer.
        return None, 'Failed to get user token.'
    self.device_id = deviceid
    self.session.headers.update({'Authorization': 'bearer ' + token})
    return 'Success', 'Success'
def parse(self, resolution=None, check_only=False):
res_list = [
'180p', '240p', '360p', '480p', '720p', '1080p', 'best', 'worst'
]
if resolution not in res_list:
if not check_only:
return None, 'Unknown resolution: {}. (Check it with `-R`)'.format(resolution)
if resolution == 'best':
resolution = '1080p'
self.resolution_o = 'best'
if resolution == 'worst':
resolution = '180p'
series = re.search(r"(?P<series>title)/(?P<video_id>.*[^-_])", self.url)
if series:
video_id = series.group(2)
self.yuu_logger.info('Series url format detected, fetching all links...')
self.yuu_logger.debug('Requesting data to Abema API.')
req = self.session.get(self._SERIESAPI + video_id)
if req.status_code != 200:
self.yuu_logger.log(40, 'Abema Response: ' + req.text)
return None, 'Error occured when communicating with Abema (Response: {})'.format(req.status_code)
self.yuu_logger.debug('Data requested')
self.yuu_logger.debug('Parsing json results...')
m3u8_url_list = []
output_list = []
jsdata = req.json()
to_be_requested = "{api}{vid}/programs?seriesVersion={sv}&seasonId={si}&offset=0&order={od}"
season_data = jsdata['seasons']
if not season_data:
season_data = [{'id': ''}]
version = jsdata['version']
prog_order = jsdata['programOrder']
for ns, season in enumerate(season_data, 1):
self.yuu_logger.info('Processing season ' + str(ns))
self.yuu_logger.debug('Requesting data to Abema API.')
req_season = self.session.get(to_be_requested.format(api=self._SERIESAPI, vid=video_id, sv=version, si=season['id'], od=prog_order))
if req_season.status_code != 200:
self.yuu_logger.log(40, 'Abema Response: ' + req_season.text)
return None, 'Error occured when communicating with Abema (Response: {})'.format(req_season.status_code)
self.yuu_logger.debug('Data requested')
self.yuu_logger.debug('Parsing json results...')
season_jsdata = req_season.json()
self.yuu_logger.debug('Processing total of {ep} episode for season {se}'.format(ep=len(season_jsdata['programs']), se=ns))
for nep, episode in enumerate(season_jsdata['programs'], 1):
free_episode = False
if 'label' in episode:
if 'free' in episode['label']:
free_episode = True
elif 'freeEndAt' in episode:
free_episode = True
if 'episode' in episode:
try:
episode_name = episode['episode']['title']
if not episode_name:
episode_name = episode_name['title']['number']
except KeyError:
episode_name = episode_name['title']['number']
else:
episode_name = nep
if not free_episode and not self.authorized:
self.yuu_logger.warn('Skipping episode {} (Not authorized and premium video)'.format(episode_name))
continue
self.yuu_logger.info('Processing episode {}'.format(episode_name))
req_ep = self.session.get(self._PROGRAMAPI + episode['id'])
if req_ep.status_code != 200:
self.yuu_logger.log(40, 'Abema Response: ' + req_ep.text)
return None, 'Error occured when communicating with Abema (Response: {})'.format(req_ep.status_code)
self.yuu_logger.debug('Data requested')
self.yuu_logger.debug('Parsing json API')
ep_json = req_ep.json()
title = ep_json['series']['title']
epnum = ep_json['episode']['title']
hls = ep_json['playback']['hls']
output_name = title + ' - ' + epnum
m3u8_url = '{x}/{r}/playlist.m3u8'.format(x=hls[:hls.rfind('/')], r=resolution[:-1])
self.yuu_logger.debug('M3U8 Link: {}'.format(m3u8_url))
self.yuu_logger.debug('Video title: {}'.format(title))
m3u8_url_list.append(m3u8_url)
output_list.append(output_name)
self.resolution = resolution
self.m3u8_url = m3u8_url_list
if not output_list:
err_msg = "All video are for premium only, please provide login details."
else:
err_msg = "Success"
return output_list, err_msg
if '.m3u8' in self.url[-5:]:
reg = re.compile(r'(program|slot)\/[\w+-]+')
self.url = re.search(reg, m3u8)[0]
self.is_m3u8 = True
ep_link = self.url[self.url.rfind('/')+1:]
self.yuu_logger.debug('Requesting data to Abema API')
if is_channel(self.url):
req = self.session.get(self._CHANNELAPI + ep_link)
if req.status_code != 200:
self.yuu_logger.log(40, 'Abema Response: ' + req.text)
return None, 'Error occured when communicating with Abema (Response: {})'.format(req.status_code)
self.yuu_logger.debug('Data requested')
self.yuu_logger.debug('Parsing json API')
jsdata = req.json()
output_name = jsdata['slot']['title']
if 'playback' in jsdata['slot']:
hls = jsdata['slot']['playback']['hls']
else:
hls = jsdata['slot']['chasePlayback']['hls']
m3u8_url = '{x}/{r}/playlist.m3u8'.format(x=hls[:hls.rfind('/')], r=resolution[:-1])
if self.is_m3u8:
m3u8_url = self.url
self.yuu_logger.debug('M3U8 Link: {}'.format(m3u8_url))
self.yuu_logger.debug('Title: {}'.format(output_name))
else:
req = self.session.get(self._PROGRAMAPI + ep_link)
if req.status_code != 200:
self.yuu_logger.log(40, 'Abema Response: ' + req.text)
return None, 'Error occured when communicating with Abema (Response: {})'.format(req.status_code)
self.yuu_logger.debug('Data requested')
self.yuu_logger.debug('Parsing json API')
jsdata = req.json()
if jsdata['mediaStatus']:
if 'drm' in jsdata['mediaStatus']:
if jsdata['mediaStatus']['drm']:
return None, 'This video has a different DRM method and cannot be decrypted by yuu for now'
title = jsdata['series']['title']
epnum = jsdata['episode']['title']
hls = jsdata['playback']['hls']
output_name = title + ' - ' + epnum
m3u8_url = '{x}/{r}/playlist.m3u8'.format(x=hls[:hls.rfind('/')], r=resolution[:-1])
if self.is_m3u8:
m3u8_url = self.url
self.yuu_logger.debug('M3U8 Link: {}'.format(m3u8_url))
self.yuu_logger.debug('Video title: {}'.format(title))
self.yuu_logger.debug('Episode number: {}'.format(epnum))
self.resolution = resolution
self.m3u8_url = m3u8_url
return output_name, 'Success'
def parse_m3u8(self, m3u8_url):
self.yuu_logger.debug('Requesting m3u8')
r = self.session.get(m3u8_url)
self.yuu_logger.debug('Data requested')
if 'timeshift forbidden' in r.text:
return None, None, None, 'This video can\'t be downloaded for now.'
if r.status_code == 403:
return None, None, None, 'This video is geo-locked for Japan only.'
self.yuu_logger.debug('Parsing m3u8')
x = m3u8.loads(r.text)
files = x.files[1:]
if not files[0]:
files = files[1:]
if 'tsda' in files[5]:
# Assume DRMed
return None, None, None, 'This video has a different DRM method and cannot be decrypted by yuu for now'
resgex = re.findall(r'(\d*)(?:\/\w+.ts)', files[0])[0]
keys_data = x.keys[0]
iv = x.keys[0].iv
ticket = x.keys[0].uri[18:]
parsed_files = []
for f in files:
if f.startswith('/tsvpg') or f.startswith('/tspg'):
f = 'https://ds-vod-abematv.akamaized.net' + f
parsed_files.append(f)
if self.resolution[:-1] != resgex:
if not self.resolution_o:
self.yuu_logger.warn('Changing resolution, from {} to {}p'.format(self.resolution, resgex))
self.resolution = resgex + 'p'
self.yuu_logger.debug('Total files: {}'.format(len(files)))
self.yuu_logger.debug('IV: {}'.format(iv))
self.yuu_logger.debug('Ticket key: {}'.format(ticket))
n = 0.0
for seg in x.segments:
n += seg.duration
self.est_filesize = round((round(n) * self.bitrate_calculation[self.resolution]) / 1024 / 6, 2)
return parsed_files, iv[2:], ticket, 'Success'
def get_video_key(self, ticket):
self.yuu_logger.debug('Sending parameter to API')
restoken = self.session.get(self._MEDIATOKEN_API, params=self._KEYPARAMS).json()
mediatoken = restoken['token']
self.yuu_logger.debug('Media token: {}'.format(mediatoken))
self.yuu_logger.debug('Sending ticket and media token to License API')
rgl = self.session.post(self._LICENSE_API, params={"t": mediatoken}, json={"kv": "a", "lt": ticket})
if rgl.status_code == 403:
return None, 'Access to this video are not allowed\nProbably a premium video or geo-locked.'
gl = rgl.json()
cid = gl['cid']
k = gl['k']
self.yuu_logger.debug('CID: {}'.format(cid))
self.yuu_logger.debug('K: {}'.format(k))
self.yuu_logger.debug('Summing up data with STRTABLE')
res = sum([self._STRTABLE.find(k[i]) * (58 ** (len(k) - 1 - i)) for i in range(len(k))])
self.yuu_logger.debug('Result: {}'.format(res))
self.yuu_logger.debug('Intepreting data')
encvk = struct.pack('>QQ', res >> 64, res & 0xffffffffffffffff)
self.yuu_logger.debug('Encoded video key: {}'.format(encvk))
self.yuu_logger.debug('Hashing data')
h = hmac.new(unhexlify(self._HKEY), (cid + self.device_id).encode("utf-8"), digestmod=hashlib.sha256)
enckey = h.digest()
self.yuu_logger.debug('Second Encoded video key: {}'.format(enckey))
self.yuu_logger.debug('Decrypting result')
aes = AES.new(enckey, AES.MODE_ECB)
vkey = aes.decrypt(encvk)
self.yuu_logger.debug('Decrypted, Result: {}'.format(vkey))
return vkey, 'Success getting video key'
def resolutions(self, m3u8_uri):
self.yuu_logger.debug('Requesting data to API')
m3u8_ = m3u8_uri[:m3u8_uri.rfind('/')]
base_url = m3u8_[:m3u8_.rfind('/')] + '/'
m3u8_1080 = m3u8_[:m3u8_.rfind('/')] + '/1080/playlist.m3u8'
m3u8_720 = m3u8_[:m3u8_.rfind('/')] + '/720/playlist.m3u8'
m3u8_480 = m3u8_[:m3u8_.rfind('/')] + '/480/playlist.m3u8'
m3u8_360 = m3u8_[:m3u8_.rfind('/')] + '/360/playlist.m3u8'
m3u8_240 = m3u8_[:m3u8_.rfind('/')] + '/240/playlist.m3u8'
m3u8_180 = m3u8_[:m3u8_.rfind('/')] + '/180/playlist.m3u8'
rr_all = self.session.get(base_url + 'playlist.m3u8')
if 'timeshift forbidden' in rr_all.text:
return None, 'This video can\'t be downloaded for now.'
r_all = m3u8.loads(rr_all.text)
play_res = []
for r_p in r_all.playlists:
temp = []
temp.append(r_p.stream_info.resolution)
temp.append(base_url + r_p.uri)
play_res.append(temp)
resgex = re.compile(r'(\d*)(?:\/\w+.ts)')
ava_reso = []
for resdata in play_res:
reswh, m3u8_uri = resdata
resw, resh = reswh
self.yuu_logger.debug('Validating {}p resolution'.format(resh))
rres = m3u8.loads(self.session.get(m3u8_uri).text)
m3f = rres.files[1:]
if not m3f:
return None, 'This video can\'t be downloaded for now.'
self.yuu_logger.debug('Sample link: ' + m3f[5])
if 'tsda' in files[5]:
# Assume DRMed
return None, 'This video has a different DRM method and cannot be decrypted by yuu for now'
if str(resh) in re.findall(resgex, m3f[5]):
ava_reso.append(
[
'{h}p'.format(h=resh),
'{w}x{h}'.format(w=resw, h=resh)
]
)
if ava_reso:
reso = [r[0] for r in ava_reso]
self.yuu_logger.debug('Resolution list: {}'.format(', '.join(reso)))
return ava_reso, 'Success'
def check_output(self, output=None, output_name=None):
if output:
fn_, ext_ = os.path.splitext(output)
if ext_ != 'ts':
output = fn_ + '.ts'
else:
output = '{x} ({m} {r}).ts'.format(x=output_name, m=self.type, r=self.resolution)
return output
| true | true |
f71fde1bd02fcc1f714d372b9d638e5ed8bbe7be | 4,468 | py | Python | data_steward/cdr_cleaner/cleaning_rules/deid/dateshift.py | lrwb-aou/curation | e80447e56d269dc2c9c8bc79e78218d4b0dc504c | [
"MIT"
] | 16 | 2017-06-30T20:05:05.000Z | 2022-03-08T21:03:19.000Z | data_steward/cdr_cleaner/cleaning_rules/deid/dateshift.py | lrwb-aou/curation | e80447e56d269dc2c9c8bc79e78218d4b0dc504c | [
"MIT"
] | 342 | 2017-06-23T21:37:40.000Z | 2022-03-30T16:44:16.000Z | data_steward/cdr_cleaner/cleaning_rules/deid/dateshift.py | lrwb-aou/curation | e80447e56d269dc2c9c8bc79e78218d4b0dc504c | [
"MIT"
] | 33 | 2017-07-01T00:12:20.000Z | 2022-01-26T18:06:53.000Z | """
The basic date shifting rule..
Original Issue: DC-1005
This is an abstract class and cannot be directly instantiated. It must be
extended to be used.
"""
# Python Imports
import logging
from abc import abstractmethod
# Project imports
from cdr_cleaner.cleaning_rules.base_cleaning_rule import BaseCleaningRule
from common import JINJA_ENV
LOGGER = logging.getLogger(__name__)
# SQL template: shifts a single DATE/DATETIME/TIMESTAMP column backwards by
# the per-person day offset looked up in the static mapping table (joined on
# research_id = person_id of the row being rebuilt, aliased `remodel`).
SHIFT_EXP = JINJA_ENV.from_string("""
{{field_type}}_SUB( CAST({{field}} AS {{field_type}}), INTERVAL (
SELECT
shift
FROM
`{{project}}.{{mapping_dataset_id}}.{{mapping_table_id}}` AS map
WHERE
map.research_id = remodel.person_id) DAY) AS {{field}}
""")
# SQL template: recreates the target table in place from a SELECT over
# itself, substituting the (possibly shifted) field expressions.
SELECT_STATEMENT = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project}}.{{dataset}}.{{table}}` AS (
SELECT
{{fields}}
FROM `{{project}}.{{dataset}}.{{table}}` AS remodel)
""")
class DateShiftRule(BaseCleaningRule):
    """
    Date shift fields from 1 - 365 days in the past.

    Performs a "day" shift for any field in the provided table names
    and schemas. Uses the field type to determine the shift function to
    use. Currently works for the DATE, DATETIME, and TIMESTAMP type fields.
    """

    def __init__(self,
                 project_id,
                 dataset_id,
                 sandbox_dataset_id,
                 issue_numbers,
                 description,
                 affected_datasets,
                 affected_tables,
                 mapping_dataset_id,
                 mapping_table_id,
                 depends_on=None):
        """
        Initialize the class.

        Set the issue numbers, description and affected datasets. As other
        tickets may affect this SQL, append them to the list of Jira Issues.
        DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!

        :param mapping_dataset_id: dataset holding the research_id -> shift map
        :param mapping_table_id: table holding the per-person day shifts
        """
        if depends_on is None:
            depends_on = []
        # NOTE: a previous revision built an unused local description string
        # here; the caller-supplied `description` is what is actually used.
        self.mapping_dataset_id = mapping_dataset_id
        self.mapping_table_id = mapping_table_id
        super().__init__(issue_numbers=issue_numbers,
                         description=description,
                         affected_datasets=affected_datasets,
                         project_id=project_id,
                         dataset_id=dataset_id,
                         sandbox_dataset_id=sandbox_dataset_id,
                         affected_tables=affected_tables,
                         depends_on=depends_on)

    @abstractmethod
    def get_tables_and_schemas(self):
        """
        Provide dictionary of table names and schemas.

        :returns: a dictionary whose key, value patterns are in the
            form of {"tablename": "json schema",}.
        """
        pass

    def get_query_specs(self):
        """
        Return a list of dictionary query specifications.

        :return: A list of dictionaries. Each dictionary contains a
            single query and a specification for how to execute that query.
            The specifications are optional but the query is required.
        """
        date_shift_queries = []
        for table, schema in self.get_tables_and_schemas().items():
            LOGGER.info(f"Building Date Shifting query for {self.dataset_id}."
                        f"{table}")
            fields = []
            for field in schema:
                field_type = field.get('type').lower()
                field_name = field.get('name')
                if field_type in ['date', 'datetime', 'timestamp']:
                    # temporal fields get wrapped in the day-shift expression
                    shift_string = SHIFT_EXP.render(
                        project=self.project_id,
                        mapping_dataset_id=self.mapping_dataset_id,
                        mapping_table_id=self.mapping_table_id,
                        field_type=field_type.upper(),
                        field=field_name,
                        table=table)
                    fields.append(shift_string)
                else:
                    # all other fields are selected through unchanged
                    fields.append(field_name)
            fields_string = ',\n'.join(fields)
            query = SELECT_STATEMENT.render(project=self.project_id,
                                            dataset=self.dataset_id,
                                            table=table,
                                            fields=fields_string)
            date_shift_queries.append({'query': query})
        return date_shift_queries
| 34.90625 | 78 | 0.576321 |
import logging
from abc import abstractmethod
from cdr_cleaner.cleaning_rules.base_cleaning_rule import BaseCleaningRule
from common import JINJA_ENV
LOGGER = logging.getLogger(__name__)
# SQL template: shifts a single DATE/DATETIME/TIMESTAMP column backwards by
# the per-person day offset looked up in the static mapping table.
SHIFT_EXP = JINJA_ENV.from_string("""
{{field_type}}_SUB( CAST({{field}} AS {{field_type}}), INTERVAL (
SELECT
shift
FROM
`{{project}}.{{mapping_dataset_id}}.{{mapping_table_id}}` AS map
WHERE
map.research_id = remodel.person_id) DAY) AS {{field}}
""")
# SQL template: recreates the target table in place from a SELECT over itself.
SELECT_STATEMENT = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project}}.{{dataset}}.{{table}}` AS (
SELECT
{{fields}}
FROM `{{project}}.{{dataset}}.{{table}}` AS remodel)
""")
class DateShiftRule(BaseCleaningRule):
    """
    Date shift DATE/DATETIME/TIMESTAMP fields 1 - 365 days into the past,
    using the per-person shift stored in a static mapping table.
    """

    def __init__(self,
                 project_id,
                 dataset_id,
                 sandbox_dataset_id,
                 issue_numbers,
                 description,
                 affected_datasets,
                 affected_tables,
                 mapping_dataset_id,
                 mapping_table_id,
                 depends_on=None):
        """
        Initialize the class.

        :param mapping_dataset_id: dataset holding the research_id -> shift map
        :param mapping_table_id: table holding the per-person day shifts
        """
        if depends_on is None:
            depends_on = []
        # NOTE: a previous revision built an unused local description string
        # here; the caller-supplied `description` is what is actually used.
        self.mapping_dataset_id = mapping_dataset_id
        self.mapping_table_id = mapping_table_id
        super().__init__(issue_numbers=issue_numbers,
                         description=description,
                         affected_datasets=affected_datasets,
                         project_id=project_id,
                         dataset_id=dataset_id,
                         sandbox_dataset_id=sandbox_dataset_id,
                         affected_tables=affected_tables,
                         depends_on=depends_on)

    @abstractmethod
    def get_tables_and_schemas(self):
        """
        Provide a dictionary mapping table names to their JSON schemas.
        """
        pass

    def get_query_specs(self):
        """
        Return a list of query specification dictionaries, one per table,
        each rebuilding the table with its temporal fields date-shifted.
        """
        date_shift_queries = []
        for table, schema in self.get_tables_and_schemas().items():
            LOGGER.info(f"Building Date Shifting query for {self.dataset_id}."
                        f"{table}")
            fields = []
            for field in schema:
                field_type = field.get('type').lower()
                field_name = field.get('name')
                if field_type in ['date', 'datetime', 'timestamp']:
                    # temporal fields get wrapped in the day-shift expression
                    shift_string = SHIFT_EXP.render(
                        project=self.project_id,
                        mapping_dataset_id=self.mapping_dataset_id,
                        mapping_table_id=self.mapping_table_id,
                        field_type=field_type.upper(),
                        field=field_name,
                        table=table)
                    fields.append(shift_string)
                else:
                    # all other fields are selected through unchanged
                    fields.append(field_name)
            fields_string = ',\n'.join(fields)
            query = SELECT_STATEMENT.render(project=self.project_id,
                                            dataset=self.dataset_id,
                                            table=table,
                                            fields=fields_string)
            date_shift_queries.append({'query': query})
        return date_shift_queries
| true | true |
f71fde73faca108579365cdbb13033f096a89b4b | 37,146 | py | Python | conf_selection_and_DFT/PL_dft_library_201027.py | aspuru-guzik-group/kraken | 4eaad505c1343e6083032b4a3fda47e004e19734 | [
"MIT"
] | 3 | 2022-01-13T12:39:54.000Z | 2022-03-30T00:10:52.000Z | conf_selection_and_DFT/PL_dft_library_201027.py | aspuru-guzik-group/kraken | 4eaad505c1343e6083032b4a3fda47e004e19734 | [
"MIT"
] | null | null | null | conf_selection_and_DFT/PL_dft_library_201027.py | aspuru-guzik-group/kraken | 4eaad505c1343e6083032b4a3fda47e004e19734 | [
"MIT"
] | null | null | null | # 201005: rename/restructure .yml files for consistency with xtb-level data
# 201006: in read_conformer() fix error message when log files are missing
import os,re,itertools,time
#import pybel
#from openbabel import pybel
import numpy as np
import pandas as pd
import pathlib as pl
cwd = pl.Path.cwd()
import yaml
from yaml import CLoader as Loader
from yaml import CDumper as Dumper
from rdkit import Chem,Geometry
from rdkit.Chem import rdmolfiles, AllChem, rdMolAlign,rdmolops
from multiprocessing import Pool
import morfeus # Kjell Jorner
from PL_split_logs_201006 import split_log # TG
from PL_conformer_selection_200411 import mirror_mol, delete_element_from_rdkitmol, delete_haloalkane_halides # TG #changed from PL_conformer_selection_201019 5/17/21 by EP
import PL_gaussian_properties_201021 as gp # TG
import vmin4 as vmin # TG/Iris Guo
import P_int_200916 as P_int # Robert Pollice (,TG(,ML))
# import PL_visvol as visvol # Ellyn Peters
# covalent radii, from Pyykko and Atsumi, Chem. Eur. J. 15, 2009, 188-197
# values for metals decreased by 10% according to Robert Paton's Sterimol implementation
rcov = {
"H": 0.32,"He": 0.46,"Li": 1.2,"Be": 0.94,"B": 0.77,"C": 0.75,"N": 0.71,"O": 0.63,"F": 0.64,"Ne": 0.67,"Na": 1.4,"Mg": 1.25,"Al": 1.13,"Si": 1.04,"P": 1.1,"S": 1.02,"Cl": 0.99,"Ar": 0.96,"K": 1.76,"Ca": 1.54,"Sc": 1.33,"Ti": 1.22,"V": 1.21,"Cr": 1.1,"Mn": 1.07,"Fe": 1.04,"Co": 1.0,"Ni": 0.99,"Cu": 1.01,"Zn": 1.09,"Ga": 1.12,"Ge": 1.09,"As": 1.15,"Se": 1.1,"Br": 1.14,"Kr": 1.17,"Rb": 1.89,"Sr": 1.67,"Y": 1.47,"Zr": 1.39,"Nb": 1.32,"Mo": 1.24,"Tc": 1.15,"Ru": 1.13,"Rh": 1.13,"Pd": 1.08,"Ag": 1.15,"Cd": 1.23,"In": 1.28,"Sn": 1.26,"Sb": 1.26,"Te": 1.23,"I": 1.32,"Xe": 1.31,"Cs": 2.09,"Ba": 1.76,"La": 1.62,"Ce": 1.47,"Pr": 1.58,"Nd": 1.57,"Pm": 1.56,"Sm": 1.55,"Eu": 1.51,"Gd": 1.52,"Tb": 1.51,"Dy": 1.5,"Ho": 1.49,"Er": 1.49,"Tm": 1.48,"Yb": 1.53,"Lu": 1.46,"Hf": 1.37,"Ta": 1.31,"W": 1.23,"Re": 1.18,"Os": 1.16,"Ir": 1.11,"Pt": 1.12,"Au": 1.13,"Hg": 1.32,"Tl": 1.3,"Pb": 1.3,"Bi": 1.36,"Po": 1.31,"At": 1.38,"Rn": 1.42,"Fr": 2.01,"Ra": 1.81,"Ac": 1.67,"Th": 1.58,"Pa": 1.52,"U": 1.53,"Np": 1.54,"Pu": 1.55
}
# some constants
R = 0.0019872036 #kcal mol^-1 K^-1
T = 298.15 #K
hartree_kcalmol = 627.50947
periodictable = ["Bq","H","He","Li","Be","B","C","N","O","F","Ne","Na","Mg","Al","Si","P","S","Cl","Ar","K","Ca","Sc","Ti","V","Cr","Mn","Fe","Co","Ni","Cu","Zn","Ga","Ge","As","Se","Br","Kr","Rb","Sr","Y","Zr","Nb","Mo","Tc","Ru","Rh","Pd","Ag","Cd","In","Sn","Sb","Te","I","Xe","Cs","Ba","La","Ce","Pr","Nd","Pm","Sm","Eu","Gd","Tb","Dy","Ho","Er","Tm","Yb","Lu","Hf","Ta","W","Re","Os","Ir","Pt","Au","Hg","Tl","Pb","Bi","Po","At","Rn","Fr","Ra","Ac","Th","Pa","U","Np","Pu","Am","Cm","Bk","Cf","Es","Fm","Md","No","Lr","Rf","Db","Sg","Bh","Hs","Mt","Ds","Rg","Uub","Uut","Uuq","Uup","Uuh","Uus","Uuo","X"]

def get_conmat(elements, coords):
    """Build a symmetric connectivity (adjacency) matrix for a molecule.

    Partially based on code from Robert Paton's Sterimol script, which based
    this part on Grimme's D3 code.  Two atoms count as bonded when a damped
    sigmoid of (scaled covalent-radius sum / actual distance) exceeds 0.85.

    elements: list of element symbols (strings)
    coords:   numpy array or nested list of shape N_atoms x 3
    Returns an N_atoms x N_atoms numpy array with 1.0 marking bonded pairs.
    """
    if type(coords) == list:
        coords = np.asarray(coords)
    natom = len(elements)
    k1 = 16.0
    k2 = 4.0 / 3.0
    conmat = np.zeros((natom, natom))
    for i in range(natom):
        if elements[i] not in rcov:
            continue  # element without tabulated radius stays unconnected
        # visit each pair only once; the matrix is filled symmetrically
        for iat in range(i + 1, natom):
            if elements[iat] not in rcov:
                continue
            r = np.linalg.norm(coords[iat] - coords[i])
            rco = k2 * (rcov[elements[i]] + rcov[elements[iat]])
            # np.exp replaces the removed np.math.exp alias (NumPy >= 2.0)
            damp = 1.0 / (1.0 + np.exp(-k1 * (rco / r - 1.0)))
            if damp > 0.85:  # check if threshold is good enough for general purpose
                conmat[i, iat] = conmat[iat, i] = 1
    return conmat
def add_valence(elements, coords, conmat, base_idx, add_element="Pd"):
    """Append a dummy atom bonded to the base atom and reorder the geometry.

    The new atom (default Pd) is placed along the sum of the base->substituent
    vectors, i.e. so that the angle to the existing substituents is maximized,
    at a typical bond distance for that element.  The output is reordered for
    convenience: base atom first, the added atom second, then the atoms bonded
    to the base, then all remaining atoms.

    elements:    list of element symbols
    coords:      numpy array or nested list, N_atoms x 3
    conmat:      connectivity matrix as produced by get_conmat()
    base_idx:    index of the atom receiving the new valence (typically P)
    add_element: one of the keys of distpx below
    Returns (elements_new, coords_new).
    """
    distpx = {"O":1.5,"Se":2.12,"Pd":2.28,"X":1.8} # typical bond distances to P
    if type(coords) == list:
        coords = np.asarray(coords)
    num_atoms = len(elements)
    coord_base = coords[base_idx]
    base_element = elements[base_idx]
    vec = np.array([0.0,0.0,0.0])
    bonded = []
    for atom in range(num_atoms):
        if conmat[base_idx][atom]:
            bonded.append(atom)
            vec += coord_base - coords[atom]
    coordox = distpx[add_element]*vec/np.linalg.norm(vec) + coord_base
    coords_temp = np.vstack((coords,coordox))
    # the added atom must end up bonded to exactly one atom (the base);
    # anything else means it collided with another part of the molecule
    if sum(get_conmat(elements+[add_element],coords_temp)[-1]) != 1.0:
        print(" Warning: possible collision!")
    # sort coordinates so that base is first, add_element is second, and the other atoms bonded to base are next
    elements_new = [base_element,add_element]+[elements[a] for a in bonded] + [a for i,a in enumerate(elements) if i not in [base_idx]+bonded]
    coords_new = np.vstack((coord_base, coordox, coords[bonded], coords[[i for i,a in enumerate(elements) if i not in [base_idx]+bonded]]))
    return(elements_new, coords_new)
def write_xyz(elements, coords, filename):
    """Write a standard XYZ file: atom count, blank comment line, then one
    'Element   x   y   z' row per atom (coordinates in fixed-point format)."""
    body = [f"{len(elements)}", ""]
    for element, xyz in zip(elements, coords):
        row = f"{element.title():>3} " + " ".join(f"{xyz[axis]:15f}" for axis in range(3))
        body.append(row)
    with open(filename, "w") as handle:
        handle.write("\n".join(body) + "\n")
def rmsd_matrix(conformers):
    """Pairwise best-fit RMSD matrix between conformer geometries.

    Loads each conformer's optimized SDF from its directory, strips all
    hydrogens (optional but speeds up RMSD calculation) and removes halides
    of perhaloalkyl groups (improves matching and timing), then compares
    every pair against both the structure and its mirror image, keeping the
    smaller RMSD.  Diagonal entries are set to 1.
    """
    mols = [rdmolfiles.MolFromMolFile(str(cwd/conformer/f"{conformer}_opt.sdf"), removeHs=False, strictParsing=False) for conformer in conformers]
    mols = [delete_haloalkane_halides(Chem.RemoveHs(mol)) for mol in mols]
    mirrored = [mirror_mol(mol) for mol in mols]  # mirror image of each conformer
    n_conf = len(conformers)
    rmsd_mat = np.eye(n_conf)  # diagonal = 1, matching previous revisions
    for a, b in itertools.combinations(range(n_conf), 2):
        best = min(rdMolAlign.GetBestRMS(mols[b], mols[a]),
                   rdMolAlign.GetBestRMS(mols[b], mirrored[a]))
        rmsd_mat[a, b] = rmsd_mat[b, a] = best
    return rmsd_mat
def dict_key_rmsd(candidate_pair):
    """Return the RMSD between the two conformers of *candidate_pair* as a plain float."""
    pair_matrix = rmsd_matrix(candidate_pair)
    return float(pair_matrix[0, 1])
# which energies to read from which log-file
energylogs = {
    "e_dz":"freq",
    # NOTE: the original literal declared "e_tz_gas" twice ("nbo" then "sp");
    # the later "sp" entry always won at runtime, so only it is kept here.
    "e_tz_gas":"sp",
    "e_tz_solv":"solv",
    "e_tz_ra":"ra",
    "e_tz_rc":"rc",
}
# which properties to read from which log-file
proplogs = {
    "freq":["nimag","g","t"],
    "sp"  :["dipole","homo","qpole","t"],
    "ra"  :["homo","nbo","t"],
    "rc"  :["homo","nbo","t"],
    "nbo" :["nbo","nborbsP","t"],
    "nmr" :["nmr","t"],
    "efg" :["efg","nuesp","t"],
    "solv":["ecds","t"],
}
# assign names to each descriptor; empty strings are placeholder slots that
# are deleted again after collection (see gp_properties)
propoutput = {
    "freq_g":     ["","g"],
    "freq_nimag": ["nimag"],
    "sp_dipole":  ["dipolemoment",],
    "sp_homo":    ["fmo_e_homo","fmo_e_lumo","fmo_mu","fmo_eta","fmo_omega"],
    "ra_homo":    ["somo_ra","","","",""],
    "rc_homo":    ["somo_rc","","","",""],
    "sp_qpole":   ["qpole_amp","qpoletens_xx","qpoletens_yy","qpoletens_zz"],
    "nbo_nbo":    ["nbo_P"],
    "ra_nbo":     ["nbo_P_ra","spindens_P_ra"],
    "rc_nbo":     ["nbo_P_rc","spindens_P_rc"],
    "nmr_nmr":    ["nmr_P","nmrtens_sxx_P","nmrtens_syy_P","nmrtens_szz_P",],
    "efg_efg":    ["efg_amp_P","efgtens_xx_P","efgtens_yy_P","efgtens_zz_P"],
    "efg_nuesp":  ["nuesp_P",],
    "solv_ecds":  ["E_solv_cds"],
    "nbo_dipole": ["dipolemoment",],
    "nbo_homo":   ["fmo_e_homo","fmo_e_lumo","fmo_mu","fmo_eta","fmo_omega"],
    "nbo_qpole":  ["qpole_amp","qpoletens_xx","qpoletens_yy","qpoletens_zz"],
}
# descriptors that get Boltzmann-averaged over the conformer ensemble
boltzproperties = ['vmin_vmin','vmin_r','dipolemoment', 'fmo_e_homo', 'fmo_e_lumo', 'fmo_mu', 'fmo_eta', 'fmo_omega', 'somo_ra', 'somo_rc', 'qpole_amp', 'qpoletens_xx', 'qpoletens_yy', 'qpoletens_zz', 'nbo_P', 'nbo_P_ra', 'spindens_P_ra', 'nbo_P_rc', 'spindens_P_rc', 'nmr_P', 'nmrtens_sxx_P', 'nmrtens_syy_P', 'nmrtens_szz_P', 'efg_amp_P', 'efgtens_xx_P', 'efgtens_yy_P', 'efgtens_zz_P', 'nuesp_P', 'E_solv_cds', 'nbo_lp_P_percent_s', 'nbo_lp_P_occ', 'nbo_lp_P_e', 'nbo_bd_e_max', 'nbo_bd_e_avg', 'nbo_bds_e_min', 'nbo_bds_e_avg', 'nbo_bd_occ_min', 'nbo_bd_occ_avg', 'nbo_bds_occ_max', 'nbo_bds_occ_avg', 'E_solv_total', 'E_solv_elstat', 'E_oxidation', 'E_reduction', 'fukui_p', 'fukui_m', 'pyr_P', 'pyr_alpha', 'vbur_vbur', 'vbur_vtot', 'vbur_ratio_vbur_vtot', 'vbur_qvbur_min', 'vbur_qvbur_max', 'vbur_qvtot_min', 'vbur_qvtot_max', 'vbur_max_delta_qvbur', 'vbur_max_delta_qvtot', 'vbur_ovbur_min', 'vbur_ovbur_max', 'vbur_ovtot_min', 'vbur_ovtot_max', 'vbur_near_vbur', 'vbur_far_vbur', 'vbur_near_vtot', 'vbur_far_vtot', 'sterimol_B1', 'sterimol_B5', 'sterimol_L', 'sterimol_burB1', 'sterimol_burB5', 'sterimol_burL',"Pint_P_int","Pint_dP","Pint_P_min","Pint_P_max","volume","surface_area","sphericity"] # "vv_total_visible_volume","vv_proximal_visible_volume","vv_distal_visible_volume","vv_ratio_visible_total","vv_ratio_proxvis_total",
# geometry-dependent descriptors for which min/max over conformers are kept
mmproperties = ['dipolemoment', 'qpole_amp', 'qpoletens_xx', 'qpoletens_yy', 'qpoletens_zz', 'pyr_P', 'pyr_alpha', 'vbur_vbur', 'vbur_vtot', 'vbur_qvbur_min', 'vbur_qvbur_max', 'vbur_qvtot_min', 'vbur_qvtot_max', 'vbur_max_delta_qvbur', 'vbur_max_delta_qvtot', 'vbur_ovbur_min', 'vbur_ovbur_max', 'vbur_ovtot_min', 'vbur_ovtot_max', 'vbur_near_vbur', 'vbur_far_vbur', 'vbur_near_vtot', 'vbur_far_vtot', 'sterimol_B1', 'sterimol_B5', 'sterimol_L', 'sterimol_burB1', 'sterimol_burB5', 'sterimol_burL'] # ,"vv_total_visible_volume","vv_proximal_visible_volume","vv_distal_visible_volume","vv_ratio_visible_total","vv_ratio_proxvis_total",
# descriptor names produced by the P_int dispersion analysis
Pintresults = ["Pint_P_int","Pint_dP","Pint_P_min","Pint_P_max","volume","surface_area","sphericity"]
def morfeus_properties(elements, coordinates, confdata):
    """Compute steric descriptors with morfeus: pyramidalization, buried
    volume (plus quadrant/octant breakdowns), and (buried) Sterimol values.

    Each descriptor family is only computed if its key is not already present
    in confdata, so previously collected results are not recomputed.

    NOTE(review): atom indexing here is morfeus's 1-based convention and
    assumes the geometry was reordered by add_valence(): atom 1 is P, atom 2
    is the added Pd reference, atoms 3.. are the P substituents — confirm
    against the caller.

    elements:    list of element symbols
    coordinates: N_atoms x 3 array of coordinates
    confdata:    per-conformer property dict; "p_val" is the P valence count
    Returns a dict of newly computed descriptors.
    """
    # Morfeus: Sterimol, Vbur, pyr
    morfdict = {}
    if "pyr_P" not in confdata.keys() and confdata["p_val"] == 3:
        # Pyramidalization - two equivalent measurments P and alpha
        pyr = morfeus.Pyramidalization(elements=elements,coordinates=coordinates,atom_index=1,excluded_atoms=[2]) # remove Pd
        morfdict["pyr_P"] = float(pyr.P)
        morfdict["pyr_alpha"] = float(pyr.alpha)
    if "vbur_vbur" not in confdata.keys():
        #Buried volume - get quadrant volumes and distal volume
        # iterate through P-substituents, aligning the quadrants paralell to each once (= xz_plane definition)
        # Metal/point of reference should be 2.28 A away from P
        # z_axis_atoms: P
        # xz_plane_atoms: each of the substituents once
        # keep lowest and highest quadrant and octant volume across all three orientations of the coordinate system
        # keep highest difference of any neighboring quadrant volume
        # keep volume in each of the two hemispheres
        qvbur_all = np.array([])
        qvdist_all = np.array([])
        qvtot_all = np.array([])
        max_delta_qvbur_all = []
        max_delta_qvtot_all = []
        ovbur_all = np.array([])
        ovtot_all = np.array([])
        for i in range(3):#confdata["p_val"]):
            # one BuriedVolume evaluation per substituent-aligned frame
            bv = morfeus.BuriedVolume(elements,coordinates,2,excluded_atoms=[2],z_axis_atoms=[1],xz_plane_atoms=[3+i])
            bv.octant_analysis()
            bv.compute_distal_volume(method="buried_volume",octants=True)
            vbur = bv.buried_volume # these are identical for each iteration
            vdist = bv.distal_volume #
            vtot = vbur + vdist #
            qvbur = np.asarray(list(bv.quadrants["buried_volume"].values()))
            qvdist = np.asarray(list(bv.quadrants["distal_volume"].values()))
            qvtot = qvbur + qvdist
            qvbur_all = np.append(qvbur_all,qvbur)
            qvtot_all = np.append(qvtot_all,qvtot)
            # largest difference between neighboring quadrants in this frame
            max_delta_qvbur_all.append(max([abs(qvbur[j]-qvbur[j-1]) for j in range(4)]))
            max_delta_qvtot_all.append(max([abs(qvtot[j]-qvtot[j-1]) for j in range(4)]))
            ovbur = np.asarray(list(bv.octants["buried_volume"].values()))
            ovdist = np.asarray(list(bv.octants["distal_volume"].values()))
            ovtot = ovbur + ovdist
            ovbur_all = np.append(ovbur_all,ovbur)
            ovtot_all = np.append(ovtot_all,ovtot)
            near_vbur = ovbur[4:].sum() # these are identical for each iteration
            far_vbur = ovbur[:4].sum() #
            near_vtot = ovtot[4:].sum() #
            far_vtot = ovtot[:4].sum() #
        morfdict["vbur_vbur"] = vbur
        morfdict["vbur_vtot"] = float(vtot)
        morfdict["vbur_ratio_vbur_vtot"] = float(vbur/vtot)
        # extrema are taken across all three coordinate-frame orientations
        morfdict["vbur_qvbur_min"] = float(min(qvbur_all))
        morfdict["vbur_qvbur_max"] = float(max(qvbur_all))
        morfdict["vbur_qvtot_min"] = float(min(qvtot_all))
        morfdict["vbur_qvtot_max"] = float(max(qvtot_all))
        morfdict["vbur_max_delta_qvbur"] = float(max(max_delta_qvbur_all))
        morfdict["vbur_max_delta_qvtot"] = float(max(max_delta_qvtot_all))
        morfdict["vbur_ovbur_min"] = float(min(ovbur_all))
        morfdict["vbur_ovbur_max"] = float(max(ovbur_all))
        morfdict["vbur_ovtot_min"] = float(min(ovtot_all))
        morfdict["vbur_ovtot_max"] = float(max(ovtot_all))
        morfdict["vbur_near_vbur"] = float(near_vbur)
        morfdict["vbur_far_vbur"] = float(far_vbur)
        morfdict["vbur_near_vtot"] = float(near_vtot)
        morfdict["vbur_far_vtot"] = float(far_vtot)
    if "sterimol_B1" not in confdata.keys():
        # Sterimol
        # for Sterimol values matching Rob Paton's implementation:
        # Bondi radii with H adjusted from 1.2 to 1.09
        patonradii = morfeus.helpers.get_radii(elements, radii_type="bondi")
        patonradii = np.array(patonradii)
        patonradii[patonradii == 1.2] = 1.09
        sterimol = morfeus.Sterimol(elements, coordinates, 2, 1, radii=patonradii, n_rot_vectors=3600)
        morfdict["sterimol_B1"] = float(sterimol.B_1_value)
        morfdict["sterimol_B5"] = float(sterimol.B_5_value)
        morfdict["sterimol_L"] = float(sterimol.L_value)
        # buried Sterimol: atoms outside a 5.5 A sphere are deleted first
        sterimol_bur = morfeus.Sterimol(elements, coordinates, 2, 1,calculate=False,radii=patonradii, n_rot_vectors=3600)
        sterimol_bur.bury(sphere_radius=5.5,method="delete",radii_scale=0.5)
        # sterimol.bury(sphere_radius=4.5,method="delete",radii_scale=1)
        morfdict["sterimol_burB1"] = float(sterimol_bur.B_1_value)
        morfdict["sterimol_burB5"] = float(sterimol_bur.B_5_value)
        morfdict["sterimol_burL"] = float(sterimol_bur.L_value)
    return(morfdict)
def gp_properties(ligand, conformer, p_idx):
    """Collect electronic descriptors from the conformer's Gaussian log files.

    Reads the energies listed in `energylogs` and the per-log properties
    listed in `proplogs`, dispatching each to its parser in `gp.jobtypes`
    and renaming the results via `propoutput`.  Derived quantities (solvation
    energies, redox energies, Fukui indices, total wall time) are computed at
    the end.

    NOTE(review): the `ligand` parameter is not used in this function.

    ligand:    ligand ID (unused here)
    conformer: conformer directory/file stem under `cwd`
    p_idx:     index of the P atom passed through to the log parsers
    Returns a dict with top-level energies/metadata and a nested
    "properties" dict of descriptors, or {"error": True} if any energy log
    is missing or incomplete.
    """
    # reads gaussian log files
    gpdict = {}
    gpdict["properties"] = {}
    contents = {
        "streams":{},
        "filecont":{},
    }
    # read energies
    for e,log in energylogs.items():
        contents["streams"][log] = gp.get_outstreams(cwd/conformer/f"{conformer}_{log}.log")
        if contents["streams"][log] == "failed or incomplete job":
            return({"error":True})
        else:
            gpdict[e] = gp.get_e_hf(contents["streams"][log])
    gpdict["error"] = False
    # going through each log file, get the relevant properties
    for log in proplogs.keys():
        contents["filecont"][log] = gp.get_filecont(cwd/conformer/f"{conformer}_{log}.log")
        for prop in proplogs[log]:
            # gp.jobtypes[prop] = (parser function, which content kind it reads)
            gpresults = gp.jobtypes[prop][0](contents[gp.jobtypes[prop][1]][log],p_idx)
            if prop == "nborbsP": # NBO orbital analysis returns a dictionary with the proper labels
                gpdict["properties"].update(gpresults)
            elif prop == "t": # subjob time
                gpdict[f"{log}_t"] = gpresults
            elif prop in ["e_dz","g","e_tz_gas","e_tz_solv","e_tz_ra","e_tz_rc","nimag"]:
                # energies/metadata go to the top level of the result dict
                gpdict.update({propoutput[f"{log}_{prop}"][i]: float(gpresults[i]) for i in range(len(gpresults))})
            else: # all other functions return a list. This is assigned into a dict with proper names here
                gpdict["properties"].update({propoutput[f"{log}_{prop}"][i]: float(gpresults[i]) for i in range(len(gpresults))})
    # derived thermochemistry: combine DZ thermal correction with TZ energies
    gpdict["g_tz_gas"] = gpdict["g"] - gpdict["e_dz"] + gpdict["e_tz_gas"] # in Hartree
    gpdict["g_tz_solv"] = gpdict["g"] - gpdict["e_dz"] + gpdict["e_tz_solv"] # in Hartree
    gpdict["properties"]["E_solv_total"] = (gpdict["e_tz_solv"] - gpdict["e_tz_gas"]) * hartree_kcalmol # in kcal/mol
    gpdict["properties"]["E_solv_elstat"] = gpdict["properties"]["E_solv_total"] - gpdict["properties"]["E_solv_cds"] # in kcal/mol
    gpdict["properties"]["E_oxidation"] = gpdict["e_tz_rc"] - gpdict["e_tz_gas"] # in Hartree
    gpdict["properties"]["E_reduction"] = gpdict["e_tz_ra"] - gpdict["e_tz_gas"] # in Hartree
    gpdict["properties"]["fukui_p"] = gpdict["properties"]["nbo_P"]-gpdict["properties"]["nbo_P_ra"] # fukui electrophilicity
    gpdict["properties"]["fukui_m"] = gpdict["properties"]["nbo_P_rc"]-gpdict["properties"]["nbo_P"] # fukui nucleophilicity
    gpdict["t_total"] = sum([gpdict[f"{log}_t"] for log in proplogs.keys()])
    # drop the placeholder keys produced by the empty names in propoutput
    if "" in gpdict.keys():
        del gpdict[""]
    if "" in gpdict["properties"].keys():
        del gpdict["properties"][""]
    return(gpdict)
def read_conformer(cwd, ligand, conformer): # cwd: pathlib path of the working directory. ligand: 8-digit ligand ID. conformer: full name of the conformer (including the ligand ID at the beginning)
    """Collect geometry and all computed properties for a single conformer.

    Reads the per-job Gaussian logs in cwd/conformer, rebuilds the optimized
    geometry, adds a reference "Pd" atom in the P-lone-pair region, and fills
    a conformer data dict: electronic properties via gp_properties, steric
    properties via morfeus_properties, P_int and V_min descriptors.
    Results are cached to <conformer>_data.yml; error strings are appended to
    <ligand>_errors.txt. Returns (confdata, errors).
    """
    confdata = {}
    errors = []
    # Every property log listed in proplogs must exist before parsing starts.
    checklogs = [cwd/conformer/f"{conformer}_{l}.log" for l in proplogs.keys() if not (cwd/conformer/f"{conformer}_{l}.log").exists()]
    if len(checklogs) != 0:
        #! log this as a conformer-level error
        err = f"Missing Gaussian log files, flagged in read_conformer: {','.join([chkl.name for chkl in checklogs])}"
        errors.append(err)
        print(f"{ligand};{conformer};{err}")
        with open(cwd/f"{ligand}_errors.txt","a") as f:
            f.write(f"{ligand};{conformer};{err}\n")
        confdata["error"] = True
        return(confdata,errors)
    if "elements_pd" not in confdata.keys():  # always true here (confdata starts empty); kept for symmetry with cached restarts
        # previous pybel-based geometry readers, kept for reference:
        # mol = next(pybel.readfile("g09",str(cwd/conformer/f"{conformer}_nbo.log")))
        #mol = next(pybel.readfile("g09",str(cwd/conformer/f"{conformer}_opt.log")))
        #elements = [periodictable[a.atomicnum] for a in mol.atoms]
        #coordinates = [list(a.coords) for a in mol.atoms]
        #coordinates_a = np.array([a.coords for a in mol.atoms])
        def read_gaussian_logfile(fn):
            # Parse the LAST "Coordinates (Angstroms)" table in the log, i.e. the final geometry
            # (each new table resets the coordinates/elements lists).
            time0=time.time()
            read=False
            for line in open(fn,"r"):
                if read:
                    if "---" in line and len(elements)>0:
                        read=False
                if read:
                    if "X" not in line and "---" not in line:
                        atomnum = int(line.split()[1])
                        #print(line.replace("\n",""))
                        #print(atomnum)
                        el = periodictable[atomnum]
                        elements.append(el)
                        coordinates.append([float(line.split()[3]),float(line.split()[4]), float(line.split()[5])])
                if "Coordinates (Angstroms)" in line:
                    coordinates, elements = [], []
                    read=True
            time1=time.time()
            print("gaussian log parser done in %.2f seconds"%(time1-time0))
            return(coordinates, elements)
        coordinates, elements = read_gaussian_logfile(str(cwd/conformer/f"{conformer}_opt.log"))
        coordinates_a = np.array(coordinates)
        conmat = get_conmat(elements,coordinates_a)
        p_idx = [i for i in range(len(elements)) if elements[i] == "P" and sum(conmat[i]) <= 3][0] # this removes quaternary P (phosphonium, phosphate etc) but allows for P with 2 substituents (phosphabenzene, phosphaimine etc). Can we be sure that we never have more than one non-quaternary P(III)?
        elements_pd, coordinates_pd = add_valence(elements,coordinates,conmat,p_idx,add_element="Pd") # Add "Pd" at the reference position in the P-lone pair region
        if not (cwd/conformer/f"{conformer}_opt_Pd.xyz").exists():
            #out = pybel.Outputfile("xyz",str(cwd/conformer/f"{conformer}_opt.xyz"))
            #out.write(mol)
            #out.close()
            write_xyz(elements, coordinates, cwd/conformer/f"{conformer}_opt.xyz")
            #out = pybel.Outputfile("sdf",str(cwd/conformer/f"{conformer}_opt.sdf"))
            #out.write(mol)
            #out.close()
            # the .sdf is needed later by rmsd_matrix() for duplicate detection
            os.system("obabel -ixyz %s -osdf >> %s"%(str(cwd/conformer/f"{conformer}_opt.xyz"), str(cwd/conformer/f"{conformer}_opt.sdf")))
            write_xyz(elements_pd,coordinates_pd,cwd/conformer/f"{conformer}_opt_Pd.xyz")
        confdata["coords"] = coordinates
        confdata["coords_pd"] = coordinates_pd.tolist()
        confdata["elements"] = elements
        confdata["elements_pd"] = elements_pd
        confdata["conmat"] = conmat.tolist()
        confdata["p_idx"] = p_idx
        confdata["p_val"] = int(sum(conmat[p_idx])) # how many substituents at P
        confdata["properties"] = {}
    ## get properties
    # gp_properties: everything that can be read from the Gaussian log files (most electronic properties)
    confdata.update(gp_properties(ligand,conformer,confdata["p_idx"]))
    if confdata["error"]:
        #! log this as a conformer-level error
        err = "Error in the Gaussian computations, flagged in read_conformer, please check log files."
        errors.append(err)
        print(f"{ligand};{conformer};{err}")
        with open(cwd/f"{ligand}_errors.txt","a") as f:
            f.write(f"{ligand};{conformer};{err}\n")
        with open(cwd/conformer/f"{conformer}_data.yml","w") as f:
            yaml.dump(confdata,f,Dumper=Dumper)
        return(confdata,errors)
    if confdata["nimag"] != 0:
        #! log this as a conformer-level error
        err = f"Number of imaginary frequencies: {confdata['nimag']}."
        errors.append(err)
        print(f"{ligand};{conformer};{err}")
        with open(cwd/f"{ligand}_errors.txt","a") as f:
            f.write(f"{ligand};{conformer};{err}\n")
        with open(cwd/conformer/f"{conformer}_data.yml","w") as f:
            yaml.dump(confdata,f,Dumper=Dumper)
        confdata["error"] = True
        return(confdata,errors)
    # morfeus: properties that use the geometry/steric properties
    confdata["properties"].update(morfeus_properties(confdata["elements_pd"],confdata["coords_pd"],confdata))
    # # P_int
    # if "Pint_P_int" not in confdata.keys():
    #     confdata.update(P_int.P_int_main(name=conformer,directory=cwd/conformer))
    # read precomputed P_int results from the conformer directory
    disp = "d3"
    pint_read = P_int.read_dedout(cwd/conformer,conformer,disp)+P_int.read_multiwfnout(cwd/conformer,conformer)+P_int.read_disp(cwd/conformer,conformer,disp)
    confdata["properties"].update({Pintresults[i]:float(pint_read[i]) for i in range(7)})
    # V_min
    try:
        if "vmin_vmin" not in confdata.keys():
            vminob = vmin.get_vmin(f"{conformer}.fchk",str(cwd/conformer)+"/",True)
            confdata["properties"]["vmin_vmin"] = float(vminob.v_min)
            confdata["properties"]["vmin_r"] = float(vminob.r_min)
    # NOTE(review): bare except reports ANY failure (not only a missing file) as "Vmin FileNotFoundError"
    except:
        err = f"Vmin FileNotFoundError."
        errors.append(err)
        print(f"{ligand};{conformer};{err}")
        with open(cwd/f"{ligand}_errors.txt","a") as f:
            f.write(f"{ligand};{conformer};{err}\n")
        confdata["error"] = True
    # visvol
    # if "vv_total_visible_volume" not in confdata.keys():
    #     confdata.update(visvol.get_vis_vol(cwd/conformer/f"{conformer}_opt_Pd.xyz",radii_type = 'rcov',prox_cutoff = 3.5,ignore_H = 0,write_results = 1, plot = 0))
    with open(cwd/conformer/f"{conformer}_data.yml","w") as f:
        yaml.dump(confdata,f,Dumper=Dumper)
    return(confdata,errors)
def read_ligand(cwd, ligand, conformers, liganddata=None): # cwd is the ligand-level directory
    """Aggregate conformer-level results of one ligand into ligand-level descriptors.

    cwd: ligand-level directory (pathlib.Path). ligand: 8-digit ligand ID.
    conformers: list of conformer directory names. liganddata: previously
    aggregated data (restart support); pass None to start fresh.

    Removes failed/NIMAG!=0 conformers and duplicates, Boltzmann-averages the
    per-conformer properties, computes min/max/delta and min-Vbur-conformer
    condensed values, and writes <ligand>_data.yml, <ligand>_confdata.yml and
    a relative-energies CSV. Returns (liganddata, status), where status maps
    "ligandlevel" and conformer names to lists of error strings.
    """
    # BUGFIX: the default used to be the mutable literal `{}`, which Python
    # evaluates once and shares across calls — data aggregated for one ligand
    # leaked into the next ligand's call. Use None as sentinel instead.
    if liganddata is None:
        liganddata = {}
    status = {"ligandlevel": [],}
    if len(liganddata.keys()) == 0:
        # restart support: load previously written ligand-level YAML if present
        if (cwd/f"{ligand}_data.yml").exists():
            with open(cwd/f"{ligand}_data.yml","r") as f:
                liganddata = yaml.load(f,Loader=Loader)
            if (cwd/f"{ligand}_confdata.yml").exists():
                with open(cwd/f"{ligand}_confdata.yml","r") as f:
                    liganddata["confdata"] = yaml.load(f,Loader=Loader)
        else:
            liganddata = {
                "conformers_all": conformers,
                "conformers": conformers.copy(), # Duplicates and computations with errors (including nimag=1) will be removed from this list
                "number_of_conformers": len(conformers),
                "removed_duplicates": [],
                "confdata": {},
                "boltzmann_averaged_data": {},
                "min_data": {},
                "max_data": {},
                "delta_data": {},
                "vburminconf_data": {},
            }
    newconfs = 0
    for conformer in conformers:
        if conformer in liganddata["removed_duplicates"]:
            continue
        print(conformer)
        if conformer in liganddata["confdata"].keys():
            pass # already aggregated
        elif (cwd/conformer/f"{conformer}_data.yml").exists():
            with open(cwd/conformer/f"{conformer}_data.yml","r") as f:
                liganddata["confdata"][conformer] = yaml.load(f,Loader=Loader)
            newconfs += 1
        else:
            print("read conformer data")
            # returns the conformer data dictionary and a list of error strings
            liganddata["confdata"][conformer],status[conformer] = read_conformer(cwd, ligand, conformer)
            newconfs += 1
    if newconfs > 0:
        # drop conformers whose computations failed or that have imaginary frequencies
        liganddata["conformers_w_error"] = [conformer for conformer in liganddata["conformers"] if liganddata["confdata"][conformer]["error"]]
        liganddata["conformers"] = [c for c in liganddata["conformers"] if c not in liganddata["conformers_w_error"]]
        liganddata["number_of_conformers"] = len(liganddata["conformers"])
        energies = ["e_dz","g","e_tz_gas","g_tz_gas","e_tz_solv","g_tz_solv"]
        liganddata["energies"] = {}
        liganddata["relative_energies"] = {}
        for e in energies:
            liganddata["energies"][e] = {conformer: liganddata["confdata"][conformer][e] for conformer in liganddata["conformers"]}
            liganddata[e+"_min"] = min(liganddata["energies"][e].values())
            liganddata[e+"_minconf"] = list(liganddata["energies"][e].keys())[np.argmin(list(liganddata["energies"][e].values()))]
            liganddata["relative_energies"][e+"_rel"] = {conformer: (liganddata["energies"][e][conformer]-liganddata[e+"_min"])*hartree_kcalmol for conformer in liganddata["conformers"]} # kcal/mol
        erel_df = pd.DataFrame([liganddata["relative_energies"][e+"_rel"] for e in energies],index=energies).T
        liganddata["relative_energies_dict"] = erel_df.to_dict()
        # Find duplicates:
        # 1) find pairs of conformers that are within E_rel < 0.1 kcal/mol (relative energies seem to be much more reliable than relative free energies)
        # 2) check these pairs to also have RMSD < 0.2 A
        # 3) Remove the conformer with higher relative free energy
        duplicates_candidates = [(i,j) for i,j in itertools.combinations(liganddata["conformers"],2) if abs(erel_df["e_dz"].loc[i] - erel_df["e_dz"].loc[j]) < 0.1]
        try:
            cores = max(os.cpu_count() - 2, 1)
            with Pool(cores) as p:
                values = p.map(dict_key_rmsd, duplicates_candidates)
            liganddata["rmsd_candidates"] = {key: value for key, value in zip(duplicates_candidates, values)} # keep all RMSD for potential debugging
            liganddata["duplicates"] = [candidate_pair for candidate_pair in liganddata["rmsd_candidates"] if liganddata["rmsd_candidates"][candidate_pair] < 0.2]
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        # RDKit failed to generate Mol objects / compute RMSD: fall back to duplicate
        # detection based on dipole moment and chemical shift similarity.
        except Exception:
            #! log this on ligand level for double-checking
            err = "Warning: RDKit error at duplicate RMSD testing. Please double check."
            status["ligandlevel"].append(err)
            print(f"{ligand};ligandlevel;{err}")
            with open(cwd/f"{ligand}_errors.txt","a") as f:
                f.write(f"{ligand};ligandlevel;{err}\n")
            dipole_candidates = set([(i,j) for i,j in duplicates_candidates if abs(liganddata["confdata"][i]["properties"]["dipolemoment"] - liganddata["confdata"][j]["properties"]["dipolemoment"]) < 0.025])
            nmr_candidates = set([(i,j) for i,j in duplicates_candidates if abs(liganddata["confdata"][i]["properties"]["nmr_P"] - liganddata["confdata"][j]["properties"]["nmr_P"]) < 0.1])
            liganddata["duplicates"] = sorted(dipole_candidates & nmr_candidates)
        # of each duplicate pair, remove the conformer with the higher free energy
        liganddata["removed_duplicates"] = [erel_df.loc[list(pair)]["g_tz_gas"].idxmax() for pair in liganddata["duplicates"]]
        liganddata["conformers"] = [c for c in liganddata["conformers"] if c not in liganddata["removed_duplicates"]]
        liganddata["number_of_conformers"] = len(liganddata["conformers"])
        # Boltzmann averaging over the gas-phase triple-zeta free energies
        boltzfacs = {conformer: np.exp(-erel_df["g_tz_gas"].loc[conformer]/(R*T)) for conformer in liganddata["conformers"]}
        Q = sum(boltzfacs.values())
        liganddata["boltzmann_weights"] = {conformer: float(boltzfacs[conformer]/Q) for conformer in liganddata["conformers"] } # probability
        for prop in boltzproperties:
            confsmissingprop = [conf for conf in liganddata["conformers"] if prop not in liganddata["confdata"][conf]["properties"].keys()]
            if len(confsmissingprop) == 0:
                liganddata["boltzmann_averaged_data"][prop] = sum([liganddata["boltzmann_weights"][conf] * liganddata["confdata"][conf]["properties"][prop] for conf in liganddata["conformers"]])
            else: # if a single conformer is missing a property value, set Boltzmann-average to None
                #! log this as a ligand-level error with prop and confsmissingprop
                err = f"Warning: {len(confsmissingprop)}/{len(liganddata['conformers'])} conformers missing values for property {prop}: {','.join(confsmissingprop)}."
                status["ligandlevel"].append(err)
                print(f"{ligand};ligandlevel;{err}")
                with open(cwd/f"{ligand}_errors.txt","a") as f:
                    f.write(f"{ligand};ligandlevel;{err}\n")
                liganddata["boltzmann_averaged_data"][prop] = None
                continue
        # "Condensed" properties: min/max/delta across conformers plus the value at the min-Vbur conformer
        liganddata["vburminconf"] = liganddata["conformers"][np.argmin([liganddata["confdata"][conf]["properties"]["vbur_vbur"] for conf in liganddata["conformers"]])]
        for prop in mmproperties:
            proplist = [liganddata["confdata"][conf]["properties"][prop] for conf in liganddata["conformers"] if prop in liganddata["confdata"][conf]["properties"].keys()]
            # if a single conformer is missing a property value, still perform min/max analysis (Boltzmann-average will be None to indicate missing value(s))
            # if all confs are missing this prop, set min/max/delta to None
            if len(proplist) == 0:
                liganddata["min_data"][prop] = None
                liganddata["max_data"][prop] = None
                liganddata["delta_data"][prop] = None
                liganddata["vburminconf_data"][prop] = None
            else:
                liganddata["min_data"][prop] = min(proplist)
                liganddata["max_data"][prop] = max(proplist)
                liganddata["delta_data"][prop] = liganddata["max_data"][prop] - liganddata["min_data"][prop]
                liganddata["vburminconf_data"][prop] = liganddata["confdata"][liganddata["vburminconf"]]["properties"][prop]
        liganddata["time_all"] = sum([liganddata["confdata"][conf]["t_total"] for conf in liganddata["conformers_all"] if "t_total" in liganddata["confdata"][conf].keys()])
        with open(cwd/f"{ligand}_data.yml","w") as f:
            yaml.dump({k:v for k,v in liganddata.items() if k != "confdata"},f,Dumper=Dumper)
        with open(cwd/f"{ligand}_confdata.yml","w") as f:
            yaml.dump(liganddata["confdata"],f,Dumper=Dumper)
        erel_df.to_csv(cwd/f"{ligand}_relative_energies.csv",sep=";")
    return(liganddata,status)
def main_split_logs(cwd, ligand):
    """Split the combined Gaussian log of each conformer of one ligand.

    A conformer needs splitting when <conformer>.log exists but
    <conformer>_opt.log does not. Returns the list of conformer names whose
    log was split without error in this call.
    """
    err_dir = cwd / "ERR"
    if not err_dir.exists():
        err_dir.mkdir()
    conformer_names = [entry.name for entry in (cwd / ligand).iterdir() if entry.is_dir()]
    successfully_split = []
    for name in conformer_names:
        log_names = [p.name for p in (cwd / ligand / name).rglob("*.log")]
        if f"{name}.log" not in log_names or f"{name}_opt.log" in log_names:
            continue  # nothing to split (or already split)
        if split_log(ligand, name) != "Error":
            successfully_split.append(name)
    return successfully_split
if __name__ == '__main__':
    starttime_all = time.time()
    # Ligand directories are named with an 8-digit ID.
    ligname = re.compile("[0-9]{8}")
    ligands = sorted([i.name for i in cwd.iterdir() if (ligname.match(i.name) and i.is_dir())])
    conformers = {ligand: [i.name for i in (cwd/ligand).iterdir() if i.is_dir()] for ligand in ligands}
    if not (cwd/"ERR").exists():
        (cwd/"ERR").mkdir()
    if not (cwd/"done").exists():
        (cwd/"done").mkdir()
    # Step 1: split each conformer's combined Gaussian log into per-job logs;
    # move the combined log to done/ on success.
    for ligand in ligands:
        for conformer in conformers[ligand]:
            logs = [i.name for i in (cwd/ligand/conformer).rglob("*.log")]
            if f"{conformer}.log" in logs and f"{conformer}_opt.log" not in logs:
                status = split_log(ligand,conformer)
                if status != "Error":
                    (cwd/ligand/conformer/f"{conformer}.log").rename(cwd/f"done/{conformer}.log")
    # Step 2: aggregate conformer data per ligand (restart from YAML if present).
    if (cwd/"allligands_data.yml").exists():
        with open(cwd/"allligands_data.yml","r") as f:
            allliganddata = yaml.load(f,Loader=Loader)
    else:
        allliganddata = {}
    for ligand in ligands:
        print(ligand)
        print(conformers[ligand])
        if ligand in allliganddata.keys():
            allliganddata[ligand],status = read_ligand(cwd,ligand,conformers[ligand],allliganddata[ligand])
        else:
            allliganddata[ligand],status = read_ligand(cwd,ligand,conformers[ligand])
    with open(cwd/"allligands_data.yml","w") as f:
        yaml.dump(allliganddata,f,Dumper=Dumper)
    # Step 3: flatten the condensed descriptors into a ligand x descriptor table.
    variants = ["boltz","min","max","delta","vburminconf"]
    columns = [i+"_boltz" for i in boltzproperties if i not in mmproperties] + [f"{i}_{j}" for i,j in itertools.product(mmproperties,variants)]
    # Each column suffix selects the matching sub-dictionary filled by read_ligand().
    variant_sources = {
        "boltz": "boltzmann_averaged_data",
        "min": "min_data",
        "max": "max_data",
        "delta": "delta_data",
        "vburminconf": "vburminconf_data",
    }
    df = pd.DataFrame(columns = columns,index = ligands)
    # BUGFIX: this loop previously contained a leftover debug print + exit()
    # (aborting on the first iteration) and indexed a non-existent
    # allliganddata[l]["properties"]; read_ligand stores the condensed values
    # under the per-variant dictionaries mapped in variant_sources.
    for l in ligands:
        for c in columns:
            # Column names are "<property>_<variant>"; the variant is always the last "_"-segment.
            prop, _, variant = c.rpartition("_")
            df.loc[l, c] = allliganddata[l][variant_sources[variant]].get(prop)
    df["t_total"] = [allliganddata[l]["t_total"] for l in ligands]
    df["number_of_conformers"] = [allliganddata[l]["number_of_conformers"] for l in ligands]
    df.to_csv("allligands_data.csv",sep=";")
    print(f"All done. Total time: {round((time.time()-starttime_all),2)} sec")
| 57.324074 | 1,343 | 0.619959 |
import os,re,itertools,time
import numpy as np
import pandas as pd
import pathlib as pl
cwd = pl.Path.cwd()
import yaml
from yaml import CLoader as Loader
from yaml import CDumper as Dumper
from rdkit import Chem,Geometry
from rdkit.Chem import rdmolfiles, AllChem, rdMolAlign,rdmolops
from multiprocessing import Pool
import morfeus
from PL_split_logs_201006 import split_log
from PL_conformer_selection_200411 import mirror_mol, delete_element_from_rdkitmol, delete_haloalkane_halides as vmin
import P_int_200916 as P_int
"H": 0.32,"He": 0.46,"Li": 1.2,"Be": 0.94,"B": 0.77,"C": 0.75,"N": 0.71,"O": 0.63,"F": 0.64,"Ne": 0.67,"Na": 1.4,"Mg": 1.25,"Al": 1.13,"Si": 1.04,"P": 1.1,"S": 1.02,"Cl": 0.99,"Ar": 0.96,"K": 1.76,"Ca": 1.54,"Sc": 1.33,"Ti": 1.22,"V": 1.21,"Cr": 1.1,"Mn": 1.07,"Fe": 1.04,"Co": 1.0,"Ni": 0.99,"Cu": 1.01,"Zn": 1.09,"Ga": 1.12,"Ge": 1.09,"As": 1.15,"Se": 1.1,"Br": 1.14,"Kr": 1.17,"Rb": 1.89,"Sr": 1.67,"Y": 1.47,"Zr": 1.39,"Nb": 1.32,"Mo": 1.24,"Tc": 1.15,"Ru": 1.13,"Rh": 1.13,"Pd": 1.08,"Ag": 1.15,"Cd": 1.23,"In": 1.28,"Sn": 1.26,"Sb": 1.26,"Te": 1.23,"I": 1.32,"Xe": 1.31,"Cs": 2.09,"Ba": 1.76,"La": 1.62,"Ce": 1.47,"Pr": 1.58,"Nd": 1.57,"Pm": 1.56,"Sm": 1.55,"Eu": 1.51,"Gd": 1.52,"Tb": 1.51,"Dy": 1.5,"Ho": 1.49,"Er": 1.49,"Tm": 1.48,"Yb": 1.53,"Lu": 1.46,"Hf": 1.37,"Ta": 1.31,"W": 1.23,"Re": 1.18,"Os": 1.16,"Ir": 1.11,"Pt": 1.12,"Au": 1.13,"Hg": 1.32,"Tl": 1.3,"Pb": 1.3,"Bi": 1.36,"Po": 1.31,"At": 1.38,"Rn": 1.42,"Fr": 2.01,"Ra": 1.81,"Ac": 1.67,"Th": 1.58,"Pa": 1.52,"U": 1.53,"Np": 1.54,"Pu": 1.55
}
# some constants
R = 0.0019872036 #kcal mol^-1 K^-1
T = 298.15 #K
hartree_kcalmol = 627.50947
periodictable = ["Bq","H","He","Li","Be","B","C","N","O","F","Ne","Na","Mg","Al","Si","P","S","Cl","Ar","K","Ca","Sc","Ti","V","Cr","Mn","Fe","Co","Ni","Cu","Zn","Ga","Ge","As","Se","Br","Kr","Rb","Sr","Y","Zr","Nb","Mo","Tc","Ru","Rh","Pd","Ag","Cd","In","Sn","Sb","Te","I","Xe","Cs","Ba","La","Ce","Pr","Nd","Pm","Sm","Eu","Gd","Tb","Dy","Ho","Er","Tm","Yb","Lu","Hf","Ta","W","Re","Os","Ir","Pt","Au","Hg","Tl","Pb","Bi","Po","At","Rn","Fr","Ra","Ac","Th","Pa","U","Np","Pu","Am","Cm","Bk","Cf","Es","Fm","Md","No","Lr","Rf","Db","Sg","Bh","Hs","Mt","Ds","Rg","Uub","Uut","Uuq","Uup","Uuh","Uus","Uuo","X"]
def get_conmat(elements, coords, radii=None):
    """Return an N x N 0/1 connectivity (bond) matrix from covalent radii.

    elements: list of element-symbol strings. coords: N x 3 nested list or
    numpy array (Angstrom). radii: optional mapping symbol -> covalent radius;
    defaults to the module-level rcov table (backward compatible).
    Atoms whose symbol is missing from the table get no bonds.
    """
    # partially based on code from Robert Paton's Sterimol script, which based this part on Grimme's D3 code
    if radii is None:
        radii = rcov
    if type(coords) == list:
        coords = np.asarray(coords)
    natom = len(elements)
    #max_elem = 94
    k1 = 16.0          # damping steepness
    k2 = 4.0/3.0       # scaling of the summed covalent radii
    conmat = np.zeros((natom,natom))
    for i in range(0,natom):
        if elements[i] not in radii.keys():
            continue
        for iat in range(0,natom):
            if elements[iat] not in radii.keys():
                continue
            if iat != i:
                dxyz = coords[iat]-coords[i]
                r = np.linalg.norm(dxyz)
                rco = radii[elements[i]]+radii[elements[iat]]
                rco = rco*k2
                rr=rco/r
                # BUGFIX: was np.math.exp — np.math was an accidental alias of the
                # stdlib math module and is removed in NumPy >= 2.0.
                damp=1.0/(1.0+np.exp(-k1*(rr-1.0)))
                if damp > 0.85: #check if threshold is good enough for general purpose
                    conmat[i,iat],conmat[iat,i] = 1,1
    return(conmat)
def add_valence(elements,coords,conmat,base_idx,add_element="Pd"):
    """Add a dummy/reference atom along the free-valence direction of a base atom.

    The new atom is placed opposite the average direction of all atoms bonded
    to base_idx (i.e. in the lone-pair region) at the typical bond distance
    given in distpx, and the output is reordered so that base comes first,
    the added atom second, then base's substituents, then the rest.
    Returns (elements_new, coords_new).
    """
    # Adds a valence to base so that the angle to the previous substituents is maximized and reorders the coordinate output for convenience
    # add_element: add any of the following elements:
    distpx = {"O":1.5,"Se":2.12,"Pd":2.28,"X":1.8} # typical bond distances to P
    if type(coords) == list:
        coords = np.asarray(coords)
    num_atoms = len(elements)
    coord_base = coords[base_idx]
    base_element = elements[base_idx]
    vec = np.array([0.0,0.0,0.0])
    bonded = []
    for atom in range(num_atoms):
        if conmat[base_idx][atom]:
            bonded.append(atom)
            vec += coord_base - coords[atom]  # sum of bond vectors pointing away from the substituents
    coordox = distpx[add_element]*vec/np.linalg.norm(vec) + coord_base
    atoms = [x for x in range(num_atoms+1)]  # NOTE(review): appears unused below
    coords_temp = np.vstack((coords,coordox))
    # the added atom should bond ONLY to base; more than one bond means it clashes with another atom
    if sum(get_conmat(elements+[add_element],coords_temp)[-1]) != 1.0:
        print(" Warning: possible collision!")
    # sort coordinates so that base is first, add_element is second, and the other atoms bonded to base are next
    elements_new = [base_element,add_element]+[elements[a] for a in bonded] + [a for i,a in enumerate(elements) if i not in [base_idx]+bonded]
    coords_new = np.vstack((coord_base, coordox, coords[bonded], coords[[i for i,a in enumerate(elements) if i not in [base_idx]+bonded]]))
    return(elements_new, coords_new)
def write_xyz(elements, coords, filename):
    """Write a standard .xyz file: atom count, blank comment line, then one
    "Sym  x y z" row per atom (symbols title-cased, coordinates %15f)."""
    rows = [f"{len(elements)}", ""]
    for symbol, xyz in zip(elements, coords):
        fields = " ".join(f"{xyz[axis]:15f}" for axis in range(3))
        rows.append(f"{symbol.title():>3} " + fields)
    with open(filename, "w") as handle:
        handle.write("\n".join(rows) + "\n")
def rmsd_matrix(conformers):
    """Pairwise heavy-atom RMSD matrix for a list of conformer names.

    Loads <conformer>_opt.sdf from each conformer directory (under the
    module-level cwd), strips hydrogens and perhaloalkyl halides, and for each
    pair keeps the smaller RMSD of the direct and the mirror-image alignment
    so enantiomeric conformations are recognized as duplicates.
    Diagonal entries are set to 1 (not 0) as a sentinel.
    NOTE(review): the visible import binds delete_haloalkane_halides under the
    alias vmin — confirm the plain name is actually in scope at module level.
    """
    molobjects = [rdmolfiles.MolFromMolFile(str(cwd/conformer/f"{conformer}_opt.sdf"),removeHs=False,strictParsing=False) for conformer in conformers]
    molobjects = [Chem.RemoveHs(mol) for mol in molobjects] # Remove all H: optional but speeds up RMSD calculation
    molobjects = [delete_haloalkane_halides(mol) for mol in molobjects] # Remove halides in perhaloalkyl moieties. Improves RMSD matching and timing
    molobjects_inv = [mirror_mol(mol) for mol in molobjects] # create mirror images of each conformer
    rmsd_mat = np.zeros((len(conformers),len(conformers)))
    # fill the lower triangle only, then mirror (the matrix is symmetric)
    for i,j in itertools.product(range(len(conformers)),range(len(conformers))):
        if i<j: continue
        if i==j:
            rmsd_mat[i,j] = 1
        else:
            rmsd_mat[i,j] = min((rdMolAlign.GetBestRMS(molobjects[i],molobjects[j]),rdMolAlign.GetBestRMS(molobjects[i],molobjects_inv[j])))
            rmsd_mat[j,i] = rmsd_mat[i,j]
    return(rmsd_mat)
def dict_key_rmsd(candidate_pair):
    # Pool.map helper for read_ligand: RMSD between the two conformers of a
    # candidate duplicate pair (off-diagonal element of the 2x2 RMSD matrix).
    return float(rmsd_matrix(candidate_pair)[0,1])
# which energies to read from which log-file
# NOTE(review): "e_tz_gas" appears twice below; the later entry ("sp") wins in
# a dict literal, so the "nbo" line is dead — confirm this is intentional.
energylogs = {
    "e_dz":"freq",
    "e_tz_gas":"nbo",
    "e_tz_gas":"sp",
    "e_tz_solv":"solv",
    "e_tz_ra":"ra",
    "e_tz_rc":"rc",
}
# which properties to read from which log-file (keys of gp.jobtypes)
proplogs = {
    "freq":["nimag","g","t"],
    "sp" :["dipole","homo","qpole","t"],
    "ra" :["homo","nbo","t"],
    "rc" :["homo","nbo","t"],
    "nbo" :["nbo","nborbsP","t"],
    "nmr" :["nmr","t"],
    "efg" :["efg","nuesp","t"],
    "solv":["ecds","t"],
}
# assign names to each descriptor: "<log>_<prop>" -> list of output names,
# positionally matching the parser's return list. Empty strings "" mark values
# to discard (gp_properties deletes the "" keys afterwards).
propoutput = {
    "freq_g": ["","g"],
    "freq_nimag": ["nimag"],
    "sp_dipole": ["dipolemoment",],
    "sp_homo": ["fmo_e_homo","fmo_e_lumo","fmo_mu","fmo_eta","fmo_omega"],
    "ra_homo":["somo_ra","","","",""],
    "rc_homo":["somo_rc","","","",""],
    "sp_qpole": ["qpole_amp","qpoletens_xx","qpoletens_yy","qpoletens_zz"],
    "nbo_nbo": ["nbo_P"],
    "ra_nbo": ["nbo_P_ra","spindens_P_ra"],
    "rc_nbo": ["nbo_P_rc","spindens_P_rc"],
    "nmr_nmr": ["nmr_P","nmrtens_sxx_P","nmrtens_syy_P","nmrtens_szz_P",],
    "efg_efg": ["efg_amp_P","efgtens_xx_P","efgtens_yy_P","efgtens_zz_P"],
    "efg_nuesp": ["nuesp_P",],
    "solv_ecds": ["E_solv_cds"],
    "nbo_dipole": ["dipolemoment",],
    "nbo_homo": ["fmo_e_homo","fmo_e_lumo","fmo_mu","fmo_eta","fmo_omega"],
    "nbo_qpole": ["qpole_amp","qpoletens_xx","qpoletens_yy","qpoletens_zz"],
}
# descriptors that read_ligand Boltzmann-averages over conformers
boltzproperties = ['vmin_vmin','vmin_r','dipolemoment', 'fmo_e_homo', 'fmo_e_lumo', 'fmo_mu', 'fmo_eta', 'fmo_omega', 'somo_ra', 'somo_rc', 'qpole_amp', 'qpoletens_xx', 'qpoletens_yy', 'qpoletens_zz', 'nbo_P', 'nbo_P_ra', 'spindens_P_ra', 'nbo_P_rc', 'spindens_P_rc', 'nmr_P', 'nmrtens_sxx_P', 'nmrtens_syy_P', 'nmrtens_szz_P', 'efg_amp_P', 'efgtens_xx_P', 'efgtens_yy_P', 'efgtens_zz_P', 'nuesp_P', 'E_solv_cds', 'nbo_lp_P_percent_s', 'nbo_lp_P_occ', 'nbo_lp_P_e', 'nbo_bd_e_max', 'nbo_bd_e_avg', 'nbo_bds_e_min', 'nbo_bds_e_avg', 'nbo_bd_occ_min', 'nbo_bd_occ_avg', 'nbo_bds_occ_max', 'nbo_bds_occ_avg', 'E_solv_total', 'E_solv_elstat', 'E_oxidation', 'E_reduction', 'fukui_p', 'fukui_m', 'pyr_P', 'pyr_alpha', 'vbur_vbur', 'vbur_vtot', 'vbur_ratio_vbur_vtot', 'vbur_qvbur_min', 'vbur_qvbur_max', 'vbur_qvtot_min', 'vbur_qvtot_max', 'vbur_max_delta_qvbur', 'vbur_max_delta_qvtot', 'vbur_ovbur_min', 'vbur_ovbur_max', 'vbur_ovtot_min', 'vbur_ovtot_max', 'vbur_near_vbur', 'vbur_far_vbur', 'vbur_near_vtot', 'vbur_far_vtot', 'sterimol_B1', 'sterimol_B5', 'sterimol_L', 'sterimol_burB1', 'sterimol_burB5', 'sterimol_burL',"Pint_P_int","Pint_dP","Pint_P_min","Pint_P_max","volume","surface_area","sphericity"] # "vv_total_visible_volume","vv_proximal_visible_volume","vv_distal_visible_volume","vv_ratio_visible_total","vv_ratio_proxvis_total",
# descriptors that additionally get min/max/delta/vburminconf condensed values
mmproperties = ['dipolemoment', 'qpole_amp', 'qpoletens_xx', 'qpoletens_yy', 'qpoletens_zz', 'pyr_P', 'pyr_alpha', 'vbur_vbur', 'vbur_vtot', 'vbur_qvbur_min', 'vbur_qvbur_max', 'vbur_qvtot_min', 'vbur_qvtot_max', 'vbur_max_delta_qvbur', 'vbur_max_delta_qvtot', 'vbur_ovbur_min', 'vbur_ovbur_max', 'vbur_ovtot_min', 'vbur_ovtot_max', 'vbur_near_vbur', 'vbur_far_vbur', 'vbur_near_vtot', 'vbur_far_vtot', 'sterimol_B1', 'sterimol_B5', 'sterimol_L', 'sterimol_burB1', 'sterimol_burB5', 'sterimol_burL'] # ,"vv_total_visible_volume","vv_proximal_visible_volume","vv_distal_visible_volume","vv_ratio_visible_total","vv_ratio_proxvis_total",
# order of the values returned by the P_int readers in read_conformer
Pintresults = ["Pint_P_int","Pint_dP","Pint_P_min","Pint_P_max","volume","surface_area","sphericity"]
def morfeus_properties(elements,coordinates,confdata):
    """Compute steric descriptors with morfeus: pyramidalization, buried
    volume with quadrant/octant analysis, and (buried) Sterimol values.

    elements/coordinates describe the conformer WITH the added reference "Pd"
    (morfeus uses 1-based indices: P is atom 1, Pd is atom 2). Only properties
    not already present in confdata are computed. Returns a dict of floats.
    """
    # Morfeus: Sterimol, Vbur, pyr
    morfdict = {}
    if "pyr_P" not in confdata.keys() and confdata["p_val"] == 3:
        # Pyramidalization - two equivalent measurments P and alpha
        pyr = morfeus.Pyramidalization(elements=elements,coordinates=coordinates,atom_index=1,excluded_atoms=[2]) # remove Pd
        morfdict["pyr_P"] = float(pyr.P)
        morfdict["pyr_alpha"] = float(pyr.alpha)
    if "vbur_vbur" not in confdata.keys():
        #Buried volume - get quadrant volumes and distal volume
        # iterate through P-substituents, aligning the quadrants paralell to each once (= xz_plane definition)
        # Metal/point of reference should be 2.28 A away from P
        # z_axis_atoms: P
        # xz_plane_atoms: each of the substituents once
        # keep lowest and highest quadrant and octant volume across all three orientations of the coordinate system
        # keep highest difference of any neighboring quadrant volume
        # keep volume in each of the two hemispheres
        qvbur_all = np.array([])
        qvdist_all = np.array([])  # NOTE(review): never appended to below — appears unused
        qvtot_all = np.array([])
        max_delta_qvbur_all = []
        max_delta_qvtot_all = []
        ovbur_all = np.array([])
        ovtot_all = np.array([])
        for i in range(3):#confdata["p_val"]):
            bv = morfeus.BuriedVolume(elements,coordinates,2,excluded_atoms=[2],z_axis_atoms=[1],xz_plane_atoms=[3+i])
            bv.octant_analysis()
            bv.compute_distal_volume(method="buried_volume",octants=True)
            vbur = bv.buried_volume # these are identical for each iteration
            vdist = bv.distal_volume #
            vtot = vbur + vdist #
            qvbur = np.asarray(list(bv.quadrants["buried_volume"].values()))
            qvdist = np.asarray(list(bv.quadrants["distal_volume"].values()))
            qvtot = qvbur + qvdist
            qvbur_all = np.append(qvbur_all,qvbur)
            qvtot_all = np.append(qvtot_all,qvtot)
            # largest difference between neighboring quadrants (cyclic via index -1)
            max_delta_qvbur_all.append(max([abs(qvbur[j]-qvbur[j-1]) for j in range(4)]))
            max_delta_qvtot_all.append(max([abs(qvtot[j]-qvtot[j-1]) for j in range(4)]))
            ovbur = np.asarray(list(bv.octants["buried_volume"].values()))
            ovdist = np.asarray(list(bv.octants["distal_volume"].values()))
            ovtot = ovbur + ovdist
            ovbur_all = np.append(ovbur_all,ovbur)
            ovtot_all = np.append(ovtot_all,ovtot)
            near_vbur = ovbur[4:].sum() # these are identical for each iteration
            far_vbur = ovbur[:4].sum() #
            near_vtot = ovtot[4:].sum() #
            far_vtot = ovtot[:4].sum() #
        # orientation-independent values come from the last loop iteration;
        # min/max values are taken across all three orientations
        morfdict["vbur_vbur"] = vbur
        morfdict["vbur_vtot"] = float(vtot)
        morfdict["vbur_ratio_vbur_vtot"] = float(vbur/vtot)
        morfdict["vbur_qvbur_min"] = float(min(qvbur_all))
        morfdict["vbur_qvbur_max"] = float(max(qvbur_all))
        morfdict["vbur_qvtot_min"] = float(min(qvtot_all))
        morfdict["vbur_qvtot_max"] = float(max(qvtot_all))
        morfdict["vbur_max_delta_qvbur"] = float(max(max_delta_qvbur_all))
        morfdict["vbur_max_delta_qvtot"] = float(max(max_delta_qvtot_all))
        morfdict["vbur_ovbur_min"] = float(min(ovbur_all))
        morfdict["vbur_ovbur_max"] = float(max(ovbur_all))
        morfdict["vbur_ovtot_min"] = float(min(ovtot_all))
        morfdict["vbur_ovtot_max"] = float(max(ovtot_all))
        morfdict["vbur_near_vbur"] = float(near_vbur)
        morfdict["vbur_far_vbur"] = float(far_vbur)
        morfdict["vbur_near_vtot"] = float(near_vtot)
        morfdict["vbur_far_vtot"] = float(far_vtot)
    if "sterimol_B1" not in confdata.keys():
        # Sterimol
        # for Sterimol values matching Rob Paton's implementation:
        patonradii = morfeus.helpers.get_radii(elements, radii_type="bondi")
        patonradii = np.array(patonradii)
        patonradii[patonradii == 1.2] = 1.09  # adjust H radius to Paton's value
        sterimol = morfeus.Sterimol(elements, coordinates, 2, 1, radii=patonradii, n_rot_vectors=3600)
        morfdict["sterimol_B1"] = float(sterimol.B_1_value)
        morfdict["sterimol_B5"] = float(sterimol.B_5_value)
        morfdict["sterimol_L"] = float(sterimol.L_value)
        # buried Sterimol: atoms outside a 5.5 A sphere (with scaled radii) are deleted
        sterimol_bur = morfeus.Sterimol(elements, coordinates, 2, 1,calculate=False,radii=patonradii, n_rot_vectors=3600)
        sterimol_bur.bury(sphere_radius=5.5,method="delete",radii_scale=0.5)
        morfdict["sterimol_burB1"] = float(sterimol_bur.B_1_value)
        morfdict["sterimol_burB5"] = float(sterimol_bur.B_5_value)
        morfdict["sterimol_burL"] = float(sterimol_bur.L_value)
    return(morfdict)
def gp_properties(ligand,conformer,p_idx):
    """Read electronic-structure properties for one conformer from its Gaussian logs.

    p_idx: 0-based index of the (non-quaternary) P atom. Returns a dict with
    energies/timings at the top level and named descriptors under
    "properties"; returns {"error": True} as soon as any job stream reports
    a failed or incomplete computation.
    NOTE(review): relies on a module-level Gaussian-parser `gp` that is
    imported elsewhere in the file (not visible in this chunk) — confirm.
    """
    gpdict = {}
    gpdict["properties"] = {}
    contents = {
        "streams":{},
        "filecont":{},
    }
    # energies: one value per entry in energylogs, read from the job's output stream
    for e,log in energylogs.items():
        contents["streams"][log] = gp.get_outstreams(cwd/conformer/f"{conformer}_{log}.log")
        if contents["streams"][log] == "failed or incomplete job":
            return({"error":True})
        else:
            gpdict[e] = gp.get_e_hf(contents["streams"][log])
    gpdict["error"] = False
    # going through each log file, get the relevant properties
    for log in proplogs.keys():
        contents["filecont"][log] = gp.get_filecont(cwd/conformer/f"{conformer}_{log}.log")
        for prop in proplogs[log]:
            gpresults = gp.jobtypes[prop][0](contents[gp.jobtypes[prop][1]][log],p_idx)
            if prop == "nborbsP": # NBO orbital analysis returns a dictionary with the proper labels
                gpdict["properties"].update(gpresults)
            elif prop == "t": # subjob wall time
                gpdict[f"{log}_t"] = gpresults
            elif prop in ["e_dz","g","e_tz_gas","e_tz_solv","e_tz_ra","e_tz_rc","nimag"]: # energies/nimag go to the top level, not under "properties"
                gpdict.update({propoutput[f"{log}_{prop}"][i]: float(gpresults[i]) for i in range(len(gpresults))})
            else: # all other parsers return a list; map it onto descriptor names here
                gpdict["properties"].update({propoutput[f"{log}_{prop}"][i]: float(gpresults[i]) for i in range(len(gpresults))})
    # derived energies (Hartree unless noted)
    gpdict["g_tz_gas"] = gpdict["g"] - gpdict["e_dz"] + gpdict["e_tz_gas"] # in Hartree
    gpdict["g_tz_solv"] = gpdict["g"] - gpdict["e_dz"] + gpdict["e_tz_solv"] # in Hartree
    gpdict["properties"]["E_solv_total"] = (gpdict["e_tz_solv"] - gpdict["e_tz_gas"]) * hartree_kcalmol # in kcal/mol
    gpdict["properties"]["E_solv_elstat"] = gpdict["properties"]["E_solv_total"] - gpdict["properties"]["E_solv_cds"] # in kcal/mol
    gpdict["properties"]["E_oxidation"] = gpdict["e_tz_rc"] - gpdict["e_tz_gas"] # in Hartree
    gpdict["properties"]["E_reduction"] = gpdict["e_tz_ra"] - gpdict["e_tz_gas"] # in Hartree
    gpdict["properties"]["fukui_p"] = gpdict["properties"]["nbo_P"]-gpdict["properties"]["nbo_P_ra"] # fukui electrophilicity
    gpdict["properties"]["fukui_m"] = gpdict["properties"]["nbo_P_rc"]-gpdict["properties"]["nbo_P"] # fukui nucleophilicity
    gpdict["t_total"] = sum([gpdict[f"{log}_t"] for log in proplogs.keys()])
    # drop placeholder entries produced by "" names in propoutput
    if "" in gpdict.keys():
        del gpdict[""]
    if "" in gpdict["properties"].keys():
        del gpdict["properties"][""]
    return(gpdict)
def read_conformer(cwd, ligand, conformer):
    """Parse all Gaussian results for one conformer into a data dictionary.

    Args:
        cwd: pathlib.Path to the ligand working directory (contains one
            subdirectory per conformer).
        ligand: ligand name; used for error-log file naming.
        conformer: conformer name; Gaussian logs are expected at
            cwd/conformer/f"{conformer}_{step}.log" for each step in `proplogs`.

    Returns:
        (confdata, errors): `confdata` maps property names to values (with a
        nested "properties" dict); `errors` is a list of error strings. On any
        failure, confdata["error"] is True and the partial data is returned.
    """
    confdata = {}
    errors = []
    # Verify that every expected Gaussian job log exists before parsing.
    checklogs = [cwd/conformer/f"{conformer}_{l}.log" for l in proplogs.keys() if not (cwd/conformer/f"{conformer}_{l}.log").exists()]
    if len(checklogs) != 0:
        err = f"Missing Gaussian log files, flagged in read_conformer: {','.join([chkl.name for chkl in checklogs])}"
        errors.append(err)
        print(f"{ligand};{conformer};{err}")
        with open(cwd/f"{ligand}_errors.txt","a") as f:
            f.write(f"{ligand};{conformer};{err}\n")
        confdata["error"] = True
        return(confdata,errors)
    if "elements_pd" not in confdata.keys():
        def read_gaussian_logfile(fn):
            # Returns the last geometry block (coordinates, elements) found in
            # the Gaussian log file `fn`.
            time0=time.time()
            read=False
            for line in open(fn,"r"):
                if read:
                    if "---" in line and len(elements)>0:
                        read=False
                if read:
                    if "X" not in line and "---" not in line:
                        atomnum = int(line.split()[1])
                        el = periodictable[atomnum]
                        elements.append(el)
                        coordinates.append([float(line.split()[3]),float(line.split()[4]), float(line.split()[5])])
                if "Coordinates (Angstroms)" in line:
                    coordinates, elements = [], []
                    read=True
            time1=time.time()
            print("gaussian log parser done in %.2f seconds"%(time1-time0))
            return(coordinates, elements)
        coordinates, elements = read_gaussian_logfile(str(cwd/conformer/f"{conformer}_opt.log"))
        coordinates_a = np.array(coordinates)
        conmat = get_conmat(elements,coordinates_a)
        # The "free" phosphorus atom: a P with at most three bonds.
        p_idx = [i for i in range(len(elements)) if elements[i] == "P" and sum(conmat[i]) <= 3][0]
        elements_pd, coordinates_pd = add_valence(elements,coordinates,conmat,p_idx,add_element="Pd")
        if not (cwd/conformer/f"{conformer}_opt_Pd.xyz").exists():
            write_xyz(elements, coordinates, cwd/conformer/f"{conformer}_opt.xyz")
            os.system("obabel -ixyz %s -osdf >> %s"%(str(cwd/conformer/f"{conformer}_opt.xyz"), str(cwd/conformer/f"{conformer}_opt.sdf")))
            write_xyz(elements_pd,coordinates_pd,cwd/conformer/f"{conformer}_opt_Pd.xyz")
        confdata["coords"] = coordinates
        confdata["coords_pd"] = coordinates_pd.tolist()
        confdata["elements"] = elements
        confdata["elements_pd"] = elements_pd
        confdata["conmat"] = conmat.tolist()
        confdata["p_idx"] = p_idx
        confdata["p_val"] = int(sum(conmat[p_idx]))
        confdata["properties"] = {}
    # BUG FIX: this previously called `a.update(...)` on an undefined name `a`
    # (NameError). The Gaussian-derived results must go into `confdata`, since
    # confdata["error"] / confdata["nimag"] are read immediately below.
    confdata.update(gp_properties(ligand,conformer,confdata["p_idx"]))
    if confdata["error"]:
        err = "Error in the Gaussian computations, flagged in read_conformer, please check log files."
        errors.append(err)
        print(f"{ligand};{conformer};{err}")
        with open(cwd/f"{ligand}_errors.txt","a") as f:
            f.write(f"{ligand};{conformer};{err}\n")
        with open(cwd/conformer/f"{conformer}_data.yml","w") as f:
            yaml.dump(confdata,f,Dumper=Dumper)
        return(confdata,errors)
    if confdata["nimag"] != 0:
        # Imaginary frequencies mean the geometry is not a true minimum.
        err = f"Number of imaginary frequencies: {confdata['nimag']}."
        errors.append(err)
        print(f"{ligand};{conformer};{err}")
        with open(cwd/f"{ligand}_errors.txt","a") as f:
            f.write(f"{ligand};{conformer};{err}\n")
        with open(cwd/conformer/f"{conformer}_data.yml","w") as f:
            yaml.dump(confdata,f,Dumper=Dumper)
        confdata["error"] = True
        return(confdata,errors)
    confdata["properties"].update(morfeus_properties(confdata["elements_pd"],confdata["coords_pd"],confdata))
    disp = "d3"
    pint_read = P_int.read_dedout(cwd/conformer,conformer,disp)+P_int.read_multiwfnout(cwd/conformer,conformer)+P_int.read_disp(cwd/conformer,conformer,disp)
    confdata["properties"].update({Pintresults[i]:float(pint_read[i]) for i in range(7)})
    try:
        if "vmin_vmin" not in confdata.keys():
            vminob = vmin.get_vmin(f"{conformer}.fchk",str(cwd/conformer)+"/",True)
            confdata["properties"]["vmin_vmin"] = float(vminob.v_min)
            confdata["properties"]["vmin_r"] = float(vminob.r_min)
    except:  # NOTE(review): bare except kept for behavior; message suggests FileNotFoundError is the expected failure.
        err = f"Vmin FileNotFoundError."
        errors.append(err)
        print(f"{ligand};{conformer};{err}")
        with open(cwd/f"{ligand}_errors.txt","a") as f:
            f.write(f"{ligand};{conformer};{err}\n")
        confdata["error"] = True
    with open(cwd/conformer/f"{conformer}_data.yml","w") as f:
        yaml.dump(confdata,f,Dumper=Dumper)
    return(confdata,errors)
def read_ligand(cwd, ligand, conformers, liganddata = None):
    """Aggregate conformer-level results into ligand-level descriptors.

    Reads (or reuses) per-conformer data, removes errored conformers and
    duplicates, then computes Boltzmann-averaged, min/max/delta and
    Vbur-min-conformer properties. Results are written to
    `{ligand}_data.yml`, `{ligand}_confdata.yml` and
    `{ligand}_relative_energies.csv`.

    Args:
        cwd: pathlib.Path of the working directory.
        ligand: ligand name.
        conformers: list of conformer names for this ligand.
        liganddata: previously accumulated ligand data, or None to start fresh
            (or to load from the yml files if present).

    Returns:
        (liganddata, status) where status maps conformer names (and
        "ligandlevel") to error messages.
    """
    # BUG FIX: the default used to be a mutable `{}`, which Python shares
    # across calls — state from one ligand leaked into the next. Use a None
    # sentinel instead (backward compatible: empty dict behaved like None).
    if liganddata is None:
        liganddata = {}
    status = {"ligandlevel": [],}
    if len(liganddata.keys()) == 0:
        if (cwd/f"{ligand}_data.yml").exists():
            with open(cwd/f"{ligand}_data.yml","r") as f:
                liganddata = yaml.load(f,Loader=Loader)
            if (cwd/f"{ligand}_confdata.yml").exists():
                with open(cwd/f"{ligand}_confdata.yml","r") as f:
                    liganddata["confdata"] = yaml.load(f,Loader=Loader)
        else:
            liganddata = {
                "conformers_all": conformers,
                "conformers": conformers.copy(),
                "number_of_conformers": len(conformers),
                "removed_duplicates": [],
                "confdata": {},
                "boltzmann_averaged_data": {},
                "min_data": {},
                "max_data": {},
                "delta_data": {},
                "vburminconf_data": {},
            }
    newconfs = 0
    for conformer in conformers:
        if conformer in liganddata["removed_duplicates"]:
            continue
        print(conformer)
        if conformer in liganddata["confdata"].keys():
            pass
        elif (cwd/conformer/f"{conformer}_data.yml").exists():
            with open(cwd/conformer/f"{conformer}_data.yml","r") as f:
                liganddata["confdata"][conformer] = yaml.load(f,Loader=Loader)
            newconfs += 1
        else:
            print("read conformer data")
            liganddata["confdata"][conformer],status[conformer] = read_conformer(cwd, ligand, conformer)
            newconfs += 1
    if newconfs > 0:
        liganddata["conformers_w_error"] = [conformer for conformer in liganddata["conformers"] if liganddata["confdata"][conformer]["error"]]
        liganddata["conformers"] = [c for c in liganddata["conformers"] if c not in liganddata["conformers_w_error"]]
        liganddata["number_of_conformers"] = len(liganddata["conformers"])
        energies = ["e_dz","g","e_tz_gas","g_tz_gas","e_tz_solv","g_tz_solv"]
        liganddata["energies"] = {}
        liganddata["relative_energies"] = {}
        for e in energies:
            liganddata["energies"][e] = {conformer: liganddata["confdata"][conformer][e] for conformer in liganddata["conformers"]}
            liganddata[e+"_min"] = min(liganddata["energies"][e].values())
            liganddata[e+"_minconf"] = list(liganddata["energies"][e].keys())[np.argmin(list(liganddata["energies"][e].values()))]
            liganddata["relative_energies"][e+"_rel"] = {conformer: (liganddata["energies"][e][conformer]-liganddata[e+"_min"])*hartree_kcalmol for conformer in liganddata["conformers"]}
        erel_df = pd.DataFrame([liganddata["relative_energies"][e+"_rel"] for e in energies],index=energies).T
        liganddata["relative_energies_dict"] = erel_df.to_dict()
        # Candidate duplicates: pairs within 0.1 kcal/mol at the DZ level.
        duplicates_candidates = [(i,j) for i,j in itertools.combinations(liganddata["conformers"],2) if abs(erel_df["e_dz"].loc[i] - erel_df["e_dz"].loc[j]) < 0.1]
        try:
            cores = max(os.cpu_count() - 2, 1)
            with Pool(cores) as p:
                values = p.map(dict_key_rmsd, duplicates_candidates)
            liganddata["rmsd_candidates"] = {key: value for key, value in zip(duplicates_candidates, values)}
            # NOTE(review): the result of this comprehension is discarded; it
            # was presumably meant to narrow the duplicate candidates to pairs
            # with RMSD < 0.2 — confirm intended behavior before changing.
            [candidate_pair for candidate_pair in liganddata["rmsd_candidates"] if liganddata["rmsd_candidates"][candidate_pair] < 0.2]
        except:  # NOTE(review): bare except kept; intended as best-effort around RDKit failures.
            err = "Warning: RDKit error at duplicate RMSD testing. Please double check."
            status["ligandlevel"].append(err)
            print(f"{ligand};ligandlevel;{err}")
            with open(cwd/f"{ligand}_errors.txt","a") as f:
                f.write(f"{ligand};ligandlevel;{err}\n")
        # A pair counts as a duplicate only if dipole moment AND 31P NMR agree.
        dipole_candidates = set([(i,j) for i,j in duplicates_candidates if abs(liganddata["confdata"][i]["properties"]["dipolemoment"] - liganddata["confdata"][j]["properties"]["dipolemoment"]) < 0.025])
        nmr_candidates = set([(i,j) for i,j in duplicates_candidates if abs(liganddata["confdata"][i]["properties"]["nmr_P"] - liganddata["confdata"][j]["properties"]["nmr_P"]) < 0.1])
        liganddata["duplicates"] = sorted(dipole_candidates & nmr_candidates)
        # Of each duplicate pair, drop the higher-energy conformer.
        liganddata["removed_duplicates"] = [erel_df.loc[list(pair)]["g_tz_gas"].idxmax() for pair in liganddata["duplicates"]]
        liganddata["conformers"] = [c for c in liganddata["conformers"] if c not in liganddata["removed_duplicates"]]
        liganddata["number_of_conformers"] = len(liganddata["conformers"])
        # Boltzmann weights from relative gas-phase free energies.
        boltzfacs = {conformer: np.exp(-erel_df["g_tz_gas"].loc[conformer]/(R*T)) for conformer in liganddata["conformers"]}
        Q = sum(boltzfacs.values())
        liganddata["boltzmann_weights"] = {conformer: float(boltzfacs[conformer]/Q) for conformer in liganddata["conformers"] }
        for prop in boltzproperties:
            confsmissingprop = [conf for conf in liganddata["conformers"] if prop not in liganddata["confdata"][conf]["properties"].keys()]
            if len(confsmissingprop) == 0:
                liganddata["boltzmann_averaged_data"][prop] = sum([liganddata["boltzmann_weights"][conf] * liganddata["confdata"][conf]["properties"][prop] for conf in liganddata["conformers"]])
            else:
                err = f"Warning: {len(confsmissingprop)}/{len(liganddata['conformers'])} conformers missing values for property {prop}: {','.join(confsmissingprop)}."
                status["ligandlevel"].append(err)
                print(f"{ligand};ligandlevel;{err}")
                with open(cwd/f"{ligand}_errors.txt","a") as f:
                    f.write(f"{ligand};ligandlevel;{err}\n")
                liganddata["boltzmann_averaged_data"][prop] = None
                continue
        liganddata["vburminconf"] = liganddata["conformers"][np.argmin([liganddata["confdata"][conf]["properties"]["vbur_vbur"] for conf in liganddata["conformers"]])]
        for prop in mmproperties:
            proplist = [liganddata["confdata"][conf]["properties"][prop] for conf in liganddata["conformers"] if prop in liganddata["confdata"][conf]["properties"].keys()]
            if len(proplist) == 0:
                liganddata["min_data"][prop] = None
                liganddata["max_data"][prop] = None
                liganddata["delta_data"][prop] = None
                liganddata["vburminconf_data"][prop] = None
            else:
                liganddata["min_data"][prop] = min(proplist)
                liganddata["max_data"][prop] = max(proplist)
                liganddata["delta_data"][prop] = liganddata["max_data"][prop] - liganddata["min_data"][prop]
                liganddata["vburminconf_data"][prop] = liganddata["confdata"][liganddata["vburminconf"]]["properties"][prop]
        liganddata["time_all"] = sum([liganddata["confdata"][conf]["t_total"] for conf in liganddata["conformers_all"] if "t_total" in liganddata["confdata"][conf].keys()])
        with open(cwd/f"{ligand}_data.yml","w") as f:
            yaml.dump({k:v for k,v in liganddata.items() if k != "confdata"},f,Dumper=Dumper)
        with open(cwd/f"{ligand}_confdata.yml","w") as f:
            yaml.dump(liganddata["confdata"],f,Dumper=Dumper)
        erel_df.to_csv(cwd/f"{ligand}_relative_energies.csv",sep=";")
    return(liganddata,status)
def main_split_logs(cwd, ligand):
    """Split raw Gaussian logs for one ligand's conformers.

    Returns the list of conformer names whose log was split successfully.
    """
    err_dir = cwd / "ERR"
    if not err_dir.exists():
        err_dir.mkdir()
    conformers_good = []
    for conf_dir in (cwd / ligand).iterdir():
        if not conf_dir.is_dir():
            continue
        conf = conf_dir.name
        log_names = [p.name for p in conf_dir.rglob("*.log")]
        # Only split logs that have not been split already.
        needs_split = f"{conf}.log" in log_names and f"{conf}_opt.log" not in log_names
        if needs_split and split_log(ligand, conf) != "Error":
            conformers_good.append(conf)
    return conformers_good
# Script entry point: split raw Gaussian logs, aggregate per-ligand data and
# export a summary CSV.
# NOTE(review): `cwd` is used throughout but not defined in this section —
# presumably a module-level Path defined earlier; confirm.
if __name__ == '__main__':
    starttime_all = time.time()
    # Ligand directories are named with an 8-digit numeric ID.
    ligname = re.compile("[0-9]{8}")
    ligands = sorted([i.name for i in cwd.iterdir() if (ligname.match(i.name) and i.is_dir())])
    conformers = {ligand: [i.name for i in (cwd/ligand).iterdir() if i.is_dir()] for ligand in ligands}
    if not (cwd/"ERR").exists():
        (cwd/"ERR").mkdir()
    if not (cwd/"done").exists():
        (cwd/"done").mkdir()
    # Split each raw conformer log once; move the processed raw log to done/.
    for ligand in ligands:
        for conformer in conformers[ligand]:
            logs = [i.name for i in (cwd/ligand/conformer).rglob("*.log")]
            if f"{conformer}.log" in logs and f"{conformer}_opt.log" not in logs:
                status = split_log(ligand,conformer)
                if status != "Error":
                    (cwd/ligand/conformer/f"{conformer}.log").rename(cwd/f"done/{conformer}.log")
    # Resume from a previous aggregation run if available.
    if (cwd/"allligands_data.yml").exists():
        with open(cwd/"allligands_data.yml","r") as f:
            allliganddata = yaml.load(f,Loader=Loader)
    else:
        allliganddata = {}
    for ligand in ligands:
        print(ligand)
        print(conformers[ligand])
        if ligand in allliganddata.keys():
            allliganddata[ligand],status = read_ligand(cwd,ligand,conformers[ligand],allliganddata[ligand])
        else:
            allliganddata[ligand],status = read_ligand(cwd,ligand,conformers[ligand])
        with open(cwd/"allligands_data.yml","w") as f:
            yaml.dump(allliganddata,f,Dumper=Dumper)
    # Build the summary table: one column per (property, variant) pair.
    variants = ["boltz","min","max","delta","vburminconf"]
    columns = [i+"_boltz" for i in boltzproperties if i not in mmproperties] + [f"{i}_{j}" for i,j in itertools.product(mmproperties,variants)]
    df = pd.DataFrame(columns = columns,index = ligands)
    for l in ligands:
        for c in columns:
            # NOTE(review): debug leftovers — this print/exit() aborts the run
            # before the summary CSV is written. Also, `read_ligand` returns
            # data keyed by "boltzmann_averaged_data"/"min_data"/... rather
            # than "properties", so the assignment below likely raises
            # KeyError. TODO: confirm the intended key mapping and remove
            # the exit().
            print(allliganddata[l]["properties"])
            exit()
            df.loc[l][c] = allliganddata[l]["properties"][c]
    df["t_total"] = [allliganddata[l]["t_total"] for l in ligands]
    df["number_of_conformers"] = [allliganddata[l]["number_of_conformers"] for l in ligands]
    df.to_csv("allligands_data.csv",sep=";")
    print(f"All done. Total time: {round((time.time()-starttime_all),2)} sec")
| true | true |
f71fdf25a6bbbf3c2ecdf90eda58463aa369ca88 | 52,543 | py | Python | t5/seqio/dataset_providers.py | dptam/text-to-text-transfer-transformer | 3662823b126ebf39d9d8ed147a8af0c6973f0ba9 | [
"Apache-2.0"
] | null | null | null | t5/seqio/dataset_providers.py | dptam/text-to-text-transfer-transformer | 3662823b126ebf39d9d8ed147a8af0c6973f0ba9 | [
"Apache-2.0"
] | null | null | null | t5/seqio/dataset_providers.py | dptam/text-to-text-transfer-transformer | 3662823b126ebf39d9d8ed147a8af0c6973f0ba9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The T5 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Classes for data loading and processing.
Defines Tasks, TaskRegistry, Mixture, and MixtureRegistry
"""
import abc
import collections
import inspect
import json
import os
import re
from typing import Any, Callable, Iterable, Mapping, MutableMapping, Optional, Sequence, Tuple, Type, Union
from absl import logging
import dataclasses
import numpy as np
from packaging import version
from t5.seqio import utils
from t5.seqio.feature_converters import FeatureConverter
from t5.seqio.vocabularies import Vocabulary
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import typing_extensions
# Feature keys used by default for text-to-text tasks.
_DEFAULT_FEATURE_KEYS = ["inputs", "targets"]
# Valid task names contain only word characters, digits, dots and underscores.
_VALID_TASK_NAME_REGEX = re.compile(r"^[\w\d\._]+$")
# Datasets with at most this many examples may be cached in memory.
_MAX_EXAMPLES_TO_MEM_CACHE = 10000
# Default size of the tf.data shuffle buffer.
SHUFFLE_BUFFER_SIZE = 1000
@dataclasses.dataclass(frozen=True)
class Feature:
  """A container for attributes of output features of data providers."""
  # Vocabulary used to tokenize/detokenize this feature.
  vocabulary: Vocabulary
  # Whether an EOS token should be appended during tokenization.
  add_eos: bool = True
  # Whether the feature must be present after preprocessing.
  required: bool = True
  # dtype of the tokenized feature.
  dtype: tf.DType = tf.int32
@dataclasses.dataclass(frozen=True)
class ShardInfo:
  """A container for specifying sharding info."""
  # Zero-based index of this shard.
  index: int
  # Total number of shards.
  num_shards: int
class DatasetProviderBase(metaclass=abc.ABCMeta):
  """Abstract base for classes that provide a tf.data.Dataset."""

  @abc.abstractproperty
  def output_features(self) -> Mapping[str, Feature]:
    """Mapping from feature name to its `Feature` specification."""
    raise NotImplementedError

  @abc.abstractproperty
  def splits(self) -> Sequence[str]:
    """Names of the splits this provider can serve."""
    raise NotImplementedError

  @abc.abstractmethod
  def get_dataset(
      self,
      sequence_length: int,
      split: str,
      use_cached: bool = False,
      shuffle: bool = True,
      seed: Optional[int] = None,
      shard_info: Optional[ShardInfo] = None,
      num_epochs: int = 1
  ) -> tf.data.Dataset:
    """Returns the requested tf.data.Dataset."""
    raise NotImplementedError

  @abc.abstractmethod
  def num_input_examples(self, split: str) -> int:
    """Returns the number of input examples for `split`."""
    raise NotImplementedError
class DatasetProviderRegistry(object):
  """Base for registry of data providers.

  Subclasses must wrap `get` method to override the return type for pytype.
  TODO(adarob): Remove the need to override `get`.
  """
  # Class variables must be defined in subclasses.
  _REGISTRY: MutableMapping[str, DatasetProviderBase]
  _PROVIDER_TYPE: Type[DatasetProviderBase]

  @classmethod
  def add_provider(cls, name: str, provider):
    """Adds a data provider instance to the registry."""
    if name in cls._REGISTRY:
      raise ValueError("Attempting to register duplicate provider: %s" % name)
    if not isinstance(provider, cls._PROVIDER_TYPE):
      # BUG FIX: message previously read "not of an invalid type" (garbled).
      raise ValueError(
          "Attempting to register a class of an invalid type. "
          "Expecting instance of %s, got %s" %
          (cls._PROVIDER_TYPE, type(provider).__name__))
    cls._REGISTRY[name] = provider

  @classmethod
  def add(
      cls,
      name: str,
      provider_cls,
      *provider_args,
      **provider_kwargs
  ):
    """Instantiates and adds provider to the registry."""
    if not issubclass(provider_cls, cls._PROVIDER_TYPE):
      # BUG FIX: message previously read "not of an invalid type" (garbled).
      raise ValueError(
          "Attempting to register a class of an invalid type. "
          "Expecting instance of %s, got %s" %
          (cls._PROVIDER_TYPE, provider_cls))
    provider = provider_cls(*provider_args, **provider_kwargs)
    cls.add_provider(name, provider)
    return provider

  @classmethod
  def remove(cls, name):
    """Remove provider from the registry, if it exists."""
    if name in cls._REGISTRY:
      del cls._REGISTRY[name]

  @classmethod
  def get(cls, name):
    """Returns provider from the registry."""
    if name not in cls._REGISTRY:
      raise ValueError("Provider name not registered: %s" % name)
    return cls._REGISTRY[name]

  @classmethod
  def names(cls):
    """Returns all provider names in registry."""
    return cls._REGISTRY.keys()

  @classmethod
  def reset(cls):
    """Removes all of the registered tasks."""
    cls._REGISTRY = {}

  @classmethod
  def get_dataset(
      cls,
      name,
      sequence_length,
      split,
      use_cached=False,
      shuffle=True,
      seed=None,
      shard_info=None,
      num_epochs=1):
    """Returns the requested tf.data.Dataset from the named provider."""
    return cls.get(name).get_dataset(
        sequence_length=sequence_length, split=split, use_cached=use_cached,
        shuffle=shuffle, seed=seed, shard_info=shard_info,
        num_epochs=num_epochs)
# =============================== DataSources ==================================
class DataSource(DatasetProviderBase):
  """A `DatasetProvider` that provides raw data from an input source.

  Inherits all abstract methods and properties of `DatasetProviderBase` except
  those overridden below.
  """

  def __init__(
      self,
      splits: Iterable[str],
      num_input_examples: Optional[Mapping[str, int]] = None):
    self._splits = tuple(splits)
    if num_input_examples is None:
      self._num_input_examples = None
    else:
      self._num_input_examples = dict(num_input_examples)

  @property
  def splits(self) -> Sequence[str]:
    return self._splits

  @property
  def output_features(self) -> Mapping[str, Feature]:
    """Override unused property of `DatasetProviderBase`."""
    raise NotImplementedError

  @abc.abstractmethod
  def list_shards(self, split: str) -> Sequence[str]:
    """Returns string identifiers of input shards."""
    raise NotImplementedError

  @abc.abstractmethod
  def get_dataset(
      self,
      split: str,
      shuffle: bool = True,
      seed: Optional[int] = None,
      shard_info: Optional[ShardInfo] = None
  ) -> tf.data.Dataset:
    """Overrides base class to add shard identifier and remove use_cached.

    Args:
      split: string, the split to return.
      shuffle: bool, whether to shuffle the input source.
      seed: tf.int64 scalar tf.Tensor (or None) for shuffling input source.
      shard_info: optional specification for loading a shard of the split.
    """
    raise NotImplementedError

  def num_input_examples(self, split: str) -> Optional[int]:
    counts = self._num_input_examples
    return None if counts is None else counts[split]
def _validate_args(fn, expected_pos_args):
"""Ensure function has exactly expected positional args."""
argspec = inspect.getfullargspec(fn)
expected_pos_args = tuple(expected_pos_args)
actual_args = tuple(argspec.args)
if actual_args[:len(expected_pos_args)] != expected_pos_args:
raise ValueError(
"'%s' must have positional args %s, got: %s" % (
fn.__name__, expected_pos_args, actual_args))
actual_pos_args = tuple(
argspec.args[:-len(argspec.defaults)]
if argspec.defaults else argspec.args)
if actual_pos_args != expected_pos_args[:len(actual_pos_args)]:
raise ValueError(
"'%s' may only have positional args %s, got: %s" % (
fn.__name__, expected_pos_args, actual_pos_args))
class DatasetFnCallable(typing_extensions.Protocol):
  """Structural type of dataset functions accepted by `FunctionDataSource`."""

  def __call__(self,
               split: str,
               shuffle_files: bool,
               seed: Optional[int] = None) -> tf.data.Dataset:
    ...
class FunctionDataSource(DataSource):
  """A `DataSource` that uses a function to provide the input data."""

  def __init__(
      self,
      dataset_fn: DatasetFnCallable,
      splits: Iterable[str],
      num_input_examples: Optional[Mapping[str, int]] = None
  ):
    """FunctionDataSource constructor.

    Args:
      dataset_fn: a function with the signature `dataset_fn(split,
        shuffle_files)' (and optionally the variable `seed`) that returns a
        `tf.data.Dataset`.
      splits: an iterable of applicable string split names.
      num_input_examples: dict or None, an optional dictionary mapping split
        to its size in number of input examples (before preprocessing). The
        `num_input_examples` method will return None if not provided.
    """
    _validate_args(dataset_fn, ["split", "shuffle_files"])
    self._dataset_fn = dataset_fn
    super().__init__(splits=splits, num_input_examples=num_input_examples)

  def get_dataset(
      self,
      split: str,
      shuffle: bool = True,
      seed: Optional[int] = None,
      shard_info: Optional[ShardInfo] = None
  ) -> tf.data.Dataset:
    """Returns the dataset produced by the wrapped function."""
    if shard_info and shard_info.num_shards > 1:
      raise ValueError(
          "`FunctionDataSource` does not support low-level sharding. Use "
          "tf.data.Dataset.shard instead.")
    if seed is None:
      return self._dataset_fn(split=split, shuffle_files=shuffle)
    # A seed was requested, so the dataset function must accept one.
    _validate_args(self._dataset_fn, ["split", "shuffle_files", "seed"])
    return self._dataset_fn(split=split, shuffle_files=shuffle, seed=seed)

  def list_shards(self, split: str) -> Sequence[str]:
    # The function source exposes a single pseudo-shard per split.
    return [split]
class TfdsDataSource(DataSource):
  """A `DataSource` that uses TensorFlow Datasets to provide the input data."""

  def __init__(
      self,
      tfds_name: str,
      tfds_data_dir: Optional[str] = None,
      splits: Optional[Union[Iterable[str], Mapping[str, str]]] = None
  ):
    """TfdsTask constructor.

    Args:
      tfds_name: string, the name and version number of a TFDS dataset,
        optionally with a config.
      tfds_data_dir: string, an optional path to a specific TFDS data directory
        to use.
      splits: an iterable of allowable string split names, a dict mapping
        allowable canonical splits (e.g., 'validation') to TFDS splits or slices
        (e.g., 'train[':1%']), or None. The default, None, uses all available
        splits from the TFDS dataset info.
    """
    if ":" not in tfds_name:
      raise ValueError("TFDS name must contain a version number, got: %s" %
                       tfds_name)
    split_map = splits if isinstance(splits, dict) else None
    self._tfds_dataset = utils.LazyTfdsLoader(
        tfds_name,
        data_dir=tfds_data_dir,
        split_map=split_map)
    # If splits are not provided, we pass an empty tuple and use the lazy
    # lookup in the `splits` property.
    super().__init__(splits=splits or ())

  @property
  def splits(self):
    """Overrides since we can't call `info.splits` until after init."""
    if self._splits:
      return self._splits
    return self._tfds_dataset.info.splits

  @property
  def tfds_dataset(self):
    """The underlying `LazyTfdsLoader`."""
    return self._tfds_dataset

  def get_dataset(
      self,
      split: str,
      shuffle: bool = True,
      seed: Optional[int] = None,
      shard_info: Optional[ShardInfo] = None
  ) -> tf.data.Dataset:
    """Loads the TFDS split, optionally shuffling/sharding files."""
    loader = self.tfds_dataset
    return loader.load(
        split, shuffle_files=shuffle, seed=seed, shard_info=shard_info)

  def num_input_examples(self, split: str) -> int:
    """Overrides since we can't call `info.splits` until after init."""
    return self.tfds_dataset.size(split)

  def list_shards(self, split: str) -> Sequence[str]:
    """Returns the TFDS file names backing `split`."""
    return self.tfds_dataset.files(split)
class FileDataSource(DataSource):
  """A `DataSource` that reads a file to provide the input dataset."""

  def __init__(
      self,
      read_file_fn: Callable[[tf.data.Dataset], tf.data.Dataset],
      split_to_filepattern: Mapping[str, Union[str, Iterable[str]]],
      num_input_examples: Optional[Mapping[str, int]] = None,
  ):
    """FileDataSource constructor.

    Args:
      read_file_fn: a callable for creating a `tf.data.Dataset` from a
        `tf.data.Dataset` of file paths, e.g., `tf.data.TFRecordDataset`.
      split_to_filepattern: a mapping from split names to filepatterns to be
        expanded with glob.
      num_input_examples: dict or None, an optional dictionary mapping split
        to its size in number of input examples (before preprocessing). The
        `num_input_examples` method will return None if not provided.
    """
    self._split_to_filepattern = split_to_filepattern
    self._reader = read_file_fn
    super().__init__(
        splits=split_to_filepattern.keys(),
        num_input_examples=num_input_examples)

  def get_dataset(
      self,
      split: str,
      shuffle: bool = True,
      seed: Optional[int] = None,
      shard_info: Optional[ShardInfo] = None
  ) -> tf.data.Dataset:
    """Reads and interleaves the files for `split`, optionally sharded/shuffled."""
    files = self.list_shards(split)

    if not files:
      raise ValueError(
          "No file is found for the file pattern: "
          f"{self._split_to_filepattern[split]}."
      )
    # BUG FIX: `np.str` was a deprecated alias of the builtin `str` (NumPy
    # 1.20) and was removed in NumPy 1.24; use `str` directly.
    files_ds = tf.data.Dataset.from_tensor_slices(np.array(files, dtype=str))

    if shard_info:
      if len(files) < shard_info.num_shards:
        raise ValueError(
            f"Dataset has too few files to shard. {len(files)} files vs "
            f"{shard_info.num_shards} shards requested.")
      files_ds = files_ds.shard(shard_info.num_shards, shard_info.index)

    if shuffle:
      files_ds = files_ds.shuffle(buffer_size=16, seed=seed)

    return files_ds.interleave(
        self._reader,
        cycle_length=16,
        block_length=16,
        num_parallel_calls=tf.data.experimental.AUTOTUNE)

  def list_shards(self, split: str) -> Sequence[str]:
    """Globs the filepattern(s) registered for `split`."""
    return tf.io.gfile.glob(self._split_to_filepattern[split])
class TextLineDataSource(FileDataSource):
  """A `FileDataSource` that reads lines of text from a file as input."""

  def __init__(
      self,
      split_to_filepattern: Mapping[str, Union[str, Iterable[str]]],
      skip_header_lines: int = 0,
      num_input_examples: Optional[Mapping[str, int]] = None,
  ):
    """TextLineDataSource constructor.

    Args:
      split_to_filepattern: a mapping from split names to filepatterns to be
        expanded with glob.
      skip_header_lines: int, number of header lines to skip in each source
        file.
      num_input_examples: dict or None, an optional dictionary mapping split to
        its size in number of input examples (before preprocessing). The
        `num_input_examples` method will return None if not provided.
    """
    # Kept as an attribute because the offline caching pipeline uses it.
    self._skip_header_lines = skip_header_lines

    def read_file_fn(filepattern):
      # Drop the first `skip_header_lines` lines of every file.
      lines = tf.data.TextLineDataset(filepattern)
      return lines.skip(skip_header_lines)

    super().__init__(
        read_file_fn=read_file_fn,
        split_to_filepattern=split_to_filepattern,
        num_input_examples=num_input_examples)
class TFExampleDataSource(FileDataSource):
  """A `FileDataSource` that reads files of tf.train.Example protos as input."""

  def __init__(
      self,
      split_to_filepattern: Mapping[str, Union[str, Iterable[str]]],
      feature_description: Mapping[str, Union[tf.io.FixedLenFeature,
                                              tf.io.VarLenFeature]],
      reader_cls: Type[tf.data.Dataset] = tf.data.TFRecordDataset,
      num_input_examples: Optional[Mapping[str, int]] = None,
  ):
    """TFExampleDataSource constructor.

    Args:
      split_to_filepattern: dict of string (split name) to either string
        (filename or filepattern) or list of strings (filenames or
        filepatterns).
      feature_description: dict, a mapping of string feature keys to
        `tf.io.FixedLenFeature` or `tf.io.VarLenFeature` values.
      reader_cls: `tf.data.Dataset`, a dataset class to read the input files.
      num_input_examples: dict or None, an optional dictionary mapping split to
        its size in number of input examples (before preprocessing). The
        `num_input_examples` method will return None if not provided.
    """

    def read_file_fn(filepattern):
      # Parse each serialized tf.train.Example into a feature dict.
      ds = reader_cls(filepattern)
      return ds.map(
          lambda pb: tf.io.parse_single_example(pb, feature_description),
          num_parallel_calls=tf.data.experimental.AUTOTUNE)

    super().__init__(
        read_file_fn=read_file_fn,
        split_to_filepattern=split_to_filepattern,
        num_input_examples=num_input_examples)
# ========================== Offline Caching Helpers ===========================
def _rename_plaintext_to_pretokenized(
    dataset: tf.data.Dataset) -> tf.data.Dataset:
  """Rename cached _plaintext features to new _pretokenized standard."""

  def _new_key(key):
    # "foo_plaintext" -> "foo_pretokenized"; other keys pass through.
    if key.endswith("_plaintext"):
      return key[:-len("plaintext")] + "pretokenized"
    return key

  def _rename(inputs):
    return {_new_key(k): v for k, v in inputs.items()}

  return dataset.map(
      _rename, num_parallel_calls=tf.data.experimental.AUTOTUNE)
class _CachedDataSource(FileDataSource):
  """A `FileDataSource` for reading datasets cached offline."""

  def __init__(self, cache_dir: str, split: str):
    """Builds a TFRecord-backed source from the cached info/stats JSON files."""

    with tf.io.gfile.GFile(utils.get_cached_info_path(cache_dir, split)) as f:
      split_info = json.load(f)
      features = split_info["features"]

    with tf.io.gfile.GFile(utils.get_cached_stats_path(cache_dir, split)) as f:
      stats = json.load(f)

    version_when_cached = version.Version(
        split_info.get("seqio_version", "0.pre"))
    version_with_true_dtypes = version.Version("0.0.0")
    if version_when_cached < version_with_true_dtypes:
      # Assume that all int64 features are really int32.
      for name, feat in features.items():
        if feat["dtype"] == "int64":
          logging.info("Casting cached '%s' to int32.", name)
          feat["dtype"] = "int32"

    # Use `FixedLenSequenceFeature` for sequences with variable length.
    def _feature_config(shape, dtype):
      if dtype in ("int32", "bool"):
        # int32 and bool are stored as int64 in the tf.train.Example protobuf.
        # TODO(adarob): Support other conversions.
        dtype = "int64"
      if shape and shape[0] is None:
        return tf.io.FixedLenSequenceFeature(
            shape[1:], dtype, allow_missing=True)
      return tf.io.FixedLenFeature(shape, dtype)

    feature_description = {
        feat: _feature_config(**desc) for feat, desc in features.items()
    }

    def read_file_fn(filepattern):
      ds = tf.data.TFRecordDataset(filepattern)
      ds = ds.map(
          lambda pb: tf.io.parse_single_example(pb, feature_description),
          num_parallel_calls=tf.data.experimental.AUTOTUNE)
      # Cast features back to the types from the info JSON since some features
      # must be cast for storage (e.g., int32 is stored as int64).
      ds = ds.map(
          lambda x: {k: tf.cast(v, features[k]["dtype"]) for k, v in x.items()},
          num_parallel_calls=tf.data.experimental.AUTOTUNE)
      # Legacy cached datasets may use old "_plaintext" suffix. Rename to
      # "_pretokenized".
      ds = _rename_plaintext_to_pretokenized(ds)
      return ds

    # Shard count is recorded in the info JSON when the cache is written.
    split_to_filepattern = {
        split: "%s-*-of-*%d" % (
            utils.get_cached_tfrecord_prefix(cache_dir, split),
            split_info["num_shards"])
    }

    super().__init__(
        read_file_fn=read_file_fn,
        split_to_filepattern=split_to_filepattern,
        num_input_examples={split: stats["examples"]}
    )
class CacheDatasetPlaceholder(object):
  """A placeholder to signal when in the pipeline offline caching will occur."""

  def __init__(self, required=False):
    """CacheDatasetPlaceholder constructor.

    Args:
      required: whether the dataset must be accessed in its cached form, and
        on-the-fly preprocessing is disallowed.
    """
    self._required = required

  @property
  def required(self):
    """Whether reading from the cache is mandatory for this task."""
    return self._required

  def __call__(self, dataset):
    # The placeholder only marks a position in the preprocessor list; it is
    # never invoked as a preprocessor itself.
    raise RuntimeError("`CacheDatasetPlaceholder` should never be called.")
# ================================ Tasks =======================================
# Common signature of metric functions: returns a mapping from metric name to
# float value.
MetricFnCallable = Callable[..., Mapping[str, float]]
class Task(DatasetProviderBase):
"""A class to manage a dataset and its related metrics."""
def __init__(
self,
name: str,
source: DataSource,
output_features: Mapping[str, Feature],
preprocessors: Optional[Sequence[Callable[..., tf.data.Dataset]]] = None,
postprocess_fn: Optional[Callable[..., Any]] = None,
metric_fns: Optional[Sequence[MetricFnCallable]] = None,
shuffle_buffer_size: Optional[int] = SHUFFLE_BUFFER_SIZE):
"""Task constructor.
Args:
name: a unique name for the Task.
source: a `DataSource` that provides a raw `tf.data.Dataset`.
output_features: dict(str, Feature), output features of the Task to be
passed to the model. After preprocessing, examples will be validated to
ensure they include features that match this specification. Note that
additional features may be included (e.g., for evaluation), but they
will not be passed to the model.
preprocessors: list(callable), an optional list of functions that receive
a tf.data.Dataset and return a tf.data.Dataset. These will be executed
sequentually and the final dataset must include features matching
`output_features`.
postprocess_fn: callable, an optional function that receives decoded model
outputs and converts them to a form that is ready for evaluation using
the metric functions in `metric_fns`.
metric_fns: list(callable), an optional list of metric functions with the
signature `metric_fn(targets, predictions)` to use during evaluation. If
undefined or empty, no evaluation will occur on the task.
shuffle_buffer_size: an optional integer to set the shuffle buffer size.
If None, shuffling will be disallowed.
"""
if not _VALID_TASK_NAME_REGEX.match(name):
raise ValueError(
"Task name '%s' contains invalid characters. Must match regex: %s" % (
name, _VALID_TASK_NAME_REGEX.pattern))
metric_fns = metric_fns or []
self._predict_metric_fns = []
self._score_metric_fns = []
for metric_fn in metric_fns:
pos_args = tuple(
key for key, param in inspect.signature(metric_fn).parameters.items()
if param.default == inspect.Parameter.empty
)
if pos_args == ("targets", "scores"):
self._score_metric_fns.append(metric_fn)
elif pos_args == ("targets", "predictions"):
self._predict_metric_fns.append(metric_fn)
else:
raise ValueError(
"Metric functions must have positional arguments matching either "
"('targets', 'predictions') or ('targets', 'scores'). "
f"Got: {pos_args}")
self._name = name
self._source = source
# Find optional CacheDatasetPlaceholder.
preprocessors = tuple(preprocessors or [])
cache_step_idxs = [
i for i, p in enumerate(preprocessors)
if isinstance(p, CacheDatasetPlaceholder)
]
if len(cache_step_idxs) > 1:
raise ValueError(
"`CacheDatasetPlaceholder` can appear at most once in the "
f"preprocessing pipeline. Found {len(cache_step_idxs)} in '{name}'.")
cache_step_idx = cache_step_idxs[0] if cache_step_idxs else None
if cache_step_idx is not None:
for prep in preprocessors[:cache_step_idx]:
prep_args = inspect.signature(prep).parameters.keys()
if "sequence_length" in prep_args:
raise ValueError(
f"'{prep.__name__}' has a `sequence_length` argument but occurs "
f"before `CacheDatasetPlaceholder` in '{name}'. This is not "
"allowed since the sequence length is specified at run time.")
if "seed" in prep_args or "seeds" in prep_args:
raise logging.warning( # pylint:disable=logging-format-interpolation
f"'{prep.__name__}' has a `seed(s)` argument but occurs before "
f"`CacheDatasetPlaceholder` in '{name}'. This is not recommended "
"since the same samples will be used each epoch when reading "
"from the cache.")
self._cache_step_idx = cache_step_idx
self._preprocessors = preprocessors
self._metric_fns = tuple(metric_fns)
self._postprocess_fn = postprocess_fn
self._cache_dir = None
self._stats = {}
self._shuffle_buffer_size = shuffle_buffer_size
self._output_features = collections.OrderedDict(
sorted(list(output_features.items()))
)
  @property
  def name(self) -> str:
    """Name of this task."""
    return self._name
  @property
  def metric_fns(self) -> Sequence[MetricFnCallable]:
    """List of all metric functions: prediction-based first, then score-based."""
    return self._predict_metric_fns + self._score_metric_fns
  @property
  def score_metric_fns(self) -> Sequence[MetricFnCallable]:
    """Metric functions with signature `(targets, scores)` (log-likelihoods)."""
    return self._score_metric_fns
  @property
  def predict_metric_fns(self) -> Sequence[MetricFnCallable]:
    """Metric functions with signature `(targets, predictions)`."""
    return self._predict_metric_fns
  @property
  def output_features(self) -> Mapping[str, Feature]:
    """Mapping from feature name to `Feature` spec, ordered by name."""
    return self._output_features
@property
def splits(self) -> Sequence[str]:
s = self.source.splits
if not s:
raise ValueError(f"Task {self.name} has no splits")
return s
  @property
  def source(self) -> DataSource:
    """The underlying `DataSource` providing raw input examples."""
    return self._source
  @property
  def preprocessors(self) -> Sequence[Callable[..., tf.data.Dataset]]:
    """The preprocessing functions applied to the source dataset, in order."""
    return self._preprocessors
  def num_input_examples(self, split: str) -> Optional[int]:
    """Returns the number of input examples for `split`, if known."""
    return self.source.num_input_examples(split)
def _preprocess_dataset(
self,
dataset: tf.data.Dataset,
preprocessors: Sequence[Callable[..., tf.data.Dataset]],
sequence_length: Optional[Mapping[str, int]] = None) -> tf.data.Dataset:
"""Sequentially applies preprocessors."""
for prep_fn in preprocessors:
# prep_fn must not rely on variable length keyword args such as **kwargs.
fn_args = set(inspect.signature(prep_fn).parameters.keys())
kwargs = {}
if "sequence_length" in fn_args:
kwargs["sequence_length"] = sequence_length
if "output_features" in fn_args:
kwargs["output_features"] = self.output_features
dataset = prep_fn(dataset, **kwargs)
return dataset
  def _validate_preprocessing(
      self, dataset: tf.data.Dataset
  ) -> tf.data.Dataset:
    """Validates preprocessed dataset, raising Exceptions if needed.

    Checks that every required output feature is present and that each
    present output feature has the declared dtype and rank 1.

    Args:
      dataset: a tf.data.Dataset to validate.

    Returns:
      a validated tf.data.Dataset.

    Raises:
      ValueError: if a required feature is missing, or a feature has the
        wrong dtype or a rank other than 1.
    """
    actual_specs = dataset.element_spec
    for feat, feat_spec in self.output_features.items():
      if feat not in actual_specs:
        if feat_spec.required:
          raise ValueError(
              "Task dataset is missing expected output feature after "
              f"preprocessing: {feat}")
        else:
          # It's ok that this feature does not exist.
          continue
      actual_spec = actual_specs[feat]
      if feat_spec.dtype != actual_spec.dtype:
        raise ValueError(
            f"Task dataset has incorrect type for feature '{feat}' after "
            f"preprocessing: Got {actual_spec.dtype.name}, expected "
            f"{feat_spec.dtype.name}")
      if actual_spec.shape.rank != 1:
        raise ValueError(
            f"Task dataset has incorrect rank for feature '{feat}' after "
            f"preprocessing: Got {actual_spec.shape.rank}, expected 1")

    return dataset
  def _trim_output_features(
      self,
      dataset: tf.data.Dataset,
      sequence_length: Optional[Mapping[str, int]]
  ) -> tf.data.Dataset:
    """Trim output features to sequence length.

    Features not listed in `output_features` — or all features when
    `sequence_length` is falsy — are passed through untrimmed.
    """

    def _trim(k: str, v: tf.Tensor) -> tf.Tensor:
      if k not in self.output_features or not sequence_length:
        return v
      return v[:sequence_length[k]]

    return dataset.map(
        lambda ex: {k: _trim(k, v) for k, v in ex.items()},
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
  def preprocess_precache(
      self,
      dataset: tf.data.Dataset,
      seed: Optional[int] = None
  ) -> tf.data.Dataset:
    """Runs preprocessing steps before the optional CacheDatasetPlaceholder.

    Args:
      dataset: a tf.data.Dataset to preprocess.
      seed: optional base seed used by the map-seed manager for deterministic
        preprocessing.

    Returns:
      the dataset after the pre-cache preprocessors; returned unchanged when
      the task has no `CacheDatasetPlaceholder`.
    """
    if not self.supports_caching:
      return dataset

    with utils.map_seed_manager(seed):
      return self._preprocess_dataset(
          dataset,
          self._preprocessors[:self._cache_step_idx],
      )
  def preprocess_postcache(
      self,
      dataset: tf.data.Dataset,
      sequence_length: Optional[Mapping[str, int]],
      seed: Optional[int] = None
  ) -> tf.data.Dataset:
    """Runs preprocessing steps after the optional CacheDatasetPlaceholder.

    Args:
      dataset: a tf.data.Dataset
      sequence_length: dict mapping feature key to int length for that feature.
        If None, the features will not be truncated.
      seed: an optional random seed for deterministic preprocessing.

    Returns:
      a tf.data.Dataset
    """
    start_idx = 0
    if self.supports_caching:
      # Skip a sufficient number of seeds to avoid duplicating any from
      # pre-cache preprocessing.
      seed = None if seed is None else seed + 42 * self._cache_step_idx
      start_idx = self._cache_step_idx + 1
    with utils.map_seed_manager(seed):
      dataset = self._preprocess_dataset(
          dataset,
          self._preprocessors[start_idx:],
          sequence_length=sequence_length,
      )
    return dataset
  @property
  def cache_dir(self) -> Optional[str]:
    """Returns the cache directory (or None), initializing if needed.

    Searches the global cache directories for a completed cache of this task
    (marked by a "COMPLETED" file) and memoizes the first match.
    """
    if not self._cache_dir:
      # See if cached data exists in any of the cache directories.
      potential_cache_dirs = [
          os.path.join(d, self.name) for d in utils.get_global_cache_dirs()]
      for cache_dir in potential_cache_dirs:
        try:
          if tf.io.gfile.exists(os.path.join(cache_dir, "COMPLETED")):
            self._cache_dir = cache_dir
            logging.info("'%s' is cached at %s.", self.name, self.cache_dir)
            break
        except tf.errors.PermissionDeniedError:
          # Skip unreadable cache dirs rather than failing the whole lookup.
          logging.warning(
              "Permission denied for global cache folder: %s", cache_dir)

      if not self._cache_dir:
        logging.info(
            "'%s' does not exist in any task cache directories (searched %s).",
            self.name,
            potential_cache_dirs,
        )
    return self._cache_dir
  @property
  def supports_caching(self) -> bool:
    """Whether or not this task supports offline caching.

    True iff the preprocessor pipeline contains a `CacheDatasetPlaceholder`.
    """
    return self._cache_step_idx is not None
  @property
  def requires_caching(self) -> bool:
    """Whether or not this task requires offline caching.

    True iff the pipeline's `CacheDatasetPlaceholder` is marked as required.
    """
    return (self._cache_step_idx is not None and
            self.preprocessors[self._cache_step_idx].required)
  def assert_cached(self) -> None:
    """Raises an assertion error if cached dataset does not exist.

    NOTE(review): uses `assert`, which is stripped under `python -O`; the
    check silently disappears with optimizations enabled — confirm intended.
    """
    assert self.cache_dir, (
        f"'{self.name}' does not exist in any of the task cache directories.")
  def get_cached_stats(self,
                       split: str = tfds.Split.TRAIN
                       ) -> Mapping[str, Union[int, float]]:
    """Returns basic statistics for cached dataset.

    Stats are read from the cached stats JSON on first access per split and
    memoized in `self._stats`.

    Raises:
      AssertionError: if the task is not cached.
      ValueError: if the stats file for `split` does not exist.
    """
    self.assert_cached()
    if split not in self._stats:
      stats_path = utils.get_cached_stats_path(self.cache_dir, split)
      if not tf.io.gfile.exists(stats_path):
        raise ValueError(
            "Stats do not exist for '%s' split: %s" % (self.name, split))
      with tf.io.gfile.GFile(stats_path) as f:
        self._stats[split] = json.load(f)
    return self._stats[split]
  def get_dataset(
      self,
      sequence_length: Optional[Mapping[str, int]],
      split: str = tfds.Split.TRAIN,
      use_cached: bool = False,
      shuffle: bool = True,
      shuffle_buffer_size: Optional[int] = None,
      seed: Optional[int] = None,
      shard_info: Optional[ShardInfo] = None,
      num_epochs: Optional[int] = 1
  ) -> tf.data.Dataset:
    """Returns a tf.data.Dataset from cache or generated on the fly.

    Args:
      sequence_length: dict mapping feature key to maximum int length for that
        feature. If longer after preprocessing, the feature will be truncated.
        May be set to None to avoid truncation.
      split: string, the split to return.
      use_cached: bool, whether to use the cached dataset instead of processing
        it on the fly. Defaults to False.
      shuffle: bool, whether to shuffle the dataset. Only used when generating
        on the fly (use_cached=False).
      shuffle_buffer_size: an integer or None to use task-specific buffer size.
      seed: tf.int64 scalar tf.Tensor (or None) for shuffling tf.data.
      shard_info: optional specification for loading a shard of the split. If
        the Task's DataSource contains at least the number of shards in the
        specification, it will be passed the shard info to avoid loading the
        full source dataset. Otherwise, the full source dataset will be loaded
        and sharded at the individual examples.
      num_epochs: the number of times to iterate through the dataset, or `None`
        to repeat indefinitely. Note that the repeat occurs in the pipeline
        after offline caching, but before applying potentially stochastic
        post-cache preprocessors and is therefore typically preferred to
        calling `repeat()` on the returned dataset. Defaults to `1`.

    Returns:
      A tf.data.Dataset.

    Raises:
      ValueError: if the task requires caching but `use_cached=False`, or if
        shuffling is requested but was disabled at construction.
    """
    if use_cached and not self.supports_caching:
      # Fall back gracefully to on-the-fly preprocessing.
      logging.warning(
          "Task '%s' does not support caching. Switching to on-the-fly "
          "preprocessing.", self.name)
      use_cached = False
    elif self.requires_caching and not use_cached:
      raise ValueError(
          f"Task '{self.name}' requires caching, but was called with "
          "`use_cached=False`.")

    if shard_info:
      # Whether we should shard at source or on the examples from the source.
      shard_data_source = (
          len(self.source.list_shards(split=split)) >= shard_info.num_shards)
      logging.info("Sharding at the %s: %d of %d",
                   "data source" if shard_data_source else "examples",
                   shard_info.index, shard_info.num_shards)
    else:
      # No sharding.
      shard_data_source = False
      shard_info = ShardInfo(0, 1)

    if use_cached:
      source = self._get_cached_source(split)
    else:
      source = self.source

    if shard_data_source:
      ds = source.get_dataset(
          split=split, shuffle=shuffle, seed=seed, shard_info=shard_info)
    else:
      # Source has fewer shards than requested: load fully, shard by example.
      ds = source.get_dataset(split=split, shuffle=shuffle, seed=seed)
      ds = ds.shard(shard_info.num_shards, shard_info.index)

    # Keep small datasets resident in memory to avoid repeated reads.
    if ((use_cached and
         self.get_cached_stats(split)["examples"] < _MAX_EXAMPLES_TO_MEM_CACHE)
        or (self.num_input_examples(split) and
            self.num_input_examples(split) < _MAX_EXAMPLES_TO_MEM_CACHE)):
      logging.info(
          "Automatically caching small dataset in memory: '%s:%s'",
          self.name, split)
      ds = ds.cache()

    if not use_cached:
      ds = self.preprocess_precache(ds, seed=seed)

    ds = ds.prefetch(tf.data.experimental.AUTOTUNE)

    # We repeat before calling any (potentially) stochastic post-cache
    # preprocessing in order to take new samples each epoch.
    ds = ds.repeat(num_epochs)

    # Post cache processing.
    ds = self.preprocess_postcache(
        ds, sequence_length=sequence_length, seed=seed)
    ds = self._validate_preprocessing(ds)
    ds = self._trim_output_features(ds, sequence_length=sequence_length)

    if shuffle:
      if self._shuffle_buffer_size is None:
        raise ValueError(
            f"Shuffling is disallowed for Task '{self.name}' since its "
            "`shuffle_buffer_size` was set to `None` on construction.")

      shuffle_buffer_size = shuffle_buffer_size or self._shuffle_buffer_size

      # Shuffle before mixing since preprocessor can output multiple
      # (correlated) examples per input.
      ds = ds.shuffle(shuffle_buffer_size, seed=seed)

    return ds.prefetch(tf.data.experimental.AUTOTUNE)
  def _get_cached_source(self, split) -> _CachedDataSource:
    """Returns a DataSource to read cached files for split."""
    self.assert_cached()
    return _CachedDataSource(self.cache_dir, split)
def postprocess_fn(self, decoded_model_output: Any,
**postprocess_kwargs) -> Any:
"""Returns the model output after applying the postprocess function."""
if self._postprocess_fn:
return self._postprocess_fn(decoded_model_output, **postprocess_kwargs)
return decoded_model_output
class TaskRegistry(DatasetProviderRegistry):
  """Registry of Tasks."""
  _REGISTRY = {}
  _PROVIDER_TYPE = Task

  @classmethod
  def add(
      cls,
      name: str,
      source: DataSource,
      output_features: Mapping[str, Feature],
      preprocessors: Optional[Sequence[Callable[..., tf.data.Dataset]]] = None,
      postprocess_fn: Optional[Callable[..., Any]] = None,
      metric_fns: Optional[Sequence[Callable[..., Mapping[str, float]]]] = None,
      **kwargs) -> Task:
    """Constructs a `Task` from the given args and registers it under `name`."""
    return super().add(name, Task, name, source, output_features, preprocessors,
                       postprocess_fn, metric_fns, **kwargs)

  @classmethod
  def get(cls, name) -> Task:
    """Returns the registered `Task` named `name`."""
    return super().get(name)
# ================================ Mixtures ====================================
class Mixture(DatasetProviderBase):
  """Class for mixing multiple tasks."""

  def __init__(self,
               name: str,
               tasks: Union[Sequence[str],
                            Sequence[Tuple[str, Union[int, float,
                                                      Callable[[Task],
                                                               float]]]]],
               default_rate: Optional[
                   Union[float, Callable[[Task], float]]] = None):
    """Mixture constructor.

    A mixture specifies a set of tasks with associated mixing rates.

    Mixing happens on preprocessed tokenized examples.

    The mixing rates represent relative numbers of examples to use from their
    associated tasks. Setting the mixing rates to be equal to the numbers of
    examples in the tasks will result in each task going through an epoch in
    about the same amount of time - i.e. all examples are sampled equally
    across all tasks.

    Rates can be expressed either as absolute numbers or as functions that
    receive the Task as an argument.

    Args:
      name: string, a unique name for the Mixture.
      tasks: a list where each element is either a string (task name) or a
        pair whose first element is the task name and whose second element
        is either a float (rate) or a function from Task to float.
      default_rate: a float or a function from Task to float. This specifies
        the default rate if rates are not provided in the `tasks` argument.
    """
    self._task_to_rate = {}
    self._tasks = []
    self._sub_mixtures = []
    self._name = name
    for t in tasks:
      if isinstance(t, str):
        task_name = t
        rate = default_rate
        if default_rate is None:
          raise ValueError("need a rate for each task")
      else:
        task_name, rate = t

      # Names not found in the task registry are assumed to be sub-mixtures.
      if task_name in TaskRegistry.names():
        self._tasks.append(TaskRegistry.get(task_name))
        self._task_to_rate[task_name] = rate
      else:
        self._sub_mixtures.append(MixtureRegistry.get(task_name))  # pytype:disable=name-error
        self._task_to_rate[task_name] = rate

    if len(set(tuple(t.output_features) for t in self.tasks)) != 1:
      raise ValueError(
          "All Tasks in a Mixture must have the same output features."
      )

  @property
  def name(self) -> str:
    """Name of this mixture."""
    return self._name

  @property
  def tasks(self) -> Sequence[Task]:
    """All component Tasks (including sub-mixtures'), deduped, sorted by name."""
    sub_tasks = (mix.tasks for mix in self._sub_mixtures)
    return list(sorted(set(sum(sub_tasks, self._tasks)), key=lambda t: t.name))

  @property
  def total_rate(self) -> float:
    """Sum of all top-level rates; callable rates are resolved per task."""
    return sum(float(rate(TaskRegistry.get(name)) if callable(rate) else rate)
               for name, rate in self._task_to_rate.items())

  def get_rate(self, task: Task) -> float:
    """Computes the mixing rate for the given task.

    Sub-mixture contributions are weighted by the sub-mixture's top-level
    rate, normalized by its total rate.
    """
    value = 0.0

    for mix in self._sub_mixtures:
      if task in mix.tasks:
        rate = self._task_to_rate[mix.name]
        value += rate * mix.get_rate(task) / mix.total_rate

    if task.name in self._task_to_rate:
      rate = self._task_to_rate[task.name]
      value += float(rate(task) if callable(rate) else rate)

    return value

  def num_input_examples(self, split: str) -> int:
    """Total number of input examples across all component tasks."""
    return sum(t.num_input_examples(split) for t in self.tasks)

  @property
  def splits(self) -> Sequence[str]:
    """Union of the splits of all component tasks."""
    splits = set()
    for task in self.tasks:
      splits.update(task.splits)
    return tuple(splits)

  @property
  def output_features(self) -> Mapping[str, Feature]:
    # We require all tasks to have the same output_features in __init__
    # so we can just get the output_features for the 0th task
    return self.tasks[0].output_features

  def _check_compatible_features(self) -> None:
    """Throw Exception if features across tasks have different vocabs or dtypes.
    """
    for name, feature in self.tasks[0].output_features.items():
      for task in self.tasks[1:]:
        if task.output_features[name].vocabulary != feature.vocabulary:
          raise ValueError(
              "Features across tasks in a mixture must use the same vocabulary."
          )
        if task.output_features[name].dtype != feature.dtype:
          raise ValueError(
              "Features across tasks in a mixture must use the same dtype."
          )

  def get_dataset(
      self,
      sequence_length: Optional[Mapping[str, int]],
      split: str = tfds.Split.TRAIN,
      use_cached: bool = False,
      shuffle: bool = True,
      seed: Optional[int] = None,
      shard_info: Optional[ShardInfo] = None,
      num_epochs: Optional[int] = None,
      copy_pretokenized: bool = False,
      compute_stats_empirically: bool = False,
  ) -> tf.data.Dataset:
    """Returns the dataset of mixed tasks using the object-specified rates.

    Args:
      sequence_length: dict mapping feature key to maximum int length for that
        feature. If longer after preprocessing, the feature will be truncated.
        May be set to None to avoid truncation.
      split: string, the split to return for all tasks.
      use_cached: bool, whether to use the cached dataset instead of processing
        it on the fly. Defaults to False.
      shuffle: bool, whether to shuffle the dataset. Only used when generating
        on the fly (use_cached=False).
      seed: tf.int64 scalar tf.Tensor (or None) for shuffling tf.data.
      shard_info: optional specification for loading a shard of the split.
      num_epochs: the number of times to iterate through the dataset, or `None`
        to repeat indefinitely. Note that the repeat occurs in the pipeline
        after offline caching, but before applying potentially stochastic
        post-cache preprocessors and is therefore typically preferred to
        calling `repeat()` on the returned dataset. Defaults to `None`.
      copy_pretokenized: bool, whether to pass through copies of pretokenized
        features a "_pretokenized" suffix added to the key.
      compute_stats_empirically: a boolean - does not work on TPU
    """
    self._check_compatible_features()
    tasks = []
    for task in self.tasks:
      # Tasks lacking the requested split are skipped with a warning.
      if split not in task.splits:
        logging.warning(
            "Task %s has no '%s' split, skipping.", task.name, split
        )
        continue
      tasks.append(task)
    if not tasks:
      raise ValueError("No datasets have a '{}' split".format(split))

    output_feature_keys = set(self.output_features.keys())
    if copy_pretokenized:
      output_feature_keys.update(
          {f + "_pretokenized" for f in output_feature_keys})

    def filter_features(ex):
      return {k: v for k, v in ex.items() if k in output_feature_keys}

    datasets = [
        task.get_dataset(  # pylint:disable=g-complex-comprehension
            sequence_length,
            split=split,
            use_cached=use_cached,
            shuffle=shuffle,
            seed=seed,
            shard_info=shard_info,
            num_epochs=num_epochs)
        .map(filter_features, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        for task in tasks]

    rates = [self.get_rate(task) for task in tasks]
    # Sample from the datasets with the computed rates.
    if seed is not None:
      sample_seed = seed
    elif shuffle:
      sample_seed = None
    else:
      # Fixed seed keeps the mixture deterministic when not shuffling.
      sample_seed = 42
    dataset = tf.data.experimental.sample_from_datasets(
        datasets, rates, sample_seed)
    if (split == "train" and use_cached and
        all(t.supports_caching for t in tasks)):
      _log_mixing_proportions(tasks, datasets, rates, dataset, sequence_length,
                              compute_stats_empirically)
    return dataset
def _log_padding_fractions(dataset, sequence_length, num_examples=100):
  """Empirically compute the fraction of padding - log the results.

  NOTE(review): accumulates `1 - sequence_length[k]/len(ex[k])` summed over
  the sampled examples without dividing by `num_examples` — confirm intended.

  Args:
    dataset: a tf.data.Dataset
    sequence_length: dict from string to int (packed lengths)
    num_examples: an integer
  """
  logging.info("computing padding fractions")
  feature_keys = sequence_length.keys()
  padding_frac = dict.fromkeys(feature_keys, 0)
  for example in tfds.as_numpy(dataset.take(num_examples)):
    for key in feature_keys:
      padding_frac[key] += 1 - (sequence_length[key] / len(example[key]))
  for key in feature_keys:
    logging.info("%s padding fraction = %g", key, padding_frac[key])
def _log_mixing_proportions(
    tasks, datasets, rates, mixed_dataset,
    sequence_length, compute_stats_empirically):
  """Log information about the mixing proportions.

  Called from Mixture.get_dataset.

  Args:
    tasks: a list of Task
    datasets: a list of tf.data.Dataset
    rates: a list of floats
    mixed_dataset: a tf.data.Dataset
    sequence_length: dict from string to int (packed lengths)
    compute_stats_empirically: a boolean - does not work on TPU
  """

  def _normalize(l):
    # Normalizes to fractions summing to 1; returns input unchanged if all 0.
    denom = sum(l)
    if not denom:
      return l
    return [x / denom for x in l]

  # compute some stats about the mixture
  examples_fraction = _normalize(rates)
  if compute_stats_empirically:
    # Sample a fixed number of examples per dataset and average lengths.
    stats_examples = 100
    mean_inputs_length = []
    mean_targets_length = []
    for dataset in datasets:
      inputs_sum = 0
      targets_sum = 0
      for ex in tfds.as_numpy(dataset.take(stats_examples)):
        # Some tasks, like LMs, don't have inputs.
        if "inputs" in ex:
          inputs_sum += ex["inputs"].size
        targets_sum += ex["targets"].size
      mean_inputs_length.append(inputs_sum / float(stats_examples))
      mean_targets_length.append(targets_sum / float(stats_examples))
  else:
    def _estimated_mean_length(task, key):
      # Estimates mean token length from cached stats where reliable.
      if key not in sequence_length:
        return 0
      if (task.supports_caching and
          task._cache_step_idx < len(task._preprocessors) - 1):  # pylint:disable=protected-access
        # There is processing after caching, so we can't rely on the stats.
        return sequence_length[key]
      # Some tasks, like LMs, don't have inputs.
      if key + "_tokens" in task.get_cached_stats("train"):
        return min(sequence_length[key],
                   (task.get_cached_stats("train")[key + "_tokens"] /
                    task.get_cached_stats("train")["examples"]))
      else:
        return 0

    mean_inputs_length = [_estimated_mean_length(task, "inputs")
                          for task in tasks]
    mean_targets_length = [_estimated_mean_length(task, "targets")
                           for task in tasks]
  inputs_fraction = _normalize(
      [l * r for l, r in zip(mean_inputs_length, rates)])
  targets_fraction = _normalize(
      [l * r for l, r in zip(mean_targets_length, rates)])
  logging.info("%12s %12s %12s %12s %12s %12s %s",
               "rate", "ex.frac.", "inp.frac.", "tgt.frac.",
               "inp.len.", "tgt.len", "task")
  for i in range(len(rates)):
    logging.info("%12g %12g %12g %12g %12g %12g %s",
                 rates[i], examples_fraction[i],
                 inputs_fraction[i], targets_fraction[i],
                 mean_inputs_length[i], mean_targets_length[i],
                 tasks[i].name)
  if compute_stats_empirically:
    _log_padding_fractions(mixed_dataset, sequence_length)
class MixtureRegistry(DatasetProviderRegistry):
  """Registry of Mixtures."""
  _REGISTRY = {}
  _PROVIDER_TYPE = Mixture

  @classmethod
  def add(cls, name, tasks, default_rate=None) -> Mixture:
    """Constructs a `Mixture` from `tasks` and registers it under `name`."""
    return super().add(name, Mixture, name, tasks, default_rate)

  @classmethod
  def get(cls, name) -> Mixture:
    """Returns the registered `Mixture` named `name`."""
    return super().get(name)
def get_mixture_or_task(task_or_mixture_name):
  """Return the Task or Mixture from the appropriate registry.

  Mixtures take precedence when a name is registered as both.
  """
  is_mixture = task_or_mixture_name in MixtureRegistry.names()
  is_task = task_or_mixture_name in TaskRegistry.names()
  if is_mixture:
    if is_task:
      logging.warning("%s is both a Task and a Mixture, returning Mixture",
                      task_or_mixture_name)
    return MixtureRegistry.get(task_or_mixture_name)
  if is_task:
    return TaskRegistry.get(task_or_mixture_name)
  raise ValueError("No Task or Mixture found with name: %s" %
                   task_or_mixture_name)
def get_subtasks(task_or_mixture):
  """Returns all the Tasks in a Mixture as a list or the Task itself."""
  is_single_task = isinstance(task_or_mixture, Task)
  return [task_or_mixture] if is_single_task else task_or_mixture.tasks
def get_dataset(
    mixture_or_task_name: str,
    task_feature_lengths: Mapping[str, int],
    feature_converter: FeatureConverter,
    dataset_split: str = "train",
    use_cached: bool = False,
    shuffle: bool = False,
    num_epochs: Optional[int] = 1,
    shard_info: ShardInfo = None,
    verbose: bool = True,
    seed: Optional[int] = None
) -> tf.data.Dataset:
  """Get processed dataset with the model features.

  In order to use options specific to a feature converter, e.g., packing,
  `feature_converter` instance should be instantiated with those options before
  being passed to this function.

  Getting sharded datasets is supported. To use this feature, pass in
  `shard_info`, with shard_index and num_shards information. Sharding is done
  before the feature converter stage. Therefore, if packing is used it will be
  done on the sharded dataset.

  Args:
    mixture_or_task_name: mixture or task name for the Task API.
    task_feature_lengths: dict mapping task feature key to its sequence length.
      This specifies the sequence length of the dataset from the Task API.
    feature_converter: a feature converter object to use to convert the task
      features to model features. Must be a subclass of FeatureConverter.
    dataset_split: the split to use.
    use_cached: whether to use the cached dataset instead of processing it on
      the fly.
    shuffle: whether to shuffle the dataset.
    num_epochs: the number of times to iterate through the dataset, or `None`
      to repeat indefinitely. Note that the repeat occurs in the pipeline after
      offline caching, but before applying potentially stochastic post-cache
      preprocessors and is therefore typically preferred to calling `repeat()`
      on the returned dataset. Defaults to `1`.
    shard_info: number of shards and shard index information.
    verbose: if true, log the feature shapes.
    seed: a random seed to for shuffling tf.data.

  Returns:
    ds: the processed dataset.
  """
  if not isinstance(feature_converter, FeatureConverter):
    raise TypeError(
        "feature_converter should be an instance of FeatureConverter.")

  mixture_or_task = get_mixture_or_task(mixture_or_task_name)

  ds = mixture_or_task.get_dataset(
      task_feature_lengths,
      split=dataset_split,
      use_cached=use_cached,
      shuffle=shuffle,
      seed=seed,
      shard_info=shard_info,
      num_epochs=num_epochs)

  # Convert task features (e.g. "inputs"/"targets") into model features.
  ds = feature_converter(ds, task_feature_lengths=task_feature_lengths)

  if verbose:
    logging.info(
        "The output dataset from seqio.get_dataset has the following features")
    for feature_name, tensor_spec in ds.element_spec.items():
      logging.info("feature: %s \t shape: %s \t dtype: %s", feature_name,
                   tensor_spec.shape.as_list(), tensor_spec.dtype.name)
  return ds
| 36.161734 | 107 | 0.671888 |
import abc
import collections
import inspect
import json
import os
import re
from typing import Any, Callable, Iterable, Mapping, MutableMapping, Optional, Sequence, Tuple, Type, Union
from absl import logging
import dataclasses
import numpy as np
from packaging import version
from t5.seqio import utils
from t5.seqio.feature_converters import FeatureConverter
from t5.seqio.vocabularies import Vocabulary
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import typing_extensions
# Default feature keys for text-to-text tasks.
_DEFAULT_FEATURE_KEYS = ["inputs", "targets"]

# Valid task names: word characters, digits, dots and underscores only.
_VALID_TASK_NAME_REGEX = re.compile(r"^[\w\d\._]+$")
# Datasets smaller than this (in examples) are automatically cached in memory
# by `Task.get_dataset`.
_MAX_EXAMPLES_TO_MEM_CACHE = 10000
# Default example shuffle buffer size for tasks.
SHUFFLE_BUFFER_SIZE = 1000
@dataclasses.dataclass(frozen=True)
class Feature:
  """Immutable spec for a single task output feature."""
  # Vocabulary associated with this feature.
  vocabulary: Vocabulary
  # presumably controls appending an EOS token during tokenization — confirm
  # against the tokenizing preprocessors.
  add_eos: bool = True
  # If True, the feature must be present after preprocessing (enforced by
  # `Task._validate_preprocessing`).
  required: bool = True
  # Expected dtype of the preprocessed feature values.
  dtype: tf.DType = tf.int32
@dataclasses.dataclass(frozen=True)
class ShardInfo:
  """Selects one shard out of `num_shards` when loading a dataset split."""
  # Zero-based index of this shard.
  index: int
  # Total number of shards.
  num_shards: int
class DatasetProviderBase(metaclass=abc.ABCMeta):
  """Abstract base for classes that provide a tf.data.Dataset."""

  @abc.abstractproperty
  def output_features(self) -> Mapping[str, Feature]:
    """Mapping from feature name to its `Feature` specification."""
    raise NotImplementedError

  @abc.abstractproperty
  def splits(self) -> Sequence[str]:
    """Names of the splits this provider can serve."""
    raise NotImplementedError

  @abc.abstractmethod
  def get_dataset(
      self,
      sequence_length: int,
      split: str,
      use_cached: bool = False,
      shuffle: bool = True,
      seed: Optional[int] = None,
      shard_info: Optional[ShardInfo] = None,
      num_epochs: int = 1
  ) -> tf.data.Dataset:
    """Returns the requested tf.data.Dataset."""
    raise NotImplementedError

  @abc.abstractmethod
  def num_input_examples(self, split: str) -> int:
    """Returns the number of input examples in `split`."""
    raise NotImplementedError
class DatasetProviderRegistry(object):
  """Base for global registries mapping names to dataset providers.

  Subclasses must define `_REGISTRY` (the backing name -> provider map) and
  `_PROVIDER_TYPE` (the class registered values must be instances of).
  """
  _REGISTRY: MutableMapping[str, DatasetProviderBase]
  _PROVIDER_TYPE: Type[DatasetProviderBase]

  @classmethod
  def add_provider(cls, name: str, provider):
    """Adds a pre-constructed provider to the registry under `name`.

    Raises:
      ValueError: if `name` is already registered or `provider` is not an
        instance of `_PROVIDER_TYPE`.
    """
    if name in cls._REGISTRY:
      raise ValueError("Attempting to register duplicate provider: %s" % name)
    if not isinstance(provider, cls._PROVIDER_TYPE):
      # Message fixed: previously read "a class not of an invalid type".
      raise ValueError(
          "Attempting to register a class of an invalid type. "
          "Expecting instance of %s, got %s" %
          (cls._PROVIDER_TYPE, type(provider).__name__))
    cls._REGISTRY[name] = provider

  @classmethod
  def add(
      cls,
      name: str,
      provider_cls,
      *provider_args,
      **provider_kwargs
  ):
    """Constructs `provider_cls(*provider_args, **provider_kwargs)` and
    registers the instance under `name`, returning it.

    Raises:
      ValueError: if `provider_cls` is not a subclass of `_PROVIDER_TYPE`.
    """
    if not issubclass(provider_cls, cls._PROVIDER_TYPE):
      # Message fixed: previously read "a class not of an invalid type".
      raise ValueError(
          "Attempting to register a class of an invalid type. "
          "Expecting instance of %s, got %s" %
          (cls._PROVIDER_TYPE, provider_cls))
    provider = provider_cls(*provider_args, **provider_kwargs)
    cls.add_provider(name, provider)
    return provider

  @classmethod
  def remove(cls, name):
    """Removes `name` from the registry if present (no-op otherwise)."""
    if name in cls._REGISTRY:
      del cls._REGISTRY[name]

  @classmethod
  def get(cls, name):
    """Returns the provider registered under `name`.

    Raises:
      ValueError: if `name` is not registered.
    """
    if name not in cls._REGISTRY:
      raise ValueError("Provider name not registered: %s" % name)
    return cls._REGISTRY[name]

  @classmethod
  def names(cls):
    """Returns a view of all registered provider names."""
    return cls._REGISTRY.keys()

  @classmethod
  def reset(cls):
    """Clears the registry."""
    cls._REGISTRY = {}

  @classmethod
  def get_dataset(
      cls,
      name,
      sequence_length,
      split,
      use_cached=False,
      shuffle=True,
      seed=None,
      shard_info=None,
      num_epochs=1):
    """Convenience shortcut for `cls.get(name).get_dataset(...)`."""
    return cls.get(name).get_dataset(
        sequence_length=sequence_length, split=split, use_cached=use_cached,
        shuffle=shuffle, seed=seed, shard_info=shard_info,
        num_epochs=num_epochs)
class DataSource(DatasetProviderBase):
  """A `DatasetProviderBase` supplying raw, un-preprocessed input data."""

  def __init__(
      self,
      splits: Iterable[str],
      num_input_examples: Optional[Mapping[str, int]] = None):
    # Copy inputs so later caller-side mutation cannot affect this object.
    self._splits = tuple(splits)
    self._num_input_examples = (
        dict(num_input_examples) if num_input_examples is not None else None)

  @property
  def splits(self) -> Sequence[str]:
    """Split names provided at construction."""
    return self._splits

  @property
  def output_features(self) -> Mapping[str, Feature]:
    """Not implemented for raw data sources."""
    raise NotImplementedError

  @abc.abstractmethod
  def list_shards(self, split: str) -> Sequence[str]:
    """Returns string identifiers of input shards."""
    raise NotImplementedError

  @abc.abstractmethod
  def get_dataset(
      self,
      split: str,
      shuffle: bool = True,
      seed: Optional[int] = None,
      shard_info: Optional[ShardInfo] = None
  ) -> tf.data.Dataset:
    """Returns a dataset of raw examples for `split`."""
    raise NotImplementedError

  def num_input_examples(self, split: str) -> Optional[int]:
    """Returns the number of examples for `split`, if provided at init."""
    if self._num_input_examples is None:
      return None
    return self._num_input_examples[split]
def _validate_args(fn, expected_pos_args):
argspec = inspect.getfullargspec(fn)
expected_pos_args = tuple(expected_pos_args)
actual_args = tuple(argspec.args)
if actual_args[:len(expected_pos_args)] != expected_pos_args:
raise ValueError(
"'%s' must have positional args %s, got: %s" % (
fn.__name__, expected_pos_args, actual_args))
actual_pos_args = tuple(
argspec.args[:-len(argspec.defaults)]
if argspec.defaults else argspec.args)
if actual_pos_args != expected_pos_args[:len(actual_pos_args)]:
raise ValueError(
"'%s' may only have positional args %s, got: %s" % (
fn.__name__, expected_pos_args, actual_pos_args))
class DatasetFnCallable(typing_extensions.Protocol):
  """Structural type for functions that build a tf.data.Dataset for a split."""

  def __call__(self,
               split: str,
               shuffle_files: bool,
               seed: Optional[int] = None) -> tf.data.Dataset:
    ...
class FunctionDataSource(DataSource):
  """A `DataSource` that calls a user function to provide the input data."""

  def __init__(
      self,
      dataset_fn: DatasetFnCallable,
      splits: Iterable[str],
      num_input_examples: Optional[Mapping[str, int]] = None
  ):
    # The function must accept `split` and `shuffle_files` positionally.
    _validate_args(dataset_fn, ["split", "shuffle_files"])
    self._dataset_fn = dataset_fn
    super().__init__(splits=splits, num_input_examples=num_input_examples)

  def get_dataset(
      self,
      split: str,
      shuffle: bool = True,
      seed: Optional[int] = None,
      shard_info: Optional[ShardInfo] = None
  ) -> tf.data.Dataset:
    """Calls `dataset_fn` for `split`; low-level sharding is unsupported."""
    if shard_info and shard_info.num_shards > 1:
      raise ValueError(
          "`FunctionDataSource` does not support low-level sharding. Use "
          "tf.data.Dataset.shard instead.")

    if seed is None:
      ds = self._dataset_fn(split=split, shuffle_files=shuffle)
    else:
      # A seeded call additionally requires the function to accept `seed`.
      _validate_args(self._dataset_fn, ["split", "shuffle_files", "seed"])
      ds = self._dataset_fn(split=split, shuffle_files=shuffle, seed=seed)
    return ds

  def list_shards(self, split: str) -> Sequence[str]:
    """Function sources expose a single nominal shard per split."""
    return [split]
class TfdsDataSource(DataSource):
  """A `DataSource` backed by a TensorFlow Datasets dataset."""

  def __init__(
      self,
      tfds_name: str,
      tfds_data_dir: Optional[str] = None,
      splits: Optional[Union[Iterable[str], Mapping[str, str]]] = None
  ):
    """TfdsDataSource constructor.

    Args:
      tfds_name: TFDS name including a version suffix (e.g. "name:1.0.0").
      tfds_data_dir: optional data directory to read the TFDS dataset from.
      splits: optional iterable of split names, or a mapping used to remap
        split names; if None, all TFDS splits are exposed lazily.
    """
    if ":" not in tfds_name:
      raise ValueError("TFDS name must contain a version number, got: %s" %
                       tfds_name)
    # Loading is deferred until the dataset is actually accessed.
    self._tfds_dataset = utils.LazyTfdsLoader(
        tfds_name,
        data_dir=tfds_data_dir,
        split_map=splits if isinstance(splits, dict) else None)
    # With no explicit splits, pass () and defer to the lazy `splits` lookup.
    super().__init__(splits=splits or ())

  @property
  def splits(self):
    """Splits given at init, else all splits reported by TFDS."""
    return self._splits or self._tfds_dataset.info.splits

  @property
  def tfds_dataset(self):
    """The underlying `LazyTfdsLoader`."""
    return self._tfds_dataset

  def get_dataset(
      self,
      split: str,
      shuffle: bool = True,
      seed: Optional[int] = None,
      shard_info: Optional[ShardInfo] = None
  ) -> tf.data.Dataset:
    """Loads `split` from TFDS, optionally sharded at the file level."""
    return self.tfds_dataset.load(
        split, shuffle_files=shuffle, seed=seed, shard_info=shard_info)

  def num_input_examples(self, split: str) -> int:
    """Returns the TFDS-reported example count for `split`."""
    return self.tfds_dataset.size(split)

  def list_shards(self, split: str) -> Sequence[str]:
    """Returns the TFDS data files that make up `split`."""
    return self.tfds_dataset.files(split)
class FileDataSource(DataSource):
  """A `DataSource` that reads files to provide the input dataset."""

  def __init__(
      self,
      read_file_fn: Callable[[tf.data.Dataset], tf.data.Dataset],
      split_to_filepattern: Mapping[str, Union[str, Iterable[str]]],
      num_input_examples: Optional[Mapping[str, int]] = None,
  ):
    """FileDataSource constructor.

    Args:
      read_file_fn: a callable mapping a single file shard to a
        tf.data.Dataset of examples.
      split_to_filepattern: a mapping from split names to file pattern(s).
      num_input_examples: optional mapping from split to its number of
        examples, surfaced via `num_input_examples`.
    """
    self._split_to_filepattern = split_to_filepattern
    self._reader = read_file_fn
    super().__init__(
        splits=split_to_filepattern.keys(),
        num_input_examples=num_input_examples)

  def get_dataset(
      self,
      split: str,
      shuffle: bool = True,
      seed: Optional[int] = None,
      shard_info: Optional[ShardInfo] = None
  ) -> tf.data.Dataset:
    """Returns an interleaved dataset over the split's file shards.

    Raises:
      ValueError: if no files match the split's pattern, or there are fewer
        files than the requested number of shards.
    """
    files = self.list_shards(split)

    if not files:
      raise ValueError(
          "No file is found for the file pattern: "
          f"{self._split_to_filepattern[split]}."
      )
    # Fix: `np.str` (a deprecated alias of the builtin `str`) was removed in
    # NumPy 1.24; use `str` directly.
    files_ds = tf.data.Dataset.from_tensor_slices(np.array(files, dtype=str))

    if shard_info:
      if len(files) < shard_info.num_shards:
        raise ValueError(
            f"Dataset has too few files to shard. {len(files)} files vs "
            f"{shard_info.num_shards} shards requested.")
      files_ds = files_ds.shard(shard_info.num_shards, shard_info.index)

    if shuffle:
      # Shuffle file order only; example-level shuffling happens downstream.
      files_ds = files_ds.shuffle(buffer_size=16, seed=seed)

    return files_ds.interleave(
        self._reader,
        cycle_length=16,
        block_length=16,
        num_parallel_calls=tf.data.experimental.AUTOTUNE)

  def list_shards(self, split: str) -> Sequence[str]:
    """Globs the split's file pattern(s) into concrete shard file names."""
    return tf.io.gfile.glob(self._split_to_filepattern[split])
class TextLineDataSource(FileDataSource):
  """A `FileDataSource` that reads lines of text files as examples."""

  def __init__(
      self,
      split_to_filepattern: Mapping[str, Union[str, Iterable[str]]],
      skip_header_lines: int = 0,
      num_input_examples: Optional[Mapping[str, int]] = None,
  ):
    """TextLineDataSource constructor.

    Args:
      split_to_filepattern: mapping from split name to its file pattern(s).
      skip_header_lines: number of header lines to drop from each file.
      num_input_examples: optional mapping from split to example count.
    """
    self._skip_header_lines = skip_header_lines

    def _read_lines(filepattern):
      # Each remaining line of the file becomes one example.
      return tf.data.TextLineDataset(filepattern).skip(skip_header_lines)

    super().__init__(
        read_file_fn=_read_lines,
        split_to_filepattern=split_to_filepattern,
        num_input_examples=num_input_examples)
class TFExampleDataSource(FileDataSource):
  """A `FileDataSource` that reads `tf.train.Example` protos as examples."""

  def __init__(
      self,
      split_to_filepattern: Mapping[str, Union[str, Iterable[str]]],
      feature_description: Mapping[str, Union[tf.io.FixedLenFeature,
                                              tf.io.VarLenFeature]],
      reader_cls: Type[tf.data.Dataset] = tf.data.TFRecordDataset,
      num_input_examples: Optional[Mapping[str, int]] = None,
  ):
    """TFExampleDataSource constructor.

    Args:
      split_to_filepattern: mapping from split name to its file pattern(s).
      feature_description: mapping from feature name to its proto parsing
        spec, passed to `tf.io.parse_single_example`.
      reader_cls: dataset class used to read raw records; defaults to
        `tf.data.TFRecordDataset`.
      num_input_examples: optional mapping from split to example count.
    """

    def read_file_fn(filepattern):
      # Parse each serialized proto into a feature dict.
      return reader_cls(filepattern).map(
          lambda pb: tf.io.parse_single_example(pb, feature_description),
          num_parallel_calls=tf.data.experimental.AUTOTUNE)

    super().__init__(
        read_file_fn=read_file_fn,
        split_to_filepattern=split_to_filepattern,
        num_input_examples=num_input_examples)
def _rename_plaintext_to_pretokenized(
    dataset: tf.data.Dataset) -> tf.data.Dataset:
  """Renames `*_plaintext` features to the newer `*_pretokenized` naming."""

  def _renamed_key(key):
    if key.endswith("_plaintext"):
      # Keep the trailing underscore; only the suffix word changes.
      return key[:-len("plaintext")] + "pretokenized"
    return key

  def _rename_features(features):
    return {_renamed_key(k): v for k, v in features.items()}

  return dataset.map(
      _rename_features, num_parallel_calls=tf.data.experimental.AUTOTUNE)
class _CachedDataSource(FileDataSource):
  """A `FileDataSource` for reading one split of an offline-cached task."""

  def __init__(self, cache_dir: str, split: str):
    """Builds a reader from the cached info/stats JSON files in `cache_dir`."""

    with tf.io.gfile.GFile(utils.get_cached_info_path(cache_dir, split)) as f:
      split_info = json.load(f)
      features = split_info["features"]

    with tf.io.gfile.GFile(utils.get_cached_stats_path(cache_dir, split)) as f:
      stats = json.load(f)

    # Caches written before the version below did not record true dtypes;
    # their int64 feature labels are rewritten to int32 here.
    version_when_cached = version.Version(
        split_info.get("seqio_version", "0.pre"))
    version_with_true_dtypes = version.Version("0.0.0")
    if version_when_cached < version_with_true_dtypes:
      for name, feat in features.items():
        if feat["dtype"] == "int64":
          logging.info("Casting cached '%s' to int32.", name)
          feat["dtype"] = "int32"

    def _feature_config(shape, dtype):
      # int32/bool values are stored as int64 in the serialized protos, so
      # parse as int64 and cast back after reading.
      if dtype in ("int32", "bool"):
        dtype = "int64"
      if shape and shape[0] is None:
        # Leading None dimension means a variable-length sequence feature.
        return tf.io.FixedLenSequenceFeature(
            shape[1:], dtype, allow_missing=True)
      return tf.io.FixedLenFeature(shape, dtype)

    feature_description = {
        feat: _feature_config(**desc) for feat, desc in features.items()
    }

    def read_file_fn(filepattern):
      ds = tf.data.TFRecordDataset(filepattern)
      ds = ds.map(
          lambda pb: tf.io.parse_single_example(pb, feature_description),
          num_parallel_calls=tf.data.experimental.AUTOTUNE)
      # Cast each feature back to the dtype recorded in the info JSON.
      ds = ds.map(
          lambda x: {k: tf.cast(v, features[k]["dtype"]) for k, v in x.items()},
          num_parallel_calls=tf.data.experimental.AUTOTUNE)
      # Legacy caches used the `_plaintext` suffix; normalize the naming.
      ds = _rename_plaintext_to_pretokenized(ds)
      return ds

    split_to_filepattern = {
        split: "%s-*-of-*%d" % (
            utils.get_cached_tfrecord_prefix(cache_dir, split),
            split_info["num_shards"])
    }

    super().__init__(
        read_file_fn=read_file_fn,
        split_to_filepattern=split_to_filepattern,
        num_input_examples={split: stats["examples"]}
    )
class CacheDatasetPlaceholder(object):
  """Marker in a preprocessing pipeline: steps before it may be cached."""

  def __init__(self, required=False):
    """Args: required: whether the task must be cached to be usable."""
    self._required = required

  @property
  def required(self):
    """Whether caching is mandatory for this task."""
    return self._required

  def __call__(self, dataset):
    # The placeholder is removed during preprocessing; invoking it means the
    # pipeline was run without handling the cache step.
    raise RuntimeError("`CacheDatasetPlaceholder` should never be called.")
MetricFnCallable = Callable[..., Mapping[str, float]]
class Task(DatasetProviderBase):
  """A dataset with a preprocessing pipeline, feature specs, and metrics.

  A `Task` combines a raw `DataSource` with a sequence of preprocessors,
  output feature specifications, and optional postprocessing and metric
  functions.
  """

  def __init__(
      self,
      name: str,
      source: DataSource,
      output_features: Mapping[str, Feature],
      preprocessors: Optional[Sequence[Callable[..., tf.data.Dataset]]] = None,
      postprocess_fn: Optional[Callable[..., Any]] = None,
      metric_fns: Optional[Sequence[MetricFnCallable]] = None,
      shuffle_buffer_size: Optional[int] = SHUFFLE_BUFFER_SIZE):
    """Task constructor.

    Args:
      name: a unique task name; must match `_VALID_TASK_NAME_REGEX`.
      source: a `DataSource` providing the raw examples.
      output_features: mapping from feature name to its `Feature` spec.
      preprocessors: optional dataset-to-dataset callables; may contain at
        most one `CacheDatasetPlaceholder`.
      postprocess_fn: optional callable applied to decoded model outputs.
      metric_fns: optional callables whose required positional args are
        ('targets', 'predictions') or ('targets', 'scores').
      shuffle_buffer_size: shuffle buffer size, or None to disallow
        shuffling.

    Raises:
      ValueError: for an invalid name, an unrecognized metric signature,
        multiple cache placeholders, or a `sequence_length`-dependent
        preprocessor before the cache placeholder.
    """
    if not _VALID_TASK_NAME_REGEX.match(name):
      raise ValueError(
          "Task name '%s' contains invalid characters. Must match regex: %s" % (
              name, _VALID_TASK_NAME_REGEX.pattern))
    metric_fns = metric_fns or []
    self._predict_metric_fns = []
    self._score_metric_fns = []
    # Bucket each metric function by its required positional arguments.
    for metric_fn in metric_fns:
      pos_args = tuple(
          key for key, param in inspect.signature(metric_fn).parameters.items()
          if param.default == inspect.Parameter.empty
      )
      if pos_args == ("targets", "scores"):
        self._score_metric_fns.append(metric_fn)
      elif pos_args == ("targets", "predictions"):
        self._predict_metric_fns.append(metric_fn)
      else:
        raise ValueError(
            "Metric functions must have positional arguments matching either "
            "('targets', 'predictions') or ('targets', 'scores'). "
            f"Got: {pos_args}")
    self._name = name
    self._source = source

    preprocessors = tuple(preprocessors or [])
    cache_step_idxs = [
        i for i, p in enumerate(preprocessors)
        if isinstance(p, CacheDatasetPlaceholder)
    ]
    if len(cache_step_idxs) > 1:
      raise ValueError(
          "`CacheDatasetPlaceholder` can appear at most once in the "
          f"preprocessing pipeline. Found {len(cache_step_idxs)} in '{name}'.")
    cache_step_idx = cache_step_idxs[0] if cache_step_idxs else None
    if cache_step_idx is not None:
      for prep in preprocessors[:cache_step_idx]:
        prep_args = inspect.signature(prep).parameters.keys()
        if "sequence_length" in prep_args:
          raise ValueError(
              f"'{prep.__name__}' has a `sequence_length` argument but occurs "
              f"before `CacheDatasetPlaceholder` in '{name}'. This is not "
              "allowed since the sequence length is specified at run time.")
        if "seed" in prep_args or "seeds" in prep_args:
          # Bug fix: this was `raise logging.warning(...)`, which raised a
          # `TypeError` since `logging.warning` returns None. A seeded
          # pre-cache preprocessor only merits a warning, not an exception.
          logging.warning(
              f"'{prep.__name__}' has a `seed(s)` argument but occurs before "
              f"`CacheDatasetPlaceholder` in '{name}'. This is not recommended "
              "since the same samples will be used each epoch when reading "
              "from the cache.")
    self._cache_step_idx = cache_step_idx
    self._preprocessors = preprocessors

    self._metric_fns = tuple(metric_fns)
    self._postprocess_fn = postprocess_fn

    self._cache_dir = None
    self._stats = {}
    self._shuffle_buffer_size = shuffle_buffer_size

    self._output_features = collections.OrderedDict(
        sorted(list(output_features.items()))
    )

  @property
  def name(self) -> str:
    """The unique name of this Task."""
    return self._name

  @property
  def metric_fns(self) -> Sequence[MetricFnCallable]:
    """All metric functions: prediction-based, then score-based."""
    return self._predict_metric_fns + self._score_metric_fns

  @property
  def score_metric_fns(self) -> Sequence[MetricFnCallable]:
    """Metric functions taking ('targets', 'scores')."""
    return self._score_metric_fns

  @property
  def predict_metric_fns(self) -> Sequence[MetricFnCallable]:
    """Metric functions taking ('targets', 'predictions')."""
    return self._predict_metric_fns

  @property
  def output_features(self) -> Mapping[str, Feature]:
    """Output feature specs, ordered by feature name."""
    return self._output_features

  @property
  def splits(self) -> Sequence[str]:
    """Splits provided by the data source.

    Raises:
      ValueError: if the source reports no splits.
    """
    s = self.source.splits
    if not s:
      raise ValueError(f"Task {self.name} has no splits")
    return s

  @property
  def source(self) -> DataSource:
    """The underlying `DataSource`."""
    return self._source

  @property
  def preprocessors(self) -> Sequence[Callable[..., tf.data.Dataset]]:
    """The full preprocessing pipeline, including any cache placeholder."""
    return self._preprocessors

  def num_input_examples(self, split: str) -> Optional[int]:
    """Number of raw input examples in `split`, if known."""
    return self.source.num_input_examples(split)

  def _preprocess_dataset(
      self,
      dataset: tf.data.Dataset,
      preprocessors: Sequence[Callable[..., tf.data.Dataset]],
      sequence_length: Optional[Mapping[str, int]] = None) -> tf.data.Dataset:
    """Applies `preprocessors` in order, passing only the kwargs each wants."""
    for prep_fn in preprocessors:
      # Inject `sequence_length`/`output_features` only when the
      # preprocessor's signature declares those parameters.
      fn_args = set(inspect.signature(prep_fn).parameters.keys())
      kwargs = {}
      if "sequence_length" in fn_args:
        kwargs["sequence_length"] = sequence_length
      if "output_features" in fn_args:
        kwargs["output_features"] = self.output_features
      dataset = prep_fn(dataset, **kwargs)
    return dataset

  def _validate_preprocessing(
      self, dataset: tf.data.Dataset
  ) -> tf.data.Dataset:
    """Validates the dataset against the output feature specs.

    Checks required-feature presence, dtype agreement, and rank 1.

    Raises:
      ValueError: on any mismatch.
    """
    actual_specs = dataset.element_spec
    for feat, feat_spec in self.output_features.items():
      if feat not in actual_specs:
        if feat_spec.required:
          raise ValueError(
              "Task dataset is missing expected output feature after "
              f"preprocessing: {feat}")
        else:
          continue
      actual_spec = actual_specs[feat]
      if feat_spec.dtype != actual_spec.dtype:
        raise ValueError(
            f"Task dataset has incorrect type for feature '{feat}' after "
            f"preprocessing: Got {actual_spec.dtype.name}, expected "
            f"{feat_spec.dtype.name}")
      if actual_spec.shape.rank != 1:
        raise ValueError(
            f"Task dataset has incorrect rank for feature '{feat}' after "
            f"preprocessing: Got {actual_spec.shape.rank}, expected 1")
    return dataset

  def _trim_output_features(
      self,
      dataset: tf.data.Dataset,
      sequence_length: Optional[Mapping[str, int]]
  ) -> tf.data.Dataset:
    """Truncates output features to their maximum sequence lengths."""

    def _trim(k: str, v: tf.Tensor) -> tf.Tensor:
      # Features that are not outputs, or all features when no lengths are
      # given, pass through untouched.
      if k not in self.output_features or not sequence_length:
        return v
      return v[:sequence_length[k]]

    return dataset.map(
        lambda ex: {k: _trim(k, v) for k, v in ex.items()},
        num_parallel_calls=tf.data.experimental.AUTOTUNE)

  def preprocess_precache(
      self,
      dataset: tf.data.Dataset,
      seed: Optional[int] = None
  ) -> tf.data.Dataset:
    """Runs the preprocessing steps before the cache placeholder, if any."""
    if not self.supports_caching:
      return dataset
    with utils.map_seed_manager(seed):
      return self._preprocess_dataset(
          dataset,
          self._preprocessors[:self._cache_step_idx],
      )

  def preprocess_postcache(
      self,
      dataset: tf.data.Dataset,
      sequence_length: Optional[Mapping[str, int]],
      seed: Optional[int] = None
  ) -> tf.data.Dataset:
    """Runs the preprocessing steps after the cache placeholder (or all)."""
    start_idx = 0
    if self.supports_caching:
      # Skip a sufficient number of seeds to avoid duplicating any from
      # pre-cache preprocessing.
      seed = None if seed is None else seed + 42 * self._cache_step_idx
      start_idx = self._cache_step_idx + 1
    with utils.map_seed_manager(seed):
      dataset = self._preprocess_dataset(
          dataset,
          self._preprocessors[start_idx:],
          sequence_length=sequence_length,
      )
    return dataset

  @property
  def cache_dir(self) -> Optional[str]:
    """Directory holding this task's cached data, or None if not cached."""
    if not self._cache_dir:
      # See if cached data exists in any of the cache directories.
      potential_cache_dirs = [
          os.path.join(d, self.name) for d in utils.get_global_cache_dirs()]
      for cache_dir in potential_cache_dirs:
        try:
          if tf.io.gfile.exists(os.path.join(cache_dir, "COMPLETED")):
            self._cache_dir = cache_dir
            logging.info("'%s' is cached at %s.", self.name, self.cache_dir)
            break
        except tf.errors.PermissionDeniedError:
          logging.warning(
              "Permission denied for global cache folder: %s", cache_dir)

      if not self._cache_dir:
        logging.info(
            "'%s' does not exist in any task cache directories (searched %s).",
            self.name,
            potential_cache_dirs,
        )
    return self._cache_dir

  @property
  def supports_caching(self) -> bool:
    """Whether the pipeline contains a `CacheDatasetPlaceholder`."""
    return self._cache_step_idx is not None

  @property
  def requires_caching(self) -> bool:
    """Whether the cache placeholder was marked as required."""
    return (self._cache_step_idx is not None and
            self.preprocessors[self._cache_step_idx].required)

  def assert_cached(self) -> None:
    """Raises an AssertionError if cached data is not available."""
    assert self.cache_dir, (
        f"'{self.name}' does not exist in any of the task cache directories.")

  def get_cached_stats(self,
                       split: str = tfds.Split.TRAIN
                       ) -> Mapping[str, Union[int, float]]:
    """Returns (and memoizes) the cached statistics for `split`.

    Raises:
      ValueError: if the stats file for the split does not exist.
    """
    self.assert_cached()
    if split not in self._stats:
      stats_path = utils.get_cached_stats_path(self.cache_dir, split)
      if not tf.io.gfile.exists(stats_path):
        raise ValueError(
            "Stats do not exist for '%s' split: %s" % (self.name, split))
      with tf.io.gfile.GFile(stats_path) as f:
        self._stats[split] = json.load(f)
    return self._stats[split]

  def get_dataset(
      self,
      sequence_length: Optional[Mapping[str, int]],
      split: str = tfds.Split.TRAIN,
      use_cached: bool = False,
      shuffle: bool = True,
      shuffle_buffer_size: Optional[int] = None,
      seed: Optional[int] = None,
      shard_info: Optional[ShardInfo] = None,
      num_epochs: Optional[int] = 1
  ) -> tf.data.Dataset:
    """Returns the fully preprocessed `tf.data.Dataset` for `split`.

    Args:
      sequence_length: max lengths used to trim (and passed to
        length-aware preprocessors), or None.
      split: the split to read.
      use_cached: read from the offline cache instead of preprocessing.
      shuffle: whether to shuffle files and examples.
      shuffle_buffer_size: overrides the constructor's buffer size.
      seed: optional seed for shuffling and stochastic preprocessors.
      shard_info: optional shard to restrict the dataset to.
      num_epochs: number of repeats before post-cache preprocessing.

    Raises:
      ValueError: if caching is required but `use_cached=False`, or
        shuffling is requested for a task constructed with
        `shuffle_buffer_size=None`.
    """
    if use_cached and not self.supports_caching:
      logging.warning(
          "Task '%s' does not support caching. Switching to on-the-fly "
          "preprocessing.", self.name)
      use_cached = False
    elif self.requires_caching and not use_cached:
      raise ValueError(
          f"Task '{self.name}' requires caching, but was called with "
          "`use_cached=False`.")

    if shard_info:
      # Whether we should shard at source or on the examples from the source.
      shard_data_source = (
          len(self.source.list_shards(split=split)) >= shard_info.num_shards)
      logging.info("Sharding at the %s: %d of %d",
                   "data source" if shard_data_source else "examples",
                   shard_info.index, shard_info.num_shards)
    else:
      # No sharding.
      shard_data_source = False
      shard_info = ShardInfo(0, 1)

    if use_cached:
      source = self._get_cached_source(split)
    else:
      source = self.source

    if shard_data_source:
      ds = source.get_dataset(
          split=split, shuffle=shuffle, seed=seed, shard_info=shard_info)
    else:
      ds = source.get_dataset(split=split, shuffle=shuffle, seed=seed)
      ds = ds.shard(shard_info.num_shards, shard_info.index)

    # Cache small datasets in memory to avoid re-reading every epoch.
    if ((use_cached and
         self.get_cached_stats(split)["examples"] < _MAX_EXAMPLES_TO_MEM_CACHE)
        or (self.num_input_examples(split) and
            self.num_input_examples(split) < _MAX_EXAMPLES_TO_MEM_CACHE)):
      logging.info(
          "Automatically caching small dataset in memory: '%s:%s'",
          self.name, split)
      ds = ds.cache()

    if not use_cached:
      ds = self.preprocess_precache(ds, seed=seed)

    ds = ds.prefetch(tf.data.experimental.AUTOTUNE)

    # We repeat before calling any (potentially) stochastic post-cache
    # preprocessing in order to take new samples each epoch.
    ds = ds.repeat(num_epochs)

    # Post cache processing.
    ds = self.preprocess_postcache(
        ds, sequence_length=sequence_length, seed=seed)
    ds = self._validate_preprocessing(ds)
    ds = self._trim_output_features(ds, sequence_length=sequence_length)

    if shuffle:
      if self._shuffle_buffer_size is None:
        raise ValueError(
            f"Shuffling is disallowed for Task '{self.name}' since its "
            "`shuffle_buffer_size` was set to `None` on construction.")
      shuffle_buffer_size = shuffle_buffer_size or self._shuffle_buffer_size
      # Shuffle before mixing since preprocessor can output multiple
      # (correlated) examples per input.
      ds = ds.shuffle(shuffle_buffer_size, seed=seed)

    return ds.prefetch(tf.data.experimental.AUTOTUNE)

  def _get_cached_source(self, split) -> _CachedDataSource:
    """Returns a source that reads this task's offline cache for `split`."""
    self.assert_cached()
    return _CachedDataSource(self.cache_dir, split)

  def postprocess_fn(self, decoded_model_output: Any,
                     **postprocess_kwargs) -> Any:
    """Applies the postprocessing function, if one was provided."""
    if self._postprocess_fn:
      return self._postprocess_fn(decoded_model_output, **postprocess_kwargs)
    return decoded_model_output
class TaskRegistry(DatasetProviderRegistry):
  """Registry of `Task`s."""

  _REGISTRY = {}
  _PROVIDER_TYPE = Task

  @classmethod
  def add(
      cls,
      name: str,
      source: DataSource,
      output_features: Mapping[str, Feature],
      preprocessors: Optional[Sequence[Callable[..., tf.data.Dataset]]] = None,
      postprocess_fn: Optional[Callable[..., Any]] = None,
      metric_fns: Optional[Sequence[Callable[..., Mapping[str, float]]]] = None,
      **kwargs) -> Task:
    """Constructs a `Task` from the given arguments and registers it by name."""
    return super().add(name, Task, name, source, output_features, preprocessors,
                       postprocess_fn, metric_fns, **kwargs)

  @classmethod
  def get(cls, name) -> Task:
    """Returns the registered `Task` named `name`."""
    return super().get(name)
# ================================ Mixtures ====================================
class Mixture(DatasetProviderBase):
  """A weighted mixture of `Task`s and/or sub-`Mixture`s."""

  def __init__(self,
               name: str,
               tasks: Union[Sequence[str],
                            Sequence[Tuple[str, Union[int, float,
                                                      Callable[[Task],
                                                               float]]]]],
               default_rate: Union[float, Callable[[Task], float]] = None):
    """Mixture constructor.

    Args:
      name: the name of the mixture.
      tasks: either a list of task/mixture names, or a list of
        (name, rate) tuples where rate is a number or a callable mapping a
        `Task` to a float.
      default_rate: the rate for entries given by bare name; required in
        that case.

    Raises:
      ValueError: if a bare name is given without a default rate, or if the
        member tasks do not all share the same output features.
    """
    self._task_to_rate = {}
    self._tasks = []
    self._sub_mixtures = []
    self._name = name
    for t in tasks:
      if isinstance(t, str):
        task_name = t
        rate = default_rate
        if default_rate is None:
          raise ValueError("need a rate for each task")
      else:
        task_name, rate = t

      if task_name in TaskRegistry.names():
        self._tasks.append(TaskRegistry.get(task_name))
        self._task_to_rate[task_name] = rate
      else:
        # Names not found in the TaskRegistry are assumed to be sub-mixtures.
        self._sub_mixtures.append(MixtureRegistry.get(task_name))  # pytype:disable=name-error
        self._task_to_rate[task_name] = rate

    if len(set(tuple(t.output_features) for t in self.tasks)) != 1:
      raise ValueError(
          "All Tasks in a Mixture must have the same output features."
      )

  @property
  def name(self) -> str:
    """The name of this Mixture."""
    return self._name

  @property
  def tasks(self) -> Sequence[Task]:
    """All member Tasks (including those of sub-mixtures), sorted by name."""
    sub_tasks = (mix.tasks for mix in self._sub_mixtures)
    return list(sorted(set(sum(sub_tasks, self._tasks)), key=lambda t: t.name))

  @property
  def total_rate(self) -> float:
    """Sum of the (possibly computed) rates of all direct members."""
    return sum(float(rate(TaskRegistry.get(name)) if callable(rate) else rate)
               for name, rate in self._task_to_rate.items())

  def get_rate(self, task: Task) -> float:
    """Computes the mixing rate of `task`, including sub-mixture shares."""
    value = 0.0

    # A task may contribute through several sub-mixtures; each sub-mixture's
    # share is scaled by its own rate relative to its total.
    for mix in self._sub_mixtures:
      if task in mix.tasks:
        rate = self._task_to_rate[mix.name]
        value += rate * mix.get_rate(task) / mix.total_rate

    if task.name in self._task_to_rate:
      rate = self._task_to_rate[task.name]
      value += float(rate(task) if callable(rate) else rate)

    return value

  def num_input_examples(self, split: str) -> int:
    """Total number of input examples across all member tasks."""
    return sum(t.num_input_examples(split) for t in self.tasks)

  @property
  def splits(self) -> Sequence[str]:
    """The union of all member tasks' splits."""
    splits = set()
    for task in self.tasks:
      splits.update(task.splits)
    return tuple(splits)

  @property
  def output_features(self) -> Mapping[str, Feature]:
    """Output feature specs shared by all member tasks."""
    # We require all tasks to have the same output_features in __init__
    # so we can just get the output_features for the 0th task
    return self.tasks[0].output_features

  def _check_compatible_features(self) -> None:
    """Raises ValueError if member features differ in vocabulary or dtype."""
    for name, feature in self.tasks[0].output_features.items():
      for task in self.tasks[1:]:
        if task.output_features[name].vocabulary != feature.vocabulary:
          raise ValueError(
              "Features across tasks in a mixture must use the same vocabulary."
          )
        if task.output_features[name].dtype != feature.dtype:
          raise ValueError(
              "Features across tasks in a mixture must use the same dtype."
          )

  def get_dataset(
      self,
      sequence_length: Optional[Mapping[str, int]],
      split: str = tfds.Split.TRAIN,
      use_cached: bool = False,
      shuffle: bool = True,
      seed: Optional[int] = None,
      shard_info: Optional[ShardInfo] = None,
      num_epochs: Optional[int] = None,
      copy_pretokenized: bool = False,
      compute_stats_empirically: bool = False,
  ) -> tf.data.Dataset:
    """Samples from the member tasks' datasets according to their rates.

    Tasks lacking `split` are skipped with a warning; a ValueError is raised
    if none remain.
    """
    self._check_compatible_features()
    tasks = []
    for task in self.tasks:
      if split not in task.splits:
        logging.warning(
            "Task %s has no '%s' split, skipping.", task.name, split
        )
        continue
      tasks.append(task)
    if not tasks:
      raise ValueError("No datasets have a '{}' split".format(split))

    output_feature_keys = set(self.output_features.keys())
    if copy_pretokenized:
      output_feature_keys.update(
          {f + "_pretokenized" for f in output_feature_keys})

    def filter_features(ex):
      # Drop any features that are not part of the mixture's output specs.
      return {k: v for k, v in ex.items() if k in output_feature_keys}

    datasets = [
        task.get_dataset(  # pylint:disable=g-complex-comprehension
            sequence_length,
            split=split,
            use_cached=use_cached,
            shuffle=shuffle,
            seed=seed,
            shard_info=shard_info,
            num_epochs=num_epochs)
        .map(filter_features, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        for task in tasks]
    rates = [self.get_rate(task) for task in tasks]
    # Sample from the datasets with the given rates.
    if seed is not None:
      sample_seed = seed
    elif shuffle:
      sample_seed = None
    else:
      # Deterministic mixing when not shuffling and no seed was given.
      sample_seed = 42
    dataset = tf.data.experimental.sample_from_datasets(
        datasets, rates, sample_seed)
    if (split == "train" and use_cached and
        all(t.supports_caching for t in tasks)):
      _log_mixing_proportions(tasks, datasets, rates, dataset, sequence_length,
                              compute_stats_empirically)
    return dataset
def _log_padding_fractions(dataset, sequence_length, num_examples=100):
  """Empirically estimates and logs padding fractions per feature.

  NOTE(review): the logged value is the *sum* of per-example fractions over
  the sampled examples (it is never divided by `num_examples`), and can be
  negative when an example is longer than `sequence_length[k]` — confirm
  this is the intended metric.
  """
  logging.info("computing padding fractions")
  keys = sequence_length.keys()
  padding_frac = {k: 0 for k in keys}
  for ex in tfds.as_numpy(dataset.take(num_examples)):
    for k in keys:
      padding_frac[k] += 1 - (sequence_length[k] / len(ex[k]))
  for k in keys:
    logging.info("%s padding fraction = %g", k, padding_frac[k])
def _log_mixing_proportions(
    tasks, datasets, rates, mixed_dataset,
    sequence_length, compute_stats_empirically):
  """Logs each task's share of the mixture (by examples and by tokens).

  Mean sequence lengths are either measured from a sample of each dataset
  (`compute_stats_empirically=True`) or estimated from cached statistics.
  """

  def _normalize(l):
    # Scale values to sum to 1; an all-zero list is returned unchanged to
    # avoid division by zero.
    denom = sum(l)
    if not denom:
      return l
    return [x / denom for x in l]

  # compute some stats about the mixture
  examples_fraction = _normalize(rates)
  if compute_stats_empirically:
    stats_examples = 100
    mean_inputs_length = []
    mean_targets_length = []
    for dataset in datasets:
      inputs_sum = 0
      targets_sum = 0
      for ex in tfds.as_numpy(dataset.take(stats_examples)):
        # Some tasks, like LMs, don't have inputs.
        if "inputs" in ex:
          inputs_sum += ex["inputs"].size
        targets_sum += ex["targets"].size
      mean_inputs_length.append(inputs_sum / float(stats_examples))
      mean_targets_length.append(targets_sum / float(stats_examples))
  else:

    def _estimated_mean_length(task, key):
      if key not in sequence_length:
        return 0
      if (task.supports_caching and
          task._cache_step_idx < len(task._preprocessors) - 1):
        # Post-cache preprocessing may change lengths, so assume examples
        # fill the full sequence length.
        return sequence_length[key]
      # Some tasks, like LMs, don't have inputs.
      if key + "_tokens" in task.get_cached_stats("train"):
        return min(sequence_length[key],
                   (task.get_cached_stats("train")[key + "_tokens"] /
                    task.get_cached_stats("train")["examples"]))
      else:
        return 0

    mean_inputs_length = [_estimated_mean_length(task, "inputs")
                          for task in tasks]
    mean_targets_length = [_estimated_mean_length(task, "targets")
                           for task in tasks]
  inputs_fraction = _normalize(
      [l * r for l, r in zip(mean_inputs_length, rates)])
  targets_fraction = _normalize(
      [l * r for l, r in zip(mean_targets_length, rates)])
  logging.info("%12s %12s %12s %12s %12s %12s %s",
               "rate", "ex.frac.", "inp.frac.", "tgt.frac.",
               "inp.len.", "tgt.len", "task")
  for i in range(len(rates)):
    logging.info("%12g %12g %12g %12g %12g %12g %s",
                 rates[i], examples_fraction[i],
                 inputs_fraction[i], targets_fraction[i],
                 mean_inputs_length[i], mean_targets_length[i],
                 tasks[i].name)
  if compute_stats_empirically:
    _log_padding_fractions(mixed_dataset, sequence_length)
class MixtureRegistry(DatasetProviderRegistry):
  """Registry of `Mixture`s."""

  _REGISTRY = {}
  _PROVIDER_TYPE = Mixture

  @classmethod
  def add(cls, name, tasks, default_rate=None) -> Mixture:
    """Constructs a `Mixture` from the arguments and registers it by name."""
    mixture = super().add(name, Mixture, name, tasks, default_rate)
    return mixture

  @classmethod
  def get(cls, name) -> Mixture:
    """Returns the registered `Mixture` named `name`."""
    return super().get(name)
def get_mixture_or_task(task_or_mixture_name):
  """Returns the Task or Mixture with the given name; Mixtures win ties."""
  is_mixture = task_or_mixture_name in MixtureRegistry.names()
  is_task = task_or_mixture_name in TaskRegistry.names()
  if is_mixture:
    if is_task:
      # The same name is registered in both registries; prefer the Mixture
      # but let the user know.
      logging.warning("%s is both a Task and a Mixture, returning Mixture",
                      task_or_mixture_name)
    return MixtureRegistry.get(task_or_mixture_name)
  if is_task:
    return TaskRegistry.get(task_or_mixture_name)
  raise ValueError("No Task or Mixture found with name: %s" %
                   task_or_mixture_name)
def get_subtasks(task_or_mixture):
  """Returns `[task]` for a Task, or the member tasks of a Mixture."""
  if isinstance(task_or_mixture, Task):
    return [task_or_mixture]
  return task_or_mixture.tasks
def get_dataset(
    mixture_or_task_name: str,
    task_feature_lengths: Mapping[str, int],
    feature_converter: FeatureConverter,
    dataset_split: str = "train",
    use_cached: bool = False,
    shuffle: bool = False,
    num_epochs: Optional[int] = 1,
    shard_info: ShardInfo = None,
    verbose: bool = True,
    seed: Optional[int] = None
) -> tf.data.Dataset:
  """Returns the model-ready dataset for a registered Task or Mixture.

  Looks up the provider by name, builds its dataset, and converts the task
  features to model features with `feature_converter`.

  Args:
    mixture_or_task_name: name of a registered Task or Mixture.
    task_feature_lengths: maximum lengths per task feature.
    feature_converter: a `FeatureConverter` instance applied to the dataset.
    dataset_split: the split to load.
    use_cached: whether to read from the offline cache.
    shuffle: whether to shuffle the dataset.
    num_epochs: number of epochs to repeat.
    shard_info: optional shard to restrict the dataset to.
    verbose: whether to log the output feature specs.
    seed: optional seed for shuffling/stochastic preprocessing.

  Raises:
    TypeError: if `feature_converter` is not a `FeatureConverter`.
  """
  if not isinstance(feature_converter, FeatureConverter):
    raise TypeError(
        "feature_converter should be an instance of FeatureConverter.")

  mixture_or_task = get_mixture_or_task(mixture_or_task_name)

  ds = mixture_or_task.get_dataset(
      task_feature_lengths,
      split=dataset_split,
      use_cached=use_cached,
      shuffle=shuffle,
      seed=seed,
      shard_info=shard_info,
      num_epochs=num_epochs)

  ds = feature_converter(ds, task_feature_lengths=task_feature_lengths)

  if verbose:
    logging.info(
        "The output dataset from seqio.get_dataset has the following features")
    for feature_name, tensor_spec in ds.element_spec.items():
      logging.info("feature: %s \t shape: %s \t dtype: %s", feature_name,
                   tensor_spec.shape.as_list(), tensor_spec.dtype.name)
  return ds
| true | true |
f71fdf38821c8803ae681fd71c4c71d7da8b1c90 | 2,037 | py | Python | airflow/migrations/versions/0090_30867afad44a_rename_concurrency_column_in_dag_table_.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 8,092 | 2016-04-27T20:32:29.000Z | 2019-01-05T07:39:33.000Z | airflow/migrations/versions/0090_30867afad44a_rename_concurrency_column_in_dag_table_.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 2,961 | 2016-05-05T07:16:16.000Z | 2019-01-05T08:47:59.000Z | airflow/migrations/versions/0090_30867afad44a_rename_concurrency_column_in_dag_table_.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 3,546 | 2016-05-04T20:33:16.000Z | 2019-01-05T05:14:26.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Rename ``concurrency`` column in ``dag`` table to`` max_active_tasks``
Revision ID: 30867afad44a
Revises: e9304a3141f0
Create Date: 2021-06-04 22:11:19.849981
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '30867afad44a'
down_revision = 'e9304a3141f0'
branch_labels = None
depends_on = None
airflow_version = '2.2.0'
def upgrade():
    """Apply rename of ``concurrency`` column in ``dag`` table to ``max_active_tasks``."""
    conn = op.get_bind()
    is_sqlite = bool(conn.dialect.name == "sqlite")

    if is_sqlite:
        # SQLite cannot ALTER a column in place; Alembic's batch mode copies
        # the table, so foreign key enforcement is suspended during the copy.
        op.execute("PRAGMA foreign_keys=off")
    with op.batch_alter_table('dag') as batch_op:
        batch_op.alter_column(
            'concurrency',
            new_column_name='max_active_tasks',
            type_=sa.Integer(),
            nullable=False,
        )
    if is_sqlite:
        op.execute("PRAGMA foreign_keys=on")
def downgrade():
    """Unapply rename: restore ``max_active_tasks`` column in ``dag`` table to ``concurrency``."""
    with op.batch_alter_table('dag') as batch_op:
        batch_op.alter_column(
            'max_active_tasks',
            new_column_name='concurrency',
            type_=sa.Integer(),
            nullable=False,
        )
| 31.338462 | 88 | 0.691703 |
import sqlalchemy as sa
from alembic import op
revision = '30867afad44a'
down_revision = 'e9304a3141f0'
branch_labels = None
depends_on = None
airflow_version = '2.2.0'
def upgrade():
conn = op.get_bind()
is_sqlite = bool(conn.dialect.name == "sqlite")
if is_sqlite:
op.execute("PRAGMA foreign_keys=off")
with op.batch_alter_table('dag') as batch_op:
batch_op.alter_column(
'concurrency',
new_column_name='max_active_tasks',
type_=sa.Integer(),
nullable=False,
)
if is_sqlite:
op.execute("PRAGMA foreign_keys=on")
def downgrade():
with op.batch_alter_table('dag') as batch_op:
batch_op.alter_column(
'max_active_tasks',
new_column_name='concurrency',
type_=sa.Integer(),
nullable=False,
)
| true | true |
f71fdf687002e4d434788ab435b395447a70728b | 228 | py | Python | dstlib/node.py | Algebra7/dstlib | 3f891fb48c8c00caf89255c45a1b41f76331d252 | [
"MIT"
] | 1 | 2021-07-27T08:22:29.000Z | 2021-07-27T08:22:29.000Z | dstlib/node.py | Algebra7/dstlib | 3f891fb48c8c00caf89255c45a1b41f76331d252 | [
"MIT"
] | null | null | null | dstlib/node.py | Algebra7/dstlib | 3f891fb48c8c00caf89255c45a1b41f76331d252 | [
"MIT"
] | null | null | null | class Node:
"""Class for storing linked list node."""
def __init__(self, element, next_pointer, prev_pointer=None):
self._element = element
self._next = next_pointer
self._prev = prev_pointer | 32.571429 | 65 | 0.653509 | class Node:
def __init__(self, element, next_pointer, prev_pointer=None):
self._element = element
self._next = next_pointer
self._prev = prev_pointer | true | true |
f71fdf8b98012c19be34962342155bc04ff638eb | 25,034 | py | Python | garnets.py | seanth/garnets | 3ff37bcbf095df14586cccb39a52bcf7b221c8ee | [
"MIT"
] | 1 | 2022-02-25T14:32:34.000Z | 2022-02-25T14:32:34.000Z | garnets.py | seanth/garnets | 3ff37bcbf095df14586cccb39a52bcf7b221c8ee | [
"MIT"
] | null | null | null | garnets.py | seanth/garnets | 3ff37bcbf095df14586cccb39a52bcf7b221c8ee | [
"MIT"
] | 2 | 2020-09-02T17:18:33.000Z | 2022-02-25T14:32:48.000Z | import logging
import random
from math import sqrt, log
from stellar_system import Star
from stellar_system import Planetesimal
from stellar_system import Protoplanet
from stellar_system import Protomoon
from stellar_system import Planet
from stellar_system import Orbit
from accrete import CircumstellarDisk
from constants import ECCENTRICITY_COEFF, PROTOPLANET_MASS
from constants import SUN_MASS_IN_EARTH_MASSES
from constants import EARTH_ALBEDO, GAS_GIANT_ALBEDO, FREEZING_POINT_OF_WATER, KM_PER_AU, EARTH_AVERAGE_KELVIN, EARTH_EXOSPHERE_TEMP
from constants import MOL_NITROGEN, MOL_HYDROGEN, HELIUM
from constants import ASTEROID_MASS_LIMIT
from constants import MILLIBARS_PER_BAR
from enviroment import kothari_radius, gas_life, rms_vel, est_temp, period, day_length, acceleration, gravity, min_molec_weight, orb_zone, volume_radius, volume_density, grnhouse, boiling_point, escape_vel, empirical_density, inclination, iterate_surface_temp, pressure, vol_inventory
from enviroment import PlanetType
from math import exp
from math import inf as INCREDIBLY_LARGE_NUMBER # TODO(woursler): Just use inf
from util import about, random_number, random_eccentricity
from chemtable import gases
logging.getLogger().setLevel(logging.INFO)
def random_star(seed=None):
    """Generate a random star.

    Implements the old "Add seed?" TODO: an optional seed makes generation
    reproducible while the default (None) keeps the previous random behavior.

    Args:
        seed: optional seed for the random number generator.

    Returns:
        A solar-mass `Star` with an age between 1 and 6 billion years.
    """
    # Sources
    # exoplanets.co/exoplanet-correlations/host-star-mass-distribution.html
    # en.wikipedia.org/wiki/Main_sequence#mediaviewer/File:HRDiagram.png

    # TODO: Code up generation of varying stellar masses.
    rng = random.Random(seed)
    age = rng.randrange(1 * 10**9, 6 * 10**9)
    return Star(age=age, mass_ratio=1)
def generate_stellar_system(star, do_gases=True, do_moons=True):
    """Populate `star` with a planetary system and return it.

    Accretes protoplanets from the star's dust disk, then evolves each into
    a finished planet stored on `star.planets`.

    Args:
        star: the Star to populate.
        do_gases: whether to compute atmospheric gases for each planet.
        do_moons: whether protoplanets may capture moons during accretion.

    Returns:
        The same `star`, with its `planets` attribute filled in.
    """
    protoplanets = generate_planetary_masses(
        star,
        0.0,
        star.stellar_dust_limit,
        do_moons=do_moons
    )
    star.planets = [
        generate_planet(
            p,
            star,
            do_gases=do_gases,
            do_moons=do_moons
        ) for p in protoplanets
    ]
    return star
# Create protoplanets.
def random_planetesimal(disk):
    """Create a seed planetesimal at a random orbit within `disk`.

    The semi-major axis is uniform over the disk's planet bounds.  The
    eccentricity distribution is skewed toward low values by raising a
    uniform draw to ECCENTRICITY_COEFF, and capped at 0.99.
    """
    a = random.uniform(disk.planet_inner_bound, disk.planet_outer_bound)
    e = 1.0 - (random.uniform(0.0, 1.0) ** ECCENTRICITY_COEFF)

    if e > .99:
        e = .99

    return Planetesimal(
        disk=disk,
        orbit=Orbit(
            a=a,
            e=e,
        ),
        dust_mass=PROTOPLANET_MASS,  # Every seed body starts at the protoplanet mass.
        gas_mass=0,
    )
def generate_planetary_masses(star, inner_dust, outer_dust, do_moons=True):
    """Accrete protoplanets from `star`'s circumstellar dust disk.

    Repeatedly injects random planetesimals and lets them sweep up dust.
    Stops when the disk is exhausted or after 1000 consecutive injections
    that found no dust in reach.

    Args:
        star: the Star whose disk is accreted.
        inner_dust: unused here; the disk bounds come from the star itself.
        outer_dust: unused here; the disk bounds come from the star itself.
        do_moons: whether colliding bodies may be captured as moons.

    Returns:
        A list of protoplanets (with any captured protomoons attached).
    """
    disk = CircumstellarDisk(star)

    planets = []

    sequential_failures = 0

    while disk.dust_left and sequential_failures < 10**3:
        canidate = random_planetesimal(disk)

        iel = canidate.inner_effect_limit
        oel = canidate.outer_effect_limit

        if disk.dust_available(iel, oel) > 0:
            sequential_failures = 0
            logging.info("Injecting planetesimal at " +
                         str(canidate.orbit.a) + " AU ...")

            disk.accrete_dust(canidate)

            if canidate.mass > PROTOPLANET_MASS:
                # Only bodies that actually grew are merged into the system.
                coalesce_planetesimals(disk, planets, canidate, do_moons)
                logging.info("\tsuccess.\n")
            else:
                logging.info("\tfailed due to large neighbor.\n")
        else:
            sequential_failures += 1
    return planets
def convert_planetesimal_to_protoplanet(planetesimal):
    """Promote an accreted planetesimal into a protoplanet around its star."""
    host_star = planetesimal.disk.star
    return Protoplanet(
        star=host_star,
        orbit=planetesimal.orbit,
        dust_mass=planetesimal.dust_mass,
        gas_mass=planetesimal.gas_mass,
    )
def convert_planetesimal_to_protomoon(planetesimal, planet):
    """Promote an accreted planetesimal into a protomoon of `planet`.

    The moon's orbit around the planet is not yet determined, so the
    orbital elements are left unset.
    """
    # Consistency fix: the rest of this module reports progress through the
    # logging module, not print().
    logging.info("Capturing a protomoon.")
    return Protomoon(
        protoplanet=planet,
        orbit=Orbit(
            a=None,
            e=None,
        ),
        dust_mass=planetesimal.dust_mass,
        gas_mass=planetesimal.gas_mass,
    )
def coalesce_planetesimals(disk, planets, canidate, do_moons):
    """Fold *canidate* into the growing system.

    If its orbit overlaps an existing planet's gravitational reach,
    either capture it as a protomoon (small body, *do_moons* enabled) or
    collide and merge it into that planet (new orbit, summed masses,
    plus another dust-accretion pass).  Otherwise append it to *planets*
    as a fresh protoplanet.  Mutates *planets* and member planets in
    place; returns None.
    """
    finished = False
    # First we try to find an existing planet with an over-lapping orbit.
    for planet in planets:
        #print("Out of order", planet, canidate)
        diff = planet.orbit.a - canidate.orbit.a
        # Overlap test: compare the orbital separation against each
        # body's reach (apsis distance scaled by its reduced mass).
        if diff > 0.0:
            dist1 = canidate.orbit.apoapsis * (1.0 + canidate.reduced_mass) - canidate.orbit.a
            # x aphelion
            dist2 = planet.orbit.a - (planet.orbit.periapsis * (1.0 - planet.reduced_mass))
        else:
            dist1 = canidate.orbit.a - (canidate.orbit.periapsis * (1.0 - canidate.reduced_mass))
            # x perihelion
            dist2 = (planet.orbit.apoapsis * (1.0 + planet.reduced_mass)) - planet.orbit.a
        if abs(diff) <= abs(dist1) or abs(diff) <= abs(dist2):
            # Figure out the new orbit.
            # Semi-major axis: mass-weighted harmonic combination of the two.
            a = (planet.mass + canidate.mass) / \
                ((planet.mass / planet.orbit.a) + (canidate.mass / canidate.orbit.a))
            # Eccentricity from the combined orbital terms.
            # NOTE(review): the nested sqrt(sqrt(...)) on the canidate term is
            # asymmetric with the planet term above; preserved as-is from the
            # original algorithm -- confirm against reference before changing.
            temp = planet.mass * sqrt(planet.orbit.a) * sqrt(1.0 - (planet.orbit.e ** 2.0))
            temp = temp + (canidate.mass * sqrt(canidate.orbit.a) *
                           sqrt(sqrt(1.0 - (canidate.orbit.e ** 2.0))))
            temp = temp / ((planet.mass + canidate.mass) * sqrt(canidate.orbit.a))
            temp = 1.0 - (temp ** 2.0)
            # Clamp degenerate results to a circular orbit.
            if temp < 0.0 or temp >= 1.0:
                temp = 0.0
            e = sqrt(temp)
            if do_moons:
                if canidate.mass < canidate.critical_mass:
                    # Capture as a moon only for bodies in a sane mass window
                    # that don't overload the planet's moon-mass budget.
                    if canidate.mass * SUN_MASS_IN_EARTH_MASSES < 2.5 \
                            and canidate.mass * SUN_MASS_IN_EARTH_MASSES > .0001 \
                            and planet.mass_of_moons < planet.mass * .05 \
                            and planet.mass > canidate.mass:
                        # TODO: Remove planet.mass > canidate.mass distinction, just switch the canidate and planet!
                        planet.add_moon(
                            convert_planetesimal_to_protomoon(canidate, planet))
                        logging.info("Moon captured at " + str(planet.orbit.a) + " AU. Planet Mass: " + str(planet.mass * SUN_MASS_IN_EARTH_MASSES) +
                                     " earth masses Moon Mass: " + str(canidate.mass * SUN_MASS_IN_EARTH_MASSES) + " earth masses.")
                        finished = True
                        break
                    else:
                        # Capture rejected: fall through to the collision path.
                        # TODO: Reasons.
                        logging.info("Did not capture potential moon at " +
                                     str(planet.orbit.a) + " AU. Collision imminent.")
            logging.info(
                "Collision between two planetesimals! Computing new orbit and accumulating additional mass.")
            # Accrete MORE DUST! TODO: Refactor to this.
            disk.accrete_dust(planet)
            planet.orbit = Orbit(a=a, e=e)
            #####
            # Legacy flat attributes kept in sync with the Orbit object.
            planet.orbit_a = a
            planet.orbit_e = e
            planet.dust_mass = planet.dust_mass + canidate.dust_mass  # + new_dust
            planet.gas_mass = planet.gas_mass + canidate.gas_mass  # + new_gas
            finished = True
            # NOTE(review): no break here -- the loop keeps scanning the
            # remaining planets after a merge; confirm this is intended.
            logging.info(
                "Conglomerate is now " +
                str(planet.mass * SUN_MASS_IN_EARTH_MASSES) +
                " earth masses at " + str(planet.orbit.a) + " AU."
            )
    if not finished:
        # No overlap anywhere: the canidate becomes a new protoplanet.
        # TODO: Extra info.
        logging.info("New Protoplanet at " + str(canidate.orbit.a) + "AU.")
        planets.append(convert_planetesimal_to_protoplanet(canidate))
def calculate_gases(star, planet, planet_id):
    """Populate planet.atmosphere with the gases the planet retains.

    For each gas in the chemtable, the retained amount is the product of
    its abundance, a thermal-escape survival factor over the star's age,
    a per-species reactivity correction, and a gravitational
    fractionation factor.  Retained gases are written to
    planet.atmosphere as (gas, partial_pressure) tuples and their count
    to planet.gases.  Does nothing when surf_pressure <= 0.

    :param planet_id: opaque label; only referenced by the commented-out
        verbose reporting carried over from the C stargen source.
    """
    if planet.surf_pressure > 0:
        amount = [0 for _ in range(len(gases))]  # per-gas retained amount
        totamount = 0
        pressure = planet.surf_pressure/MILLIBARS_PER_BAR
        n = 0  # number of gases retained
        for i in range(len(gases)):
            # Boiling point of this gas at the planet's surface pressure.
            yp = gases[i].boil / \
                (373. * ((log((pressure) + 0.001) / -5050.5) + (1.0 / 373.)))
            # Retain only gases that are gaseous here (boil below the
            # planet's low temperature) and too heavy to escape.
            if ((yp >= 0 and yp < planet.low_temp) and (gases[i].weight >= planet.molec_weight)):
                vrms = rms_vel(gases[i].weight, planet.exospheric_temp)
                # Fraction surviving thermal escape over the star's age.
                pvrms = pow(1 / (1 + vrms / planet.esc_velocity),
                            star.age / 1e9)
                abund = gases[i].abunds  # gases[i].abunde
                react = 1.0
                fract = 1.0
                pres2 = 1.0
                # Per-species empirical corrections inherited from stargen.
                if gases[i].symbol == "Ar":
                    react = .15 * star.age/4e9
                elif gases[i].symbol == "He":
                    abund = abund * (0.001 + (planet.gas_mass / planet.mass))
                    pres2 = (0.75 + pressure)
                    react = pow(1 / (1 + gases[i].reactivity),
                                star.age/2e9 * pres2)
                elif (gases[i].symbol == "O" or gases[i].symbol == "O2") and star.age > 2e9 and planet.surf_temp > 270 and planet.surf_temp < 400:
                    pres2 = (0.89 + pressure/4)
                    react = pow(
                        1 / (1 + gases[i].reactivity), pow(star.age/2e9, 0.25) * pres2)
                elif gases[i].symbol == "CO2" and star.age > 2e9 and planet.surf_temp > 270 and planet.surf_temp < 400:
                    pres2 = (0.75 + pressure)
                    react = pow(
                        1 / (1 + gases[i].reactivity), pow(star.age/2e9, 0.5) * pres2)
                    react *= 1.5
                else:
                    pres2 = 0.75 + pressure
                    react = pow(
                        1 / (1 + gases[i].reactivity), star.age/2e9 * pres2)
                # Gravitational fractionation: lighter gases bleed off.
                fract = (1 - (planet.molec_weight / gases[i].weight))
                amount[i] = abund * pvrms * react * fract
                '''if ((flag_verbose & 0x4000) and
                    (strcmp(gases[i].symbol, "O") == 0 or
                    strcmp(gases[i].symbol, "N") == 0 or
                    strcmp(gases[i].symbol, "Ar") == 0 or
                    strcmp(gases[i].symbol, "He") == 0 or
                    strcmp(gases[i].symbol, "CO2") == 0))
                    fprintf (stderr, "%-5.2Lf %-3.3s, %-5.2Lf = a %-5.2Lf * p %-5.2Lf * r %-5.2Lf * p2 %-5.2Lf * f %-5.2Lf\t(%.3Lf%%)\n",
                        planet.mass * SUN_MASS_IN_EARTH_MASSES,
                        gases[i].symbol,
                        amount[i],
                        abund,
                        pvrms,
                        react,
                        pres2,
                        fract,
                        100.0 * (planet.gas_mass / planet.mass)
                        )'''
                totamount += amount[i]
                if (amount[i] > 0.0):
                    n += 1
            else:
                amount[i] = 0.0
        if n > 0:
            planet.gases = n
            planet.atmosphere = []
            for i in range(len(gases)):
                if amount[i] > 0.0:
                    # Partial pressure: this gas's share of surf_pressure.
                    planet.atmosphere.append((gases[i], planet.surf_pressure * amount[i] / totamount))
                    '''if (flag_verbose & 0x2000)
                        if ((planet.atmosphere[n].num == AN_O) and
                            inspired_partial_pressure (planet.surf_pressure,
                                planet.atmosphere[n].surf_pressure)
                            > gases[i].max_ipp)
                            fprintf (stderr, "%s\t Poisoned by O2\n",
                                planet_id)'''
                    # NOTE(review): n is reused as a running index here and no
                    # longer equals planet.gases after this loop -- confirm.
                    n += 1
            # TODO(woursler): sort planet.atmosphere
            '''if (flag_verbose & 0x0010):
                fprintf (stderr, "\n%s (%5.1Lf AU) gases:\n",
                    planet_id, planet.orbit.a)
                for (i = 0; i < planet.gases; i++)
                    fprintf (stderr, "%3d: %6.1Lf, %11.7Lf%%\n",
                        planet.atmosphere[i].num,
                        planet.atmosphere[i].surf_pressure,
                        100. * (planet.atmosphere[i].surf_pressure /
                            planet.surf_pressure)
                        )'''
def roche_limit(planet, moon):
    """Rigid-body Roche limit for *moon* orbiting *planet*.

    Computes d = 2.44 * R_planet * (rho_planet / rho_moon)^(1/3); a moon
    orbiting closer than this distance is torn apart by tides.  Result
    is in the same length units as planet.radius.
    """
    density_ratio = planet.density / moon.density
    return 2.44 * planet.radius * density_ratio ** (1.0 / 3.0)
def hill_sphere(planet, star):
    """Hill-sphere radius of *planet*, in km.

    The region where the planet's gravity dominates the star's, i.e.
    where moons can stay bound: a_planet * (m / (3 M_star))^(1/3), with
    the AU semi-major axis converted to km via KM_PER_AU.
    """
    mass_term = planet.mass / (3.0 * star.mass_ratio)
    return planet.orbit.a * KM_PER_AU * mass_term ** (1.0 / 3.0)
def generate_planet(protoplanet, star, random_tilt=0, planet_id=None, do_gases=True, do_moons=True, is_moon=False):
    """Grow *protoplanet* into a fully characterized Planet around *star*.

    Pipeline: initial bulk/orbital properties; gas-giant vs. rocky
    classification; H2/He atmospheric loss for small rocky bodies;
    surface environment (pressure, temperature, albedo, hydrosphere /
    cloud / ice cover); atmospheric composition (calculate_gases);
    final PlanetType assignment; and, unless *is_moon*, recursive
    generation and orbit placement of this planet's moons.

    :param random_tilt: when truthy, give the planet a random axial tilt
    :param planet_id: opaque label forwarded to calculate_gases
    :param do_gases: compute atmosphere composition when conditions allow
    :param do_moons: recursively generate captured protomoons
    :param is_moon: set on recursive calls; disables moon generation and
        the ASTEROIDS classification
    :returns: the populated Planet
    """
    # Seed the Planet with the protoplanet's orbit/masses; temperatures
    # and surface fields start at zero and are filled in below.
    planet = Planet(
        sun=star,
        orbit=protoplanet.orbit,
        dust_mass=protoplanet.dust_mass,
        gas_mass=protoplanet.gas_mass,
        mass=protoplanet.mass,
        axial_tilt=inclination(protoplanet.orbit.a) if random_tilt else 0,
        atmosphere=None,
        surf_temp=0,
        high_temp=0,
        low_temp=0,
        max_temp=0,
        min_temp=0,
        greenhs_rise=0,
        resonant_period=False,
        orbit_zone=orb_zone(star.luminosity_ratio, protoplanet.orbit.a),
        orb_period=period(protoplanet.orbit.a, protoplanet.mass, star.mass_ratio)
    )
    # Exospheric temperature falls off with squared distance relative to
    # the star's ecosphere radius.
    planet.exospheric_temp = EARTH_EXOSPHERE_TEMP / \
        ((planet.orbit.a / star.r_ecosphere) ** 2)
    planet.rms_velocity = rms_vel(MOL_NITROGEN, planet.exospheric_temp)
    planet.core_radius = kothari_radius(
        planet.dust_mass, False, planet.orbit_zone)
    # Calculate the radius as a gas giant, to verify it will retain gas.
    # Then if mass > Earth, it's at least 5% gas and retains He, it's
    # some flavor of gas giant.
    planet.density = empirical_density(
        planet.mass, planet.orbit.a, star.r_ecosphere, True)
    planet.radius = volume_radius(planet.mass, planet.density)
    planet.surf_accel = acceleration(planet.mass, planet.radius)
    planet.surf_grav = gravity(planet.surf_accel)
    planet.molec_weight = min_molec_weight(planet)
    if (((planet.mass * SUN_MASS_IN_EARTH_MASSES) > 1.0)
            and ((planet.gas_mass / planet.mass) > 0.05)
            and (min_molec_weight(planet) <= 4.0)):
        # Gas giant flavors, by gas fraction and total mass.
        if ((planet.gas_mass / planet.mass) < 0.20):
            planet.type = PlanetType.SUB_SUB_GAS_GIANT
        elif ((planet.mass * SUN_MASS_IN_EARTH_MASSES) < 20.0):
            planet.type = PlanetType.SUB_GAS_GIANT
        else:
            planet.type = PlanetType.GAS_GIANT
    else:  # If not, it's rocky.
        # Recompute bulk properties with the rocky-body radius model.
        planet.radius = kothari_radius(planet.mass, False, planet.orbit_zone)
        planet.density = volume_density(planet.mass, planet.radius)
        planet.surf_accel = acceleration(planet.mass, planet.radius)
        planet.surf_grav = gravity(planet.surf_accel)
        if ((planet.gas_mass / planet.mass) > 0.000001):
            # Small body with some gas: bleed off H2 and He over the
            # star's lifetime, updating mass and gravity as we go.
            h2_mass = planet.gas_mass * 0.85
            he_mass = (planet.gas_mass - h2_mass) * 0.999
            h2_loss = 0.0
            he_loss = 0.0
            h2_life = gas_life(MOL_HYDROGEN, planet)
            he_life = gas_life(HELIUM, planet)
            if (h2_life < star.age):
                #math.exp with a value above 709 results in a math range error
                #this is a dumb fix. STH 2021-0131
                if (star.age / h2_life)>709:
                    h2_loss = ((1.0 - (1.0 / exp(709.0))) * h2_mass)
                else:
                    h2_loss = ((1.0 - (1.0 / exp(star.age / h2_life))) * h2_mass)
                planet.gas_mass -= h2_loss
                planet.mass -= h2_loss
                planet.surf_accel = acceleration(planet.mass, planet.radius)
                planet.surf_grav = gravity(planet.surf_accel)
            if (he_life < star.age):
                # NOTE(review): unlike the H2 branch above, this exp() has no
                # >709 overflow guard; star.age/he_life > ~709 would raise
                # OverflowError -- confirm whether a guard is wanted here too.
                he_loss = ((1.0 - (1.0 / exp(star.age / he_life))) * he_mass)
                planet.gas_mass -= he_loss
                planet.mass -= he_loss
                planet.surf_accel = acceleration(planet.mass, planet.radius)
                planet.surf_grav = gravity(planet.surf_accel)
            '''if (((h2_loss + he_loss) > .000001) and (flag_verbose & 0x0080)):
                fprintf(stderr, "%s\tLosing gas: H2: %5.3Lf EM, He: %5.3Lf EM\n",
                    planet_id,
                    h2_loss * SUN_MASS_IN_EARTH_MASSES, he_loss * SUN_MASS_IN_EARTH_MASSES)'''
    planet.day = day_length(planet)  # Modifies planet.resonant_period
    planet.esc_velocity = escape_vel(planet.mass, planet.radius)
    if planet.type == PlanetType.GAS_GIANT or planet.type == PlanetType.SUB_GAS_GIANT or planet.type == PlanetType.SUB_SUB_GAS_GIANT:
        # Gas giants get sentinel surface values (no meaningful surface).
        planet.greenhouse_effect = False
        planet.volatile_gas_inventory = INCREDIBLY_LARGE_NUMBER
        planet.surf_pressure = INCREDIBLY_LARGE_NUMBER
        planet.boil_point = INCREDIBLY_LARGE_NUMBER
        planet.surf_temp = INCREDIBLY_LARGE_NUMBER
        planet.greenhs_rise = 0
        planet.albedo = about(GAS_GIANT_ALBEDO, 0.1)
        planet.hydrosphere = 1.0
        planet.cloud_cover = 1.0
        planet.ice_cover = 0.0
        planet.surf_grav = gravity(planet.surf_accel)
        planet.molec_weight = min_molec_weight(planet)
        planet.surf_grav = INCREDIBLY_LARGE_NUMBER
        planet.estimated_temp = est_temp(
            star.r_ecosphere, planet.orbit.a, planet.albedo)
        planet.estimated_terr_temp = est_temp(
            star.r_ecosphere, planet.orbit.a, EARTH_ALBEDO)
        temp = planet.estimated_terr_temp
        # Earth-like-temperature check; only the (disabled) verbose
        # reporting below cares about it.
        if (temp >= FREEZING_POINT_OF_WATER) and (temp <= EARTH_AVERAGE_KELVIN + 10.) and (star.age > 2.0E9):
            pass
            '''if (flag_verbose & 0x8000):
                fprintf (stderr, "%s\t%s (%4.2LfEM %5.3Lf By)%s with earth-like temperature (%.1Lf C, %.1Lf F, %+.1Lf C Earth).\n",
                    planet_id,
                    planet.type == PlanetType.GAS_GIANT ? "Jovian" :
                    planet.type == PlanetType.SUB_GAS_GIANT ? "Sub-Jovian" :
                    planet.type == PlanetType.SUB_SUB_GAS_GIANT ? "Gas Dwarf" :
                    "Big",
                    planet.mass * SUN_MASS_IN_EARTH_MASSES,
                    star.age /1.0E9,
                    planet.first_moon == NULL ? "" : " WITH MOON",
                    temp - FREEZING_POINT_OF_WATER,
                    32 + ((temp - FREEZING_POINT_OF_WATER) * 1.8),
                    temp - EARTH_AVERAGE_KELVIN)'''
    else:
        # Rocky body: compute the full surface environment.
        planet.estimated_temp = est_temp(
            star.r_ecosphere, planet.orbit.a, EARTH_ALBEDO)
        planet.estimated_terr_temp = est_temp(
            star.r_ecosphere, planet.orbit.a, EARTH_ALBEDO)
        planet.surf_grav = gravity(planet.surf_accel)
        planet.molec_weight = min_molec_weight(planet)
        planet.greenhouse_effect = grnhouse(star.r_ecosphere, planet.orbit.a)
        planet.volatile_gas_inventory = vol_inventory(planet.mass,
                                                      planet.esc_velocity,
                                                      planet.rms_velocity,
                                                      star.mass_ratio,
                                                      planet.orbit_zone,
                                                      planet.greenhouse_effect,
                                                      (planet.gas_mass
                                                       / planet.mass) > 0.000001)
        planet.surf_pressure = pressure(planet.volatile_gas_inventory,
                                        planet.radius,
                                        planet.surf_grav)
        if ((planet.surf_pressure == 0.0)):
            planet.boil_point = 0.0
        else:
            planet.boil_point = boiling_point(planet.surf_pressure)
        # Sets:
        #   planet.surf_temp
        #   planet.greenhs_rise
        #   planet.albedo
        #   planet.hydrosphere
        #   planet.cloud_cover
        #   planet.ice_cover
        iterate_surface_temp(planet)
        # Atmosphere composition only if liquid water is possible.
        if (do_gases and (planet.max_temp >= FREEZING_POINT_OF_WATER) and (planet.min_temp <= planet.boil_point)):
            calculate_gases(star, planet, planet_id)
        # Next we assign a type to the planet.
        if (planet.surf_pressure < 1.0):
            if (not is_moon) and ((planet.mass * SUN_MASS_IN_EARTH_MASSES) < ASTEROID_MASS_LIMIT):
                planet.type = PlanetType.ASTEROIDS
            else:
                planet.type = PlanetType.ROCK
        elif (planet.surf_pressure > 6000.0) and (planet.molec_weight <= 2.0):  # Retains Hydrogen
            planet.type = PlanetType.SUB_SUB_GAS_GIANT
            planet.gases = 0
            planet.atmosphere = None
        else:
            # Atmospheres:
            if (int(planet.day) == int(planet.orb_period * 24.0)) or planet.resonant_period:
                planet.type = PlanetType.ONE_FACE
            elif (planet.hydrosphere >= 0.95):
                planet.type = PlanetType.WATER  # >95% water
            elif (planet.ice_cover >= 0.95):
                planet.type = PlanetType.ICE  # >95% ice
            elif (planet.hydrosphere > 0.05):
                planet.type = PlanetType.TERRESTRIAL  # Terrestrial
                # else <5% water
            elif (planet.max_temp > planet.boil_point):
                planet.type = PlanetType.VENUSIAN  # Hot = Venusian
            elif ((planet.gas_mass / planet.mass) > 0.0001):
                # Accreted gas
                planet.type = PlanetType.ICE  # But no Greenhouse
                planet.ice_cover = 1.0  # or liquid water
                # Make it an Ice World
            elif (planet.surf_pressure <= 250.0):  # Thin air = Martian
                planet.type = PlanetType.MARTIAN
            elif (planet.surf_temp < FREEZING_POINT_OF_WATER):
                planet.type = PlanetType.ICE
            else:
                planet.type = PlanetType.UNKNOWN  # TODO(woursler): Consider throwing an error here.
        '''if (flag_verbose & 0x0001)
            fprintf (stderr, "%12s\tp=%4.2Lf\tm=%4.2Lf\tg=%4.2Lf\tt=%+.1Lf\t%s\t Unknown %s\n",
                type_string (planet.type),
                planet.surf_pressure,
                planet.mass * SUN_MASS_IN_EARTH_MASSES,
                planet.surf_grav,
                planet.surf_temp - EARTH_AVERAGE_KELVIN,
                planet_id,
                ((int)planet.day == (int)(planet.orb_period * 24.0) or
                (planet.resonant_period)) ? "(1-Face)" : ""
                )'''
    if do_moons and not is_moon:
        # Promote each sufficiently massive protomoon to a real moon via
        # a recursive generate_planet call, then place its orbit between
        # the Roche limit and half the Hill sphere.
        for protomoon in protoplanet.moons:
            if protomoon.mass * SUN_MASS_IN_EARTH_MASSES > .000001:
                protomoon.orbit = planet.orbit
                # Note: adjusts density, which is used in computing the roche limit.
                moon = generate_planet(
                    protoplanet=protomoon,
                    star=star,
                    random_tilt=random_tilt,
                    do_gases=do_gases,
                    do_moons=do_moons,
                    is_moon=True
                )
                # TODO(woursler): these should be their own subroutines.
                roche_limit_r = roche_limit(planet, moon)
                hill_sphere_r = hill_sphere(planet, star)
                if (roche_limit_r * 3.0) < hill_sphere_r:
                    moon_a = random_number(
                        roche_limit_r * 1.5, hill_sphere_r / 2.0) / KM_PER_AU
                    moon_e = random_eccentricity()
                    moon.orbit = Orbit(a=moon_a, e=moon_e)
                else:
                    # No stable zone: park the moon on a degenerate orbit.
                    moon.orbit = Orbit(a=0, e=0)
                planet.moons.append(moon)
                '''
                if (flag_verbose & 0x40000):
                    fprintf (stderr,
                        " Roche limit: R = %4.2Lg, rM = %4.2Lg, rm = %4.2Lg . %.0Lf km\n"
                        " Hill Sphere: a = %4.2Lg, m = %4.2Lg, M = %4.2Lg . %.0Lf km\n"
                        "%s Moon orbit: a = %.0Lf km, e = %.0Lg\n",
                        planet.radius, planet.density, ptr.density,
                        roche_limit,
                        planet.orbit.a * KM_PER_AU, planet.mass * SOLAR_MASS_IN_KILOGRAMS, star.mass_ratio * SOLAR_MASS_IN_KILOGRAMS,
                        hill_sphere,
                        moon_id,
                        ptr.moon_a * KM_PER_AU, ptr.moon_e
                        )
                if (flag_verbose & 0x1000):
                    fprintf (stderr, " %s: (%7.2LfEM) %d %4.2LgEM\n",
                        planet_id,
                        planet.mass * SUN_MASS_IN_EARTH_MASSES,
                        n,
                        ptr.mass * SUN_MASS_IN_EARTH_MASSES)'''
    return planet
###
# Smoke Test
###
if __name__ == '__main__':
    # Smoke test: seed the RNG for a reproducible run, then generate and
    # print one full stellar system.
    random.seed('earth')
    print(generate_stellar_system(random_star()))
| 40.247588 | 284 | 0.533874 | import logging
import random
from math import sqrt, log
from stellar_system import Star
from stellar_system import Planetesimal
from stellar_system import Protoplanet
from stellar_system import Protomoon
from stellar_system import Planet
from stellar_system import Orbit
from accrete import CircumstellarDisk
from constants import ECCENTRICITY_COEFF, PROTOPLANET_MASS
from constants import SUN_MASS_IN_EARTH_MASSES
from constants import EARTH_ALBEDO, GAS_GIANT_ALBEDO, FREEZING_POINT_OF_WATER, KM_PER_AU, EARTH_AVERAGE_KELVIN, EARTH_EXOSPHERE_TEMP
from constants import MOL_NITROGEN, MOL_HYDROGEN, HELIUM
from constants import ASTEROID_MASS_LIMIT
from constants import MILLIBARS_PER_BAR
from enviroment import kothari_radius, gas_life, rms_vel, est_temp, period, day_length, acceleration, gravity, min_molec_weight, orb_zone, volume_radius, volume_density, grnhouse, boiling_point, escape_vel, empirical_density, inclination, iterate_surface_temp, pressure, vol_inventory
from enviroment import PlanetType
from math import exp
from math import inf as INCREDIBLY_LARGE_NUMBER
from util import about, random_number, random_eccentricity
from chemtable import gases
logging.getLogger().setLevel(logging.INFO)
def random_star():
e(1*10**9, 6*10**9)
return Star(age=age, mass_ratio=1)
def generate_stellar_system(star, do_gases=True, do_moons=True):
protoplanets = generate_planetary_masses(
star,
0.0,
star.stellar_dust_limit,
do_moons=do_moons
)
star.planets = [
generate_planet(
p,
star,
do_gases=do_gases,
do_moons=do_moons
) for p in protoplanets
]
return star
def random_planetesimal(disk):
a = random.uniform(disk.planet_inner_bound, disk.planet_outer_bound)
e = 1.0 - (random.uniform(0.0, 1.0) ** ECCENTRICITY_COEFF)
if e > .99:
e = .99
return Planetesimal(
disk=disk,
orbit=Orbit(
a=a,
e=e,
),
dust_mass=PROTOPLANET_MASS,
gas_mass=0,
)
def generate_planetary_masses(star, inner_dust, outer_dust, do_moons=True):
disk = CircumstellarDisk(star)
planets = []
sequential_failures = 0
while disk.dust_left and sequential_failures < 10**3:
canidate = random_planetesimal(disk)
iel = canidate.inner_effect_limit
oel = canidate.outer_effect_limit
if disk.dust_available(iel, oel) > 0:
sequential_failures = 0
logging.info("Injecting planetesimal at " +
str(canidate.orbit.a) + " AU ...")
disk.accrete_dust(canidate)
if canidate.mass > PROTOPLANET_MASS:
coalesce_planetesimals(disk, planets, canidate, do_moons)
logging.info("\tsuccess.\n")
else:
logging.info("\tfailed due to large neighbor.\n")
else:
sequential_failures += 1
return planets
def convert_planetesimal_to_protoplanet(planetesimal):
return Protoplanet(
star=planetesimal.disk.star,
orbit=planetesimal.orbit,
dust_mass=planetesimal.dust_mass,
gas_mass=planetesimal.gas_mass
)
def convert_planetesimal_to_protomoon(planetesimal, planet):
print(" Capturing a protomoon.")
return Protomoon(
protoplanet=planet,
orbit=Orbit(
a=None,
e=None,
),
dust_mass=planetesimal.dust_mass,
gas_mass=planetesimal.gas_mass,
)
def coalesce_planetesimals(disk, planets, canidate, do_moons):
finished = False
for planet in planets:
diff = planet.orbit.a - canidate.orbit.a
if diff > 0.0:
dist1 = canidate.orbit.apoapsis * (1.0 + canidate.reduced_mass) - canidate.orbit.a
dist2 = planet.orbit.a - (planet.orbit.periapsis * (1.0 - planet.reduced_mass))
else:
dist1 = canidate.orbit.a - (canidate.orbit.periapsis * (1.0 - canidate.reduced_mass))
dist2 = (planet.orbit.apoapsis * (1.0 + planet.reduced_mass)) - planet.orbit.a
if abs(diff) <= abs(dist1) or abs(diff) <= abs(dist2):
a = (planet.mass + canidate.mass) / \
((planet.mass / planet.orbit.a) + (canidate.mass / canidate.orbit.a))
temp = planet.mass * sqrt(planet.orbit.a) * sqrt(1.0 - (planet.orbit.e ** 2.0))
temp = temp + (canidate.mass * sqrt(canidate.orbit.a) *
sqrt(sqrt(1.0 - (canidate.orbit.e ** 2.0))))
temp = temp / ((planet.mass + canidate.mass) * sqrt(canidate.orbit.a))
temp = 1.0 - (temp ** 2.0)
if temp < 0.0 or temp >= 1.0:
temp = 0.0
e = sqrt(temp)
if do_moons:
if canidate.mass < canidate.critical_mass:
if canidate.mass * SUN_MASS_IN_EARTH_MASSES < 2.5 \
and canidate.mass * SUN_MASS_IN_EARTH_MASSES > .0001 \
and planet.mass_of_moons < planet.mass * .05 \
and planet.mass > canidate.mass:
planet.add_moon(
convert_planetesimal_to_protomoon(canidate, planet))
logging.info("Moon captured at " + str(planet.orbit.a) + " AU. Planet Mass: " + str(planet.mass * SUN_MASS_IN_EARTH_MASSES) +
" earth masses Moon Mass: " + str(canidate.mass * SUN_MASS_IN_EARTH_MASSES) + " earth masses.")
finished = True
break
else:
logging.info("Did not capture potential moon at " +
str(planet.orbit.a) + " AU. Collision imminent.")
logging.info(
"Collision between two planetesimals! Computing new orbit and accumulating additional mass.")
disk.accrete_dust(planet)
planet.orbit = Orbit(a=a, e=e)
planet.orbit_a = a
planet.orbit_e = e
planet.dust_mass = planet.dust_mass + canidate.dust_mass
planet.gas_mass = planet.gas_mass + canidate.gas_mass
finished = True
logging.info(
"Conglomerate is now " +
str(planet.mass * SUN_MASS_IN_EARTH_MASSES) +
" earth masses at " + str(planet.orbit.a) + " AU."
)
if not finished:
logging.info("New Protoplanet at " + str(canidate.orbit.a) + "AU.")
planets.append(convert_planetesimal_to_protoplanet(canidate))
def calculate_gases(star, planet, planet_id):
if planet.surf_pressure > 0:
amount = [0 for _ in range(len(gases))]
totamount = 0
pressure = planet.surf_pressure/MILLIBARS_PER_BAR
n = 0
for i in range(len(gases)):
yp = gases[i].boil / \
(373. * ((log((pressure) + 0.001) / -5050.5) + (1.0 / 373.)))
if ((yp >= 0 and yp < planet.low_temp) and (gases[i].weight >= planet.molec_weight)):
vrms = rms_vel(gases[i].weight, planet.exospheric_temp)
pvrms = pow(1 / (1 + vrms / planet.esc_velocity),
star.age / 1e9)
abund = gases[i].abunds
react = 1.0
fract = 1.0
pres2 = 1.0
if gases[i].symbol == "Ar":
react = .15 * star.age/4e9
elif gases[i].symbol == "He":
abund = abund * (0.001 + (planet.gas_mass / planet.mass))
pres2 = (0.75 + pressure)
react = pow(1 / (1 + gases[i].reactivity),
star.age/2e9 * pres2)
elif (gases[i].symbol == "O" or gases[i].symbol == "O2") and star.age > 2e9 and planet.surf_temp > 270 and planet.surf_temp < 400:
pres2 = (0.89 + pressure/4)
react = pow(
1 / (1 + gases[i].reactivity), pow(star.age/2e9, 0.25) * pres2)
elif gases[i].symbol == "CO2" and star.age > 2e9 and planet.surf_temp > 270 and planet.surf_temp < 400:
pres2 = (0.75 + pressure)
react = pow(
1 / (1 + gases[i].reactivity), pow(star.age/2e9, 0.5) * pres2)
react *= 1.5
else:
pres2 = 0.75 + pressure
react = pow(
1 / (1 + gases[i].reactivity), star.age/2e9 * pres2)
fract = (1 - (planet.molec_weight / gases[i].weight))
amount[i] = abund * pvrms * react * fract
totamount += amount[i]
if (amount[i] > 0.0):
n += 1
else:
amount[i] = 0.0
if n > 0:
planet.gases = n
planet.atmosphere = []
for i in range(len(gases)):
if amount[i] > 0.0:
planet.atmosphere.append((gases[i], planet.surf_pressure * amount[i] / totamount))
n += 1
def roche_limit(planet, moon):
return 2.44 * planet.radius * pow((planet.density / moon.density), (1.0 / 3.0))
def hill_sphere(planet, star):
return planet.orbit.a * KM_PER_AU * pow((planet.mass / (3.0 * star.mass_ratio)), (1.0 / 3.0))
def generate_planet(protoplanet, star, random_tilt=0, planet_id=None, do_gases=True, do_moons=True, is_moon=False):
planet = Planet(
sun=star,
orbit=protoplanet.orbit,
dust_mass=protoplanet.dust_mass,
gas_mass=protoplanet.gas_mass,
mass=protoplanet.mass,
axial_tilt=inclination(protoplanet.orbit.a) if random_tilt else 0,
atmosphere=None,
surf_temp=0,
high_temp=0,
low_temp=0,
max_temp=0,
min_temp=0,
greenhs_rise=0,
resonant_period=False,
orbit_zone=orb_zone(star.luminosity_ratio, protoplanet.orbit.a),
orb_period=period(protoplanet.orbit.a, protoplanet.mass, star.mass_ratio)
)
planet.exospheric_temp = EARTH_EXOSPHERE_TEMP / \
((planet.orbit.a / star.r_ecosphere) ** 2)
planet.rms_velocity = rms_vel(MOL_NITROGEN, planet.exospheric_temp)
planet.core_radius = kothari_radius(
planet.dust_mass, False, planet.orbit_zone)
planet.density = empirical_density(
planet.mass, planet.orbit.a, star.r_ecosphere, True)
planet.radius = volume_radius(planet.mass, planet.density)
planet.surf_accel = acceleration(planet.mass, planet.radius)
planet.surf_grav = gravity(planet.surf_accel)
planet.molec_weight = min_molec_weight(planet)
if (((planet.mass * SUN_MASS_IN_EARTH_MASSES) > 1.0)
and ((planet.gas_mass / planet.mass) > 0.05)
and (min_molec_weight(planet) <= 4.0)):
if ((planet.gas_mass / planet.mass) < 0.20):
planet.type = PlanetType.SUB_SUB_GAS_GIANT
elif ((planet.mass * SUN_MASS_IN_EARTH_MASSES) < 20.0):
planet.type = PlanetType.SUB_GAS_GIANT
else:
planet.type = PlanetType.GAS_GIANT
else:
planet.radius = kothari_radius(planet.mass, False, planet.orbit_zone)
planet.density = volume_density(planet.mass, planet.radius)
planet.surf_accel = acceleration(planet.mass, planet.radius)
planet.surf_grav = gravity(planet.surf_accel)
if ((planet.gas_mass / planet.mass) > 0.000001):
h2_mass = planet.gas_mass * 0.85
he_mass = (planet.gas_mass - h2_mass) * 0.999
h2_loss = 0.0
he_loss = 0.0
h2_life = gas_life(MOL_HYDROGEN, planet)
he_life = gas_life(HELIUM, planet)
if (h2_life < star.age):
#math.exp with a value above 709 results in a math range error
#this is a dumb fix. STH 2021-0131
if (star.age / h2_life)>709:
h2_loss = ((1.0 - (1.0 / exp(709.0))) * h2_mass)
else:
h2_loss = ((1.0 - (1.0 / exp(star.age / h2_life))) * h2_mass)
planet.gas_mass -= h2_loss
planet.mass -= h2_loss
planet.surf_accel = acceleration(planet.mass, planet.radius)
planet.surf_grav = gravity(planet.surf_accel)
if (he_life < star.age):
he_loss = ((1.0 - (1.0 / exp(star.age / he_life))) * he_mass)
planet.gas_mass -= he_loss
planet.mass -= he_loss
planet.surf_accel = acceleration(planet.mass, planet.radius)
planet.surf_grav = gravity(planet.surf_accel)
'''if (((h2_loss + he_loss) > .000001) and (flag_verbose & 0x0080)):
fprintf(stderr, "%s\tLosing gas: H2: %5.3Lf EM, He: %5.3Lf EM\n",
planet_id,
h2_loss * SUN_MASS_IN_EARTH_MASSES, he_loss * SUN_MASS_IN_EARTH_MASSES)'''
planet.day = day_length(planet) # Modifies planet.resonant_period
planet.esc_velocity = escape_vel(planet.mass, planet.radius)
if planet.type == PlanetType.GAS_GIANT or planet.type == PlanetType.SUB_GAS_GIANT or planet.type == PlanetType.SUB_SUB_GAS_GIANT:
planet.greenhouse_effect = False
planet.volatile_gas_inventory = INCREDIBLY_LARGE_NUMBER
planet.surf_pressure = INCREDIBLY_LARGE_NUMBER
planet.boil_point = INCREDIBLY_LARGE_NUMBER
planet.surf_temp = INCREDIBLY_LARGE_NUMBER
planet.greenhs_rise = 0
planet.albedo = about(GAS_GIANT_ALBEDO, 0.1)
planet.hydrosphere = 1.0
planet.cloud_cover = 1.0
planet.ice_cover = 0.0
planet.surf_grav = gravity(planet.surf_accel)
planet.molec_weight = min_molec_weight(planet)
planet.surf_grav = INCREDIBLY_LARGE_NUMBER
planet.estimated_temp = est_temp(
star.r_ecosphere, planet.orbit.a, planet.albedo)
planet.estimated_terr_temp = est_temp(
star.r_ecosphere, planet.orbit.a, EARTH_ALBEDO)
temp = planet.estimated_terr_temp
if (temp >= FREEZING_POINT_OF_WATER) and (temp <= EARTH_AVERAGE_KELVIN + 10.) and (star.age > 2.0E9):
pass
else:
planet.estimated_temp = est_temp(
star.r_ecosphere, planet.orbit.a, EARTH_ALBEDO)
planet.estimated_terr_temp = est_temp(
star.r_ecosphere, planet.orbit.a, EARTH_ALBEDO)
planet.surf_grav = gravity(planet.surf_accel)
planet.molec_weight = min_molec_weight(planet)
planet.greenhouse_effect = grnhouse(star.r_ecosphere, planet.orbit.a)
planet.volatile_gas_inventory = vol_inventory(planet.mass,
planet.esc_velocity,
planet.rms_velocity,
star.mass_ratio,
planet.orbit_zone,
planet.greenhouse_effect,
(planet.gas_mass
/ planet.mass) > 0.000001)
planet.surf_pressure = pressure(planet.volatile_gas_inventory,
planet.radius,
planet.surf_grav)
if ((planet.surf_pressure == 0.0)):
planet.boil_point = 0.0
else:
planet.boil_point = boiling_point(planet.surf_pressure)
# Sets:
# planet.surf_temp
# planet.greenhs_rise
# planet.albedo
# planet.hydrosphere
# planet.cloud_cover
# planet.ice_cover
iterate_surface_temp(planet)
if (do_gases and (planet.max_temp >= FREEZING_POINT_OF_WATER) and (planet.min_temp <= planet.boil_point)):
calculate_gases(star, planet, planet_id)
# Next we assign a type to the planet.
if (planet.surf_pressure < 1.0):
if (not is_moon) and ((planet.mass * SUN_MASS_IN_EARTH_MASSES) < ASTEROID_MASS_LIMIT):
planet.type = PlanetType.ASTEROIDS
else:
planet.type = PlanetType.ROCK
elif (planet.surf_pressure > 6000.0) and (planet.molec_weight <= 2.0): # Retains Hydrogen
planet.type = PlanetType.SUB_SUB_GAS_GIANT
planet.gases = 0
planet.atmosphere = None
else:
# Atmospheres:
if (int(planet.day) == int(planet.orb_period * 24.0)) or planet.resonant_period:
planet.type = PlanetType.ONE_FACE
elif (planet.hydrosphere >= 0.95):
planet.type = PlanetType.WATER # >95% water
elif (planet.ice_cover >= 0.95):
planet.type = PlanetType.ICE # >95% ice
elif (planet.hydrosphere > 0.05):
planet.type = PlanetType.TERRESTRIAL # Terrestrial
# else <5% water
elif (planet.max_temp > planet.boil_point):
planet.type = PlanetType.VENUSIAN # Hot = Venusian
elif ((planet.gas_mass / planet.mass) > 0.0001):
# Accreted gas
planet.type = PlanetType.ICE # But no Greenhouse
planet.ice_cover = 1.0 # or liquid water
# Make it an Ice World
elif (planet.surf_pressure <= 250.0): # Thin air = Martian
planet.type = PlanetType.MARTIAN
elif (planet.surf_temp < FREEZING_POINT_OF_WATER):
planet.type = PlanetType.ICE
else:
planet.type = PlanetType.UNKNOWN # TODO(woursler): Consider throwing an error here.
'''if (flag_verbose & 0x0001)
fprintf (stderr, "%12s\tp=%4.2Lf\tm=%4.2Lf\tg=%4.2Lf\tt=%+.1Lf\t%s\t Unknown %s\n",
type_string (planet.type),
planet.surf_pressure,
planet.mass * SUN_MASS_IN_EARTH_MASSES,
planet.surf_grav,
planet.surf_temp - EARTH_AVERAGE_KELVIN,
planet_id,
((int)planet.day == (int)(planet.orb_period * 24.0) or
(planet.resonant_period)) ? "(1-Face)" : ""
)'''
if do_moons and not is_moon:
for protomoon in protoplanet.moons:
if protomoon.mass * SUN_MASS_IN_EARTH_MASSES > .000001:
protomoon.orbit = planet.orbit
# Note: adjusts density, which is used in computing the roche limit.
moon = generate_planet(
protoplanet=protomoon,
star=star,
random_tilt=random_tilt,
do_gases=do_gases,
do_moons=do_moons,
is_moon=True
)
# TODO(woursler): these should be their own subroutines.
roche_limit_r = roche_limit(planet, moon)
hill_sphere_r = hill_sphere(planet, star)
if (roche_limit_r * 3.0) < hill_sphere_r:
moon_a = random_number(
roche_limit_r * 1.5, hill_sphere_r / 2.0) / KM_PER_AU
moon_e = random_eccentricity()
moon.orbit = Orbit(a=moon_a, e=moon_e)
else:
moon.orbit = Orbit(a=0, e=0)
planet.moons.append(moon)
return planet
###
# Smoke Test
###
if __name__ == '__main__':
random.seed('earth')
print(generate_stellar_system(random_star()))
| true | true |
f71fe0dc7aac7d9afad4c1f08c6e9b94fde74e57 | 4,160 | py | Python | vivisect/extensions/example_gui_extension.py | bat-serjo/vivisect | f60934a2c8c51c7acdba52a65756e717a108a440 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | vivisect/extensions/example_gui_extension.py | bat-serjo/vivisect | f60934a2c8c51c7acdba52a65756e717a108a440 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | vivisect/extensions/example_gui_extension.py | bat-serjo/vivisect | f60934a2c8c51c7acdba52a65756e717a108a440 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | try:
from PyQt5.QtWidgets import QToolBar, QLabel, QPushButton, QTextEdit, QWidget, QInputDialog
from PyQt5 import QtCore
except:
from PyQt4.QtGui import QToolBar, QLabel, QPushButton, QTextEdit, QWidget, QInputDialog
from PyQt4 import QtCore
from vqt.main import idlethread
from vqt.basics import VBox
from vqt.common import ACT
'''
This is an example of a vivisect GUI extension module.
Set the environment variable VIV_EXT_PATH to point at a
directory full of python modules such as this to extend
and implement your own vivisect features.
The extension should be a python module, either in the
form of a .py file or a directory with a __init__.py
file. Either way, the module will be loaded into
memory and the "vivExtension" function called.
'''
from PyQt5.QtWidgets import QToolBar, QLabel, QPushButton, QTextEdit
from vqt.main import idlethread
from vqt.basics import VBox
class ExampleToolbar(QToolBar):
    '''
    Demo toolbar: a static label plus a single action that writes to the
    vivisect console.
    '''
    def __init__(self, vw, vwgui):
        # Stash the workspace/gui handles before initializing the Qt side.
        self.vw = vw
        self.vwgui = vwgui
        QToolBar.__init__(self, parent=vwgui)

        label = QLabel('Example Toolbar:', parent=self)
        self.addWidget(label)
        self.addAction('ONE', self.doOne)

    def doOne(self):
        # Action callback for the 'ONE' toolbar button.
        self.vw.vprint('did one!')
class ExampleWindow(QWidget):
    '''
    Demo dock-window widget: a push button stacked above a text editor.
    '''
    def __init__(self, vw, vwgui):
        self.vw = vw
        self.vwgui = vwgui
        QWidget.__init__(self, parent=vwgui)

        self.setWindowTitle('Example Window!')

        btn = QPushButton('My Button!', parent=self)
        edit = QTextEdit('WOOT! Some text!', parent=self)
        self.setLayout(VBox(btn, edit))
def vprint(vw, s, *args, **kwargs):
    '''
    Emit a %-formatted message to both the vivisect console and stdout.
    Extra keyword arguments are accepted for call-site convenience but
    are ignored.
    '''
    msg = s % args
    vw.vprint(msg)
    print(msg)
def ctxMenuHook(vw, va, expr, menu, parent, nav):
    '''
    Example right-click context-menu hook: adds demo entries to *menu*.

    Any exception is caught and printed so a buggy hook cannot break the
    GUI's menu handling.
    '''
    try:
        if va == 0x41414141:
            menu.addAction('WAT?', ACT(vw.vprint, "We're at AAAA!"))
        gui = vw.getVivGui()
        menu.addAction('bookmark (B)', ACT(gui.addBookmark, va))
        fmt = "YEE HAH %x %r %r %r %r"
        menu.addAction('YEEE HAH', ACT(vw.vprint, fmt % (va, expr, menu, parent, nav)))
        menu.addAction('YEEE HAH1', ACT(vprint, vw, fmt, va, expr, menu, parent, nav))
    except Exception:
        import traceback
        traceback.print_exc()
class Crap:
    """Carries vw/vwgui references into hotkey and menu callbacks.

    Vivisect hot keys are bound to individual GUI widgets, so vw and vwgui
    are not otherwise available at the time the callback fires.
    """

    def __init__(self, vw, vwgui):
        self.vw = vw
        self.vwgui = vwgui

    def thing(self):
        # Hotkey target: just prove the callback fired.
        vprint(self.vw, "Blah Blah Blah")

    def printUserInput(self):
        """Prompt the user for a line of text and echo the dialog result."""
        # ok is whether the "OK" button was pressed, utext is the user text
        utext, ok = QInputDialog.getText(self.vwgui, 'Enter...', 'User Text')
        vprint(self.vw, '%r: %r', ok, utext)
@idlethread
def vivExtension(vw, vwgui):
    """Extension entry point called by vivisect after the GUI is up.

    Demonstrates toolbars, dock windows, menu fields, context-menu hooks,
    hot keys, dialogs and dock-widget lookup.
    """
    # Toolbar.
    bar = ExampleToolbar(vw, vwgui)
    vwgui.addToolBar(QtCore.Qt.TopToolBarArea, bar)

    # Floating dock window built on a plain QWidget.
    win = ExampleWindow(vw, vwgui)
    dock = vwgui.vqDockWidget(win, floating=True)
    dock.resize(300, 200)

    # Menu entry.
    vwgui.vqAddMenuField('&Example.&FooBar.&PrintDiscoveredStats', vw.printDiscoveredStats, ())

    # Context-menu hook.
    vw.addCtxMenuHook('example', ctxMenuHook)

    # Hot key plus its named target.
    tempmod = Crap(vw, vwgui)
    vwgui.addHotKey('ctrl+p', 'file:hackme')
    vwgui.addHotKeyTarget('file:hackme', tempmod.thing)

    # Dialog: menu entry that asks for input and prints the result.
    vwgui.vqAddMenuField("&Example.&FooBar.&PrintUserInput", tempmod.printUserInput, ())

    # Enumerate existing dock windows by name.
    for w, vqDW in vwgui.vqGetDockWidgetsByName('viv'):
        vprint(vw, "Window: %r DockWidget: %r (%r)", w, vqDW, w.getEnviNavName())
| 31.755725 | 111 | 0.665625 | try:
from PyQt5.QtWidgets import QToolBar, QLabel, QPushButton, QTextEdit, QWidget, QInputDialog
from PyQt5 import QtCore
except:
from PyQt4.QtGui import QToolBar, QLabel, QPushButton, QTextEdit, QWidget, QInputDialog
from PyQt4 import QtCore
from vqt.main import idlethread
from vqt.basics import VBox
from vqt.common import ACT
from PyQt5.QtWidgets import QToolBar, QLabel, QPushButton, QTextEdit
from vqt.main import idlethread
from vqt.basics import VBox
class ExampleToolbar(QToolBar):
def __init__(self, vw, vwgui):
self.vw = vw
self.vwgui = vwgui
QToolBar.__init__(self, parent=vwgui)
self.addWidget( QLabel('Example Toolbar:', parent=self) )
self.addAction('ONE', self.doOne)
def doOne(self):
self.vw.vprint('did one!')
class ExampleWindow(QWidget):
def __init__(self, vw, vwgui):
self.vw = vw
self.vwgui = vwgui
QWidget.__init__(self, parent=vwgui)
self.setWindowTitle('Example Window!')
button = QPushButton('My Button!', parent=self)
textedit = QTextEdit('WOOT! Some text!', parent=self)
self.setLayout( VBox(button, textedit) )
def vprint(vw, s, *args, **kwargs):
vw.vprint(s % args)
print(s % args)
def ctxMenuHook(vw, va, expr, menu, parent, nav):
try:
if va == 0x41414141:
menu.addAction('WAT?', ACT(vw.vprint, "We're at AAAA!"))
menu.addAction('bookmark (B)', ACT(vw.getVivGui().addBookmark, va))
menu.addAction('YEEE HAH', ACT(vw.vprint, "YEE HAH %x %r %r %r %r" % (va, expr, menu, parent, nav)))
menu.addAction('YEEE HAH1', ACT(vprint, vw, "YEE HAH %x %r %r %r %r", va, expr, menu, parent, nav))
except Exception as e:
import traceback
traceback.print_exc()
class Crap:
def __init__(self, vw, vwgui):
self.vw = vw
self.vwgui = vwgui
def thing(self):
vprint(self.vw, "Blah Blah Blah")
def printUserInput(self):
# ok is whether the "OK" button was pressed, utext is the user text
utext, ok = QInputDialog.getText(self.vwgui, 'Enter...', 'User Text')
vprint(self.vw, '%r: %r', ok, utext)
@idlethread
def vivExtension(vw, vwgui):
# Create a toolbar and add it to the GUI
toolbar = ExampleToolbar(vw, vwgui)
vwgui.addToolBar(QtCore.Qt.TopToolBarArea, toolbar)
# Create a new Vivisect Dock Window (based on a QWidget)
window = ExampleWindow(vw, vwgui)
d = vwgui.vqDockWidget(window, floating=True)
d.resize(300,200)
# Add a menu item
vwgui.vqAddMenuField('&Example.&FooBar.&PrintDiscoveredStats', vw.printDiscoveredStats, ())
# hook context menu
vw.addCtxMenuHook('example', ctxMenuHook)
# add HotKeyTargets and HotKeys
tempmod = Crap(vw, vwgui)
vwgui.addHotKey('ctrl+p', 'file:hackme')
vwgui.addHotKeyTarget('file:hackme', tempmod.thing)
# Popups/Dialogs - add a menu entry to ask for input and print the output
vwgui.vqAddMenuField("&Example.&FooBar.&PrintUserInput", tempmod.printUserInput, ())
# get Dock Windows by name
for w, vqDW in vwgui.vqGetDockWidgetsByName('viv'):
vprint(vw, "Window: %r DockWidget: %r (%r)", w, vqDW, w.getEnviNavName())
| true | true |
f71fe1a680e38a876089f6c92424e564085015cd | 728 | py | Python | tests/test_bit.py | robertchase/aiomysql | 80236fca02c70cd693cb02112646ca14f2c7e2be | [
"MIT"
] | null | null | null | tests/test_bit.py | robertchase/aiomysql | 80236fca02c70cd693cb02112646ca14f2c7e2be | [
"MIT"
] | null | null | null | tests/test_bit.py | robertchase/aiomysql | 80236fca02c70cd693cb02112646ca14f2c7e2be | [
"MIT"
] | 1 | 2021-04-30T14:11:42.000Z | 2021-04-30T14:11:42.000Z | """test Bit operations"""
import pytest
from aiomysql.bit import Bit
@pytest.mark.parametrize(
    'length, value, expected', (
        (10, None, ValueError),
        (10, 1, 1),
        (10, '123', TypeError),
        (10, '0', 0),
        (10, '1', 1),
        (10, '010', 2),
        (10, '1010', 10),
        (10, '01000000000', ValueError),
    ),
)
def test_bit(length, value, expected):
    """Bit(length) accepts valid ints/bit strings and rejects bad input."""
    convert = Bit(length)
    if expected in (ValueError, TypeError):
        # Invalid inputs must raise the recorded exception type.
        with pytest.raises(expected):
            convert(value)
    else:
        assert convert(value).value == expected
def test_as_binary():
    """A 5-bit value of 10 renders as the bit string '1010'."""
    assert Bit(5)(10).as_binary() == '1010'
| 22.060606 | 43 | 0.54533 | import pytest
from aiomysql.bit import Bit
@pytest.mark.parametrize(
'length, value, expected', (
(10, None, ValueError),
(10, 1, 1),
(10, '123', TypeError),
(10, '0', 0),
(10, '1', 1),
(10, '010', 2),
(10, '1010', 10),
(10, '01000000000', ValueError),
),
)
def test_bit(length, value, expected):
bit = Bit(length)
if expected in (ValueError, TypeError):
with pytest.raises(expected):
bit(value)
else:
assert bit(value).value == expected
def test_as_binary():
bit = Bit(5)(10)
assert bit.as_binary() == '1010'
| true | true |
f71fe27393e1a3a75142186240ffddd9c0d963a4 | 15,812 | py | Python | contrib/experimental/input/osx.py | bitcraft/pyglet | 144257c365ca85528c6a4c5bed8141e683d7a9b6 | [
"BSD-3-Clause"
] | 15 | 2015-01-21T12:29:01.000Z | 2018-12-09T09:17:33.000Z | contrib/experimental/input/osx.py | bitcraft/pyglet | 144257c365ca85528c6a4c5bed8141e683d7a9b6 | [
"BSD-3-Clause"
] | null | null | null | contrib/experimental/input/osx.py | bitcraft/pyglet | 144257c365ca85528c6a4c5bed8141e683d7a9b6 | [
"BSD-3-Clause"
] | 9 | 2015-12-12T09:12:46.000Z | 2021-12-26T13:29:14.000Z | #!/usr/bin/env python
"""
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import pyglet
from pyglet.libs.darwin import carbon, _oscheck, create_cfstring
from pyglet.libs.darwin.constants import *
import input
import usage
# non-broken c_void_p
void_p = ctypes.POINTER(ctypes.c_int)
class CFUUIDBytes(ctypes.Structure):
    """Raw 16-octet CFUUID, exposed as fields ``byte0`` .. ``byte15``."""
    _fields_ = [('byte%d' % n, ctypes.c_uint8) for n in range(16)]
mach_port_t = void_p
io_iterator_t = void_p
kern_return_t = ctypes.c_int
IOReturn = ctypes.c_uint
CFDictionaryRef = void_p
CFMutableDictionaryRef = void_p
CFArrayRef = void_p
CFUUIDRef = ctypes.POINTER(CFUUIDBytes)
AbsoluteTime = ctypes.c_double
HRESULT = ctypes.c_int
REFIID = CFUUIDBytes
IOHIDElementType = ctypes.c_int
kIOHIDElementTypeInput_Misc = 1
kIOHIDElementTypeInput_Button = 2
kIOHIDElementTypeInput_Axis = 3
kIOHIDElementTypeInput_ScanCodes = 4
kIOHIDElementTypeOutput = 129
kIOHIDElementTypeFeature = 257
kIOHIDElementTypeCollection = 513
IOHIDElementCookie = ctypes.c_void_p
MACH_PORT_NULL = 0
kIOHIDDeviceKey = "IOHIDDevice"
kIOServicePlane = "IOService"
kIOHIDProductIDKey = "ProductID"
kCFNumberIntType = 9
kIOHIDOptionsTypeSeizeDevice = 1
kIOReturnExclusiveAccess = 0xe00002c5
carbon.CFUUIDGetConstantUUIDWithBytes.restype = CFUUIDRef
kIOHIDDeviceUserClientTypeID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0xFA, 0x12,
0xFA, 0x38,
0x6F, 0x1A,
0x11, 0xD4,
0xBA, 0x0C,
0x00, 0x05,
0x02, 0x8F,
0x18, 0xD5)
kIOCFPlugInInterfaceID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0xC2, 0x44, 0xE8,
0x58, 0x10, 0x9C,
0x11, 0xD4,
0x91, 0xD4, 0x00,
0x50, 0xE4, 0xC6,
0x42, 0x6F)
kIOHIDDeviceInterfaceID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0x78, 0xBD,
0x42, 0x0C,
0x6F, 0x14,
0x11, 0xD4,
0x94, 0x74,
0x00, 0x05,
0x02, 0x8F,
0x18, 0xD5)
class IOHIDEventStruct(ctypes.Structure):
    """Mirrors the C ``IOHIDEventStruct`` delivered by the HID event queue.

    Field order matches the C layout and must not change.
    """
    _fields_ = (
        ('type', IOHIDElementType),            # element type constant
        ('elementCookie', IOHIDElementCookie), # identifies the source element
        ('value', ctypes.c_int32),             # new element value
        # NOTE(review): AbsoluteTime is declared c_double above -- confirm
        # this matches the C ABI of the installed IOKit headers.
        ('timestamp', AbsoluteTime),
        ('longValueSize', ctypes.c_uint32),
        ('longValue', ctypes.c_void_p),
    )
Self = ctypes.c_void_p
class IUnknown(ctypes.Structure):
    """Base COM vtable: reserved slot plus QueryInterface/AddRef/Release.

    Slot order mirrors the binary ABI and must not be rearranged.
    """
    _fields_ = (
        ('_reserved', ctypes.c_void_p),
        ('QueryInterface',
         ctypes.CFUNCTYPE(HRESULT, Self, REFIID, ctypes.c_void_p)),
        ('AddRef', ctypes.CFUNCTYPE(ctypes.c_ulong, Self)),
        ('Release', ctypes.CFUNCTYPE(ctypes.c_ulong, Self)),
    )
# Most of these function prototypes are not filled in yet because I haven't
# bothered.
class IOHIDQueueInterface(ctypes.Structure):
    """COM vtable for IOHIDQueueInterface.

    Only the methods this module calls carry real prototypes; the rest are
    opaque ``c_void_p`` slots.  Slot order mirrors the binary ABI and must
    not be rearranged.
    """
    _fields_ = IUnknown._fields_ + (
        ('createAsyncEventSource', ctypes.c_void_p),
        ('getAsyncEventSource', ctypes.c_void_p),
        ('createAsyncPort', ctypes.c_void_p),
        ('getAsyncPort', ctypes.c_void_p),
        ('create',
         ctypes.CFUNCTYPE(IOReturn, Self, ctypes.c_uint32, ctypes.c_uint32)),
        ('dispose', ctypes.CFUNCTYPE(IOReturn, Self)),
        ('addElement', ctypes.CFUNCTYPE(IOReturn, Self, IOHIDElementCookie)),
        ('removeElement', ctypes.c_void_p),
        ('hasElement', ctypes.c_void_p),
        ('start', ctypes.CFUNCTYPE(IOReturn, Self)),
        ('stop', ctypes.CFUNCTYPE(IOReturn, Self)),
        ('getNextEvent',
         ctypes.CFUNCTYPE(IOReturn, Self, ctypes.POINTER(IOHIDEventStruct),
                          AbsoluteTime, ctypes.c_uint32)),
        ('setEventCallout', ctypes.c_void_p),
        ('getEventCallout', ctypes.c_void_p),
    )
class IOHIDDeviceInterface(ctypes.Structure):
    """COM vtable for IOHIDDeviceInterface (1.2.2 layout).

    Only the methods this module calls carry real prototypes; the rest are
    opaque ``c_void_p`` slots.  Slot order mirrors the binary ABI and must
    not be rearranged.
    """
    _fields_ = IUnknown._fields_ + (
        ('createAsyncEventSource', ctypes.c_void_p),
        ('getAsyncEventSource', ctypes.c_void_p),
        ('createAsyncPort', ctypes.c_void_p),
        ('getAsyncPort', ctypes.c_void_p),
        ('open', ctypes.CFUNCTYPE(IOReturn, Self, ctypes.c_uint32)),
        ('close', ctypes.CFUNCTYPE(IOReturn, Self)),
        ('setRemovalCallback', ctypes.c_void_p),
        ('getElementValue',
         ctypes.CFUNCTYPE(IOReturn, Self, IOHIDElementCookie,
                          ctypes.POINTER(IOHIDEventStruct))),
        ('setElementValue', ctypes.c_void_p),
        ('queryElementValue', ctypes.c_void_p),
        ('startAllQueues', ctypes.c_void_p),
        ('stopAllQueues', ctypes.c_void_p),
        ('allocQueue',
         ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.POINTER(IOHIDQueueInterface)),
                          Self)),
        ('allocOutputTransaction', ctypes.c_void_p),
        # 1.2.1 (10.2.3)
        ('setReport', ctypes.c_void_p),
        ('getReport', ctypes.c_void_p),
        # 1.2.2 (10.3)
        ('copyMatchingElements',
         ctypes.CFUNCTYPE(IOReturn, Self, CFDictionaryRef,
                          ctypes.POINTER(CFArrayRef))),
        ('setInterruptReportHandlerCallback', ctypes.c_void_p),
    )
def get_master_port():
    """Return the IOKit master port used for subsequent IOKit calls."""
    port = mach_port_t()
    _oscheck(carbon.IOMasterPort(MACH_PORT_NULL, ctypes.byref(port)))
    return port
def get_matching_dictionary():
    """Build an IOService matching dictionary that selects all HID devices."""
    carbon.IOServiceMatching.restype = CFMutableDictionaryRef
    return carbon.IOServiceMatching(kIOHIDDeviceKey)
def get_existing_devices(master_port, matching_dictionary):
    """Enumerate the HID devices currently attached and wrap each in `Device`.

    Consumes the caller's reference to *matching_dictionary*.
    """
    iterator = io_iterator_t()
    _oscheck(
        carbon.IOServiceGetMatchingServices(master_port, matching_dictionary,
                                            ctypes.byref(iterator))
    )

    found = []
    while carbon.IOIteratorIsValid(iterator):
        handle = carbon.IOIteratorNext(iterator)
        if not handle:
            # Iterator exhausted.
            break
        found.append(Device(handle))
    carbon.IOObjectRelease(iterator)
    return found
def cfstring_to_string(value_string):
    """Decode a CFString into a Python byte string; None if decoding fails."""
    length = carbon.CFStringGetLength(value_string)
    max_size = carbon.CFStringGetMaximumSizeForEncoding(
        length, kCFStringEncodingUTF8)
    buf = ctypes.c_buffer(max_size + 1)
    ok = carbon.CFStringGetCString(value_string, buf, len(buf),
                                   kCFStringEncodingUTF8)
    if not ok:
        return
    return buf.value
def cfnumber_to_int(value):
    """Extract a CFNumber's value as a native int."""
    out = ctypes.c_int()
    carbon.CFNumberGetValue(value, kCFNumberIntType, ctypes.byref(out))
    return out.value
def cfvalue_to_value(value):
    """Convert a CFString or CFNumber to a Python value; None otherwise."""
    if not value:
        return None
    type_id = carbon.CFGetTypeID(value)
    if type_id == carbon.CFStringGetTypeID():
        return cfstring_to_string(value)
    if type_id == carbon.CFNumberGetTypeID():
        return cfnumber_to_int(value)
    # Unsupported CF type.
    return None
def get_property_value(properties, key):
    """Look up *key* in a CFDictionary, returning the raw CF value or None."""
    cf_key = create_cfstring(key)
    value = ctypes.c_void_p()
    found = carbon.CFDictionaryGetValueIfPresent(properties, cf_key,
                                                 ctypes.byref(value))
    carbon.CFRelease(cf_key)
    return value if found else None
def get_property(properties, key):
    """Return *key* from a CFDictionary converted to a Python value (or None)."""
    return cfvalue_to_value(get_property_value(properties, key))
def dump_properties(properties):
    """Debug helper: print every key/value pair of a CFDictionary."""
    CFDictionaryApplierFunction = ctypes.CFUNCTYPE(
        None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)

    def func(key, value, context):
        print('%s = %s' % (cfstring_to_string(key), cfvalue_to_value(value)))

    carbon.CFDictionaryApplyFunction(
        properties, CFDictionaryApplierFunction(func), None)
class Device:
    """A HID device discovered through IOKit.

    :IVariables:
        `name` : str
            Product string from the IOKit registry.
        `manufacturer` : str
            Manufacturer string from the IOKit registry.
    """

    def __init__(self, generic_device):
        self._init_properties(generic_device)
        self._device = self._get_device_interface(generic_device)
        self.elements = self._get_elements()
        self._open = False
        self._queue = None
        self._queue_depth = 8  # Number of events queue can buffer

    def _init_properties(self, generic_device):
        # Cache the human-readable identification strings from the device's
        # IOKit registry entry, then release the property dictionary.
        properties = CFMutableDictionaryRef()
        _oscheck(
            carbon.IORegistryEntryCreateCFProperties(generic_device,
                                                     ctypes.byref(properties),
                                                     None, 0)
        )
        self.name = get_property(properties, "Product")
        self.manufacturer = get_property(properties, "Manufacturer")
        carbon.CFRelease(properties)

    def _get_device_interface(self, generic_device):
        # Create the intermediate IOCFPlugIn, query it for the HID device
        # COM interface, then release the plug-in (the device interface
        # holds its own reference).
        plug_in_interface = \
            ctypes.POINTER(ctypes.POINTER(IUnknown))()
        score = ctypes.c_int32()
        _oscheck(
            carbon.IOCreatePlugInInterfaceForService(
                generic_device,
                kIOHIDDeviceUserClientTypeID,
                kIOCFPlugInInterfaceID,
                ctypes.byref(plug_in_interface),
                ctypes.byref(score))
        )

        carbon.CFUUIDGetUUIDBytes.restype = CFUUIDBytes
        hid_device_interface = \
            ctypes.POINTER(ctypes.POINTER(IOHIDDeviceInterface))()
        _oscheck(
            plug_in_interface.contents.contents.QueryInterface(
                plug_in_interface,
                carbon.CFUUIDGetUUIDBytes(kIOHIDDeviceInterfaceID),
                ctypes.byref(hid_device_interface))
        )

        plug_in_interface.contents.contents.Release(plug_in_interface)

        return hid_device_interface

    def _get_elements(self):
        # Enumerate every element (button, axis, ...) the device reports and
        # index them by cookie so dispatch_events can route queue events.
        elements_array = CFArrayRef()
        _oscheck(
            self._device.contents.contents.copyMatchingElements(
                self._device, None, ctypes.byref(elements_array))
        )

        self._element_cookies = {}
        elements = []
        n_elements = carbon.CFArrayGetCount(elements_array)
        for i in range(n_elements):
            properties = carbon.CFArrayGetValueAtIndex(elements_array, i)
            element = DeviceElement(self, properties)
            elements.append(element)
            self._element_cookies[element._cookie] = element
        carbon.CFRelease(elements_array)
        return elements

    def __repr__(self):
        # Fixed: previously referenced self.product, which is never set;
        # the attribute initialised in _init_properties is `name`.
        return '%s(name=%r, manufacturer=%r)' % (
            self.__class__.__name__, self.name, self.manufacturer)

    def open(self, exclusive=False):
        """Open the device and start queueing its input events.

        :Parameters:
            `exclusive` : bool
                If True, seize the device so no other client receives
                its events.

        Raises `input.InputDeviceExclusiveException` if another client
        already holds exclusive access.
        """
        flags = 0
        if exclusive:
            flags |= kIOHIDOptionsTypeSeizeDevice
        result = self._device.contents.contents.open(self._device, flags)
        if result == 0:
            self._open = True
        elif result == kIOReturnExclusiveAccess:
            raise input.InputDeviceExclusiveException()

        # Create event queue
        self._queue = self._device.contents.contents.allocQueue(self._device)
        _oscheck(
            self._queue.contents.contents.create(self._queue,
                                                 0, self._queue_depth)
        )
        # Add all elements into queue
        # TODO: only "interesting/known" elements?
        for element in self.elements:
            # NOTE(review): the addElement prototype above declares only
            # (self, cookie); the trailing 0 relies on ctypes passing extra
            # args through -- confirm against the IOKit header.
            r = self._queue.contents.contents.addElement(self._queue,
                                                         element._cookie, 0)
            if r != 0:
                print('error adding %r' % element)
        _oscheck(
            self._queue.contents.contents.start(self._queue)
        )

        # HACK TODO: poll the queue from the clock until a proper event
        # source is wired in.
        pyglet.clock.schedule(self.dispatch_events)

    def close(self):
        """Stop and dispose the event queue, then close the device.

        No-op if the device is not open.
        """
        if not self._open:
            return

        # HACK TODO: see open()
        pyglet.clock.unschedule(self.dispatch_events)

        _oscheck(
            self._queue.contents.contents.stop(self._queue)
        )
        _oscheck(
            self._queue.contents.contents.dispose(self._queue)
        )
        self._queue.contents.contents.Release(self._queue)
        self._queue = None
        _oscheck(
            self._device.contents.contents.close(self._device)
        )
        self._open = False

    # TODO: TEMP/HACK
    def dispatch_events(self, dt=None):
        """Drain one pending event and update the matching element's value."""
        if not self._open:
            return

        event = IOHIDEventStruct()
        r = self._queue.contents.contents.getNextEvent(self._queue,
                                                       ctypes.byref(event),
                                                       0, 0)
        if r != 0:
            # Undocumented behaviour? returns 3758097127L when no events are
            # in queue (is documented to block)
            return

        try:
            element = self._element_cookies[event.elementCookie]
            element.value = event.value
        except KeyError:
            # Event for an element we did not index; ignore it.
            pass
class DeviceElement:
    """One input/output element (button, axis, ...) of a `Device`.

    Removed: a dead class-level triple-quoted string containing an
    alternative getElementValue-based ``get_value`` implementation.
    """

    def __init__(self, device, properties):
        self.device = device
        self._cookie = get_property(properties, 'ElementCookie')
        _usage = get_property(properties, 'Usage')
        usage_page = get_property(properties, 'UsagePage')
        self.name = usage.get_element_usage_name(usage_page, _usage)
        self.known = usage.get_element_usage_known(usage_page, _usage)
        # Most recent value seen on the event queue; written by
        # Device.dispatch_events.  None until the first event arrives.
        self.value = None

    def get_value(self):
        """Return the last dispatched value for this element (or None)."""
        return self.value
def get_devices():
    """Convenience wrapper: enumerate all attached HID devices."""
    return get_existing_devices(get_master_port(), get_matching_dictionary())
| 34.982301 | 84 | 0.544397 |
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import pyglet
from pyglet.libs.darwin import carbon, _oscheck, create_cfstring
from pyglet.libs.darwin.constants import *
import input
import usage
void_p = ctypes.POINTER(ctypes.c_int)
class CFUUIDBytes(ctypes.Structure):
_fields_ = [('byte%d' % i, ctypes.c_uint8) for i in range(16)]
mach_port_t = void_p
io_iterator_t = void_p
kern_return_t = ctypes.c_int
IOReturn = ctypes.c_uint
CFDictionaryRef = void_p
CFMutableDictionaryRef = void_p
CFArrayRef = void_p
CFUUIDRef = ctypes.POINTER(CFUUIDBytes)
AbsoluteTime = ctypes.c_double
HRESULT = ctypes.c_int
REFIID = CFUUIDBytes
IOHIDElementType = ctypes.c_int
kIOHIDElementTypeInput_Misc = 1
kIOHIDElementTypeInput_Button = 2
kIOHIDElementTypeInput_Axis = 3
kIOHIDElementTypeInput_ScanCodes = 4
kIOHIDElementTypeOutput = 129
kIOHIDElementTypeFeature = 257
kIOHIDElementTypeCollection = 513
IOHIDElementCookie = ctypes.c_void_p
MACH_PORT_NULL = 0
kIOHIDDeviceKey = "IOHIDDevice"
kIOServicePlane = "IOService"
kIOHIDProductIDKey = "ProductID"
kCFNumberIntType = 9
kIOHIDOptionsTypeSeizeDevice = 1
kIOReturnExclusiveAccess = 0xe00002c5
carbon.CFUUIDGetConstantUUIDWithBytes.restype = CFUUIDRef
kIOHIDDeviceUserClientTypeID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0xFA, 0x12,
0xFA, 0x38,
0x6F, 0x1A,
0x11, 0xD4,
0xBA, 0x0C,
0x00, 0x05,
0x02, 0x8F,
0x18, 0xD5)
kIOCFPlugInInterfaceID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0xC2, 0x44, 0xE8,
0x58, 0x10, 0x9C,
0x11, 0xD4,
0x91, 0xD4, 0x00,
0x50, 0xE4, 0xC6,
0x42, 0x6F)
kIOHIDDeviceInterfaceID = carbon.CFUUIDGetConstantUUIDWithBytes(None,
0x78, 0xBD,
0x42, 0x0C,
0x6F, 0x14,
0x11, 0xD4,
0x94, 0x74,
0x00, 0x05,
0x02, 0x8F,
0x18, 0xD5)
class IOHIDEventStruct(ctypes.Structure):
_fields_ = (
('type', IOHIDElementType),
('elementCookie', IOHIDElementCookie),
('value', ctypes.c_int32),
('timestamp', AbsoluteTime),
('longValueSize', ctypes.c_uint32),
('longValue', ctypes.c_void_p)
)
Self = ctypes.c_void_p
class IUnknown(ctypes.Structure):
_fields_ = (
('_reserved', ctypes.c_void_p),
('QueryInterface',
ctypes.CFUNCTYPE(HRESULT, Self, REFIID, ctypes.c_void_p)),
('AddRef',
ctypes.CFUNCTYPE(ctypes.c_ulong, Self)),
('Release',
ctypes.CFUNCTYPE(ctypes.c_ulong, Self)),
)
# bothered.
class IOHIDQueueInterface(ctypes.Structure):
_fields_ = IUnknown._fields_ + (
('createAsyncEventSource', ctypes.c_void_p),
('getAsyncEventSource', ctypes.c_void_p),
('createAsyncPort', ctypes.c_void_p),
('getAsyncPort', ctypes.c_void_p),
('create', ctypes.CFUNCTYPE(IOReturn,
Self, ctypes.c_uint32, ctypes.c_uint32)),
('dispose', ctypes.CFUNCTYPE(IOReturn,
Self)),
('addElement', ctypes.CFUNCTYPE(IOReturn,
Self, IOHIDElementCookie)),
('removeElement', ctypes.c_void_p),
('hasElement', ctypes.c_void_p),
('start', ctypes.CFUNCTYPE(IOReturn,
Self)),
('stop', ctypes.CFUNCTYPE(IOReturn,
Self)),
('getNextEvent', ctypes.CFUNCTYPE(IOReturn,
Self,
ctypes.POINTER(IOHIDEventStruct),
AbsoluteTime,
ctypes.c_uint32)),
('setEventCallout', ctypes.c_void_p),
('getEventCallout', ctypes.c_void_p),
)
class IOHIDDeviceInterface(ctypes.Structure):
_fields_ = IUnknown._fields_ + (
('createAsyncEventSource', ctypes.c_void_p),
('getAsyncEventSource', ctypes.c_void_p),
('createAsyncPort', ctypes.c_void_p),
('getAsyncPort', ctypes.c_void_p),
('open', ctypes.CFUNCTYPE(IOReturn,
Self, ctypes.c_uint32)),
('close', ctypes.CFUNCTYPE(IOReturn,
Self)),
('setRemovalCallback', ctypes.c_void_p),
('getElementValue', ctypes.CFUNCTYPE(IOReturn,
Self, IOHIDElementCookie,
ctypes.POINTER(IOHIDEventStruct))),
('setElementValue', ctypes.c_void_p),
('queryElementValue', ctypes.c_void_p),
('startAllQueues', ctypes.c_void_p),
('stopAllQueues', ctypes.c_void_p),
('allocQueue', ctypes.CFUNCTYPE(
ctypes.POINTER(ctypes.POINTER(IOHIDQueueInterface)),
Self)),
('allocOutputTransaction', ctypes.c_void_p),
# 1.2.1 (10.2.3)
('setReport', ctypes.c_void_p),
('getReport', ctypes.c_void_p),
# 1.2.2 (10.3)
('copyMatchingElements', ctypes.CFUNCTYPE(IOReturn,
Self, CFDictionaryRef,
ctypes.POINTER(CFArrayRef))),
('setInterruptReportHandlerCallback', ctypes.c_void_p),
)
def get_master_port():
master_port = mach_port_t()
_oscheck(
carbon.IOMasterPort(MACH_PORT_NULL, ctypes.byref(master_port))
)
return master_port
def get_matching_dictionary():
carbon.IOServiceMatching.restype = CFMutableDictionaryRef
matching_dictionary = carbon.IOServiceMatching(kIOHIDDeviceKey)
return matching_dictionary
def get_existing_devices(master_port, matching_dictionary):
# Consumes reference to matching_dictionary
iterator = io_iterator_t()
_oscheck(
carbon.IOServiceGetMatchingServices(master_port,
matching_dictionary,
ctypes.byref(iterator))
)
devices = list()
while carbon.IOIteratorIsValid(iterator):
device = carbon.IOIteratorNext(iterator)
if not device:
break
devices.append(Device(device))
carbon.IOObjectRelease(iterator)
return devices
def cfstring_to_string(value_string):
value_length = carbon.CFStringGetLength(value_string)
buffer_length = carbon.CFStringGetMaximumSizeForEncoding(
value_length, kCFStringEncodingUTF8)
buffer = ctypes.c_buffer(buffer_length + 1)
result = carbon.CFStringGetCString(value_string,
buffer,
len(buffer),
kCFStringEncodingUTF8)
if not result:
return
return buffer.value
def cfnumber_to_int(value):
result = ctypes.c_int()
carbon.CFNumberGetValue(value, kCFNumberIntType, ctypes.byref(result))
return result.value
def cfvalue_to_value(value):
if not value:
return None
value_type = carbon.CFGetTypeID(value)
if value_type == carbon.CFStringGetTypeID():
return cfstring_to_string(value)
elif value_type == carbon.CFNumberGetTypeID():
return cfnumber_to_int(value)
else:
return None
def get_property_value(properties, key):
key_string = create_cfstring(key)
value = ctypes.c_void_p()
present = carbon.CFDictionaryGetValueIfPresent(properties,
key_string,
ctypes.byref(value))
carbon.CFRelease(key_string)
if not present:
return None
return value
def get_property(properties, key):
return cfvalue_to_value(get_property_value(properties, key))
def dump_properties(properties):
def func(key, value, context):
print('%s = %s' % (cfstring_to_string(key), cfvalue_to_value(value)))
CFDictionaryApplierFunction = ctypes.CFUNCTYPE(None,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p)
carbon.CFDictionaryApplyFunction(properties,
CFDictionaryApplierFunction(func), None)
class Device:
def __init__(self, generic_device):
self._init_properties(generic_device)
self._device = self._get_device_interface(generic_device)
self.elements = self._get_elements()
self._open = False
self._queue = None
self._queue_depth = 8 # Number of events queue can buffer
def _init_properties(self, generic_device):
properties = CFMutableDictionaryRef()
_oscheck(
carbon.IORegistryEntryCreateCFProperties(generic_device,
ctypes.byref(properties),
None, 0)
)
self.name = get_property(properties, "Product")
self.manufacturer = get_property(properties, "Manufacturer")
carbon.CFRelease(properties)
def _get_device_interface(self, generic_device):
plug_in_interface = \
ctypes.POINTER(ctypes.POINTER(IUnknown))()
score = ctypes.c_int32()
_oscheck(
carbon.IOCreatePlugInInterfaceForService(
generic_device,
kIOHIDDeviceUserClientTypeID,
kIOCFPlugInInterfaceID,
ctypes.byref(plug_in_interface),
ctypes.byref(score))
)
carbon.CFUUIDGetUUIDBytes.restype = CFUUIDBytes
hid_device_interface = \
ctypes.POINTER(ctypes.POINTER(IOHIDDeviceInterface))()
_oscheck(
plug_in_interface.contents.contents.QueryInterface(
plug_in_interface,
carbon.CFUUIDGetUUIDBytes(kIOHIDDeviceInterfaceID),
ctypes.byref(hid_device_interface))
)
plug_in_interface.contents.contents.Release(plug_in_interface)
return hid_device_interface
def _get_elements(self):
elements_array = CFArrayRef()
_oscheck(
self._device.contents.contents.copyMatchingElements(self._device,
None,
ctypes.byref(
elements_array))
)
self._element_cookies = dict()
elements = list()
n_elements = carbon.CFArrayGetCount(elements_array)
for i in range(n_elements):
properties = carbon.CFArrayGetValueAtIndex(elements_array, i)
element = DeviceElement(self, properties)
elements.append(element)
self._element_cookies[element._cookie] = element
carbon.CFRelease(elements_array)
return elements
def __repr__(self):
return '%s(name=%r, manufacturer=%r)' % (
self.__class__.__name__, self.product, self.manufacturer)
def open(self, exclusive=False):
flags = 0
if exclusive:
flags |= kIOHIDOptionsTypeSeizeDevice
result = self._device.contents.contents.open(self._device, flags)
if result == 0:
self._open = True
elif result == kIOReturnExclusiveAccess:
raise input.InputDeviceExclusiveException()
# Create event queue
self._queue = self._device.contents.contents.allocQueue(self._device)
_oscheck(
self._queue.contents.contents.create(self._queue,
0, self._queue_depth)
)
# Add all elements into queue
# TODO: only "interesting/known" elements?
for element in self.elements:
r = self._queue.contents.contents.addElement(self._queue,
element._cookie, 0)
if r != 0:
print('error adding %r' % element)
_oscheck(
self._queue.contents.contents.start(self._queue)
)
# HACK TODO:
pyglet.clock.schedule(self.dispatch_events)
def close(self):
if not self._open:
return
# HACK TODO:
pyglet.clock.unschedule(self.dispatch_events)
_oscheck(
self._queue.contents.contents.stop(self._queue)
)
_oscheck(
self._queue.contents.contents.dispose(self._queue)
)
self._queue.contents.contents.Release(self._queue)
self._queue = None
_oscheck(
self._device.contents.contents.close(self._device)
)
self._open = False
# TODO: TEMP/HACK
def dispatch_events(self, dt=None):
if not self._open:
return
event = IOHIDEventStruct()
r = self._queue.contents.contents.getNextEvent(self._queue,
ctypes.byref(event), 0,
0)
if r != 0:
# Undocumented behaviour? returns 3758097127L when no events are
# in queue (is documented to block)
return
try:
element = self._element_cookies[event.elementCookie]
element.value = event.value
except KeyError:
pass
class DeviceElement:
def __init__(self, device, properties):
self.device = device
self._cookie = get_property(properties, 'ElementCookie')
_usage = get_property(properties, 'Usage')
usage_page = get_property(properties, 'UsagePage')
self.name = usage.get_element_usage_name(usage_page, _usage)
self.known = usage.get_element_usage_known(usage_page, _usage)
self.value = None
def get_value(self):
return self.value
def get_devices():
return get_existing_devices(get_master_port(), get_matching_dictionary())
| true | true |
f71fe39b002b6987bd56fdc8f822aa4a1ab3f554 | 548 | py | Python | config_parser.py | benkelaci/qrcode_medicinedispenser | 41cee011dc0e9ab5d1ef0738efd5e1ea11c13d0a | [
"MIT"
] | null | null | null | config_parser.py | benkelaci/qrcode_medicinedispenser | 41cee011dc0e9ab5d1ef0738efd5e1ea11c13d0a | [
"MIT"
] | null | null | null | config_parser.py | benkelaci/qrcode_medicinedispenser | 41cee011dc0e9ab5d1ef0738efd5e1ea11c13d0a | [
"MIT"
] | null | null | null | import json
class Struct(object):
    """Recursively wrap a dict so its keys become object attributes."""

    def __init__(self, data):
        for key, val in data.items():
            setattr(self, key, self._wrap(val))

    def _wrap(self, value):
        """Convert nested dicts to Struct, recursing through containers."""
        if isinstance(value, (tuple, list, set, frozenset)):
            # Rebuild the container with each member wrapped.
            return type(value)(self._wrap(item) for item in value)
        return Struct(value) if isinstance(value, dict) else value
# Path to the device configuration; assumes a Raspberry Pi deployment
# layout -- TODO confirm before running elsewhere.
config_path = "/home/pi/qrcode_detect/cfg.json"
# Parse the JSON config at import time; object_hook turns every JSON
# object into an attribute-accessible Struct.
with open(config_path) as config:
    cfg = json.load(config, object_hook=Struct)
| 27.4 | 71 | 0.616788 | import json
class Struct(object):
def __init__(self, data):
for name, value in data.items():
setattr(self, name, self._wrap(value))
def _wrap(self, value):
if isinstance(value, (tuple, list, set, frozenset)):
return type(value)([self._wrap(v) for v in value])
else:
return Struct(value) if isinstance(value, dict) else value
config_path = "/home/pi/qrcode_detect/cfg.json"
with open(config_path) as config:
cfg = json.load(config, object_hook=Struct)
| true | true |
f71fe39bd8323e42c4bdd0221f1966de94ab7729 | 904 | py | Python | examples/sawyer/moveit_planning.py | gujralsanyam22/pyrobot | a0448714857b684d8b280f710e9304988524d2e0 | [
"MIT"
] | 2,150 | 2019-06-12T20:55:41.000Z | 2022-03-21T07:14:51.000Z | examples/sawyer/moveit_planning.py | gujralsanyam22/pyrobot | a0448714857b684d8b280f710e9304988524d2e0 | [
"MIT"
] | 124 | 2019-06-22T17:12:27.000Z | 2022-02-26T11:43:13.000Z | examples/sawyer/moveit_planning.py | gujralsanyam22/pyrobot | a0448714857b684d8b280f710e9304988524d2e0 | [
"MIT"
] | 329 | 2019-06-13T03:03:54.000Z | 2022-03-30T07:04:55.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Example for commanding robot with position control using moveit planner
"""
import time
from pyrobot import Robot
def main():
    """Drive the Sawyer arm through two joint-space targets via MoveIt."""
    waypoints = [
        [0.704, -0.455, -0.159, 1.395, -1.240, 1.069, 2.477],
        [-0.341, -0.384, -0.018, 1.533, -0.977, -1.492, -1.084],
    ]
    # Select the EST planner for MoveIt.
    planner_config = dict(moveit_planner_type="ESTkConfigDefault")
    bot = Robot(
        "sawyer",
        use_arm=True,
        use_base=False,
        use_camera=False,
        use_gripper=True,
        arm_config=planner_config,
    )
    bot.arm.go_home()
    time.sleep(1)
    for target in waypoints:
        # plan=True routes the motion through the MoveIt planner.
        bot.arm.set_joint_positions(target, plan=True)
        time.sleep(1)
    bot.arm.go_home()
if __name__ == "__main__":
main()
| 21.52381 | 71 | 0.622788 |
import time
from pyrobot import Robot
def main():
target_joints = [
[0.704, -0.455, -0.159, 1.395, -1.240, 1.069, 2.477],
[-0.341, -0.384, -0.018, 1.533, -0.977, -1.492, -1.084],
]
config = dict(moveit_planner_type="ESTkConfigDefault")
bot = Robot(
"sawyer",
use_arm=True,
use_base=False,
use_camera=False,
use_gripper=True,
arm_config=config,
)
bot.arm.go_home()
time.sleep(1)
for joint in target_joints:
bot.arm.set_joint_positions(joint, plan=True)
time.sleep(1)
bot.arm.go_home()
if __name__ == "__main__":
main()
| true | true |
f71fe3a7ec4b148032db722f5c53b2c067b9a249 | 1,863 | py | Python | random_colors.py | electric-blue-green/trinket | 82e1e265934252c0cf3b2fa72f9bc1d60a35ac93 | [
"Unlicense"
] | 1 | 2021-06-05T03:12:36.000Z | 2021-06-05T03:12:36.000Z | random_colors.py | aejb/trinket | 82e1e265934252c0cf3b2fa72f9bc1d60a35ac93 | [
"Unlicense"
] | 1 | 2018-02-26T11:22:50.000Z | 2018-02-26T11:22:50.000Z | random_colors.py | electric-blue-green/trinket | 82e1e265934252c0cf3b2fa72f9bc1d60a35ac93 | [
"Unlicense"
] | null | null | null | import board
import busio
import time
import random
dotstar = busio.SPI(board.APA102_SCK, board.APA102_MOSI)
#colors = [1, 128, 244] # set colors all to 1
colors = [random.randint(3, 240), random.randint(3, 240), random.randint(3, 240), ] # selects random start color in "safe zone"
steps = [1, 3, 4] # set wavelength
steps = [random.randint(1, 5), random.randint(1, 5), random.randint(1, 5)] # selects random step beteween 1 and 5
print("INIT") ## REPL
def getColor(index, colors, steps):
  """Advance channel `index` of `colors` one step along its triangle wave.

  Mutates `colors` and `steps` in place: the step direction flips whenever
  the channel sits at (or beyond) either rail (0 or 255), and the new value
  is clamped into the 0..255 byte range.

  Returns (new channel value, colors, steps), matching the old contract.
  """
  if colors[index] <= 0 or colors[index] >= 255: # at a rail: reverse direction
    steps[index] = -steps[index]
  colors[index] = max(0, min(255, colors[index] + steps[index]))
  return (colors[index], colors, steps)
def setPixel(red, green, blue):
  """Send one RGB frame to the single APA102 (DotStar) pixel over SPI."""
  if not dotstar.try_lock(): # bus busy: silently drop this frame
    return
  # APA102 wire format: 4-byte zero start frame, LED frame (0xff brightness,
  # then blue/green/red), followed by an all-ones end frame.
  frame = bytearray([0x00, 0x00, 0x00, 0x00, 0xff, blue, green, red, 0xff, 0xff, 0xff, 0xff])
  dotstar.write(frame)
  dotstar.unlock()
while True:
  r, colors, steps = getColor(0, colors, steps) # advance red channel
  g, colors, steps = getColor(1, colors, steps) # advance green channel
  b, colors, steps = getColor(2, colors, steps) # advance blue channel
  print("STEP = ", steps, "COLOR = ", colors) # REPL debug print
  setPixel(r, g, b) # push the new frame to the LED
  time.sleep(random.random()) # random delay in [0, 1) seconds between frames
| 56.454545 | 131 | 0.574342 | import board
import busio
import time
import random
dotstar = busio.SPI(board.APA102_SCK, board.APA102_MOSI)
dint(3, 240), random.randint(3, 240), random.randint(3, 240), ]
steps = [1, 3, 4]
steps = [random.randint(1, 5), random.randint(1, 5), random.randint(1, 5)]
print("INIT") etColor(index, colors, steps):
if colors[index] >= 255 or colors[index] <= 0:
steps[index] *= -1
colors[index] += steps[index]
if colors[index] > 255: colors[index] = 255
if colors[index] < 0: colors[index] = 0
return (colors[index], colors, steps)
def setPixel(red, green, blue):
if not dotstar.try_lock():
return
tstar.write(bytearray([0x00, 0x00, 0x00, 0x00, 0xff, blue, green, red, 0xff, 0xff, 0xff, 0xff]))
dotstar.unlock()
while True:
r, colors, steps = getColor(0, colors, steps)
g, colors, steps = getColor(1, colors, steps)
b, colors, steps = getColor(2, colors, steps)
print("STEP = ", steps, "COLOR = ", colors)
setPixel(r, g, b)
time.sleep(random.random())
| true | true |
f71fe485cdc1d845da328b9bf8355e5b4665fa3d | 393 | py | Python | project-euler/py/e6.py | aaycee/aaycee.github.io | b609a869c5c9d02f7cbc1798b643ec083475f741 | [
"MIT"
] | null | null | null | project-euler/py/e6.py | aaycee/aaycee.github.io | b609a869c5c9d02f7cbc1798b643ec083475f741 | [
"MIT"
] | null | null | null | project-euler/py/e6.py | aaycee/aaycee.github.io | b609a869c5c9d02f7cbc1798b643ec083475f741 | [
"MIT"
] | null | null | null | # Akachukwu Obi, 2018
# Project Euler #6
# see .js file for build up
def diffOfSumOfSquares(max):
    """Return (1 + 2 + ... + max)**2 - (1**2 + 2**2 + ... + max**2) as a float.

    `max` is the count of natural numbers (name kept for call compatibility,
    although it shadows the builtin). Closed forms used:
        sum        = n(n+1)/2
        sum of sq. = n(n+1)(2n+1)/6
    Both products are always divisible, so // is exact; computing in integer
    arithmetic and converting to float only at the end avoids the precision
    loss the old float intermediates suffered for large `max`.
    """
    sum_of_numbers = max * (max + 1) // 2
    sum_of_squares = max * (max + 1) * (2 * max + 1) // 6
    return float(sum_of_numbers * sum_of_numbers - sum_of_squares)
print(diffOfSumOfSquares(100))  # 25164150.0
def diffOfSumOfSquares(max):
    """Difference between the square of the sum and the sum of the squares of 1..max."""
    # Closed-form sums (float arithmetic, as before): n(n+1)/2 and n(n+1)(2n+1)/6.
    triangular = max * (max + 1) / 2
    square_sum = (max / 6.0) * (2 * max + 1) * (max + 1)
    return triangular * triangular - square_sum
print(diffOfSumOfSquares(100))
f71fe4adf7dd0cb5bea1c0b3a038207a017240b9 | 250 | py | Python | experiments_approximate/experiments/create_dico_alphacsc.py | bmalezieux/unrolled_dl | 5854a6991e44db025a99a9f0d38be6b1e669aa83 | [
"MIT"
] | null | null | null | experiments_approximate/experiments/create_dico_alphacsc.py | bmalezieux/unrolled_dl | 5854a6991e44db025a99a9f0d38be6b1e669aa83 | [
"MIT"
] | null | null | null | experiments_approximate/experiments/create_dico_alphacsc.py | bmalezieux/unrolled_dl | 5854a6991e44db025a99a9f0d38be6b1e669aa83 | [
"MIT"
] | null | null | null | import numpy as np
atoms_to_save = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 15, 18])
u_cdl = np.load("u_cdl.npy")
v_cdl = np.load("v_cdl.npy")
np.save("u_cdl_modified.npy", u_cdl[atoms_to_save])
np.save("v_cdl_modified.npy", v_cdl[atoms_to_save])
| 25 | 65 | 0.68 | import numpy as np
atoms_to_save = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 15, 18])
u_cdl = np.load("u_cdl.npy")
v_cdl = np.load("v_cdl.npy")
np.save("u_cdl_modified.npy", u_cdl[atoms_to_save])
np.save("v_cdl_modified.npy", v_cdl[atoms_to_save])
| true | true |
f71fead294ee942753789629c9a72d7384c394a0 | 2,087 | py | Python | main.py | KevHg/reddit-sentiment | 383407105957b8a582a524fa29b9f21d7b2cbd23 | [
"MIT"
] | 3 | 2020-12-22T09:03:15.000Z | 2021-05-13T18:17:44.000Z | main.py | KevHg/reddit-sentiment | 383407105957b8a582a524fa29b9f21d7b2cbd23 | [
"MIT"
] | 3 | 2020-11-11T15:33:13.000Z | 2021-12-13T20:18:41.000Z | main.py | KevHg/reddit-sentiment | 383407105957b8a582a524fa29b9f21d7b2cbd23 | [
"MIT"
] | 1 | 2021-02-18T19:56:09.000Z | 2021-02-18T19:56:09.000Z | import os
from scrapy.crawler import CrawlerProcess
import pandas as pd
import logging
import nltk
import json_reader
from sentiment_score import clean_text, calculate_sentiment_score
from reddit_scraper.reddit_scraper.spiders.reddit_post_scraper import RedditPostCrawler
if __name__ == '__main__':
    # Initial setup: stop scrapy logs from propagating, and fetch the NLTK
    # models clean_text relies on (POS tagger + WordNet).
    logging.getLogger('scrapy').propagate = False
    nltk.download('averaged_perceptron_tagger', quiet=True)
    nltk.download('wordnet', quiet=True)
    # Ask for user query
    subreddit = input('Subreddit: ')
    term = input('Search term: ')
    term = term.replace(' ', '+')  # URL-encode spaces for the search query
    # Crawl old.reddit.com search results; scraped items land in data.jl
    print('[LOG] Crawling Reddit, this will take a little time...')
    process = CrawlerProcess(settings={
        'FEED_FORMAT': 'jl',
        'FEED_URI': 'data.jl'
    })
    process.crawl(RedditPostCrawler,
                  domain=f'https://old.reddit.com/r/{subreddit}/search?q={term}&restrict_sr=on&sort=relevance&t=all')
    process.start()  # blocks until the crawl finishes
    # Convert the crawled JSON-lines file into post/comment objects
    print('[LOG] Creating DataFrame table...')
    reddit_posts = json_reader.convert_json('data.jl')
    all_comments = []
    all_upvotes = []
    for post in reddit_posts:
        for comment in post.comments:
            all_comments.append(clean_text(comment.text))
            # Convert upvote text to a number, e.g. '15.3k upvotes' -> 15300
            upvote = comment.upvotes.split(' ')[0]
            if 'k' in upvote:
                upvote = upvote[:-1]
                upvote = float(upvote) * 1000
            all_upvotes.append(float(upvote))
    df = pd.DataFrame({'comment': all_comments, 'upvotes': all_upvotes})
    df = df[df.upvotes >= 1]  # keep only comments with at least one upvote
    print('[LOG] Calculating sentiment score, this may take a longer time...')
    df = calculate_sentiment_score(df)
    # df.to_csv('results.csv')
    # Final score: unweighted mean sentiment over the kept comments
    normalized_result = df.sentiment.mean()
    print('[LOG] Completed!\n')
    print('Average sentiment:', normalized_result)
    print('where +1 is most positive and -1 is most negative')
    os.remove('data.jl')  # clean up the crawl artifact
| 33.66129 | 117 | 0.661236 | import os
from scrapy.crawler import CrawlerProcess
import pandas as pd
import logging
import nltk
import json_reader
from sentiment_score import clean_text, calculate_sentiment_score
from reddit_scraper.reddit_scraper.spiders.reddit_post_scraper import RedditPostCrawler
if __name__ == '__main__':
    # Initial setup: stop scrapy logs from propagating, and fetch the NLTK
    # models clean_text relies on (POS tagger + WordNet).
    logging.getLogger('scrapy').propagate = False
    nltk.download('averaged_perceptron_tagger', quiet=True)
    nltk.download('wordnet', quiet=True)
    # Ask for user query
    subreddit = input('Subreddit: ')
    term = input('Search term: ')
    term = term.replace(' ', '+')  # URL-encode spaces for the search query
    # Crawl old.reddit.com search results; scraped items land in data.jl
    print('[LOG] Crawling Reddit, this will take a little time...')
    process = CrawlerProcess(settings={
        'FEED_FORMAT': 'jl',
        'FEED_URI': 'data.jl'
    })
    process.crawl(RedditPostCrawler,
                  domain=f'https://old.reddit.com/r/{subreddit}/search?q={term}&restrict_sr=on&sort=relevance&t=all')
    process.start()  # blocks until the crawl finishes
    # Convert the crawled JSON-lines file into post/comment objects
    print('[LOG] Creating DataFrame table...')
    reddit_posts = json_reader.convert_json('data.jl')
    all_comments = []
    all_upvotes = []
    for post in reddit_posts:
        for comment in post.comments:
            all_comments.append(clean_text(comment.text))
            # Convert upvote text to a number, e.g. '15.3k upvotes' -> 15300
            upvote = comment.upvotes.split(' ')[0]
            if 'k' in upvote:
                upvote = upvote[:-1]
                upvote = float(upvote) * 1000
            all_upvotes.append(float(upvote))
    df = pd.DataFrame({'comment': all_comments, 'upvotes': all_upvotes})
    df = df[df.upvotes >= 1]  # keep only comments with at least one upvote
    print('[LOG] Calculating sentiment score, this may take a longer time...')
    df = calculate_sentiment_score(df)
    # Final score: unweighted mean sentiment over the kept comments
    normalized_result = df.sentiment.mean()
    print('[LOG] Completed!\n')
    print('Average sentiment:', normalized_result)
    print('where +1 is most positive and -1 is most negative')
    os.remove('data.jl')  # clean up the crawl artifact
| true | true |
f71feb945e7c962481692be8e2384e367a3c4bbd | 3,443 | py | Python | pollbot/models/reference.py | shubham-king/poll | 677e870bea36dffbf27f24e4cdeec892b40f7128 | [
"MIT"
] | 112 | 2019-06-11T17:52:57.000Z | 2022-03-18T00:05:21.000Z | pollbot/models/reference.py | shubham-king/poll | 677e870bea36dffbf27f24e4cdeec892b40f7128 | [
"MIT"
] | 91 | 2019-05-28T11:33:40.000Z | 2022-02-27T12:12:07.000Z | pollbot/models/reference.py | shubham-king/poll | 677e870bea36dffbf27f24e4cdeec892b40f7128 | [
"MIT"
] | 69 | 2019-07-10T16:58:06.000Z | 2022-03-30T22:09:44.000Z | """The sqlalchemy model for a polloption."""
from __future__ import annotations
from sqlalchemy import Column, ForeignKey, Index, func
from sqlalchemy.orm import relationship
from sqlalchemy.types import BigInteger, DateTime, Integer, String
from pollbot.db import base
from pollbot.enums import ReferenceType
class Reference(base):
    """A pointer to one Telegram message that displays a poll.

    A reference is either a message in a private chat (admin interface or
    private vote), identified by (user, message_id), or a message shared
    via inline query, identified by bot_inline_message_id.
    """

    __tablename__ = "reference"
    __mapper_args__ = {"confirm_deleted_rows": False}

    id = Column(Integer, primary_key=True)
    # One of the ReferenceType enum names ('admin', 'private_vote', 'inline').
    type = Column(String)
    bot_inline_message_id = Column(String)
    message_id = Column(BigInteger)
    # Keep those for now, in case we migrate to mtproto
    message_dc_id = Column(BigInteger)
    message_access_hash = Column(BigInteger)

    user_id = Column(
        BigInteger,
        ForeignKey("user.id", ondelete="cascade", name="user_fk"),
        nullable=True,
        index=True,
    )
    user = relationship("User", foreign_keys="Reference.user_id")

    created_at = Column(DateTime, server_default=func.now(), nullable=False)
    updated_at = Column(
        DateTime, server_default=func.now(), onupdate=func.now(), nullable=False
    )

    # ManyToOne
    poll_id = Column(
        Integer,
        ForeignKey("poll.id", ondelete="cascade", name="reference_poll"),
        nullable=False,
        index=True,
    )
    poll = relationship("Poll", back_populates="references")

    def __init__(
        self,
        poll,
        reference_type,
        user=None,
        message_id=None,
        inline_message_id=None,
    ):
        """Create a new reference to a message displaying `poll`.

        Raises:
            Exception: if the arguments match neither of the two supported
                reference kinds (see class docstring).
        """
        self.poll = poll
        self.type = reference_type
        # Kind 1: messages in a private chat (admin interface / private
        # vote), identified by the chat partner and the chat message id.
        if (
            user is not None
            and message_id is not None
            and reference_type
            in [ReferenceType.admin.name, ReferenceType.private_vote.name]
        ):
            self.user = user
            self.message_id = message_id
        # Kind 2: messages shared via inline query, identified by the id
        # Telegram assigned to the bot's inline message.
        elif (
            inline_message_id is not None
            and reference_type == ReferenceType.inline.name
        ):
            self.bot_inline_message_id = inline_message_id
        else:
            raise Exception(
                "Tried to create Reference with wrong type or missing parameters"
            )

    def __repr__(self):
        """Debug representation naming the id relevant for this type."""
        if self.type == ReferenceType.inline.name:
            # Bugfix: inline references never get a chat message_id in
            # __init__ (it stays None), so show the inline message id.
            message = f"Reference {self.id}: inline_message_id {self.bot_inline_message_id}"
        elif self.type == ReferenceType.admin.name:
            message = f"Reference {self.id}: message_id {self.message_id}, admin: {self.user.id}"
        else:
            message = f"Reference {self.id}: message_id {self.message_id}, user: {self.user.id}"
        return message
# At most one admin-interface reference per (poll, user, message) triple.
# Partial unique index: only rows with type == 'admin' participate.
Index(
    "ix_unique_admin_reference",
    Reference.poll_id,
    Reference.user_id,
    Reference.message_id,
    unique=True,
    postgresql_where=Reference.type == "admin",
)
# Same uniqueness guarantee for private-vote references.
Index(
    "ix_unique_private_vote_reference",
    Reference.poll_id,
    Reference.user_id,
    Reference.message_id,
    unique=True,
    postgresql_where=Reference.type == "private_vote",
)
# A given inline message can be attached to a poll only once.
Index(
    "ix_unique_inline_share",
    Reference.poll_id,
    Reference.bot_inline_message_id,
    unique=True,
    postgresql_where=Reference.type == "inline",
)
| 27.99187 | 97 | 0.637816 | from __future__ import annotations
from sqlalchemy import Column, ForeignKey, Index, func
from sqlalchemy.orm import relationship
from sqlalchemy.types import BigInteger, DateTime, Integer, String
from pollbot.db import base
from pollbot.enums import ReferenceType
class Reference(base):
    """A pointer to one Telegram message that displays a poll.

    Either a private-chat message (admin interface / private vote),
    identified by (user, message_id), or a message shared via inline
    query, identified by bot_inline_message_id.
    """
    __tablename__ = "reference"
    __mapper_args__ = {"confirm_deleted_rows": False}
    id = Column(Integer, primary_key=True)
    # One of the ReferenceType enum names ('admin', 'private_vote', 'inline').
    type = Column(String)
    # Telegram's id for a bot message shared via inline query.
    bot_inline_message_id = Column(String)
    # Chat message id for private-chat references.
    message_id = Column(BigInteger)
    # Extra Telegram message metadata; not read anywhere in this class.
    message_dc_id = Column(BigInteger)
    message_access_hash = Column(BigInteger)
    user_id = Column(
        BigInteger,
        ForeignKey("user.id", ondelete="cascade", name="user_fk"),
        nullable=True,
        index=True,
    )
    user = relationship("User", foreign_keys="Reference.user_id")
    created_at = Column(DateTime, server_default=func.now(), nullable=False)
    updated_at = Column(
        DateTime, server_default=func.now(), onupdate=func.now(), nullable=False
    )
    # Many references -> one poll.
    poll_id = Column(
        Integer,
        ForeignKey("poll.id", ondelete="cascade", name="reference_poll"),
        nullable=False,
        index=True,
    )
    poll = relationship("Poll", back_populates="references")
    def __init__(
        self,
        poll,
        reference_type,
        user=None,
        message_id=None,
        inline_message_id=None,
    ):
        """Create a new reference to a message displaying `poll`.

        Raises Exception when the arguments match neither supported kind.
        """
        self.poll = poll
        self.type = reference_type
        # Kind 1: private-chat message (admin / private vote), identified
        # by the chat partner and the chat message id.
        if (
            user is not None
            and message_id is not None
            and reference_type
            in [ReferenceType.admin.name, ReferenceType.private_vote.name]
        ):
            self.user = user
            self.message_id = message_id
        # Kind 2: message shared via inline query.
        elif (
            inline_message_id is not None
            and reference_type == ReferenceType.inline.name
        ):
            self.bot_inline_message_id = inline_message_id
        else:
            raise Exception(
                "Tried to create Reference with wrong type or missing parameters"
            )
    def __repr__(self):
        """Debug representation of the reference."""
        # NOTE(review): for inline references __init__ never sets
        # message_id, so this branch prints None; it probably should show
        # bot_inline_message_id instead -- confirm before changing.
        if self.type == ReferenceType.inline.name:
            message = f"Reference {self.id}: message_id {self.message_id}"
        elif self.type == ReferenceType.admin.name:
            message = f"Reference {self.id}: message_id {self.message_id}, admin: {self.user.id}"
        else:
            message = f"Reference {self.id}: message_id {self.message_id}, user: {self.user.id}"
        return message
# At most one admin-interface reference per (poll, user, message) triple.
# Partial unique index: only rows with type == 'admin' participate.
Index(
    "ix_unique_admin_reference",
    Reference.poll_id,
    Reference.user_id,
    Reference.message_id,
    unique=True,
    postgresql_where=Reference.type == "admin",
)
# Same uniqueness guarantee for private-vote references.
Index(
    "ix_unique_private_vote_reference",
    Reference.poll_id,
    Reference.user_id,
    Reference.message_id,
    unique=True,
    postgresql_where=Reference.type == "private_vote",
)
# A given inline message can be attached to a poll only once.
Index(
    "ix_unique_inline_share",
    Reference.poll_id,
    Reference.bot_inline_message_id,
    unique=True,
    postgresql_where=Reference.type == "inline",
)
| true | true |
f71fec1a43d05877719e3969203eaab05fae4883 | 10,145 | py | Python | metadl/core/scoring/scoring.py | mikehuisman/metadl | 61ece0364b08e67412ab87da4a41425b2e88a562 | [
"Apache-2.0"
] | 26 | 2020-09-23T13:04:52.000Z | 2022-03-03T03:07:49.000Z | metadl/core/scoring/scoring.py | mikehuisman/metadl | 61ece0364b08e67412ab87da4a41425b2e88a562 | [
"Apache-2.0"
] | 5 | 2020-11-04T13:26:09.000Z | 2021-09-17T07:42:01.000Z | metadl/core/scoring/scoring.py | mikehuisman/metadl | 61ece0364b08e67412ab87da4a41425b2e88a562 | [
"Apache-2.0"
] | 12 | 2020-11-03T12:01:35.000Z | 2021-12-19T03:58:50.000Z | """ Runs the scoring procedure for the challenge.
It assumes that there exists a ./model_dir folder containing both the
submission code and the saved learner.
It will create a folder named ./scoring_output (default) in which a txt file
will contain the average score over 600 episodes. You can change the folder
name via the score_dir flag.
Usage example executed from the metadl/ repository :
python -m metadl.core.scoring.scoring --meta_test_dir=<path_dataset.meta_test>
"""
import os
from sys import path
import scipy.stats
import gin
import numpy as np
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from metadl.data.dataset import DataGenerator
from metadl.core.ingestion.ingestion import get_gin_path, show_dir
# Command-line flags for the scoring entry point.
FLAGS = flags.FLAGS
flags.DEFINE_string('meta_test_dir',
                    '/Users/adrian/GitInria/meta-dataset/records/',
                    ('Directory of the meta-test dataset. This directory '
                    + 'should contain records and a json spec file.'))
flags.DEFINE_string('saved_model_dir',
                    './model_dir',
                    ('Directory path that contains the participant\'s code '
                    + 'along with the serialized learner from meta-fit.'))
flags.DEFINE_string('score_dir',
                    './scoring_output',
                    'Path to the score directory.')
flags.DEFINE_string('evaltype',
                    'test',
                    'Data type on which to perform evaluation. [train, val, test]')
# Fix TF's global RNG so episode sampling is reproducible across runs.
tf.random.set_seed(1234)
def NwayKshot_accuracy(predictions, ground_truth, metric):
    """Score one episode's predictions with the given keras metric.

    `predictions` and `ground_truth` are expanded to column shape before
    being fed to `metric` (SparseCategoricalAccuracy in this challenge).
    The metric state is reset afterwards so each episode is scored
    independently.

    Returns:
        The metric result for this episode (scalar).
    """
    expanded_truth = tf.expand_dims(ground_truth, axis=1)
    expanded_preds = tf.expand_dims(predictions, axis=1)
    logging.debug('Predictions shape : {} - Ground truth shape : {}'.format(
        expanded_preds.shape, expanded_truth.shape))
    metric.update_state(expanded_truth, expanded_preds)
    episode_score = metric.result()
    logging.debug('An episode score: {}'.format(episode_score))
    metric.reset_states()  # independent scoring for the next episode
    return episode_score
def is_one_hot_vector(x, axis=None, keepdims=False):
    """Return whether `x` is one-hot (a single entry is 1, the rest are 0).

    A vector is one-hot exactly when its L1 norm and its max-norm are both
    1. With `axis`/`keepdims` the check is applied along that axis and a
    boolean array is returned instead of a scalar.
    """
    l1 = np.linalg.norm(x, ord=1, axis=axis, keepdims=keepdims)
    linf = np.linalg.norm(x, ord=np.inf, axis=axis, keepdims=keepdims)
    return (l1 == 1) & (linf == 1)
def write_score(score, conf_int, file_score, duration=-1):
    """Write the final score, confidence interval and duration to `file_score`.

    `duration` defaults to -1 (unknown), matching extract_elapsed_time's
    failure value.
    """
    lines = [
        'set1_score: {:.6f}\n'.format(float(score)),
        'conf_int: {:.3f}\n'.format(float(conf_int)),
        'Duration: {:.6f}\n'.format(float(duration)),
    ]
    for line in lines:
        file_score.write(line)
def extract_elapsed_time(saved_model_dir):
    """Return the meta-training time recorded by the ingestion program.

    Scans `<saved_model_dir>/metadata` for a token containing 'elapsed' and
    returns the following token parsed as float. Returns -1 when the
    metadata file is absent or no elapsed entry is found.

    Raises:
        ValueError: if `saved_model_dir` is not an existing directory.
    """
    if not os.path.isdir(saved_model_dir):
        raise ValueError('Saved model directory does not exists.')
    metadata_path = os.path.join(saved_model_dir, 'metadata')
    if os.path.isfile(metadata_path):
        with open(metadata_path, 'r') as f:
            for line in f:
                tokens = line.split(' ')
                for k, token in enumerate(tokens):
                    # Guard k + 1: previously an 'elapsed' token at the end
                    # of a line raised IndexError.
                    if 'elapsed' in token and k + 1 < len(tokens):
                        return float(tokens[k + 1])
    return -1
def process_task(task):
    """Split one generated episode into participant-ready support/query sets.

    From the usages below, `task[0]` packs: support images at [0], support
    labels at [1], query images at [3], query labels at [4]. The query
    examples are shuffled along axis 1 so labels are not in class order,
    and the query labels are returned separately (the predictor only sees
    query images).

    Returns:
        support_set : tf.data.Dataset of (image, label) pairs, batched by 5.
        query_set : tf.data.Dataset of (image,) tuples, batched by 95.
        que_labs : the ground-truth query labels, for scoring.
    """
    sup_set = tf.data.Dataset.from_tensor_slices(\
            (task[0][1], task[0][0]))
    dim = task[0][4].shape[1]
    arr = np.arange(dim)
    np.random.shuffle(arr) # in-place shuffle of the query-example order
    query_labs = task[0][4]
    query_imgs = task[0][3]
    query_labs_s = tf.gather(query_labs, arr, axis=1)
    query_imgs_s = tf.gather(query_imgs, arr, axis=1)
    que_set = tf.data.Dataset.from_tensor_slices(
        (query_labs_s, query_imgs_s)
    )
    new_ds = tf.data.Dataset.zip((sup_set, que_set))
    # NOTE(review): this appears to rely on new_ds yielding exactly one
    # element per episode; the names bound in the loop's last iteration are
    # returned below -- confirm against the generator's output shape.
    for ((supp_labs, supp_img), (que_labs, que_img)) \
        in new_ds :
        logging.debug('Supp labs : {}'.format(supp_labs))
        logging.debug('Query labs : {}'.format(que_labs))
        support_set = tf.data.Dataset.from_tensor_slices(\
            (supp_img, supp_labs))
        query_set = tf.data.Dataset.from_tensor_slices(\
            (que_img,))
        support_set = support_set.batch(5)
        query_set = query_set.batch(95)
    return support_set, query_set, que_labs
def scoring(argv):
    """
    For each task, load and fit the Learner with the support set and evaluate
    the submission performance with the query set.
    A directory 'scoring_output' is created and contains a txt file that
    contains the submission score and duration. Note that the former is the
    time elapsed during the ingestion program and hence the meta-fit()
    duration.
    The metric considered here is the Sparse Categorical Accuracy for a
    5 classes image classification problem.
    """
    del argv
    saved_model_dir = FLAGS.saved_model_dir
    meta_test_dir = FLAGS.meta_test_dir
    eval_type = FLAGS.evaltype
    # Making eval type compatible with DataGenerator specs ('val' episodes
    # are drawn from the meta-train records pool).
    if eval_type == 'train' or eval_type == 'val':
        data_generator_eval_type = 'train'
    elif eval_type == 'test':
        data_generator_eval_type = 'test'
    # Use CodaLab's path `run/input/ref` in parallel with `run/input/res`
    if not os.path.isdir(meta_test_dir):
        meta_test_dir = os.path.join(saved_model_dir, os.pardir, 'ref')
    # Guard: train/val evaluation must never read meta-test records.
    if (eval_type == 'train' or eval_type == 'val') and 'meta_test' in meta_test_dir:
        raise ValueError('Cannot perform train/val evaluation on meta-test data!')
    code_dir = os.path.join(saved_model_dir, 'code_dir')
    score_dir = FLAGS.score_dir
    # Make the participant's submission (model.py) importable.
    path.append(code_dir)
    from model import MyLearner
    if(os.path.exists(os.path.join(code_dir, 'model.gin'))):
        gin.parse_config_file(os.path.join(code_dir, 'model.gin'))
    logging.info('Ingestion done! Starting scoring process ... ')
    logging.info('Creating the meta-test episode generator ... \n ')
    # Episode config: [28, 5, 1, 19] (see DataGenerator for field meaning;
    # 5-way episodes with support batches of 5 and query batches of 95
    # downstream in process_task).
    generator = DataGenerator(path_to_records=meta_test_dir,
                              batch_config=None,
                              episode_config=[28, 5, 1, 19],
                              pool= data_generator_eval_type,
                              mode='episode')
    if eval_type == 'test':
        meta_test_dataset = generator.meta_test_pipeline
    elif eval_type == 'train':
        meta_test_dataset = generator.meta_train_pipeline
    elif eval_type == 'val':
        meta_test_dataset = generator.meta_valid_pipeline
    else:
        raise ValueError('Wrong eval_type : {}'.format(eval_type))
    logging.info('Evaluating performance on episodes ... ')
    meta_test_dataset = meta_test_dataset.batch(1)
    meta_test_dataset = meta_test_dataset.prefetch(5)
    learner = MyLearner()
    if (not os.path.isdir(score_dir)):
        os.mkdir(score_dir)
    score_file = os.path.join(score_dir, 'scores.txt')
    results = []
    metric = tf.metrics.SparseCategoricalAccuracy()
    nbr_episodes = 600
    for k , task in enumerate(meta_test_dataset) :
        support_set, query_set, ground_truth = process_task(task)
        # Reload the serialized learner so every episode starts from the
        # same meta-trained state.
        learner.load(saved_model_dir)
        predictor = learner.fit(support_set)
        predictions = predictor.predict(query_set)
        score = NwayKshot_accuracy(predictions, ground_truth, metric)
        results.append(score)
        logging.debug('Score on {} : {}'.format(k, score))
        logging.debug('Results : {}'.format(results[:20]))
        if(k > nbr_episodes):
            break
    def mean_confidence_interval(data, confidence=0.95):
        # Student-t confidence interval half-width around the mean score.
        a = 1.0 * np.array(data)
        n = len(a)
        m, se = np.mean(a), scipy.stats.sem(a)
        h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
        return m, h
    m, conf_int = mean_confidence_interval(results)
    with open(score_file, 'w') as f :
        write_score(m,
                    conf_int,
                    f,
                    extract_elapsed_time(saved_model_dir))
    logging.info(('Scoring done! The average score over {} '
                  + 'episodes is : {:.3%}').format(nbr_episodes,
                                                   sum(results)/len(results))
    )
if __name__ == '__main__':
    np.random.seed(seed=1234)  # reproducible NumPy shuffles (query order)
    tf.get_logger().setLevel('ERROR')  # silence TF info/warning logs
    app.run(scoring)
| 38.869732 | 99 | 0.648398 | import os
from sys import path
import scipy.stats
import gin
import numpy as np
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from metadl.data.dataset import DataGenerator
from metadl.core.ingestion.ingestion import get_gin_path, show_dir
# Command-line flags for the scoring entry point.
FLAGS = flags.FLAGS
flags.DEFINE_string('meta_test_dir',
                    '/Users/adrian/GitInria/meta-dataset/records/',
                    ('Directory of the meta-test dataset. This directory '
                    + 'should contain records and a json spec file.'))
flags.DEFINE_string('saved_model_dir',
                    './model_dir',
                    ('Directory path that contains the participant\'s code '
                    + 'along with the serialized learner from meta-fit.'))
flags.DEFINE_string('score_dir',
                    './scoring_output',
                    'Path to the score directory.')
flags.DEFINE_string('evaltype',
                    'test',
                    'Data type on which to perform evaluation. [train, val, test]')
# Fix TF's global RNG so episode sampling is reproducible across runs.
tf.random.set_seed(1234)
def NwayKshot_accuracy(predictions, ground_truth, metric):
    """Score one episode's predictions with the given keras metric.

    `predictions` and `ground_truth` are expanded to column shape before
    being fed to `metric`. The metric state is reset afterwards so each
    episode is scored independently.

    Returns:
        The metric result for this episode (scalar).
    """
    expanded_truth = tf.expand_dims(ground_truth, axis=1)
    expanded_preds = tf.expand_dims(predictions, axis=1)
    logging.debug('Predictions shape : {} - Ground truth shape : {}'.format(
        expanded_preds.shape, expanded_truth.shape))
    metric.update_state(expanded_truth, expanded_preds)
    episode_score = metric.result()
    logging.debug('An episode score: {}'.format(episode_score))
    metric.reset_states()  # independent scoring for the next episode
    return episode_score
def is_one_hot_vector(x, axis=None, keepdims=False):
    """Return whether `x` is one-hot (a single entry is 1, the rest are 0).

    A vector is one-hot exactly when its L1 norm and its max-norm are both
    1. With `axis`/`keepdims` the check is applied along that axis and a
    boolean array is returned instead of a scalar.
    """
    l1 = np.linalg.norm(x, ord=1, axis=axis, keepdims=keepdims)
    linf = np.linalg.norm(x, ord=np.inf, axis=axis, keepdims=keepdims)
    return (l1 == 1) & (linf == 1)
def write_score(score, conf_int, file_score, duration=-1):
    """Write the final score, confidence interval and duration to `file_score`.

    `duration` defaults to -1 (unknown), matching extract_elapsed_time's
    failure value.
    """
    lines = [
        'set1_score: {:.6f}\n'.format(float(score)),
        'conf_int: {:.3f}\n'.format(float(conf_int)),
        'Duration: {:.6f}\n'.format(float(duration)),
    ]
    for line in lines:
        file_score.write(line)
def extract_elapsed_time(saved_model_dir):
    """Return the meta-training time recorded by the ingestion program.

    Scans `<saved_model_dir>/metadata` for a token containing 'elapsed' and
    returns the following token parsed as float. Returns -1 when the
    metadata file is absent or no elapsed entry is found.

    Raises:
        ValueError: if `saved_model_dir` is not an existing directory.
    """
    if not os.path.isdir(saved_model_dir):
        raise ValueError('Saved model directory does not exists.')
    metadata_path = os.path.join(saved_model_dir, 'metadata')
    if os.path.isfile(metadata_path):
        with open(metadata_path, 'r') as f:
            for line in f:
                tokens = line.split(' ')
                for k, token in enumerate(tokens):
                    # Guard k + 1: previously an 'elapsed' token at the end
                    # of a line raised IndexError.
                    if 'elapsed' in token and k + 1 < len(tokens):
                        return float(tokens[k + 1])
    return -1
def process_task(task):
    """Split one generated episode into participant-ready support/query sets.

    From the usages below, `task[0]` packs: support images at [0], support
    labels at [1], query images at [3], query labels at [4]. The query
    examples are shuffled along axis 1 so labels are not in class order,
    and the query labels are returned separately (the predictor only sees
    query images).

    Returns:
        support_set : tf.data.Dataset of (image, label) pairs, batched by 5.
        query_set : tf.data.Dataset of (image,) tuples, batched by 95.
        que_labs : the ground-truth query labels, for scoring.
    """
    sup_set = tf.data.Dataset.from_tensor_slices(\
            (task[0][1], task[0][0]))
    dim = task[0][4].shape[1]
    arr = np.arange(dim)
    np.random.shuffle(arr) # in-place shuffle of the query-example order
    query_labs = task[0][4]
    query_imgs = task[0][3]
    query_labs_s = tf.gather(query_labs, arr, axis=1)
    query_imgs_s = tf.gather(query_imgs, arr, axis=1)
    que_set = tf.data.Dataset.from_tensor_slices(
        (query_labs_s, query_imgs_s)
    )
    new_ds = tf.data.Dataset.zip((sup_set, que_set))
    # NOTE(review): this appears to rely on new_ds yielding exactly one
    # element per episode; the names bound in the loop's last iteration are
    # returned below -- confirm against the generator's output shape.
    for ((supp_labs, supp_img), (que_labs, que_img)) \
        in new_ds :
        logging.debug('Supp labs : {}'.format(supp_labs))
        logging.debug('Query labs : {}'.format(que_labs))
        support_set = tf.data.Dataset.from_tensor_slices(\
            (supp_img, supp_labs))
        query_set = tf.data.Dataset.from_tensor_slices(\
            (que_img,))
        support_set = support_set.batch(5)
        query_set = query_set.batch(95)
    return support_set, query_set, que_labs
def scoring(argv):
    """Score a submission over up to 600 meta-test (or train/val) episodes.

    Loads the serialized learner from FLAGS.saved_model_dir, fits it on
    each episode's support set, evaluates SparseCategoricalAccuracy on the
    query set, and writes the mean score, confidence interval and the
    ingestion duration into `<score_dir>/scores.txt`.
    """
    del argv
    saved_model_dir = FLAGS.saved_model_dir
    meta_test_dir = FLAGS.meta_test_dir
    eval_type = FLAGS.evaltype
    # Making eval type compatible with DataGenerator specs ('val' episodes
    # are drawn from the meta-train records pool).
    if eval_type == 'train' or eval_type == 'val':
        data_generator_eval_type = 'train'
    elif eval_type == 'test':
        data_generator_eval_type = 'test'
    # Use CodaLab's path `run/input/ref` in parallel with `run/input/res`
    if not os.path.isdir(meta_test_dir):
        meta_test_dir = os.path.join(saved_model_dir, os.pardir, 'ref')
    # Guard: train/val evaluation must never read meta-test records.
    if (eval_type == 'train' or eval_type == 'val') and 'meta_test' in meta_test_dir:
        raise ValueError('Cannot perform train/val evaluation on meta-test data!')
    code_dir = os.path.join(saved_model_dir, 'code_dir')
    score_dir = FLAGS.score_dir
    # Make the participant's submission (model.py) importable.
    path.append(code_dir)
    from model import MyLearner
    if(os.path.exists(os.path.join(code_dir, 'model.gin'))):
        gin.parse_config_file(os.path.join(code_dir, 'model.gin'))
    logging.info('Ingestion done! Starting scoring process ... ')
    logging.info('Creating the meta-test episode generator ... \n ')
    generator = DataGenerator(path_to_records=meta_test_dir,
                              batch_config=None,
                              episode_config=[28, 5, 1, 19],
                              pool= data_generator_eval_type,
                              mode='episode')
    if eval_type == 'test':
        meta_test_dataset = generator.meta_test_pipeline
    elif eval_type == 'train':
        meta_test_dataset = generator.meta_train_pipeline
    elif eval_type == 'val':
        meta_test_dataset = generator.meta_valid_pipeline
    else:
        raise ValueError('Wrong eval_type : {}'.format(eval_type))
    logging.info('Evaluating performance on episodes ... ')
    meta_test_dataset = meta_test_dataset.batch(1)
    meta_test_dataset = meta_test_dataset.prefetch(5)
    learner = MyLearner()
    if (not os.path.isdir(score_dir)):
        os.mkdir(score_dir)
    score_file = os.path.join(score_dir, 'scores.txt')
    results = []
    metric = tf.metrics.SparseCategoricalAccuracy()
    nbr_episodes = 600
    for k , task in enumerate(meta_test_dataset) :
        support_set, query_set, ground_truth = process_task(task)
        # Reload the serialized learner so every episode starts from the
        # same meta-trained state.
        learner.load(saved_model_dir)
        predictor = learner.fit(support_set)
        predictions = predictor.predict(query_set)
        score = NwayKshot_accuracy(predictions, ground_truth, metric)
        results.append(score)
        logging.debug('Score on {} : {}'.format(k, score))
        logging.debug('Results : {}'.format(results[:20]))
        if(k > nbr_episodes):
            break
    def mean_confidence_interval(data, confidence=0.95):
        # Student-t confidence interval half-width around the mean score.
        a = 1.0 * np.array(data)
        n = len(a)
        m, se = np.mean(a), scipy.stats.sem(a)
        h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
        return m, h
    m, conf_int = mean_confidence_interval(results)
    with open(score_file, 'w') as f :
        write_score(m,
                    conf_int,
                    f,
                    extract_elapsed_time(saved_model_dir))
    logging.info(('Scoring done! The average score over {} '
                  + 'episodes is : {:.3%}').format(nbr_episodes,
                                                   sum(results)/len(results))
    )
if __name__ == '__main__':
    np.random.seed(seed=1234)  # reproducible NumPy shuffles (query order)
    tf.get_logger().setLevel('ERROR')  # silence TF info/warning logs
    app.run(scoring)
| true | true |
f71fed6c463f4fb9305f4215a3d3f237674e9c98 | 6,399 | py | Python | Graph-based/processor/recognition.py | EnTimeMent/Group-Behavior-Recognition | d6606e9e7bef836a9ccc5b4ada66933a4770171c | [
"MIT"
] | 3 | 2020-12-29T04:07:58.000Z | 2022-01-11T14:47:16.000Z | Graph-based/processor/recognition.py | EnTimeMent/Group-Behavior-Recognition | d6606e9e7bef836a9ccc5b4ada66933a4770171c | [
"MIT"
] | 1 | 2021-01-02T10:28:07.000Z | 2021-01-04T18:01:42.000Z | Graph-based/processor/recognition.py | EnTimeMent/Group-Behavior-Recognition | d6606e9e7bef836a9ccc5b4ada66933a4770171c | [
"MIT"
] | 1 | 2022-01-09T12:55:41.000Z | 2022-01-09T12:55:41.000Z | #!/usr/bin/env python
# pylint: disable=W0201
import sys
import argparse
import yaml
import numpy as np
# torch
import torch
import torch.nn as nn
import torch.optim as optim
# torchlight
import torchlight
from torchlight import str2bool
from torchlight import DictAction
from torchlight import import_class
from .processor import Processor
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
def weights_init(m):
    """Initialize conv layers with N(0, 0.02) weights (bias 0, when present)
    and BatchNorm layers with N(1, 0.02) weights and zero bias.

    Intended for use with Module.apply; layers whose class name matches
    none of the patterns are left untouched.
    """
    layer_name = type(m).__name__
    # Conv1d and Conv2d get the same treatment, so the branches are merged.
    if 'Conv1d' in layer_name or 'Conv2d' in layer_name:
        m.weight.data.normal_(0.0, 0.02)
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif 'BatchNorm' in layer_name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
class REC_Processor(Processor):
"""
Processor for Skeleton-based Action Recgnition
"""
    def load_model(self):
        """Load the model class named by self.arg.model (with its kwargs),
        apply the custom weight initialization, and set up the
        cross-entropy training loss."""
        self.model = self.io.load_model(self.arg.model,
                                        **(self.arg.model_args))
        self.model.apply(weights_init)
        self.loss = nn.CrossEntropyLoss()
        # self.loss = nn.BCEWithLogitsLoss()
def load_optimizer(self):
if self.arg.optimizer == 'SGD':
self.optimizer = optim.SGD(
self.model.parameters(),
lr=self.arg.base_lr,
momentum=0.9,
nesterov=self.arg.nesterov,
weight_decay=self.arg.weight_decay)
elif self.arg.optimizer == 'Adam':
self.optimizer = optim.Adam(
self.model.parameters(),
lr=self.arg.base_lr,
weight_decay=self.arg.weight_decay)
else:
raise ValueError()
def adjust_lr(self):
if self.arg.optimizer == 'SGD' and self.arg.step:
lr = self.arg.base_lr * (
0.1**np.sum(self.meta_info['epoch'] >= np.array(self.arg.step)))
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
self.lr = lr
else:
self.lr = self.arg.base_lr
def show_topk(self, k):
rank = self.result.argsort()
hit_top_k = [l in rank[i, -k:] for i, l in enumerate(self.label)]
accuracy = sum(hit_top_k) * 1.0 / len(hit_top_k)
self.io.print_log('\tTop{}: {:.2f}%'.format(k, 100 * accuracy))
def train(self):
self.model.train()
self.adjust_lr()
loader = self.data_loader['train']
loss_value = []
result_frag = []
label_frag = []
# print("train")
for data, label in loader:
# get data
data = data.float().to(self.dev)
label = label.long().to(self.dev)
# forward
output = self.model(data)
result_frag.extend(
output.data.cpu().numpy().argmax(axis=1))
label_frag.extend(label.data.cpu().numpy())
# print(output)
loss = self.loss(output, label)
# print(label)
# backward
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# statistics
self.iter_info['loss'] = loss.data.item()
self.iter_info['lr'] = '{:.6f}'.format(self.lr)
loss_value.append(self.iter_info['loss'])
self.show_iter_info()
self.meta_info['iter'] += 1
ac = accuracy_score(label_frag, result_frag)
# print(result_frag)
# print(label_frag)
print("train acc: {}".format(ac))
self.epoch_info['mean_loss'] = np.mean(loss_value)
self.show_epoch_info()
# self.io.print_timer()
def test(self, evaluation=True):
self.model.eval()
loader = self.data_loader['test']
loss_value = []
result_frag = []
label_frag = []
for data, label in loader:
# get data
data = data.float().to(self.dev)
label = label.long().to(self.dev)
# inference
with torch.no_grad():
output = self.model(data)
result_frag.append(output.data.cpu().numpy())
# get loss
if evaluation:
loss = self.loss(output, label)
loss_value.append(loss.item())
label_frag.append(label.data.cpu().numpy())
self.result = np.concatenate(result_frag)
# print(self.result)
if evaluation:
self.label = np.concatenate(label_frag)
self.epoch_info['mean_loss'] = np.mean(loss_value)
self.show_epoch_info()
# show top-k accuracy
for k in self.arg.show_topk:
self.show_topk(k)
top = self.result.argmax(axis=1)
print(top)
print(self.label)
cm = confusion_matrix(self.label, top)
print(cm)
@staticmethod
def get_parser(add_help=False):
# parameter priority: command line > config > default
parent_parser = Processor.get_parser(add_help=False)
parser = argparse.ArgumentParser(
add_help=add_help,
parents=[parent_parser],
description='Spatial Temporal Graph Convolution Network')
# region arguments yapf: disable
# evaluation
parser.add_argument('--show_topk', type=int,
default=[1], nargs='+', help='which Top K accuracy will be shown')
# optim
parser.add_argument('--base_lr', type=float,
default=0.01, help='initial learning rate')
parser.add_argument('--step', type=int, default=[], nargs='+',
help='the epoch where optimizer reduce the learning rate')
parser.add_argument('--optimizer', default='SGD',
help='type of optimizer')
parser.add_argument('--nesterov', type=str2bool,
default=True, help='use nesterov or not')
parser.add_argument('--weight_decay', type=float,
default=0.0001, help='weight decay for optimizer')
# endregion yapf: enable
return parser
| 32.482234 | 94 | 0.556493 |
import sys
import argparse
import yaml
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchlight
from torchlight import str2bool
from torchlight import DictAction
from torchlight import import_class
from .processor import Processor
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
def weights_init(m):
    """Initialize conv / batch-norm layers in place (DCGAN scheme)."""
    cls = m.__class__.__name__
    is_conv = (cls.find('Conv1d') != -1) or (cls.find('Conv2d') != -1)
    if is_conv:
        # conv weights ~ N(0, 0.02); zero the bias when one exists
        m.weight.data.normal_(0.0, 0.02)
        if m.bias is not None:
            m.bias.data.fill_(0)
        return
    if cls.find('BatchNorm') != -1:
        # batch-norm scale ~ N(1, 0.02); bias always present, zeroed
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
class REC_Processor(Processor):
    """
    Processor for skeleton-based action recognition.

    Wires up model, loss, optimizer, LR schedule, and the train/test
    loops.  Attributes such as ``self.io``, ``self.arg``,
    ``self.data_loader`` and ``self.dev`` are presumably provided by the
    ``Processor`` base class — confirm there.
    """
    def load_model(self):
        """Build the model from --model/--model_args and set the loss."""
        self.model = self.io.load_model(self.arg.model,
                                        **(self.arg.model_args))
        self.model.apply(weights_init)
        self.loss = nn.CrossEntropyLoss()
    def load_optimizer(self):
        """Create the SGD or Adam optimizer selected by --optimizer."""
        if self.arg.optimizer == 'SGD':
            self.optimizer = optim.SGD(
                self.model.parameters(),
                lr=self.arg.base_lr,
                momentum=0.9,
                nesterov=self.arg.nesterov,
                weight_decay=self.arg.weight_decay)
        elif self.arg.optimizer == 'Adam':
            self.optimizer = optim.Adam(
                self.model.parameters(),
                lr=self.arg.base_lr,
                weight_decay=self.arg.weight_decay)
        else:
            raise ValueError()
    def adjust_lr(self):
        """Apply step decay (x0.1 per passed --step milestone, SGD only)."""
        if self.arg.optimizer == 'SGD' and self.arg.step:
            lr = self.arg.base_lr * (
                0.1**np.sum(self.meta_info['epoch'] >= np.array(self.arg.step)))
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = lr
            self.lr = lr
        else:
            self.lr = self.arg.base_lr
    def show_topk(self, k):
        """Log top-k accuracy of self.result against self.label."""
        rank = self.result.argsort()
        hit_top_k = [l in rank[i, -k:] for i, l in enumerate(self.label)]
        accuracy = sum(hit_top_k) * 1.0 / len(hit_top_k)
        self.io.print_log('\tTop{}: {:.2f}%'.format(k, 100 * accuracy))
    def train(self):
        """Run one training epoch over the 'train' data loader."""
        self.model.train()
        self.adjust_lr()
        loader = self.data_loader['train']
        loss_value = []
        result_frag = []
        label_frag = []
        for data, label in loader:
            # move batch to device, forward, backward, update
            data = data.float().to(self.dev)
            label = label.long().to(self.dev)
            output = self.model(data)
            result_frag.extend(
                output.data.cpu().numpy().argmax(axis=1))
            label_frag.extend(label.data.cpu().numpy())
            loss = self.loss(output, label)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # per-iteration bookkeeping and logging
            self.iter_info['loss'] = loss.data.item()
            self.iter_info['lr'] = '{:.6f}'.format(self.lr)
            loss_value.append(self.iter_info['loss'])
            self.show_iter_info()
            self.meta_info['iter'] += 1
        ac = accuracy_score(label_frag, result_frag)
        print("train acc: {}".format(ac))
        self.epoch_info['mean_loss'] = np.mean(loss_value)
        self.show_epoch_info()
    def test(self, evaluation=True):
        """
        Run inference on the 'test' loader; with ``evaluation`` also
        report mean loss, top-k accuracies and a confusion matrix.
        """
        self.model.eval()
        loader = self.data_loader['test']
        loss_value = []
        result_frag = []
        label_frag = []
        for data, label in loader:
            data = data.float().to(self.dev)
            label = label.long().to(self.dev)
            # no gradients during evaluation
            with torch.no_grad():
                output = self.model(data)
            result_frag.append(output.data.cpu().numpy())
            if evaluation:
                loss = self.loss(output, label)
                loss_value.append(loss.item())
                label_frag.append(label.data.cpu().numpy())
        self.result = np.concatenate(result_frag)
        if evaluation:
            self.label = np.concatenate(label_frag)
            self.epoch_info['mean_loss'] = np.mean(loss_value)
            self.show_epoch_info()
            for k in self.arg.show_topk:
                self.show_topk(k)
            top = self.result.argmax(axis=1)
            print(top)
            print(self.label)
            cm = confusion_matrix(self.label, top)
            print(cm)
    @staticmethod
    def get_parser(add_help=False):
        """Extend the base Processor CLI with recognition options."""
        # parameter priority: command line > config > default
        parent_parser = Processor.get_parser(add_help=False)
        parser = argparse.ArgumentParser(
            add_help=add_help,
            parents=[parent_parser],
            description='Spatial Temporal Graph Convolution Network')
        parser.add_argument('--show_topk', type=int,
                            default=[1], nargs='+', help='which Top K accuracy will be shown')
        parser.add_argument('--base_lr', type=float,
                            default=0.01, help='initial learning rate')
        parser.add_argument('--step', type=int, default=[], nargs='+',
                            help='the epoch where optimizer reduce the learning rate')
        parser.add_argument('--optimizer', default='SGD',
                            help='type of optimizer')
        parser.add_argument('--nesterov', type=str2bool,
                            default=True, help='use nesterov or not')
        parser.add_argument('--weight_decay', type=float,
                            default=0.0001, help='weight decay for optimizer')
        return parser
| true | true |
f71fedf23526603a8b5b482439d51773bdec5bd3 | 3,347 | py | Python | medseer/migrations/0001_initial.py | noureldin-eg/medseer | 8a68cd92a757ab3141081547d322c0c6b2056d66 | [
"Apache-2.0"
] | null | null | null | medseer/migrations/0001_initial.py | noureldin-eg/medseer | 8a68cd92a757ab3141081547d322c0c6b2056d66 | [
"Apache-2.0"
] | null | null | null | medseer/migrations/0001_initial.py | noureldin-eg/medseer | 8a68cd92a757ab3141081547d322c0c6b2056d66 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.10 on 2022-01-25 22:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Author, Journal, Organization and Paper."""
    # First migration of the app — no dependencies on other migrations.
    initial = True
    dependencies = [
    ]
    operations = [
        # Author: person with optional unique email; (forename, surname)
        # is made unique by the AddConstraint at the end, and the FK to
        # Organization is added separately below.
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('forename', models.CharField(max_length=100)),
                ('surname', models.CharField(max_length=100)),
                ('email', models.EmailField(blank=True, max_length=254, null=True, unique=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        # Journal: unique name plus a small-integer rank.
        migrations.CreateModel(
            name='Journal',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, unique=True)),
                ('rank', models.PositiveSmallIntegerField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        # Organization: same shape as Journal (unique name + rank).
        migrations.CreateModel(
            name='Organization',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, unique=True)),
                ('rank', models.PositiveSmallIntegerField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        # Paper: uploaded PDF/TEI files, bibliographic fields (all unique
        # and optional), M2M to Author and a protected FK to Journal.
        migrations.CreateModel(
            name='Paper',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pdf', models.FileField(null=True, unique=True, upload_to='pdfs/%Y/%m/%d/')),
                ('tei', models.FileField(null=True, unique=True, upload_to='xmls/%Y/%m/%d/')),
                ('title', models.TextField(blank=True, null=True, unique=True)),
                ('abstract', models.TextField(blank=True, null=True, unique=True)),
                ('doi', models.CharField(blank=True, max_length=100, null=True, unique=True)),
                ('url', models.URLField(blank=True, null=True, unique=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('authors', models.ManyToManyField(to='medseer.Author')),
                ('journal', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='medseer.journal')),
            ],
        ),
        # Added after Organization exists so the FK target is defined.
        migrations.AddField(
            model_name='author',
            name='organization',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='medseer.organization'),
        ),
        migrations.AddConstraint(
            model_name='author',
            constraint=models.UniqueConstraint(fields=('forename', 'surname'), name='unique_author_name'),
        ),
    ]
| 46.486111 | 125 | 0.582611 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the initial Author / Journal / Organization / Paper tables."""
    # This is the app's first migration.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('forename', models.CharField(max_length=100)),
                ('surname', models.CharField(max_length=100)),
                ('email', models.EmailField(blank=True, max_length=254, null=True, unique=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Journal',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, unique=True)),
                ('rank', models.PositiveSmallIntegerField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Organization',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, unique=True)),
                ('rank', models.PositiveSmallIntegerField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Paper',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pdf', models.FileField(null=True, unique=True, upload_to='pdfs/%Y/%m/%d/')),
                ('tei', models.FileField(null=True, unique=True, upload_to='xmls/%Y/%m/%d/')),
                ('title', models.TextField(blank=True, null=True, unique=True)),
                ('abstract', models.TextField(blank=True, null=True, unique=True)),
                ('doi', models.CharField(blank=True, max_length=100, null=True, unique=True)),
                ('url', models.URLField(blank=True, null=True, unique=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('authors', models.ManyToManyField(to='medseer.Author')),
                ('journal', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='medseer.journal')),
            ],
        ),
        # FK added after Organization is created above.
        migrations.AddField(
            model_name='author',
            name='organization',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='medseer.organization'),
        ),
        # Authors are unique on the (forename, surname) pair.
        migrations.AddConstraint(
            model_name='author',
            constraint=models.UniqueConstraint(fields=('forename', 'surname'), name='unique_author_name'),
        ),
    ]
| true | true |
f71fee147634858badedf4ea69e3f4bc26bb7e78 | 2,052 | py | Python | tests/parsers/plist_plugins/ipod.py | jeppetrost/plaso | b48008c6ea79950eeeef3a05b3a859086c8704b6 | [
"Apache-2.0"
] | null | null | null | tests/parsers/plist_plugins/ipod.py | jeppetrost/plaso | b48008c6ea79950eeeef3a05b3a859086c8704b6 | [
"Apache-2.0"
] | null | null | null | tests/parsers/plist_plugins/ipod.py | jeppetrost/plaso | b48008c6ea79950eeeef3a05b3a859086c8704b6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the iPod plist plugin."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import ipod as _ # pylint: disable=unused-import
from plaso.lib import definitions
from plaso.parsers.plist_plugins import ipod
from tests import test_lib as shared_test_lib
from tests.parsers.plist_plugins import test_lib
class TestIPodPlugin(test_lib.PlistPluginTestCase):
  """Tests for the iPod plist plugin."""
  @shared_test_lib.skipUnlessHasTestFile(['com.apple.iPod.plist'])
  def testProcess(self):
    """Tests the Process function against com.apple.iPod.plist."""
    plist_name = 'com.apple.iPod.plist'
    plugin = ipod.IPodPlugin()
    storage_writer = self._ParsePlistFileWithPlugin(
        plugin, [plist_name], plist_name)
    # The test plist is expected to parse cleanly into exactly 4 events.
    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 4)
    # The order in which PlistParser generates events is nondeterministic
    # hence we sort the events before indexing into them.
    events = list(storage_writer.GetSortedEvents())
    event = events[0]
    self.CheckTimestamp(event.timestamp, '1995-11-22 18:25:07.000000')
    self.assertEqual(event.device_id, '0000A11300000000')
    event = events[2]
    self.CheckTimestamp(event.timestamp, '2013-10-09 19:27:54.000000')
    expected_message = (
        'Device ID: 4C6F6F6E65000000 '
        'Type: iPhone [10016] '
        'Connected 1 times '
        'Serial nr: 526F676572 '
        'IMEI [012345678901234]')
    # Short message is the long message truncated to 77 chars + ellipsis.
    expected_short_message = '{0:s}...'.format(expected_message[:77])
    self._TestGetMessageStrings(event, expected_message, expected_short_message)
    self.assertEqual(
        event.timestamp_desc, definitions.TIME_DESCRIPTION_LAST_CONNECTED)
    # Parsed device attributes for the iPhone entry.
    self.assertEqual(event.device_class, 'iPhone')
    self.assertEqual(event.device_id, '4C6F6F6E65000000')
    self.assertEqual(event.firmware_version, 256)
    self.assertEqual(event.imei, '012345678901234')
    self.assertEqual(event.use_count, 1)
# Allow running the tests in this module directly.
if __name__ == '__main__':
  unittest.main()
| 30.176471 | 80 | 0.729532 |
from __future__ import unicode_literals
import unittest
from plaso.formatters import ipod as _
from plaso.lib import definitions
from plaso.parsers.plist_plugins import ipod
from tests import test_lib as shared_test_lib
from tests.parsers.plist_plugins import test_lib
class TestIPodPlugin(test_lib.PlistPluginTestCase):
  """Test case for the iPod plist plugin."""
  @shared_test_lib.skipUnlessHasTestFile(['com.apple.iPod.plist'])
  def testProcess(self):
    """Parses the test plist and verifies the produced events."""
    plist_name = 'com.apple.iPod.plist'
    plugin = ipod.IPodPlugin()
    storage_writer = self._ParsePlistFileWithPlugin(
        plugin, [plist_name], plist_name)
    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 4)
    # Event order from PlistParser is nondeterministic, so sort first.
    events = list(storage_writer.GetSortedEvents())
    event = events[0]
    self.CheckTimestamp(event.timestamp, '1995-11-22 18:25:07.000000')
    self.assertEqual(event.device_id, '0000A11300000000')
    event = events[2]
    self.CheckTimestamp(event.timestamp, '2013-10-09 19:27:54.000000')
    expected_message = (
        'Device ID: 4C6F6F6E65000000 '
        'Type: iPhone [10016] '
        'Connected 1 times '
        'Serial nr: 526F676572 '
        'IMEI [012345678901234]')
    expected_short_message = '{0:s}...'.format(expected_message[:77])
    self._TestGetMessageStrings(event, expected_message, expected_short_message)
    self.assertEqual(
        event.timestamp_desc, definitions.TIME_DESCRIPTION_LAST_CONNECTED)
    self.assertEqual(event.device_class, 'iPhone')
    self.assertEqual(event.device_id, '4C6F6F6E65000000')
    self.assertEqual(event.firmware_version, 256)
    self.assertEqual(event.imei, '012345678901234')
    self.assertEqual(event.use_count, 1)
# Entry point for running this test module standalone.
if __name__ == '__main__':
  unittest.main()
| true | true |
f71fee22e27eb7d42dc3efe0a61407b797d283a4 | 63,809 | py | Python | pandas/core/groupby/groupby.py | paritoshmittal09/pandas | 862d2d89b8fe0a93ec8e714315175e2eba1fa6e5 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/groupby/groupby.py | paritoshmittal09/pandas | 862d2d89b8fe0a93ec8e714315175e2eba1fa6e5 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/groupby/groupby.py | paritoshmittal09/pandas | 862d2d89b8fe0a93ec8e714315175e2eba1fa6e5 | [
"BSD-3-Clause"
] | null | null | null | """
Provide the groupby split-apply-combine paradigm. Define the GroupBy
class providing the base-class of operations.
The SeriesGroupBy and DataFrameGroupBy sub-class
(defined in pandas.core.groupby.generic)
expose these user-facing objects to provide specific functionailty.
"""
import types
from functools import wraps, partial
import datetime
import collections
import warnings
from contextlib import contextmanager
import numpy as np
from pandas._libs import groupby as libgroupby, Timestamp
from pandas.util._validators import validate_kwargs
from pandas.util._decorators import (
cache_readonly, Substitution, Appender)
from pandas import compat
from pandas.compat import zip, range, callable, set_function_name
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.common import (
is_numeric_dtype,
is_scalar,
ensure_float)
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.missing import isna, notna
from pandas.core.groupby import base
from pandas.core.base import (PandasObject, SelectionMixin, GroupByError,
DataError, SpecificationError)
from pandas.core.index import Index, MultiIndex
from pandas.core.generic import NDFrame
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
import pandas.core.common as com
import pandas.core.algorithms as algorithms
from pandas.core.config import option_context
# Generic "See also" footer appended to whitelisted GroupBy methods'
# docstrings; '%(name)s' is substituted with the method name.
_doc_template = """
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
pandas.Panel.%(name)s
"""
# Template and per-class example sections used to build the GroupBy.apply
# docstring; '{input}' and '{examples}' are str.format placeholders.
# (Fixes two grammar errors in the user-facing text: "range of method" ->
# "range of methods" and "For example, f returns" -> "For example, if f
# returns".)
_apply_docs = dict(
    template="""
    Apply function `func` group-wise and combine the results together.
    The function passed to `apply` must take a {input} as its first
    argument and return a DataFrame, Series or scalar. `apply` will
    then take care of combining the results back together into a single
    dataframe or series. `apply` is therefore a highly flexible
    grouping method.
    While `apply` is a very flexible method, its downside is that
    using it can be quite a bit slower than using more specific methods
    like `agg` or `transform`. Pandas offers a wide range of methods that will
    be much faster than using `apply` for their specific purposes, so try to
    use them before reaching for `apply`.
    Parameters
    ----------
    func : callable
        A callable that takes a {input} as its first argument, and
        returns a dataframe, a series or a scalar. In addition the
        callable may take positional and keyword arguments.
    args, kwargs : tuple and dict
        Optional positional and keyword arguments to pass to `func`.
    Returns
    -------
    applied : Series or DataFrame
    Notes
    -----
    In the current implementation `apply` calls `func` twice on the
    first group to decide whether it can take a fast or slow code
    path. This can lead to unexpected behavior if `func` has
    side-effects, as they will take effect twice for the first
    group.
    Examples
    --------
    {examples}
    See also
    --------
    pipe : Apply function to the full GroupBy object instead of to each
        group.
    aggregate : Apply aggregate function to the GroupBy object.
    transform : Apply function column-by-column to the GroupBy object.
    Series.apply : Apply a function to a Series.
    DataFrame.apply : Apply a function to each row or column of a DataFrame.
    """,
    dataframe_examples="""
    >>> df = pd.DataFrame({'A': 'a a b'.split(),
                           'B': [1,2,3],
                           'C': [4,6, 5]})
    >>> g = df.groupby('A')
    Notice that ``g`` has two groups, ``a`` and ``b``.
    Calling `apply` in various ways, we can get different grouping results:
    Example 1: below the function passed to `apply` takes a DataFrame as
    its argument and returns a DataFrame. `apply` combines the result for
    each group together into a new DataFrame:
    >>> g[['B', 'C']].apply(lambda x: x / x.sum())
              B    C
    0  0.333333  0.4
    1  0.666667  0.6
    2  1.000000  1.0
    Example 2: The function passed to `apply` takes a DataFrame as
    its argument and returns a Series. `apply` combines the result for
    each group together into a new DataFrame:
    >>> g[['B', 'C']].apply(lambda x: x.max() - x.min())
       B  C
    A
    a  1  2
    b  0  0
    Example 3: The function passed to `apply` takes a DataFrame as
    its argument and returns a scalar. `apply` combines the result for
    each group together into a Series, including setting the index as
    appropriate:
    >>> g.apply(lambda x: x.C.max() - x.B.min())
    A
    a    5
    b    2
    dtype: int64
    """,
    series_examples="""
    >>> s = pd.Series([0, 1, 2], index='a a b'.split())
    >>> g = s.groupby(s.index)
    From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.
    Calling `apply` in various ways, we can get different grouping results:
    Example 1: The function passed to `apply` takes a Series as
    its argument and returns a Series. `apply` combines the result for
    each group together into a new Series:
    >>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)
    0    0.0
    1    0.5
    2    4.0
    dtype: float64
    Example 2: The function passed to `apply` takes a Series as
    its argument and returns a scalar. `apply` combines the result for
    each group together into a Series, including setting the index as
    appropriate:
    >>> g.apply(lambda x: x.max() - x.min())
    a    1
    b    0
    dtype: int64
    """)
# Docstring template for ``pipe``; the concrete class substitutes
# %(klass)s, %(versionadded)s and %(examples)s via %-formatting.
_pipe_template = """\
Apply a function `func` with arguments to this %(klass)s object and return
the function's result.
%(versionadded)s
Use `.pipe` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing
>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.groupby('group')
...    .pipe(f)
...    .pipe(g, arg1=a)
...    .pipe(h, arg2=b, arg3=c))
which is much more readable.
Parameters
----------
func : callable or tuple of (callable, string)
    Function to apply to this %(klass)s object or, alternatively,
    a `(callable, data_keyword)` tuple where `data_keyword` is a
    string indicating the keyword of `callable` that expects the
    %(klass)s object.
args : iterable, optional
    positional arguments passed into `func`.
kwargs : dict, optional
    a dictionary of keyword arguments passed into `func`.
Returns
-------
object : the return type of `func`.
Notes
-----
See more `here
<http://pandas.pydata.org/pandas-docs/stable/groupby.html#piping-function-calls>`_
Examples
--------
%(examples)s
See Also
--------
pandas.Series.pipe : Apply a function with arguments to a series
pandas.DataFrame.pipe: Apply a function with arguments to a dataframe
apply : Apply function to each group instead of to the
    full %(klass)s object.
"""
# Docstring template for GroupBy.transform; %(klass)s is substituted by
# the Series/DataFrame subclasses.
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
    Function to apply to each group
Notes
-----
Each group is endowed the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
  subframe or can be broadcast to the shape of the input subframe.
  For example, if f returns a scalar it will be broadcast to have the
  same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
  in the subframe. If f also supports application to the entire subframe,
  then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
  produce unexpected results.
Returns
-------
%(klass)s
See also
--------
aggregate, transform
Examples
--------
# Same shape
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
...                           'foo', 'bar'],
...                    'B' : ['one', 'one', 'two', 'three',
...                           'two', 'two'],
...                    'C' : [1, 5, 5, 2, 5, 5],
...                    'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
          C         D
0 -1.154701 -0.577350
1  0.577350  0.000000
2  0.577350  1.154701
3 -1.154701 -1.000000
4  0.577350 -0.577350
5  0.577350  1.000000
# Broadcastable
>>> grouped.transform(lambda x: x.max() - x.min())
   C    D
0  4  6.0
1  3  8.0
2  4  6.0
3  3  8.0
4  4  6.0
5  3  8.0
"""
class GroupByPlot(PandasObject):
    """
    Shim implementing the ``.plot`` attribute for groupby objects.

    ``groupby.plot(...)`` and ``groupby.plot.<method>(...)`` are both
    turned into a per-group callable and dispatched through
    ``GroupBy.apply`` so every group gets plotted.
    """

    def __init__(self, groupby):
        self._groupby = groupby

    def __call__(self, *args, **kwargs):
        def plot_group(group):
            return group.plot(*args, **kwargs)
        # keep the name 'plot' so plot-method name detection still works
        plot_group.__name__ = 'plot'
        return self._groupby.apply(plot_group)

    def __getattr__(self, name):
        # e.g. ``.plot.hist`` -> forward to each group's ``plot.hist``
        def dispatch(*args, **kwargs):
            def per_group(group):
                plot_attr = getattr(group.plot, name)
                return plot_attr(*args, **kwargs)
            return self._groupby.apply(per_group)
        return dispatch
@contextmanager
def _group_selection_context(groupby):
    """
    Temporarily set the group selection on ``groupby``.

    The selection is reset on exit even when the body raises, so an
    exception inside the ``with`` block can no longer leave a stale
    ``_group_selection`` behind (the previous version skipped the reset
    on error).
    """
    groupby._set_group_selection()
    try:
        yield groupby
    finally:
        groupby._reset_group_selection()
class _GroupBy(PandasObject, SelectionMixin):
_group_selection = None
_apply_whitelist = frozenset([])
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False,
observed=False, **kwargs):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
self.observed = observed
self.mutated = kwargs.pop('mutated', False)
if grouper is None:
from pandas.core.groupby.grouper import _get_grouper
grouper, exclusions, obj = _get_grouper(obj, keys,
axis=axis,
level=level,
sort=sort,
observed=observed,
mutated=self.mutated)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
# we accept no other args
validate_kwargs('group', kwargs, {})
def __len__(self):
return len(self.groups)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
def _assure_grouper(self):
"""
we create the grouper on instantiation
sub-classes may have a different policy
"""
pass
@property
def groups(self):
""" dict {group name -> group labels} """
self._assure_grouper()
return self.grouper.groups
@property
def ngroups(self):
self._assure_grouper()
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
self._assure_grouper()
return self.grouper.indices
def _get_indices(self, names):
"""
safe get multiple indices, translate keys for
datelike to underlying repr
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp, datetime.datetime)):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = ("must supply a tuple to get_group with multiple"
" grouping keys")
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError:
# turns out it wasn't a tuple
msg = ("must supply a a same-length tuple to get_group"
" with multiple grouping keys")
raise ValueError(msg)
converters = [get_converter(s) for s in index_sample]
names = [tuple(f(n) for f, n in zip(converters, name))
for name in names]
else:
converter = get_converter(index_sample)
names = [converter(name) for name in names]
return [self.indices.get(name, []) for name in names]
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
return self._get_indices([name])[0]
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _reset_group_selection(self):
"""
Clear group based selection. Used for methods needing to return info on
each group regardless of whether a group selection was previously set.
"""
if self._group_selection is not None:
# GH12839 clear cached selection too when changing group selection
self._group_selection = None
self._reset_cache('_selected_obj')
def _set_group_selection(self):
"""
Create group based selection. Used when selection is not passed
directly but instead via a grouper.
NOTE: this should be paired with a call to _reset_group_selection
"""
grp = self.grouper
if not (self.as_index and
getattr(grp, 'groupings', None) is not None and
self.obj.ndim > 1 and
self._group_selection is None):
return
ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings
if g.level is None and g.in_axis]
if len(groupers):
# GH12839 clear selected obj cache when group selection changes
self._group_selection = ax.difference(Index(groupers)).tolist()
self._reset_cache('_selected_obj')
def _set_result_index_ordered(self, result):
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate(
self._get_indices(self.grouper.result_index)))
result.set_axis(index, axis=self.axis, inplace=True)
result = result.sort_index(axis=self.axis)
result.set_axis(self.obj._get_axis(self.axis), axis=self.axis,
inplace=True)
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._apply_whitelist
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
@Substitution(klass='GroupBy',
versionadded='.. versionadded:: 0.21.0',
examples="""\
>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})
>>> df
A B
0 a 1
1 b 2
2 a 3
3 b 4
To get the difference between each groups maximum and minimum value in one
pass, you can do
>>> df.groupby('A').pipe(lambda x: x.max() - x.min())
B
A
a 2
b 2""")
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
return com._pipe(self, func, *args, **kwargs)
plot = property(GroupByPlot)
    def _make_wrapper(self, name):
        """Build a callable that applies the object's method ``name`` to each
        group, trying progressively more forgiving call styles.

        Raises AttributeError when ``name`` is not whitelisted for dispatch.
        """
        if name not in self._apply_whitelist:
            is_callable = callable(getattr(self._selected_obj, name, None))
            kind = ' callable ' if is_callable else ' '
            msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
                   "using the 'apply' method".format(kind, name,
                                                     type(self).__name__))
            raise AttributeError(msg)

        self._set_group_selection()

        # need to setup the selection
        # as are not passed directly but in the grouper
        f = getattr(self._selected_obj, name)
        if not isinstance(f, types.MethodType):
            # plain attribute (not a bound method): fetch it per group
            return self.apply(lambda self: getattr(self, name))

        f = getattr(type(self._selected_obj), name)

        def wrapper(*args, **kwargs):
            # a little trickery for aggregation functions that need an axis
            # argument
            kwargs_with_axis = kwargs.copy()
            if 'axis' not in kwargs_with_axis or \
               kwargs_with_axis['axis'] is None:
                kwargs_with_axis['axis'] = self.axis

            def curried_with_axis(x):
                return f(x, *args, **kwargs_with_axis)

            def curried(x):
                return f(x, *args, **kwargs)

            # preserve the name so we can detect it when calling plot methods,
            # to avoid duplicates
            curried.__name__ = curried_with_axis.__name__ = name

            # special case otherwise extra plots are created when catching the
            # exception below
            if name in base.plotting_methods:
                return self.apply(curried)

            # fallback order matters: with-axis first, then without, then
            # column-by-column aggregation
            try:
                return self.apply(curried_with_axis)
            except Exception:
                try:
                    return self.apply(curried)
                except Exception:

                    # related to : GH3688
                    # try item-by-item
                    # this can be called recursively, so need to raise
                    # ValueError
                    # if we don't have this method to indicated to aggregate to
                    # mark this column as an error
                    try:
                        return self._aggregate_item_by_item(name,
                                                            *args, **kwargs)
                    except (AttributeError):
                        raise ValueError

        return wrapper
    def get_group(self, name, obj=None):
        """
        Constructs NDFrame from group with provided name

        Parameters
        ----------
        name : object
            the name of the group to get as a DataFrame
        obj : NDFrame, default None
            the NDFrame to take the DataFrame out of.  If
            it is None, the object groupby was called on will
            be used

        Returns
        -------
        group : same type as obj

        Raises
        ------
        KeyError
            If ``name`` does not correspond to an existing group.
        """
        if obj is None:
            obj = self._selected_obj

        inds = self._get_index(name)
        if not len(inds):
            raise KeyError(name)

        return obj._take(inds, axis=self.axis)
    def __iter__(self):
        """
        Groupby iterator

        Returns
        -------
        Generator yielding sequence of (name, subsetted object)
        for each group
        """
        return self.grouper.get_iterator(self.obj, axis=self.axis)
    @Appender(_apply_docs['template']
              .format(input="dataframe",
                      examples=_apply_docs['dataframe_examples']))
    def apply(self, func, *args, **kwargs):
        # resolve string aliases (e.g. 'sum') to the actual builtin
        func = self._is_builtin_func(func)

        # this is needed so we don't try and wrap strings. If we could
        # resolve functions to their callable functions prior, this
        # wouldn't be needed
        if args or kwargs:
            if callable(func):

                @wraps(func)
                def f(g):
                    # suppress numpy warnings inside the user function
                    with np.errstate(all='ignore'):
                        return func(g, *args, **kwargs)
            else:
                raise ValueError('func must be a callable if args or '
                                 'kwargs are supplied')
        else:
            f = func

        # ignore SettingWithCopy here in case the user mutates
        with option_context('mode.chained_assignment', None):
            try:
                result = self._python_apply_general(f)
            except Exception:

                # gh-20949
                # try again, with .apply acting as a filtering
                # operation, by excluding the grouping column
                # This would normally not be triggered
                # except if the udf is trying an operation that
                # fails on *some* columns, e.g. a numeric operation
                # on a string grouper column
                with _group_selection_context(self):
                    return self._python_apply_general(f)

        return result
    def _python_apply_general(self, f):
        # apply f group-wise in Python and stitch the pieces back together;
        # `mutated` reflects whether f changed group shapes in place
        keys, values, mutated = self.grouper.apply(f, self._selected_obj,
                                                   self.axis)

        return self._wrap_applied_output(
            keys,
            values,
            not_indexed_same=mutated or self.mutated)
    def _iterate_slices(self):
        # yield (name, values) pairs to aggregate; overridden for frames
        yield self._selection_name, self._selected_obj
    def transform(self, func, *args, **kwargs):
        # abstract: concrete subclasses (Series/DataFrame groupby) implement
        raise com.AbstractMethodError(self)
def _cumcount_array(self, ascending=True):
"""
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Notes
-----
this is currently implementing sort=False
(though the default is sort=True) for groupby in general
"""
ids, _, ngroups = self.grouper.group_info
sorter = get_group_index_sorter(ids, ngroups)
ids, count = ids[sorter], len(ids)
if count == 0:
return np.empty(0, dtype=np.int64)
run = np.r_[True, ids[:-1] != ids[1:]]
rep = np.diff(np.r_[np.nonzero(run)[0], count])
out = (~run).cumsum()
if ascending:
out -= np.repeat(out[run], rep)
else:
out = np.repeat(out[np.r_[run[1:], True]], rep) - out
rev = np.empty(count, dtype=np.intp)
rev[sorter] = np.arange(count, dtype=np.intp)
return out[rev].astype(np.int64, copy=False)
    def _try_cast(self, result, obj, numeric_only=False):
        """
        try to cast the result to our obj original type,
        we may have roundtripped thru object in the mean-time

        if numeric_only is True, then only try to cast numerics
        and not datetimelikes
        """
        if obj.ndim > 1:
            dtype = obj.values.dtype
        else:
            dtype = obj.dtype

        if not is_scalar(result):
            # scalars are left alone; arrays get downcast toward `dtype`
            if numeric_only and is_numeric_dtype(dtype) or not numeric_only:
                result = maybe_downcast_to_dtype(result, dtype)

        return result
    def _transform_should_cast(self, func_nm):
        """
        Parameters:
        -----------
        func_nm: str
            The name of the aggregation function being performed

        Returns:
        --------
        bool
            Whether transform should attempt to cast the result of aggregation
        """
        # only cast when at least one group is non-empty and the function is
        # not blacklisted for cython casting
        return (self.size().fillna(0) > 0).any() and (
            func_nm not in base.cython_cast_blacklist)
    def _cython_transform(self, how, numeric_only=True, **kwargs):
        """Run the Cython transform ``how`` over each slice; skip slices the
        Cython layer cannot handle, raise DataError when nothing remains."""
        output = collections.OrderedDict()
        for name, obj in self._iterate_slices():
            is_numeric = is_numeric_dtype(obj.dtype)
            if numeric_only and not is_numeric:
                continue

            try:
                result, names = self.grouper.transform(obj.values, how,
                                                       **kwargs)
            except NotImplementedError:
                # dtype unsupported by this cython op: skip the slice
                continue
            except AssertionError as e:
                raise GroupByError(str(e))
            if self._transform_should_cast(how):
                output[name] = self._try_cast(result, obj)
            else:
                output[name] = result

        if len(output) == 0:
            raise DataError('No numeric types to aggregate')

        return self._wrap_transformed_output(output, names)
    def _cython_agg_general(self, how, alt=None, numeric_only=True,
                            min_count=-1):
        """Aggregate each slice with the Cython op ``how``.

        ``alt`` is an alternate (Python) function subclasses may use as a
        fallback; raises DataError when no slice could be aggregated.
        """
        output = {}
        for name, obj in self._iterate_slices():
            is_numeric = is_numeric_dtype(obj.dtype)
            if numeric_only and not is_numeric:
                continue

            try:
                result, names = self.grouper.aggregate(obj.values, how,
                                                       min_count=min_count)
            except AssertionError as e:
                raise GroupByError(str(e))
            output[name] = self._try_cast(result, obj)

        if len(output) == 0:
            raise DataError('No numeric types to aggregate')

        return self._wrap_aggregated_output(output, names)
    def _python_agg_general(self, func, *args, **kwargs):
        """Pure-Python per-group aggregation fallback for ``func``."""
        func = self._is_builtin_func(func)
        f = lambda x: func(x, *args, **kwargs)

        # iterate through "columns" ex exclusions to populate output dict
        output = {}
        for name, obj in self._iterate_slices():
            try:
                result, counts = self.grouper.agg_series(obj, f)
                output[name] = self._try_cast(result, obj, numeric_only=True)
            except TypeError:
                # slice not aggregatable with f: skip it
                continue

        if len(output) == 0:
            # nothing aggregated; fall back to a full apply
            return self._python_apply_general(f)

        if self.grouper._filter_empty_groups:

            mask = counts.ravel() > 0
            for name, result in compat.iteritems(output):

                # since we are masking, make sure that we have a float object
                values = result
                if is_numeric_dtype(values.dtype):
                    values = ensure_float(values)

                output[name] = self._try_cast(values[mask], result)

        return self._wrap_aggregated_output(output)
    def _wrap_applied_output(self, *args, **kwargs):
        # abstract: subclasses combine per-group apply results
        raise com.AbstractMethodError(self)
    def _concat_objects(self, keys, values, not_indexed_same=False):
        """Concatenate per-group results back into a single object,
        re-aligning to the original axis when the index was preserved."""
        from pandas.core.reshape.concat import concat

        def reset_identity(values):
            # reset the identities of the components
            # of the values to prevent aliasing
            for v in com._not_none(*values):
                ax = v._get_axis(self.axis)
                ax._reset_identity()
            return values

        if not not_indexed_same:
            result = concat(values, axis=self.axis)
            ax = self._selected_obj._get_axis(self.axis)

            if isinstance(result, Series):
                result = result.reindex(ax)
            else:

                # this is a very unfortunate situation
                # we have a multi-index that is NOT lexsorted
                # and we have a result which is duplicated
                # we can't reindex, so we resort to this
                # GH 14776
                if isinstance(ax, MultiIndex) and not ax.is_unique:
                    indexer = algorithms.unique1d(
                        result.index.get_indexer_for(ax.values))
                    result = result.take(indexer, axis=self.axis)
                else:
                    result = result.reindex(ax, axis=self.axis)

        elif self.group_keys:

            values = reset_identity(values)
            if self.as_index:

                # possible MI return case
                group_keys = keys
                group_levels = self.grouper.levels
                group_names = self.grouper.names

                result = concat(values, axis=self.axis, keys=group_keys,
                                levels=group_levels, names=group_names,
                                sort=False)
            else:

                # GH5610, returns a MI, with the first level being a
                # range index
                keys = list(range(len(values)))
                result = concat(values, axis=self.axis, keys=keys)
        else:
            values = reset_identity(values)
            result = concat(values, axis=self.axis)

        if (isinstance(result, Series) and
                getattr(self, '_selection_name', None) is not None):

            result.name = self._selection_name

        return result
    def _apply_filter(self, indices, dropna):
        """Materialize a filter result: keep rows at ``indices``; when
        ``dropna`` is False, non-selected rows become NaN instead of being
        dropped."""
        if len(indices) == 0:
            indices = np.array([], dtype='int64')
        else:
            indices = np.sort(np.concatenate(indices))
        if dropna:
            filtered = self._selected_obj.take(indices, axis=self.axis)
        else:
            mask = np.empty(len(self._selected_obj.index), dtype=bool)
            mask.fill(False)
            mask[indices.astype(int)] = True
            # mask fails to broadcast when passed to where; broadcast manually.
            mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
            filtered = self._selected_obj.where(mask)  # Fill with NaNs.
        return filtered
class GroupBy(_GroupBy):

    """
    Class for grouping and aggregating relational data. See aggregate,
    transform, and apply functions on this object.

    It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:

    ::

        grouped = groupby(obj, ...)

    Parameters
    ----------
    obj : pandas object
    axis : int, default 0
    level : int, default None
        Level of MultiIndex
    groupings : list of Grouping objects
        Most users should ignore this
    exclusions : array-like, optional
        List of columns to exclude
    name : string
        Most users should ignore this

    Notes
    -----
    After grouping, see aggregate, apply, and transform functions. Here are
    some other brief notes about usage. When grouping by multiple groups, the
    result index will be a MultiIndex (hierarchical) by default.

    Iteration produces (key, group) tuples, i.e. chunking the data by group. So
    you can write code like:

    ::

        grouped = obj.groupby(keys, axis=axis)
        for key, group in grouped:
            # do something with the data

    Function calls on GroupBy, if not specially implemented, "dispatch" to the
    grouped data. So if you group a DataFrame and wish to invoke the std()
    method on each group, you can simply do:

    ::

        df.groupby(mapper).std()

    rather than

    ::

        df.groupby(mapper).aggregate(np.std)

    You can pass arguments to these "wrapped" functions, too.

    See the online documentation for full exposition on these topics and much
    more

    Returns
    -------
    **Attributes**
    groups : dict
        {group name -> group labels}
    len(grouped) : int
        Number of groups
    """
def _bool_agg(self, val_test, skipna):
"""Shared func to call any / all Cython GroupBy implementations"""
def objs_to_bool(vals):
try:
vals = vals.astype(np.bool)
except ValueError: # for objects
vals = np.array([bool(x) for x in vals])
return vals.view(np.uint8)
def result_to_bool(result):
return result.astype(np.bool, copy=False)
return self._get_cythonized_result('group_any_all', self.grouper,
aggregate=True,
cython_dtype=np.uint8,
needs_values=True,
needs_mask=True,
pre_processing=objs_to_bool,
post_processing=result_to_bool,
val_test=val_test, skipna=skipna)
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def any(self, skipna=True):
        """
        Returns True if any value in the group is truthful, else False

        Parameters
        ----------
        skipna : bool, default True
            Flag to ignore nan values during truth testing
        """
        return self._bool_agg('any', skipna)
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def all(self, skipna=True):
        """Returns True if all values in the group are truthful, else False

        Parameters
        ----------
        skipna : bool, default True
            Flag to ignore nan values during truth testing
        """
        return self._bool_agg('all', skipna)
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def count(self):
        """Compute count of group, excluding missing values"""

        # defined here for API doc; concrete subclasses implement it
        raise NotImplementedError
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def mean(self, *args, **kwargs):
        """
        Compute mean of groups, excluding missing values.

        Returns
        -------
        pandas.Series or pandas.DataFrame

        Examples
        --------
        >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
        ...                    'B': [np.nan, 2, 3, 4, 5],
        ...                    'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])

        Groupby one column and return the mean of the remaining columns in
        each group.

        >>> df.groupby('A').mean()
             B         C
        A
        1  3.0  1.333333
        2  4.0  1.500000

        Groupby two columns and return the mean of the remaining column.

        >>> df.groupby(['A', 'B']).mean()
               C
        A B
        1 2.0  2
          4.0  1
        2 3.0  1
          5.0  2

        Groupby one column and return the mean of only particular column in
        the group.

        >>> df.groupby('A')['B'].mean()
        A
        1    3.0
        2    4.0
        Name: B, dtype: float64
        """
        nv.validate_groupby_func('mean', args, kwargs, ['numeric_only'])
        try:
            return self._cython_agg_general('mean', **kwargs)
        except GroupByError:
            raise
        except Exception:  # pragma: no cover
            # Cython path failed for a non-groupby reason: do it in Python
            with _group_selection_context(self):
                f = lambda x: x.mean(axis=self.axis, **kwargs)
                return self._python_agg_general(f)
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def median(self, **kwargs):
        """
        Compute median of groups, excluding missing values

        For multiple groupings, the result index will be a MultiIndex
        """
        try:
            return self._cython_agg_general('median', **kwargs)
        except GroupByError:
            raise
        except Exception:  # pragma: no cover

            # Cython path failed: fall back to a Python per-group median
            def f(x):
                if isinstance(x, np.ndarray):
                    x = Series(x)
                return x.median(axis=self.axis, **kwargs)
            with _group_selection_context(self):
                return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def std(self, ddof=1, *args, **kwargs):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
# TODO: implement at Cython level?
nv.validate_groupby_func('std', args, kwargs)
return np.sqrt(self.var(ddof=ddof, **kwargs))
@Substitution(name='groupby')
@Appender(_doc_template)
def var(self, ddof=1, *args, **kwargs):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
nv.validate_groupby_func('var', args, kwargs)
if ddof == 1:
try:
return self._cython_agg_general('var', **kwargs)
except Exception:
f = lambda x: x.var(ddof=ddof, **kwargs)
with _group_selection_context(self):
return self._python_agg_general(f)
else:
f = lambda x: x.var(ddof=ddof, **kwargs)
with _group_selection_context(self):
return self._python_agg_general(f)
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def sem(self, ddof=1):
        """
        Compute standard error of the mean of groups, excluding missing values

        For multiple groupings, the result index will be a MultiIndex

        Parameters
        ----------
        ddof : integer, default 1
            degrees of freedom
        """
        # standard error = std / sqrt(group size)
        return self.std(ddof=ddof) / np.sqrt(self.count())
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def size(self):
        """Compute group sizes"""
        result = self.grouper.size()

        # propagate the Series name so the result stays labelled
        if isinstance(self.obj, Series):
            result.name = getattr(self.obj, 'name', None)
        return result
    @classmethod
    def _add_numeric_operations(cls):
        """ add numeric operations to the GroupBy generically """

        def groupby_function(name, alias, npfunc,
                             numeric_only=True, _convert=False,
                             min_count=-1):
            # factory building a cython-first aggregation method named `name`;
            # `alias` is the cython kernel name and `npfunc` the Python
            # fallback applied via aggregate()

            _local_template = "Compute %(f)s of group values"

            @Substitution(name='groupby', f=name)
            @Appender(_doc_template)
            @Appender(_local_template)
            def f(self, **kwargs):
                if 'numeric_only' not in kwargs:
                    kwargs['numeric_only'] = numeric_only
                if 'min_count' not in kwargs:
                    kwargs['min_count'] = min_count
                self._set_group_selection()
                try:
                    return self._cython_agg_general(
                        alias, alt=npfunc, **kwargs)
                except AssertionError as e:
                    raise SpecificationError(str(e))
                except Exception:
                    # cython path failed: aggregate with the numpy fallback
                    result = self.aggregate(
                        lambda x: npfunc(x, axis=self.axis))
                    if _convert:
                        result = result._convert(datetime=True)
                    return result

            set_function_name(f, name, cls)

            return f

        def first_compat(x, axis=0):
            # first non-null value per column (NaN if all-null)

            def first(x):
                x = np.asarray(x)
                x = x[notna(x)]
                if len(x) == 0:
                    return np.nan
                return x[0]

            if isinstance(x, DataFrame):
                return x.apply(first, axis=axis)
            else:
                return first(x)

        def last_compat(x, axis=0):
            # last non-null value per column (NaN if all-null)

            def last(x):
                x = np.asarray(x)
                x = x[notna(x)]
                if len(x) == 0:
                    return np.nan
                return x[-1]

            if isinstance(x, DataFrame):
                return x.apply(last, axis=axis)
            else:
                return last(x)

        cls.sum = groupby_function('sum', 'add', np.sum, min_count=0)
        cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0)
        cls.min = groupby_function('min', 'min', np.min, numeric_only=False)
        cls.max = groupby_function('max', 'max', np.max, numeric_only=False)
        cls.first = groupby_function('first', 'first', first_compat,
                                     numeric_only=False)
        cls.last = groupby_function('last', 'last', last_compat,
                                    numeric_only=False)
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def ohlc(self):
        """
        Compute open, high, low and close values within each group,
        excluding missing values

        For multiple groupings, the result index will be a MultiIndex
        """
        return self._apply_to_column_groupbys(
            lambda x: x._cython_agg_general('ohlc'))
    @Appender(DataFrame.describe.__doc__)
    def describe(self, **kwargs):
        # per-group describe(); result is unstacked so stats become columns
        with _group_selection_context(self):
            result = self.apply(lambda x: x.describe(**kwargs))
            if self.axis == 1:
                return result.T
            return result.unstack()
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def resample(self, rule, *args, **kwargs):
        """
        Provide resampling when using a TimeGrouper
        Return a new grouper with our resampler appended
        """
        from pandas.core.resample import get_resampler_for_grouping
        return get_resampler_for_grouping(self, rule, *args, **kwargs)
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def rolling(self, *args, **kwargs):
        """
        Return a rolling grouper, providing rolling
        functionality per group
        """
        from pandas.core.window import RollingGroupby
        return RollingGroupby(self, *args, **kwargs)
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def expanding(self, *args, **kwargs):
        """
        Return an expanding grouper, providing expanding
        functionality per group
        """
        from pandas.core.window import ExpandingGroupby
        return ExpandingGroupby(self, *args, **kwargs)
    def _fill(self, direction, limit=None):
        """Shared function for `pad` and `backfill` to call Cython method

        Parameters
        ----------
        direction : {'ffill', 'bfill'}
            Direction passed to underlying Cython function. `bfill` will cause
            values to be filled backwards. `ffill` and any other values will
            default to a forward fill
        limit : int, default None
            Maximum number of consecutive values to fill. If `None`, this
            method will convert to -1 prior to passing to Cython

        Returns
        -------
        `Series` or `DataFrame` with filled values

        See Also
        --------
        pad
        backfill
        """
        # Need int value for Cython
        if limit is None:
            limit = -1

        return self._get_cythonized_result('group_fillna_indexer',
                                           self.grouper, needs_mask=True,
                                           cython_dtype=np.int64,
                                           result_is_index=True,
                                           direction=direction, limit=limit)
    @Substitution(name='groupby')
    def pad(self, limit=None):
        """
        Forward fill the values

        Parameters
        ----------
        limit : integer, optional
            limit of how many values to fill

        See Also
        --------
        Series.pad
        DataFrame.pad
        Series.fillna
        DataFrame.fillna
        """
        return self._fill('ffill', limit=limit)
    # alias kept for API parity with Series/DataFrame.ffill
    ffill = pad
    @Substitution(name='groupby')
    def backfill(self, limit=None):
        """
        Backward fill the values

        Parameters
        ----------
        limit : integer, optional
            limit of how many values to fill

        See Also
        --------
        Series.backfill
        DataFrame.backfill
        Series.fillna
        DataFrame.fillna
        """
        return self._fill('bfill', limit=limit)
    # alias kept for API parity with Series/DataFrame.bfill
    bfill = backfill
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def nth(self, n, dropna=None):
        """
        Take the nth row from each group if n is an int, or a subset of rows
        if n is a list of ints.

        If dropna, will take the nth non-null row, dropna is either
        Truthy (if a Series) or 'all', 'any' (if a DataFrame);
        this is equivalent to calling dropna(how=dropna) before the
        groupby.

        Parameters
        ----------
        n : int or list of ints
            a single nth value for the row or a list of nth values
        dropna : None or str, optional
            apply the specified dropna operation before counting which row is
            the nth row. Needs to be None, 'any' or 'all'

        Examples
        --------

        >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
        ...                    'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
        >>> g = df.groupby('A')
        >>> g.nth(0)
             B
        A
        1  NaN
        2  3.0
        >>> g.nth(1)
             B
        A
        1  2.0
        2  5.0
        >>> g.nth(-1)
             B
        A
        1  4.0
        2  5.0
        >>> g.nth([0, 1])
             B
        A
        1  NaN
        1  2.0
        2  3.0
        2  5.0

        Specifying `dropna` allows count ignoring ``NaN``

        >>> g.nth(0, dropna='any')
             B
        A
        1  2.0
        2  3.0

        NaNs denote group exhausted when using dropna

        >>> g.nth(3, dropna='any')
            B
        A
        1 NaN
        2 NaN

        Specifying `as_index=False` in `groupby` keeps the original index.

        >>> df.groupby('A', as_index=False).nth(1)
           A    B
        1  1  2.0
        4  2  5.0
        """
        # normalize n into an array of wanted positions
        if isinstance(n, int):
            nth_values = [n]
        elif isinstance(n, (set, list, tuple)):
            nth_values = list(set(n))
            if dropna is not None:
                raise ValueError(
                    "dropna option with a list of nth values is not supported")
        else:
            raise TypeError("n needs to be an int or a list/set/tuple of ints")

        nth_values = np.array(nth_values, dtype=np.intp)
        self._set_group_selection()

        if not dropna:
            # fast path: select rows whose within-group position matches n,
            # counting from the front for n >= 0 and the back for n < 0
            mask = np.in1d(self._cumcount_array(), nth_values) | \
                np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values)

            out = self._selected_obj[mask]
            if not self.as_index:
                return out

            ids, _, _ = self.grouper.group_info
            out.index = self.grouper.result_index[ids[mask]]

            return out.sort_index() if self.sort else out

        if dropna not in ['any', 'all']:
            if isinstance(self._selected_obj, Series) and dropna is True:
                warnings.warn("the dropna={dropna} keyword is deprecated,"
                              "use dropna='all' instead. "
                              "For a Series groupby, dropna must be "
                              "either None, 'any' or 'all'.".format(
                                  dropna=dropna),
                              FutureWarning,
                              stacklevel=2)
                dropna = 'all'
            else:
                # Note: when agg-ing picker doesn't raise this,
                # just returns NaN
                raise ValueError("For a DataFrame groupby, dropna must be "
                                 "either None, 'any' or 'all', "
                                 "(was passed %s)." % (dropna),)

        # old behaviour, but with all and any support for DataFrames.
        # modified in GH 7559 to have better perf
        max_len = n if n >= 0 else - 1 - n
        dropped = self.obj.dropna(how=dropna, axis=self.axis)

        # get a new grouper for our dropped obj
        if self.keys is None and self.level is None:

            # we don't have the grouper info available
            # (e.g. we have selected out
            # a column that is not in the current object)
            axis = self.grouper.axis
            grouper = axis[axis.isin(dropped.index)]

        else:

            # create a grouper with the original parameters, but on the dropped
            # object
            from pandas.core.groupby.grouper import _get_grouper
            grouper, _, _ = _get_grouper(dropped, key=self.keys,
                                         axis=self.axis, level=self.level,
                                         sort=self.sort,
                                         mutated=self.mutated)

        grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
        sizes, result = grb.size(), grb.nth(n)
        mask = (sizes < max_len).values

        # set the results which don't meet the criteria
        if len(result) and mask.any():
            result.loc[mask] = np.nan

        # reset/reindex to the original groups
        if len(self.obj) == len(dropped) or \
           len(result) == len(self.grouper.result_index):
            result.index = self.grouper.result_index
        else:
            result = result.reindex(self.grouper.result_index)

        return result
    @Substitution(name='groupby')
    def ngroup(self, ascending=True):
        """
        Number each group from 0 to the number of groups - 1.

        This is the enumerative complement of cumcount.  Note that the
        numbers given to the groups match the order in which the groups
        would be seen when iterating over the groupby object, not the
        order they are first observed.

        .. versionadded:: 0.20.2

        Parameters
        ----------
        ascending : bool, default True
            If False, number in reverse, from number of group - 1 to 0.

        Examples
        --------

        >>> df = pd.DataFrame({"A": list("aaabba")})
        >>> df
           A
        0  a
        1  a
        2  a
        3  b
        4  b
        5  a
        >>> df.groupby('A').ngroup()
        0    0
        1    0
        2    0
        3    1
        4    1
        5    0
        dtype: int64
        >>> df.groupby('A').ngroup(ascending=False)
        0    1
        1    1
        2    1
        3    0
        4    0
        5    1
        dtype: int64
        >>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup()
        0    0
        1    0
        2    1
        3    3
        4    2
        5    0
        dtype: int64

        See also
        --------
        .cumcount : Number the rows in each group.
        """

        with _group_selection_context(self):
            index = self._selected_obj.index
            # group_info[0] holds the per-row group id
            result = Series(self.grouper.group_info[0], index)
            if not ascending:
                result = self.ngroups - 1 - result
            return result
    @Substitution(name='groupby')
    def cumcount(self, ascending=True):
        """
        Number each item in each group from 0 to the length of that group - 1.

        Essentially this is equivalent to

        >>> self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))

        Parameters
        ----------
        ascending : bool, default True
            If False, number in reverse, from length of group - 1 to 0.

        Examples
        --------

        >>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
        ...                   columns=['A'])
        >>> df
           A
        0  a
        1  a
        2  a
        3  b
        4  b
        5  a
        >>> df.groupby('A').cumcount()
        0    0
        1    1
        2    2
        3    0
        4    1
        5    3
        dtype: int64
        >>> df.groupby('A').cumcount(ascending=False)
        0    3
        1    2
        2    1
        3    1
        4    0
        5    0
        dtype: int64

        See also
        --------
        .ngroup : Number the groups themselves.
        """

        with _group_selection_context(self):
            index = self._selected_obj.index
            cumcounts = self._cumcount_array(ascending=ascending)
            return Series(cumcounts, index)
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def rank(self, method='average', ascending=True, na_option='keep',
             pct=False, axis=0):
        """
        Provides the rank of values within each group.

        Parameters
        ----------
        method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
            * average: average rank of group
            * min: lowest rank in group
            * max: highest rank in group
            * first: ranks assigned in order they appear in the array
            * dense: like 'min', but rank always increases by 1 between groups
        ascending : boolean, default True
            False for ranks by high (1) to low (N)
        na_option : {'keep', 'top', 'bottom'}, default 'keep'
            * keep: leave NA values where they are
            * top: smallest rank if ascending
            * bottom: smallest rank if descending
        pct : boolean, default False
            Compute percentage rank of data within each group
        axis : int, default 0
            The axis of the object over which to compute the rank.

        Returns
        -------
        DataFrame with ranking of values within each group

        Raises
        ------
        ValueError
            If ``na_option`` is not one of 'keep', 'top' or 'bottom'.
        """
        if na_option not in {'keep', 'top', 'bottom'}:
            msg = "na_option must be one of 'keep', 'top', or 'bottom'"
            raise ValueError(msg)
        return self._cython_transform('rank', numeric_only=False,
                                      ties_method=method, ascending=ascending,
                                      na_option=na_option, pct=pct, axis=axis)
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def cumprod(self, axis=0, *args, **kwargs):
        """Cumulative product for each group"""
        nv.validate_groupby_func('cumprod', args, kwargs,
                                 ['numeric_only', 'skipna'])
        if axis != 0:
            # only axis 0 is implemented in Cython; fall back to apply
            return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))

        return self._cython_transform('cumprod', **kwargs)
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def cumsum(self, axis=0, *args, **kwargs):
        """Cumulative sum for each group"""
        nv.validate_groupby_func('cumsum', args, kwargs,
                                 ['numeric_only', 'skipna'])
        if axis != 0:
            # only axis 0 is implemented in Cython; fall back to apply
            return self.apply(lambda x: x.cumsum(axis=axis, **kwargs))

        return self._cython_transform('cumsum', **kwargs)
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def cummin(self, axis=0, **kwargs):
        """Cumulative min for each group"""
        if axis != 0:
            # only axis 0 is implemented in Cython; fall back to apply
            return self.apply(lambda x: np.minimum.accumulate(x, axis))

        return self._cython_transform('cummin', numeric_only=False)
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def cummax(self, axis=0, **kwargs):
        """Cumulative max for each group"""
        if axis != 0:
            # only axis 0 is implemented in Cython; fall back to apply
            return self.apply(lambda x: np.maximum.accumulate(x, axis))

        return self._cython_transform('cummax', numeric_only=False)
def _get_cythonized_result(self, how, grouper, aggregate=False,
cython_dtype=None, needs_values=False,
needs_mask=False, needs_ngroups=False,
result_is_index=False,
pre_processing=None, post_processing=None,
**kwargs):
"""Get result for Cythonized functions
Parameters
----------
how : str, Cythonized function name to be called
grouper : Grouper object containing pertinent group info
aggregate : bool, default False
Whether the result should be aggregated to match the number of
groups
cython_dtype : default None
Type of the array that will be modified by the Cython call. If
`None`, the type will be inferred from the values of each slice
needs_values : bool, default False
Whether the values should be a part of the Cython call
signature
needs_mask : bool, default False
Whether boolean mask needs to be part of the Cython call
signature
needs_ngroups : bool, default False
Whether number of groups is part of the Cython call signature
result_is_index : bool, default False
Whether the result of the Cython operation is an index of
values to be retrieved, instead of the actual values themselves
pre_processing : function, default None
Function to be applied to `values` prior to passing to Cython
Raises if `needs_values` is False
post_processing : function, default None
Function to be applied to result of Cython function
**kwargs : dict
Extra arguments to be passed back to Cython funcs
Returns
-------
`Series` or `DataFrame` with filled values
"""
if result_is_index and aggregate:
raise ValueError("'result_is_index' and 'aggregate' cannot both "
"be True!")
if post_processing:
if not callable(pre_processing):
raise ValueError("'post_processing' must be a callable!")
if pre_processing:
if not callable(pre_processing):
raise ValueError("'pre_processing' must be a callable!")
if not needs_values:
raise ValueError("Cannot use 'pre_processing' without "
"specifying 'needs_values'!")
labels, _, ngroups = grouper.group_info
output = collections.OrderedDict()
base_func = getattr(libgroupby, how)
for name, obj in self._iterate_slices():
if aggregate:
result_sz = ngroups
else:
result_sz = len(obj.values)
if not cython_dtype:
cython_dtype = obj.values.dtype
result = np.zeros(result_sz, dtype=cython_dtype)
func = partial(base_func, result, labels)
if needs_values:
vals = obj.values
if pre_processing:
vals = pre_processing(vals)
func = partial(func, vals)
if needs_mask:
mask = isna(obj.values).view(np.uint8)
func = partial(func, mask)
if needs_ngroups:
func = partial(func, ngroups)
func(**kwargs) # Call func to modify indexer values in place
if result_is_index:
result = algorithms.take_nd(obj.values, result)
if post_processing:
result = post_processing(result)
output[name] = result
if aggregate:
return self._wrap_aggregated_output(output)
else:
return self._wrap_transformed_output(output)
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def shift(self, periods=1, freq=None, axis=0):
        """
        Shift each group by periods observations

        Parameters
        ----------
        periods : integer, default 1
            number of periods to shift
        freq : frequency string
        axis : axis to shift, default 0
        """
        # non-default freq/axis is not supported by the Cython kernel
        if freq is not None or axis != 0:
            return self.apply(lambda x: x.shift(periods, freq, axis))

        return self._get_cythonized_result('group_shift_indexer',
                                           self.grouper, cython_dtype=np.int64,
                                           needs_ngroups=True,
                                           result_is_index=True,
                                           periods=periods)
    @Substitution(name='groupby')
    @Appender(_doc_template)
    def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
                   axis=0):
        """Calculate pct_change of each value to previous entry in group"""
        # non-default freq/axis fall back to per-group apply
        if freq is not None or axis != 0:
            return self.apply(lambda x: x.pct_change(periods=periods,
                                                     fill_method=fill_method,
                                                     limit=limit, freq=freq,
                                                     axis=axis))

        # fill within each group, shift, then compute the relative change;
        # the grouping columns are dropped before the ratio is taken
        filled = getattr(self, fill_method)(limit=limit).drop(
            self.grouper.names, axis=1)
        shifted = filled.shift(periods=periods, freq=freq)
        return (filled / shifted) - 1
@Substitution(name='groupby')
@Appender(_doc_template)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
self._reset_group_selection()
mask = self._cumcount_array() < n
return self._selected_obj[mask].dropna(subset=[self.keys])
@Substitution(name='groupby')
@Appender(_doc_template)
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Examples
--------
>>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
columns=['A', 'B'])
>>> df.groupby('A').tail(1)
A B
1 a 2
3 b 2
>>> df.groupby('A').head(1)
A B
0 a 1
2 b 1
"""
self._reset_group_selection()
mask = self._cumcount_array(ascending=False) < n
return self._selected_obj[mask].dropna(subset=[self.keys])
GroupBy._add_numeric_operations()
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
    """
    Dispatch to the concrete GroupBy subclass matching the type of *obj*.

    Parameters
    ----------
    obj : Series or DataFrame
        Object to be grouped.
    by : mapping, function, label, or list of labels
        Grouping key(s), forwarded to the GroupBy constructor.
    **kwds
        Additional keyword arguments forwarded to the constructor.

    Returns
    -------
    SeriesGroupBy or DataFrameGroupBy

    Raises
    ------
    TypeError
        If *obj* is neither a Series nor a DataFrame.
    """
    # Import lazily to avoid a circular import with the generic module.
    if isinstance(obj, Series):
        from pandas.core.groupby.generic import SeriesGroupBy as klass
    elif isinstance(obj, DataFrame):
        from pandas.core.groupby.generic import DataFrameGroupBy as klass
    else:  # pragma: no cover
        raise TypeError('invalid type: %s' % type(obj))
    return klass(obj, by, **kwds)
| 32.439756 | 82 | 0.555862 |
import types
from functools import wraps, partial
import datetime
import collections
import warnings
from contextlib import contextmanager
import numpy as np
from pandas._libs import groupby as libgroupby, Timestamp
from pandas.util._validators import validate_kwargs
from pandas.util._decorators import (
cache_readonly, Substitution, Appender)
from pandas import compat
from pandas.compat import zip, range, callable, set_function_name
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.common import (
is_numeric_dtype,
is_scalar,
ensure_float)
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.missing import isna, notna
from pandas.core.groupby import base
from pandas.core.base import (PandasObject, SelectionMixin, GroupByError,
DataError, SpecificationError)
from pandas.core.index import Index, MultiIndex
from pandas.core.generic import NDFrame
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
import pandas.core.common as com
import pandas.core.algorithms as algorithms
from pandas.core.config import option_context
# Boilerplate "See also" section appended (via @Appender) to the docstring
# of many GroupBy methods; %(name)s is filled in by @Substitution.
_doc_template = """
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
pandas.Panel.%(name)s
"""
_apply_docs = dict(
template="""
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a {input} as its first
argument and return a DataFrame, Series or scalar. `apply` will
then take care of combining the results back together into a single
dataframe or series. `apply` is therefore a highly flexible
grouping method.
While `apply` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
like `agg` or `transform`. Pandas offers a wide range of method that will
be much faster than using `apply` for their specific purposes, so try to
use them before reaching for `apply`.
Parameters
----------
func : callable
A callable that takes a {input} as its first argument, and
returns a dataframe, a series or a scalar. In addition the
callable may take positional and keyword arguments.
args, kwargs : tuple and dict
Optional positional and keyword arguments to pass to `func`.
Returns
-------
applied : Series or DataFrame
Notes
-----
In the current implementation `apply` calls `func` twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
group.
Examples
--------
{examples}
See also
--------
pipe : Apply function to the full GroupBy object instead of to each
group.
aggregate : Apply aggregate function to the GroupBy object.
transform : Apply function column-by-column to the GroupBy object.
Series.apply : Apply a function to a Series.
DataFrame.apply : Apply a function to each row or column of a DataFrame.
""",
dataframe_examples="""
>>> df = pd.DataFrame({'A': 'a a b'.split(),
'B': [1,2,3],
'C': [4,6, 5]})
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: below the function passed to `apply` takes a DataFrame as
its argument and returns a DataFrame. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x / x.sum())
B C
0 0.333333 0.4
1 0.666667 0.6
2 1.000000 1.0
Example 2: The function passed to `apply` takes a DataFrame as
its argument and returns a Series. `apply` combines the result for
each group together into a new DataFrame:
>>> g[['B', 'C']].apply(lambda x: x.max() - x.min())
B C
A
a 1 2
b 0 0
Example 3: The function passed to `apply` takes a DataFrame as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.C.max() - x.B.min())
A
a 5
b 2
dtype: int64
""",
series_examples="""
>>> s = pd.Series([0, 1, 2], index='a a b'.split())
>>> g = s.groupby(s.index)
From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Example 1: The function passed to `apply` takes a Series as
its argument and returns a Series. `apply` combines the result for
each group together into a new Series:
>>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)
0 0.0
1 0.5
2 4.0
dtype: float64
Example 2: The function passed to `apply` takes a Series as
its argument and returns a scalar. `apply` combines the result for
each group together into a Series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.max() - x.min())
a 1
b 0
dtype: int64
""")
_pipe_template = """\
Apply a function `func` with arguments to this %(klass)s object and return
the function's result.
%(versionadded)s
Use `.pipe` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing
>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.groupby('group')
... .pipe(f)
... .pipe(g, arg1=a)
... .pipe(h, arg2=b, arg3=c))
which is much more readable.
Parameters
----------
func : callable or tuple of (callable, string)
Function to apply to this %(klass)s object or, alternatively,
a `(callable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `callable` that expects the
%(klass)s object.
args : iterable, optional
positional arguments passed into `func`.
kwargs : dict, optional
a dictionary of keyword arguments passed into `func`.
Returns
-------
object : the return type of `func`.
Notes
-----
See more `here
<http://pandas.pydata.org/pandas-docs/stable/groupby.html#piping-function-calls>`_
Examples
--------
%(examples)s
See Also
--------
pandas.Series.pipe : Apply a function with arguments to a series
pandas.DataFrame.pipe: Apply a function with arguments to a dataframe
apply : Apply function to each group instead of to the
full %(klass)s object.
"""
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each group
Notes
-----
Each group is endowed the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
For example, f returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
Returns
-------
%(klass)s
See also
--------
aggregate, transform
Examples
--------
# Same shape
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
C D
0 -1.154701 -0.577350
1 0.577350 0.000000
2 0.577350 1.154701
3 -1.154701 -1.000000
4 0.577350 -0.577350
5 0.577350 1.000000
# Broadcastable
>>> grouped.transform(lambda x: x.max() - x.min())
C D
0 4 6.0
1 3 8.0
2 4 6.0
3 3 8.0
4 4 6.0
5 3 8.0
"""
class GroupByPlot(PandasObject):
    """Proxy that forwards plotting calls to each group via GroupBy.apply."""

    def __init__(self, groupby):
        # The GroupBy instance whose groups will be plotted.
        self._groupby = groupby

    def __call__(self, *args, **kwargs):
        # gb.plot(...) -> call .plot(...) on every group and combine.
        def plot_group(frame):
            return frame.plot(*args, **kwargs)

        # apply() inspects the function name to recognize plotting calls.
        plot_group.__name__ = 'plot'
        return self._groupby.apply(plot_group)

    def __getattr__(self, name):
        # gb.plot.<name>(...) -> call .plot.<name>(...) on every group.
        def forward(*args, **kwargs):
            def f(frame):
                return getattr(frame.plot, name)(*args, **kwargs)

            return self._groupby.apply(f)

        return forward
@contextmanager
def _group_selection_context(groupby):
    """Set / reset the _group_selection_context around the with-block body."""
    groupby._set_group_selection()
    yield groupby
    # NOTE(review): no try/finally here, so the selection is NOT reset if
    # the body raises -- this matches the behavior callers rely on.
    groupby._reset_group_selection()
class _GroupBy(PandasObject, SelectionMixin):
    """
    Shared machinery for GroupBy objects: grouper construction, column
    selection handling, attribute dispatch and the apply/aggregate/
    transform plumbing used by the Series/DataFrame subclasses.
    """
    # Columns selected when the grouping columns are excluded from the
    # result; None until _set_group_selection() computes it.
    _group_selection = None
    # Attribute names that may be dispatched to the grouped object.
    _apply_whitelist = frozenset([])

    def __init__(self, obj, keys=None, axis=0, level=None,
                 grouper=None, exclusions=None, selection=None, as_index=True,
                 sort=True, group_keys=True, squeeze=False,
                 observed=False, **kwargs):
        self._selection = selection
        if isinstance(obj, NDFrame):
            obj._consolidate_inplace()
        self.level = level
        if not as_index:
            # as_index=False is only meaningful for DataFrame on axis 0
            if not isinstance(obj, DataFrame):
                raise TypeError('as_index=False only valid with DataFrame')
            if axis != 0:
                raise ValueError('as_index=False only valid for axis=0')
        self.as_index = as_index
        self.keys = keys
        self.sort = sort
        self.group_keys = group_keys
        self.squeeze = squeeze
        self.observed = observed
        self.mutated = kwargs.pop('mutated', False)
        if grouper is None:
            # build the grouper from keys/level; may also prune obj
            from pandas.core.groupby.grouper import _get_grouper
            grouper, exclusions, obj = _get_grouper(obj, keys,
                                                    axis=axis,
                                                    level=level,
                                                    sort=sort,
                                                    observed=observed,
                                                    mutated=self.mutated)
        self.obj = obj
        self.axis = obj._get_axis_number(axis)
        self.grouper = grouper
        self.exclusions = set(exclusions) if exclusions else set()
        # we accept no other args
        validate_kwargs('group', kwargs, {})

    def __len__(self):
        return len(self.groups)

    def __unicode__(self):
        # TODO: Better unicode/repr for GroupBy object
        return object.__repr__(self)

    def _assure_grouper(self):
        """Hook: subclasses may defer grouper creation until first use."""
        pass

    @property
    def groups(self):
        """dict {group name -> group labels}."""
        self._assure_grouper()
        return self.grouper.groups

    @property
    def ngroups(self):
        # number of distinct groups
        self._assure_grouper()
        return self.grouper.ngroups

    @property
    def indices(self):
        """dict {group name -> group indices}."""
        self._assure_grouper()
        return self.grouper.indices

    def _get_indices(self, names):
        """
        Safe get multiple indices; translate datelike keys to the
        representation used in self.indices.
        """
        def get_converter(s):
            # possibly convert to the actual key types
            # in the indices, could be a Timestamp or a np.datetime64
            if isinstance(s, (Timestamp, datetime.datetime)):
                return lambda key: Timestamp(key)
            elif isinstance(s, np.datetime64):
                return lambda key: Timestamp(key).asm8
            else:
                return lambda key: key

        if len(names) == 0:
            return []
        if len(self.indices) > 0:
            index_sample = next(iter(self.indices))
        else:
            index_sample = None     # Dummy sample
        name_sample = names[0]
        if isinstance(index_sample, tuple):
            if not isinstance(name_sample, tuple):
                msg = ("must supply a tuple to get_group with multiple"
                       " grouping keys")
                raise ValueError(msg)
            if not len(name_sample) == len(index_sample):
                try:
                    # If the original grouper was a tuple
                    return [self.indices[name] for name in names]
                except KeyError:
                    # turns out it wasn't a tuple
                    msg = ("must supply a a same-length tuple to get_group"
                           " with multiple grouping keys")
                    raise ValueError(msg)
            converters = [get_converter(s) for s in index_sample]
            names = [tuple(f(n) for f, n in zip(converters, name))
                     for name in names]
        else:
            converter = get_converter(index_sample)
            names = [converter(name) for name in names]
        return [self.indices.get(name, []) for name in names]

    def _get_index(self, name):
        """Safe get index for a single group label."""
        return self._get_indices([name])[0]

    @cache_readonly
    def _selected_obj(self):
        # the object (possibly column-subset) that operations act on;
        # cached, and invalidated by the group-selection setters below
        if self._selection is None or isinstance(self.obj, Series):
            if self._group_selection is not None:
                return self.obj[self._group_selection]
            return self.obj
        else:
            return self.obj[self._selection]

    def _reset_group_selection(self):
        """Clear the cached group selection (see _set_group_selection)."""
        if self._group_selection is not None:
            self._group_selection = None
            self._reset_cache('_selected_obj')

    def _set_group_selection(self):
        """
        Exclude in-axis grouping columns from _selected_obj so reductions
        do not try to aggregate the keys themselves.
        """
        grp = self.grouper
        if not (self.as_index and
                getattr(grp, 'groupings', None) is not None and
                self.obj.ndim > 1 and
                self._group_selection is None):
            return
        ax = self.obj._info_axis
        groupers = [g.name for g in grp.groupings
                    if g.level is None and g.in_axis]
        if len(groupers):
            self._group_selection = ax.difference(Index(groupers)).tolist()
            self._reset_cache('_selected_obj')

    def _set_result_index_ordered(self, result):
        """Restore the original (pre-grouping) row order on result."""
        if not self.grouper.is_monotonic:
            index = Index(np.concatenate(
                self._get_indices(self.grouper.result_index)))
            result.set_axis(index, axis=self.axis, inplace=True)
            result = result.sort_index(axis=self.axis)
        result.set_axis(self.obj._get_axis(self.axis), axis=self.axis,
                        inplace=True)
        return result

    def _dir_additions(self):
        # names offered for tab-completion
        return self.obj._dir_additions() | self._apply_whitelist

    def __getattr__(self, attr):
        # dispatch order: internal attrs, column selection, whitelisted
        # methods of the underlying object
        if attr in self._internal_names_set:
            return object.__getattribute__(self, attr)
        if attr in self.obj:
            return self[attr]
        if hasattr(self.obj, attr):
            return self._make_wrapper(attr)
        raise AttributeError("%r object has no attribute %r" %
                             (type(self).__name__, attr))

    @Substitution(klass='GroupBy',
                  versionadded='.. versionadded:: 0.21.0',
                  examples="""\
>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})
>>> df
   A  B
0  a  1
1  b  2
2  a  3
3  b  4
To get the difference between each groups maximum and minimum value in one
pass, you can do
>>> df.groupby('A').pipe(lambda x: x.max() - x.min())
   B
A
a  2
b  2""")
    @Appender(_pipe_template)
    def pipe(self, func, *args, **kwargs):
        return com._pipe(self, func, *args, **kwargs)

    plot = property(GroupByPlot)

    def _make_wrapper(self, name):
        """Wrap attribute `name` of the selected object for group-wise use."""
        if name not in self._apply_whitelist:
            is_callable = callable(getattr(self._selected_obj, name, None))
            kind = ' callable ' if is_callable else ' '
            msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
                   "using the 'apply' method".format(kind, name,
                                                     type(self).__name__))
            raise AttributeError(msg)
        self._set_group_selection()
        f = getattr(self._selected_obj, name)
        if not isinstance(f, types.MethodType):
            # a plain attribute/property: fetch it per group
            return self.apply(lambda self: getattr(self, name))
        f = getattr(type(self._selected_obj), name)

        def wrapper(*args, **kwargs):
            # try calling with an explicit axis first; fall back to the
            # caller's kwargs, then to item-by-item aggregation
            kwargs_with_axis = kwargs.copy()
            if 'axis' not in kwargs_with_axis or \
               kwargs_with_axis['axis'] is None:
                kwargs_with_axis['axis'] = self.axis

            def curried_with_axis(x):
                return f(x, *args, **kwargs_with_axis)

            def curried(x):
                return f(x, *args, **kwargs)

            curried.__name__ = curried_with_axis.__name__ = name
            if name in base.plotting_methods:
                return self.apply(curried)
            try:
                return self.apply(curried_with_axis)
            except Exception:
                try:
                    return self.apply(curried)
                except Exception:
                    # mark this column as an error
                    try:
                        return self._aggregate_item_by_item(name,
                                                            *args, **kwargs)
                    except (AttributeError):
                        raise ValueError

        return wrapper

    def get_group(self, name, obj=None):
        """Construct NDFrame from group with provided name; raise KeyError
        if the group is empty/unknown."""
        if obj is None:
            obj = self._selected_obj
        inds = self._get_index(name)
        if not len(inds):
            raise KeyError(name)
        return obj._take(inds, axis=self.axis)

    def __iter__(self):
        """Iterate (name, subsetted object) pairs for each group."""
        return self.grouper.get_iterator(self.obj, axis=self.axis)

    @Appender(_apply_docs['template']
              .format(input="dataframe",
                      examples=_apply_docs['dataframe_examples']))
    def apply(self, func, *args, **kwargs):
        func = self._is_builtin_func(func)
        # this is needed so we don't try and wrap strings. If we could
        if args or kwargs:
            if callable(func):

                @wraps(func)
                def f(g):
                    with np.errstate(all='ignore'):
                        return func(g, *args, **kwargs)
            else:
                raise ValueError('func must be a callable if args or '
                                 'kwargs are supplied')
        else:
            f = func
        # ignore SettingWithCopy here in case the user mutates
        with option_context('mode.chained_assignment', None):
            try:
                result = self._python_apply_general(f)
            except Exception:
                # gh-20949
                # try again, with .apply acting as a filtering
                # operation, by excluding the grouping column
                # This would normally not be triggered
                # except if the udf is trying an operation that
                # fails on *some* columns, e.g. a numeric operation
                # on a string grouper column
                with _group_selection_context(self):
                    return self._python_apply_general(f)
        return result

    def _python_apply_general(self, f):
        # run f on each group and wrap per-subclass
        keys, values, mutated = self.grouper.apply(f, self._selected_obj,
                                                   self.axis)
        return self._wrap_applied_output(
            keys,
            values,
            not_indexed_same=mutated or self.mutated)

    def _iterate_slices(self):
        # yield (name, values) pairs for each column-like slice
        yield self._selection_name, self._selected_obj

    def transform(self, func, *args, **kwargs):
        # implemented by subclasses
        raise com.AbstractMethodError(self)

    def _cumcount_array(self, ascending=True):
        """
        Return the within-group position of each row (0-based); counted
        from the end when ascending=False. Result is in original row order.
        """
        ids, _, ngroups = self.grouper.group_info
        sorter = get_group_index_sorter(ids, ngroups)
        ids, count = ids[sorter], len(ids)
        if count == 0:
            return np.empty(0, dtype=np.int64)
        # run marks the first row of each group in sorted order
        run = np.r_[True, ids[:-1] != ids[1:]]
        rep = np.diff(np.r_[np.nonzero(run)[0], count])
        out = (~run).cumsum()
        if ascending:
            out -= np.repeat(out[run], rep)
        else:
            out = np.repeat(out[np.r_[run[1:], True]], rep) - out
        # undo the sort to return positions in original order
        rev = np.empty(count, dtype=np.intp)
        rev[sorter] = np.arange(count, dtype=np.intp)
        return out[rev].astype(np.int64, copy=False)

    def _try_cast(self, result, obj, numeric_only=False):
        """Try casting result back to the input's dtype; with
        numeric_only=True only numeric dtypes are cast back."""
        if obj.ndim > 1:
            dtype = obj.values.dtype
        else:
            dtype = obj.dtype
        if not is_scalar(result):
            if numeric_only and is_numeric_dtype(dtype) or not numeric_only:
                result = maybe_downcast_to_dtype(result, dtype)
        return result

    def _transform_should_cast(self, func_nm):
        # cast back unless everything is empty or the op is blacklisted
        return (self.size().fillna(0) > 0).any() and (
            func_nm not in base.cython_cast_blacklist)

    def _cython_transform(self, how, numeric_only=True, **kwargs):
        """Run a cython transform kernel slice-by-slice and wrap."""
        output = collections.OrderedDict()
        for name, obj in self._iterate_slices():
            is_numeric = is_numeric_dtype(obj.dtype)
            if numeric_only and not is_numeric:
                continue
            try:
                result, names = self.grouper.transform(obj.values, how,
                                                       **kwargs)
            except NotImplementedError:
                continue
            except AssertionError as e:
                raise GroupByError(str(e))
            if self._transform_should_cast(how):
                output[name] = self._try_cast(result, obj)
            else:
                output[name] = result
        if len(output) == 0:
            raise DataError('No numeric types to aggregate')
        return self._wrap_transformed_output(output, names)

    def _cython_agg_general(self, how, alt=None, numeric_only=True,
                            min_count=-1):
        """Run a cython aggregation kernel slice-by-slice and wrap."""
        output = {}
        for name, obj in self._iterate_slices():
            is_numeric = is_numeric_dtype(obj.dtype)
            if numeric_only and not is_numeric:
                continue
            try:
                result, names = self.grouper.aggregate(obj.values, how,
                                                       min_count=min_count)
            except AssertionError as e:
                raise GroupByError(str(e))
            output[name] = self._try_cast(result, obj)
        if len(output) == 0:
            raise DataError('No numeric types to aggregate')
        return self._wrap_aggregated_output(output, names)

    def _python_agg_general(self, func, *args, **kwargs):
        """Pure-python aggregation fallback for arbitrary callables."""
        func = self._is_builtin_func(func)
        f = lambda x: func(x, *args, **kwargs)
        # iterate through "columns" ex exclusions to populate output dict
        output = {}
        for name, obj in self._iterate_slices():
            try:
                result, counts = self.grouper.agg_series(obj, f)
                output[name] = self._try_cast(result, obj, numeric_only=True)
            except TypeError:
                continue
        if len(output) == 0:
            return self._python_apply_general(f)
        if self.grouper._filter_empty_groups:
            mask = counts.ravel() > 0
            for name, result in compat.iteritems(output):
                # since we are masking, make sure that we have a float object
                values = result
                if is_numeric_dtype(values.dtype):
                    values = ensure_float(values)
                output[name] = self._try_cast(values[mask], result)
        return self._wrap_aggregated_output(output)

    def _wrap_applied_output(self, *args, **kwargs):
        # implemented by subclasses
        raise com.AbstractMethodError(self)

    def _concat_objects(self, keys, values, not_indexed_same=False):
        """Concatenate per-group results, restoring index/keys per flags."""
        from pandas.core.reshape.concat import concat

        def reset_identity(values):
            # reset the identities of the components
            # of the values to prevent aliasing
            for v in com._not_none(*values):
                ax = v._get_axis(self.axis)
                ax._reset_identity()
            return values

        if not not_indexed_same:
            result = concat(values, axis=self.axis)
            ax = self._selected_obj._get_axis(self.axis)
            if isinstance(result, Series):
                result = result.reindex(ax)
            else:
                # this is a very unfortunate situation
                # we have a multi-index that is NOT lexsorted
                # and we have a result which is duplicated
                # we can't reindex, so we resort to this
                if isinstance(ax, MultiIndex) and not ax.is_unique:
                    indexer = algorithms.unique1d(
                        result.index.get_indexer_for(ax.values))
                    result = result.take(indexer, axis=self.axis)
                else:
                    result = result.reindex(ax, axis=self.axis)
        elif self.group_keys:
            values = reset_identity(values)
            if self.as_index:
                # possible MI return case
                group_keys = keys
                group_levels = self.grouper.levels
                group_names = self.grouper.names
                result = concat(values, axis=self.axis, keys=group_keys,
                                levels=group_levels, names=group_names,
                                sort=False)
            else:
                # GH5610, returns a MI, with the first level being a
                # range index
                keys = list(range(len(values)))
                result = concat(values, axis=self.axis, keys=keys)
        else:
            values = reset_identity(values)
            result = concat(values, axis=self.axis)
        if (isinstance(result, Series) and
                getattr(self, '_selection_name', None) is not None):
            result.name = self._selection_name
        return result

    def _apply_filter(self, indices, dropna):
        """Subset (dropna=True) or NA-mask (dropna=False) by row indices."""
        if len(indices) == 0:
            indices = np.array([], dtype='int64')
        else:
            indices = np.sort(np.concatenate(indices))
        if dropna:
            filtered = self._selected_obj.take(indices, axis=self.axis)
        else:
            mask = np.empty(len(self._selected_obj.index), dtype=bool)
            mask.fill(False)
            mask[indices.astype(int)] = True
            # mask fails to broadcast when passed to where; broadcast manually
            mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
            filtered = self._selected_obj.where(mask)  # Fill with NaNs.
        return filtered
class GroupBy(_GroupBy):
def _bool_agg(self, val_test, skipna):
def objs_to_bool(vals):
try:
vals = vals.astype(np.bool)
except ValueError:
vals = np.array([bool(x) for x in vals])
return vals.view(np.uint8)
def result_to_bool(result):
return result.astype(np.bool, copy=False)
return self._get_cythonized_result('group_any_all', self.grouper,
aggregate=True,
cython_dtype=np.uint8,
needs_values=True,
needs_mask=True,
pre_processing=objs_to_bool,
post_processing=result_to_bool,
val_test=val_test, skipna=skipna)
@Substitution(name='groupby')
@Appender(_doc_template)
def any(self, skipna=True):
return self._bool_agg('any', skipna)
@Substitution(name='groupby')
@Appender(_doc_template)
def all(self, skipna=True):
return self._bool_agg('all', skipna)
@Substitution(name='groupby')
@Appender(_doc_template)
def count(self):
raise NotImplementedError
@Substitution(name='groupby')
@Appender(_doc_template)
def mean(self, *args, **kwargs):
nv.validate_groupby_func('mean', args, kwargs, ['numeric_only'])
try:
return self._cython_agg_general('mean', **kwargs)
except GroupByError:
raise
except Exception:
with _group_selection_context(self):
f = lambda x: x.mean(axis=self.axis, **kwargs)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def median(self, **kwargs):
try:
return self._cython_agg_general('median', **kwargs)
except GroupByError:
raise
except Exception:
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis, **kwargs)
with _group_selection_context(self):
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def std(self, ddof=1, *args, **kwargs):
nv.validate_groupby_func('std', args, kwargs)
return np.sqrt(self.var(ddof=ddof, **kwargs))
@Substitution(name='groupby')
@Appender(_doc_template)
def var(self, ddof=1, *args, **kwargs):
nv.validate_groupby_func('var', args, kwargs)
if ddof == 1:
try:
return self._cython_agg_general('var', **kwargs)
except Exception:
f = lambda x: x.var(ddof=ddof, **kwargs)
with _group_selection_context(self):
return self._python_agg_general(f)
else:
f = lambda x: x.var(ddof=ddof, **kwargs)
with _group_selection_context(self):
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def sem(self, ddof=1):
return self.std(ddof=ddof) / np.sqrt(self.count())
@Substitution(name='groupby')
@Appender(_doc_template)
def size(self):
result = self.grouper.size()
if isinstance(self.obj, Series):
result.name = getattr(self.obj, 'name', None)
return result
@classmethod
def _add_numeric_operations(cls):
def groupby_function(name, alias, npfunc,
numeric_only=True, _convert=False,
min_count=-1):
_local_template = "Compute %(f)s of group values"
@Substitution(name='groupby', f=name)
@Appender(_doc_template)
@Appender(_local_template)
def f(self, **kwargs):
if 'numeric_only' not in kwargs:
kwargs['numeric_only'] = numeric_only
if 'min_count' not in kwargs:
kwargs['min_count'] = min_count
self._set_group_selection()
try:
return self._cython_agg_general(
alias, alt=npfunc, **kwargs)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(
lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result._convert(datetime=True)
return result
set_function_name(f, name, cls)
return f
def first_compat(x, axis=0):
def first(x):
x = np.asarray(x)
x = x[notna(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(first, axis=axis)
else:
return first(x)
def last_compat(x, axis=0):
def last(x):
x = np.asarray(x)
x = x[notna(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(last, axis=axis)
else:
return last(x)
cls.sum = groupby_function('sum', 'add', np.sum, min_count=0)
cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0)
cls.min = groupby_function('min', 'min', np.min, numeric_only=False)
cls.max = groupby_function('max', 'max', np.max, numeric_only=False)
cls.first = groupby_function('first', 'first', first_compat,
numeric_only=False)
cls.last = groupby_function('last', 'last', last_compat,
numeric_only=False)
@Substitution(name='groupby')
@Appender(_doc_template)
def ohlc(self):
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
@Appender(DataFrame.describe.__doc__)
def describe(self, **kwargs):
with _group_selection_context(self):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
@Substitution(name='groupby')
@Appender(_doc_template)
def resample(self, rule, *args, **kwargs):
from pandas.core.resample import get_resampler_for_grouping
return get_resampler_for_grouping(self, rule, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def rolling(self, *args, **kwargs):
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def expanding(self, *args, **kwargs):
from pandas.core.window import ExpandingGroupby
return ExpandingGroupby(self, *args, **kwargs)
def _fill(self, direction, limit=None):
if limit is None:
limit = -1
return self._get_cythonized_result('group_fillna_indexer',
self.grouper, needs_mask=True,
cython_dtype=np.int64,
result_is_index=True,
direction=direction, limit=limit)
@Substitution(name='groupby')
def pad(self, limit=None):
return self._fill('ffill', limit=limit)
ffill = pad
@Substitution(name='groupby')
def backfill(self, limit=None):
return self._fill('bfill', limit=limit)
bfill = backfill
@Substitution(name='groupby')
@Appender(_doc_template)
def nth(self, n, dropna=None):
if isinstance(n, int):
nth_values = [n]
elif isinstance(n, (set, list, tuple)):
nth_values = list(set(n))
if dropna is not None:
raise ValueError(
"dropna option with a list of nth values is not supported")
else:
raise TypeError("n needs to be an int or a list/set/tuple of ints")
nth_values = np.array(nth_values, dtype=np.intp)
self._set_group_selection()
if not dropna:
mask = np.in1d(self._cumcount_array(), nth_values) | \
np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values)
out = self._selected_obj[mask]
if not self.as_index:
return out
ids, _, _ = self.grouper.group_info
out.index = self.grouper.result_index[ids[mask]]
return out.sort_index() if self.sort else out
if dropna not in ['any', 'all']:
if isinstance(self._selected_obj, Series) and dropna is True:
warnings.warn("the dropna={dropna} keyword is deprecated,"
"use dropna='all' instead. "
"For a Series groupby, dropna must be "
"either None, 'any' or 'all'.".format(
dropna=dropna),
FutureWarning,
stacklevel=2)
dropna = 'all'
else:
# just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
else:
from pandas.core.groupby.grouper import _get_grouper
grouper, _, _ = _get_grouper(dropped, key=self.keys,
axis=self.axis, level=self.level,
sort=self.sort,
mutated=self.mutated)
grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
sizes, result = grb.size(), grb.nth(n)
mask = (sizes < max_len).values
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or \
len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
@Substitution(name='groupby')
def ngroup(self, ascending=True):
with _group_selection_context(self):
index = self._selected_obj.index
result = Series(self.grouper.group_info[0], index)
if not ascending:
result = self.ngroups - 1 - result
return result
@Substitution(name='groupby')
def cumcount(self, ascending=True):
with _group_selection_context(self):
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
@Substitution(name='groupby')
@Appender(_doc_template)
def rank(self, method='average', ascending=True, na_option='keep',
pct=False, axis=0):
if na_option not in {'keep', 'top', 'bottom'}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
return self._cython_transform('rank', numeric_only=False,
ties_method=method, ascending=ascending,
na_option=na_option, pct=pct, axis=axis)
@Substitution(name='groupby')
@Appender(_doc_template)
def cumprod(self, axis=0, *args, **kwargs):
nv.validate_groupby_func('cumprod', args, kwargs,
['numeric_only', 'skipna'])
if axis != 0:
return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))
return self._cython_transform('cumprod', **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def cumsum(self, axis=0, *args, **kwargs):
nv.validate_groupby_func('cumsum', args, kwargs,
['numeric_only', 'skipna'])
if axis != 0:
return self.apply(lambda x: x.cumsum(axis=axis, **kwargs))
return self._cython_transform('cumsum', **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def cummin(self, axis=0, **kwargs):
if axis != 0:
return self.apply(lambda x: np.minimum.accumulate(x, axis))
return self._cython_transform('cummin', numeric_only=False)
@Substitution(name='groupby')
@Appender(_doc_template)
def cummax(self, axis=0, **kwargs):
if axis != 0:
return self.apply(lambda x: np.maximum.accumulate(x, axis))
return self._cython_transform('cummax', numeric_only=False)
def _get_cythonized_result(self, how, grouper, aggregate=False,
                           cython_dtype=None, needs_values=False,
                           needs_mask=False, needs_ngroups=False,
                           result_is_index=False,
                           pre_processing=None, post_processing=None,
                           **kwargs):
    """Run a cythonized libgroupby function over each sliced column.

    Parameters
    ----------
    how : str
        Name of the callable to look up in ``libgroupby``.
    grouper : Grouper
        Grouper object holding the group labels / counts.
    aggregate : bool, default False
        If True the result has one row per group; otherwise it is a
        transform with one row per input row.
    cython_dtype : dtype, optional
        dtype of the result buffer handed to Cython; defaults to the
        dtype of the values being processed.
    needs_values, needs_mask, needs_ngroups : bool, default False
        Which extra arguments the Cython signature requires.
    result_is_index : bool, default False
        If True the Cython result is an indexer into the original
        values rather than the values themselves.
    pre_processing : callable, optional
        Applied to the values before the Cython call; requires
        ``needs_values=True``.
    post_processing : callable, optional
        Applied to the result after the Cython call.

    Returns
    -------
    Wrapped aggregated or transformed output.

    Raises
    ------
    ValueError
        On inconsistent or non-callable pre/post processing arguments.
    """
    if result_is_index and aggregate:
        raise ValueError("'result_is_index' and 'aggregate' cannot both "
                         "be True!")
    if post_processing:
        # BUG FIX: this branch previously validated ``pre_processing``,
        # so a non-callable ``post_processing`` passed validation and
        # blew up later at call time.
        if not callable(post_processing):
            raise ValueError("'post_processing' must be a callable!")
    if pre_processing:
        if not callable(pre_processing):
            raise ValueError("'pre_processing' must be a callable!")
        if not needs_values:
            raise ValueError("Cannot use 'pre_processing' without "
                             "specifying 'needs_values'!")
    labels, _, ngroups = grouper.group_info
    output = collections.OrderedDict()
    base_func = getattr(libgroupby, how)
    for name, obj in self._iterate_slices():
        if aggregate:
            result_sz = ngroups
        else:
            result_sz = len(obj.values)
        if not cython_dtype:
            cython_dtype = obj.values.dtype
        result = np.zeros(result_sz, dtype=cython_dtype)
        # Build up the Cython call signature incrementally via partials.
        func = partial(base_func, result, labels)
        if needs_values:
            vals = obj.values
            if pre_processing:
                vals = pre_processing(vals)
            func = partial(func, vals)
        if needs_mask:
            mask = isna(obj.values).view(np.uint8)
            func = partial(func, mask)
        if needs_ngroups:
            func = partial(func, ngroups)
        func(**kwargs)  # Call func to modify indexer values in place
        if result_is_index:
            result = algorithms.take_nd(obj.values, result)
        if post_processing:
            result = post_processing(result)
        output[name] = result
    if aggregate:
        return self._wrap_aggregated_output(output)
    else:
        return self._wrap_transformed_output(output)
@Substitution(name='groupby')
@Appender(_doc_template)
def shift(self, periods=1, freq=None, axis=0):
    """Shift each group by ``periods`` observations.

    The cythonized indexer path handles the common case; frequency-based
    or non-default-axis shifts fall back to a per-group ``apply``.
    """
    if freq is None and axis == 0:
        return self._get_cythonized_result('group_shift_indexer',
                                           self.grouper,
                                           cython_dtype=np.int64,
                                           needs_ngroups=True,
                                           result_is_index=True,
                                           periods=periods)
    return self.apply(lambda grp: grp.shift(periods, freq, axis))
@Substitution(name='groupby')
@Appender(_doc_template)
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
               axis=0):
    """Percent change between the current and a prior element, per group."""
    if freq is not None or axis != 0:
        # Frequency-based or non-default-axis changes are not supported
        # by the fast path below; fall back to a per-group apply.
        return self.apply(lambda x: x.pct_change(periods=periods,
                                                 fill_method=fill_method,
                                                 limit=limit, freq=freq,
                                                 axis=axis))
    # Fill missing values, drop the grouping columns (so keys are not
    # "changed"), then compare against the shifted frame.
    filled = getattr(self, fill_method)(limit=limit).drop(
        self.grouper.names, axis=1)
    shifted = filled.shift(periods=periods, freq=freq)
    return (filled / shifted) - 1
@Substitution(name='groupby')
@Appender(_doc_template)
def head(self, n=5):
    """Return the first ``n`` rows of each group."""
    self._reset_group_selection()
    mask = self._cumcount_array() < n
    # NOTE(review): the dropna(subset=[self.keys]) filter assumes
    # self.keys is a single column label — confirm against callers.
    return self._selected_obj[mask].dropna(subset=[self.keys])
@Substitution(name='groupby')
@Appender(_doc_template)
def tail(self, n=5):
    """Return the last ``n`` rows of each group."""
    self._reset_group_selection()
    # Counting in descending order selects the trailing rows per group.
    mask = self._cumcount_array(ascending=False) < n
    # NOTE(review): same single-key assumption as in head() — confirm.
    return self._selected_obj[mask].dropna(subset=[self.keys])
GroupBy._add_numeric_operations()


@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
    """Dispatch to the GroupBy subclass matching the type of ``obj``."""
    if isinstance(obj, Series):
        from pandas.core.groupby.generic import SeriesGroupBy
        return SeriesGroupBy(obj, by, **kwds)
    if isinstance(obj, DataFrame):
        from pandas.core.groupby.generic import DataFrameGroupBy
        return DataFrameGroupBy(obj, by, **kwds)
    raise TypeError('invalid type: %s' % type(obj))  # pragma: no cover
| true | true |
f71fef0c42afe5737fff68a898b0a1503169b16b | 303 | py | Python | mundo 3/des085.py | Pedroluis1/python | d949fa2646c049aa51a41a32dc62de7b14eae90f | [
"MIT"
] | null | null | null | mundo 3/des085.py | Pedroluis1/python | d949fa2646c049aa51a41a32dc62de7b14eae90f | [
"MIT"
] | null | null | null | mundo 3/des085.py | Pedroluis1/python | d949fa2646c049aa51a41a32dc62de7b14eae90f | [
"MIT"
] | null | null | null | valores = [[], []]
val = 0
for c in range(1, 8):
val = int(input(f'Digite o {c}° valor: '))
if val % 2 == 0:
valores[0].append(val)
else:
valores[1].append(val)
valores[0].sort()
valores[1].sort()
print(f'valores impares: {valores[1]}')
print(f'valores pares: {valores[0]}')
| 23.307692 | 46 | 0.567657 | valores = [[], []]
val = 0
for c in range(1, 8):
val = int(input(f'Digite o {c}° valor: '))
if val % 2 == 0:
valores[0].append(val)
else:
valores[1].append(val)
valores[0].sort()
valores[1].sort()
print(f'valores impares: {valores[1]}')
print(f'valores pares: {valores[0]}')
| true | true |
f71fefeee470de4abd81815f4b20fd0e5aa1ae84 | 6,228 | py | Python | pypy/translator/platform/posix.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | 1 | 2019-05-27T00:58:46.000Z | 2019-05-27T00:58:46.000Z | pypy/translator/platform/posix.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | null | null | null | pypy/translator/platform/posix.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | null | null | null |
""" Base class for all posixish platforms
"""
from pypy.translator.platform import Platform, log, _run_subprocess
from pypy.tool import autopath
import py, os
class BasePosix(Platform):
    """Shared build logic for POSIX-like platforms: compiling C files,
    linking, and generating/executing a GNU Makefile."""
    exe_ext = ''

    def __init__(self, cc=None):
        # Default to gcc when no compiler was requested.
        if cc is None:
            cc = 'gcc'
        self.cc = cc

    def _libs(self, libraries):
        """Turn library names into ``-l`` linker flags."""
        return ['-l%s' % (lib,) for lib in libraries]

    def _libdirs(self, library_dirs):
        """Turn library directories into ``-L`` linker flags."""
        return ['-L%s' % (ldir,) for ldir in library_dirs]

    def _includedirs(self, include_dirs):
        """Turn include directories into ``-I`` compiler flags."""
        return ['-I%s' % (idir,) for idir in include_dirs]

    def _linkfiles(self, link_files):
        return list(link_files)

    def _compile_c_file(self, cc, cfile, compile_args):
        """Compile a single .c file to a .o next to it; returns the .o path."""
        oname = cfile.new(ext='o')
        args = ['-c'] + compile_args + [str(cfile), '-o', str(oname)]
        self._execute_c_compiler(cc, args, oname)
        return oname

    def _link(self, cc, ofiles, link_args, standalone, exe_name):
        """Link object files into *exe_name* (shared lib unless standalone)."""
        args = [str(ofile) for ofile in ofiles] + link_args
        args += ['-o', str(exe_name)]
        if not standalone:
            args = self._args_for_shared(args)
        self._execute_c_compiler(cc, args, exe_name)
        return exe_name

    def _preprocess_dirs(self, include_dirs):
        # hook for maemo
        return include_dirs

    def gen_makefile(self, cfiles, eci, exe_name=None, path=None):
        """Build a GnuMakefile describing how to compile/link *cfiles*.

        Paths under the pypy checkout are rewritten relative to a
        $(PYPYDIR) makefile variable so the makefile stays relocatable.
        """
        cfiles = [py.path.local(f) for f in cfiles]
        cfiles += [py.path.local(f) for f in eci.separate_module_files]
        if path is None:
            path = cfiles[0].dirpath()
        pypypath = py.path.local(autopath.pypydir)
        if exe_name is None:
            exe_name = cfiles[0].new(ext=self.exe_ext)
        m = GnuMakefile(path)
        m.exe_name = exe_name
        m.eci = eci

        def pypyrel(fpath):
            # Express *fpath* relative to $(PYPYDIR) when it lives inside
            # the pypy tree; otherwise leave it untouched.
            rel = py.path.local(fpath).relto(pypypath)
            if rel:
                return os.path.join('$(PYPYDIR)', rel)
            else:
                return fpath

        rel_cfiles = [m.pathrel(cfile) for cfile in cfiles]
        rel_ofiles = [rel_cfile[:-2] + '.o' for rel_cfile in rel_cfiles]
        m.cfiles = rel_cfiles
        rel_includedirs = [pypyrel(incldir) for incldir in
                           self._preprocess_dirs(eci.include_dirs)]
        m.comment('automatically generated makefile')
        definitions = [
            ('PYPYDIR', autopath.pypydir),
            ('TARGET', exe_name.basename),
            ('DEFAULT_TARGET', '$(TARGET)'),
            ('SOURCES', rel_cfiles),
            ('OBJECTS', rel_ofiles),
            ('LIBS', self._libs(eci.libraries)),
            ('LIBDIRS', self._libdirs(eci.library_dirs)),
            ('INCLUDEDIRS', self._includedirs(rel_includedirs)),
            ('CFLAGS', self.cflags + list(eci.compile_extra)),
            ('LDFLAGS', self.link_flags + list(eci.link_extra)),
            ('CC', self.cc)
        ]
        for args in definitions:
            m.definition(*args)
        rules = [
            ('all', '$(DEFAULT_TARGET)', []),
            ('$(TARGET)', '$(OBJECTS)', '$(CC) $(LDFLAGS) -o $@ $(OBJECTS) $(LIBDIRS) $(LIBS)'),
            ('%.o', '%.c', '$(CC) $(CFLAGS) -o $@ -c $< $(INCLUDEDIRS)'),
        ]
        for rule in rules:
            m.rule(*rule)
        return m

    def execute_makefile(self, path_to_makefile):
        """Run ``make`` in the directory of the given makefile (or path)."""
        if isinstance(path_to_makefile, GnuMakefile):
            path = path_to_makefile.makefile_dir
        else:
            path = path_to_makefile
        log.execute('make in %s' % (path,))
        returncode, stdout, stderr = _run_subprocess('make', ['-C', str(path)])
        self._handle_error(returncode, stdout, stderr, path.join('make'))
class Definition(object):
    """A ``NAME = value`` assignment line in a generated Makefile."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def write(self, f):
        """Write this definition to *f*.

        NOTE: uses Python 2 ``print >> f`` syntax; this module is not
        Python 3 compatible as written.
        """
        def write_list(prefix, lst):
            # One item per line, joined with backslash continuations;
            # continuation lines are padded to align under the first item.
            for i, fn in enumerate(lst):
                print >> f, prefix, fn,
                if i < len(lst)-1:
                    print >> f, '\\'
                else:
                    print >> f
                prefix = ' ' * len(prefix)
        name, value = self.name, self.value
        if isinstance(value, str):
            f.write('%s = %s\n' % (name, value))
        else:
            write_list('%s =' % (name,), value)
            if value:
                f.write('\n')
class Rule(object):
    """A Makefile rule: a target, its prerequisites and its recipe."""

    def __init__(self, target, deps, body):
        self.target = target
        self.deps = deps
        self.body = body

    def write(self, f):
        """Emit ``target: deps`` followed by tab-indented recipe lines."""
        deps = self.deps if isinstance(self.deps, str) else ' '.join(self.deps)
        f.write('%s: %s\n' % (self.target, deps))
        recipe = self.body
        if isinstance(recipe, str):
            f.write('\t%s\n' % recipe)
        elif recipe:
            f.write('\t%s\n' % '\n\t'.join(recipe))
        f.write('\n')
class Comment(object):
    """A ``# ...`` comment line in a generated Makefile."""

    def __init__(self, body):
        self.body = body

    def write(self, f):
        """Write the comment text prefixed with ``# ``."""
        text = self.body
        f.write('# %s\n' % (text,))
class GnuMakefile(object):
    """In-memory Makefile: an ordered list of Definition / Rule / Comment
    objects plus a directory to write the file into."""

    def __init__(self, path=None):
        self.defs = {}   # definition name -> index into self.lines
        self.lines = []  # ordered Definition/Rule/Comment objects
        self.makefile_dir = py.path.local(path)

    def pathrel(self, fpath):
        """Render *fpath* relative to the makefile directory if possible."""
        if fpath.dirpath() == self.makefile_dir:
            return fpath.basename
        elif fpath.dirpath().dirpath() == self.makefile_dir.dirpath():
            # Sibling directory: express it as ../<relative path>.
            return '../' + fpath.relto(self.makefile_dir.dirpath())
        else:
            return str(fpath)

    def definition(self, name, value):
        """Add a variable definition; re-defining replaces it in place."""
        defs = self.defs
        defn = Definition(name, value)
        if name in defs:
            self.lines[defs[name]] = defn
        else:
            defs[name] = len(self.lines)
            self.lines.append(defn)

    def rule(self, target, deps, body):
        self.lines.append(Rule(target, deps, body))

    def comment(self, body):
        self.lines.append(Comment(body))

    def write(self, out=None):
        """Write all lines to *out*, or to <makefile_dir>/Makefile."""
        if out is None:
            f = self.makefile_dir.join('Makefile').open('w')
        else:
            f = out
        for line in self.lines:
            line.write(f)
        f.flush()
        if out is None:
            # Only close files we opened ourselves.
            f.close()
| 30.985075 | 96 | 0.538536 |
from pypy.translator.platform import Platform, log, _run_subprocess
from pypy.tool import autopath
import py, os
class BasePosix(Platform):
exe_ext = ''
def __init__(self, cc=None):
if cc is None:
cc = 'gcc'
self.cc = cc
def _libs(self, libraries):
return ['-l%s' % (lib,) for lib in libraries]
def _libdirs(self, library_dirs):
return ['-L%s' % (ldir,) for ldir in library_dirs]
def _includedirs(self, include_dirs):
return ['-I%s' % (idir,) for idir in include_dirs]
def _linkfiles(self, link_files):
return list(link_files)
def _compile_c_file(self, cc, cfile, compile_args):
oname = cfile.new(ext='o')
args = ['-c'] + compile_args + [str(cfile), '-o', str(oname)]
self._execute_c_compiler(cc, args, oname)
return oname
def _link(self, cc, ofiles, link_args, standalone, exe_name):
args = [str(ofile) for ofile in ofiles] + link_args
args += ['-o', str(exe_name)]
if not standalone:
args = self._args_for_shared(args)
self._execute_c_compiler(cc, args, exe_name)
return exe_name
def _preprocess_dirs(self, include_dirs):
return include_dirs
def gen_makefile(self, cfiles, eci, exe_name=None, path=None):
cfiles = [py.path.local(f) for f in cfiles]
cfiles += [py.path.local(f) for f in eci.separate_module_files]
if path is None:
path = cfiles[0].dirpath()
pypypath = py.path.local(autopath.pypydir)
if exe_name is None:
exe_name = cfiles[0].new(ext=self.exe_ext)
m = GnuMakefile(path)
m.exe_name = exe_name
m.eci = eci
def pypyrel(fpath):
rel = py.path.local(fpath).relto(pypypath)
if rel:
return os.path.join('$(PYPYDIR)', rel)
else:
return fpath
rel_cfiles = [m.pathrel(cfile) for cfile in cfiles]
rel_ofiles = [rel_cfile[:-2]+'.o' for rel_cfile in rel_cfiles]
m.cfiles = rel_cfiles
rel_includedirs = [pypyrel(incldir) for incldir in
self._preprocess_dirs(eci.include_dirs)]
m.comment('automatically generated makefile')
definitions = [
('PYPYDIR', autopath.pypydir),
('TARGET', exe_name.basename),
('DEFAULT_TARGET', '$(TARGET)'),
('SOURCES', rel_cfiles),
('OBJECTS', rel_ofiles),
('LIBS', self._libs(eci.libraries)),
('LIBDIRS', self._libdirs(eci.library_dirs)),
('INCLUDEDIRS', self._includedirs(rel_includedirs)),
('CFLAGS', self.cflags + list(eci.compile_extra)),
('LDFLAGS', self.link_flags + list(eci.link_extra)),
('CC', self.cc)
]
for args in definitions:
m.definition(*args)
rules = [
('all', '$(DEFAULT_TARGET)', []),
('$(TARGET)', '$(OBJECTS)', '$(CC) $(LDFLAGS) -o $@ $(OBJECTS) $(LIBDIRS) $(LIBS)'),
('%.o', '%.c', '$(CC) $(CFLAGS) -o $@ -c $< $(INCLUDEDIRS)'),
]
for rule in rules:
m.rule(*rule)
return m
def execute_makefile(self, path_to_makefile):
if isinstance(path_to_makefile, GnuMakefile):
path = path_to_makefile.makefile_dir
else:
path = path_to_makefile
log.execute('make in %s' % (path,))
returncode, stdout, stderr = _run_subprocess('make', ['-C', str(path)])
self._handle_error(returncode, stdout, stderr, path.join('make'))
class Definition(object):
def __init__(self, name, value):
self.name = name
self.value = value
def write(self, f):
def write_list(prefix, lst):
for i, fn in enumerate(lst):
print >> f, prefix, fn,
if i < len(lst)-1:
print >> f, '\\'
else:
print >> f
prefix = ' ' * len(prefix)
name, value = self.name, self.value
if isinstance(value, str):
f.write('%s = %s\n' % (name, value))
else:
write_list('%s =' % (name,), value)
if value:
f.write('\n')
class Rule(object):
def __init__(self, target, deps, body):
self.target = target
self.deps = deps
self.body = body
def write(self, f):
target, deps, body = self.target, self.deps, self.body
if isinstance(deps, str):
dep_s = deps
else:
dep_s = ' '.join(deps)
f.write('%s: %s\n' % (target, dep_s))
if isinstance(body, str):
f.write('\t%s\n' % body)
elif body:
f.write('\t%s\n' % '\n\t'.join(body))
f.write('\n')
class Comment(object):
def __init__(self, body):
self.body = body
def write(self, f):
f.write('# %s\n' % (self.body,))
class GnuMakefile(object):
def __init__(self, path=None):
self.defs = {}
self.lines = []
self.makefile_dir = py.path.local(path)
def pathrel(self, fpath):
if fpath.dirpath() == self.makefile_dir:
return fpath.basename
elif fpath.dirpath().dirpath() == self.makefile_dir.dirpath():
return '../' + fpath.relto(self.makefile_dir.dirpath())
else:
return str(fpath)
def definition(self, name, value):
defs = self.defs
defn = Definition(name, value)
if name in defs:
self.lines[defs[name]] = defn
else:
defs[name] = len(self.lines)
self.lines.append(defn)
def rule(self, target, deps, body):
self.lines.append(Rule(target, deps, body))
def comment(self, body):
self.lines.append(Comment(body))
def write(self, out=None):
if out is None:
f = self.makefile_dir.join('Makefile').open('w')
else:
f = out
for line in self.lines:
line.write(f)
f.flush()
if out is None:
f.close()
| true | true |
f71ff11e0a47dc49e9286ac8954a588df8c45b2e | 64 | py | Python | test/core/metaflow_custom/toplevel/__init__.py | saikonen/metaflow | 48e37bea3ea4e83ddab8227869bbe56b52d9957d | [
"Apache-2.0"
] | 7 | 2020-07-24T17:07:58.000Z | 2021-05-19T21:47:12.000Z | test/core/metaflow_custom/toplevel/__init__.py | saikonen/metaflow | 48e37bea3ea4e83ddab8227869bbe56b52d9957d | [
"Apache-2.0"
] | 55 | 2020-07-20T16:56:27.000Z | 2022-03-28T12:51:15.000Z | test/core/metaflow_custom/toplevel/__init__.py | saikonen/metaflow | 48e37bea3ea4e83ddab8227869bbe56b52d9957d | [
"Apache-2.0"
] | 6 | 2020-10-15T18:38:35.000Z | 2021-06-20T03:05:43.000Z | __mf_customization__ = 'test'
tl_value = 42
__version__ = None | 12.8 | 29 | 0.765625 | __mf_customization__ = 'test'
tl_value = 42
__version__ = None | true | true |
f71ff1359f7c6ddeef12a05d12e6963d96fa007e | 2,569 | py | Python | jspp_imageutils/annotations/convert.py | jspaezp/jspp_imageutils | 6376e274a1b0675622a7979c181b9effc125aa09 | [
"Apache-2.0"
] | null | null | null | jspp_imageutils/annotations/convert.py | jspaezp/jspp_imageutils | 6376e274a1b0675622a7979c181b9effc125aa09 | [
"Apache-2.0"
] | null | null | null | jspp_imageutils/annotations/convert.py | jspaezp/jspp_imageutils | 6376e274a1b0675622a7979c181b9effc125aa09 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# modified from:
# https://gist.github.com/rotemtam/88d9a4efae243fc77ed4a0f9917c8f6c
import os
import glob
import click
import pandas as pd
import xml.etree.ElementTree as ET
def xml_to_csv(path: str) -> pd.DataFrame:
    """Collect PASCAL-VOC bounding boxes from the XML files matching *path*.

    *path* is a glob pattern.  Returns one row per annotated object in the
    column order expected by keras-retinanet CSV datasets
    (https://github.com/fizyr/keras-retinanet#csv-datasets):
    filename, xmin, ymin, xmax, ymax, class.
    """
    columns = ['filename',
               # 'width',
               # 'height',
               'xmin',
               'ymin',
               'xmax',
               'ymax',
               'class']
    rows = []
    for xml_file in glob.glob(path):
        root = ET.parse(xml_file).getroot()
        for obj in root.findall('object'):
            box = obj.find('bndbox')
            rows.append((root.find('filename').text,
                         int(box.find('xmin').text),
                         int(box.find('ymin').text),
                         int(box.find('xmax').text),
                         int(box.find('ymax').text),
                         obj.find('name').text))
    return pd.DataFrame(rows, columns=columns)
def xml_to_csv_file(infile: str, outfile: str):
    """Convert the XML annotations matched by *infile* and save as CSV."""
    frame = xml_to_csv(infile)
    print(frame)
    frame.to_csv(outfile, index=None)
# Entry point for the click command group; subcommands are registered
# below via @cli.command().
@click.group(help='Converts a pascal xml to csv')
def cli():
    pass
@cli.command()
@click.option('--dir', type=str,
              help='Name of source directory,' +
              ' will convert all xml files in it')
@click.option('--out_dir', type=str, help='Name of the destination directory')
def directory(dir, out_dir):
    """Convert every .xml file in *dir*, writing one .csv per file to *out_dir*."""
    for xml_file in os.listdir(dir):
        if not xml_file.endswith("xml"):
            continue
        stem = os.path.splitext(os.path.basename(xml_file))[0]
        dest = os.path.join(out_dir, stem + ".csv")
        xml_to_csv_file(os.path.join(dir, xml_file), dest)
# Convert a single XML annotation file to CSV.
@cli.command()
@click.option('--file', type=str, help='File to be converted to csv')
@click.option('--out', type=str, help='Name of the destination file')
def xml(file, out):
    xml_to_csv_file(file, out)


if __name__ == '__main__':
    cli()
| 29.528736 | 79 | 0.570261 |
import os
import glob
import click
import pandas as pd
import xml.etree.ElementTree as ET
def xml_to_csv(path: str) -> pd.DataFrame:
xml_list = []
for xml_file in glob.glob(path):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
bbx = member.find('bndbox')
xmin = int(bbx.find('xmin').text)
ymin = int(bbx.find('ymin').text)
xmax = int(bbx.find('xmax').text)
ymax = int(bbx.find('ymax').text)
label = member.find('name').text
value = (root.find('filename').text,
xmin, ymin,
xmax, ymax,
label)
xml_list.append(value)
column_name = ['filename',
'xmin',
'ymin',
'xmax',
'ymax',
'class']
xml_df = pd.DataFrame(xml_list, columns=column_name)
return xml_df
def xml_to_csv_file(infile: str, outfile: str):
xml_df = xml_to_csv(infile)
print(xml_df)
xml_df.to_csv(outfile, index=None)
@click.group(help='Converts a pascal xml to csv')
def cli():
pass
@cli.command()
@click.option('--dir', type=str,
help='Name of source directory,' +
' will convert all xml files in it')
@click.option('--out_dir', type=str, help='Name of the destination directory')
def directory(dir, out_dir):
files_convert = [x for x in os.listdir(dir) if x.endswith("xml")]
for xml_file in files_convert:
base = os.path.basename(xml_file)
filename = os.path.splitext(base)[0]
out_filename = filename + ".csv"
out_path = os.path.join(out_dir, out_filename)
xml_to_csv_file(os.path.join(dir, xml_file), out_path)
@cli.command()
@click.option('--file', type=str, help='File to be converted to csv')
@click.option('--out', type=str, help='Name of the destination file')
def xml(file, out):
xml_to_csv_file(file, out)
if __name__ == '__main__':
cli()
| true | true |
f71ff15ecd2f2844f2c2b918043ff8217bac2c9b | 3,796 | py | Python | Week6/AdvML_Week6_ex2.py | mikkokotola/AdvancedMachineLearning | 574e82d4104ac04f1cb9889beb5be7d122bd0d01 | [
"MIT"
] | 1 | 2020-03-18T08:51:44.000Z | 2020-03-18T08:51:44.000Z | Week6/AdvML_Week6_ex2.py | mikkokotola/AdvancedMachineLearning | 574e82d4104ac04f1cb9889beb5be7d122bd0d01 | [
"MIT"
] | null | null | null | Week6/AdvML_Week6_ex2.py | mikkokotola/AdvancedMachineLearning | 574e82d4104ac04f1cb9889beb5be7d122bd0d01 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[8]:
## Advanced Course in Machine Learning
## Week 6
## Exercise 2 / Random forest
import numpy as np
import scipy
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from numpy import linalg as LA
from sklearn import decomposition
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import pairwise_distances
from sklearn.manifold import TSNE
import math
import sys
import mnist
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
sns.set_style("darkgrid")
# In[4]:
# In[4]:
# Load MNIST; mnist.load is a project helper — presumably returns
# flattened image arrays plus label vectors (TODO confirm shapes).
x_train, t_train, x_test, t_test = mnist.load()
# In[48]:
# Keep only the first 50k training samples to speed up the experiments.
x_train = x_train[0:50000,:]
t_train = t_train[0:50000]
# In[49]:
print(x_train.shape)
print(t_train.shape)
print(x_test.shape)
print(t_test.shape)
# In[69]:
# Slice of the test set used for evaluation throughout this notebook.
startTestIx = 0
endTestIx = 100
# clf.classes_
# clf.feature_importances_
# print(clf.max_features_)
# print(clf.n_classes_)
# print(clf.n_features_)
# print(clf.n_outputs_)
# #clf.tree_
# In[70]:
# Randomly select the samples and features for the tree
def sample(n, k, x_train, t_train):
    """Draw a bootstrap sample of ``n`` rows and ``k`` feature columns.

    Both rows and features are sampled with replacement.  Returns the
    sampled feature matrix, the matching labels, and the drawn row and
    feature indices.
    """
    row_idx = np.random.randint(x_train.shape[0], size=n)
    feat_idx = np.random.randint(x_train.shape[1], size=k)
    sampled = x_train[row_idx, :][:, feat_idx]
    labels = t_train[row_idx]
    return sampled, labels, row_idx, feat_idx
#print("Rows: ", idx, ", features ", fidx)
#print(x.shape)
#print(y.shape)
# In[71]:
def trainTree(x_train, t_train):
    """Fit a deterministic (random_state=0) decision tree on the sample."""
    return DecisionTreeClassifier(random_state=0).fit(x_train, t_train)
# In[72]:
def ensureAllClasses(newPred, clf):
    """Pad a probability matrix out to all 10 digit classes.

    Trees trained on a subsample may never have seen some digits; for
    each missing class a zero column is inserted at its position so the
    result always has 10 columns.
    """
    seen = set(clf.classes_)
    for digit in range(10):
        if digit not in seen:
            newPred = np.insert(newPred, digit, 0, axis=1)
    return newPred
# In[75]:
# Main loop
def main(M, n, k):
    """Train an M-tree random forest (n samples / k features per tree)
    and return its accuracy on the global test slice.

    Relies on the module-level x_train/t_train/x_test/t_test arrays and
    the startTestIx/endTestIx evaluation window.
    """
    votes = np.zeros(shape=(endTestIx - startTestIx, 10), dtype='float32')
    for _ in range(M):
        x, y, idx, fidx = sample(n, k, x_train, t_train)
        tree = trainTree(x, y)
        probs = ensureAllClasses(
            tree.predict_proba(x_test[startTestIx:endTestIx, fidx]), tree)
        votes = np.add(votes, probs)
    predicted = np.argmax(votes, axis=1)
    hits = predicted == t_test[startTestIx:endTestIx]
    return sum(hits) / len(hits)
# In[85]:
# Experiment 1: vary the number of trees M (fixed n, k) and plot accuracy.
Mmax = 100
n = 1000
k = 20
accs = list()
for m in range(1, Mmax):
    accs.append(main(m, n, k))
plt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k')
sns.lineplot(range(1,Mmax), accs)
plt.xlabel('Number of trees (M)')
plt.ylabel('Accuracy of predictions (%)')
plt.title('Number of trees vs. accuracy, n = {0}, k = {1}'.format(n, k))
plt.show()
# In[80]:
# Experiment 2: vary the number of features per tree k (fixed M, n).
M = 100
n = 1000
kmax = 200
accs = list()
for k in range(1, kmax, 10):
    accs.append(main(M, n, k))
plt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k')
sns.lineplot(range(1,kmax,10), accs)
plt.xlabel('Number of features per tree (k)')
plt.ylabel('Accuracy of predictions (%)')
plt.title('Number of features per tree vs. accuracy, M = {0}, n = {1}'.format(M, n))
plt.show()
# In[81]:
# Experiment 3: vary the number of samples per tree n (fixed M, k).
M = 100
nmax = 5000
k = 50
accs = list()
for n in range(1, nmax, 100):
    accs.append(main(M, n, k))
plt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k')
sns.lineplot(range(1, nmax, 100), accs)
plt.xlabel('Number of samples per tree (n)')
plt.ylabel('Accuracy of predictions (%)')
plt.title('Number of samples per tree vs. accuracy, M = {0}, k = {1}'.format(M, k))
plt.show()
# In[84]:
# Repeat a fixed configuration 50 times and report the mean accuracy.
M = 100
n = 1000
k = 50
repeats = 50
accs = list()
for i in range(50):
    accs.append(main(M, n, k))
avAcc = sum(accs)/len(accs)
print(avAcc)
| 19.171717 | 84 | 0.663593 |
sns
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from numpy import linalg as LA
from sklearn import decomposition
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import pairwise_distances
from sklearn.manifold import TSNE
import math
import sys
import mnist
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
sns.set_style("darkgrid")
x_train, t_train, x_test, t_test = mnist.load()
x_train = x_train[0:50000,:]
t_train = t_train[0:50000]
print(x_train.shape)
print(t_train.shape)
print(x_test.shape)
print(t_test.shape)
startTestIx = 0
endTestIx = 100
sample(n, k, x_train, t_train):
idx = np.random.randint(x_train.shape[0], size=n)
fidx = np.random.randint(x_train.shape[1], size=k)
x = x_train[idx, :]
x = x[:, fidx]
y = t_train[idx]
return x, y, idx, fidx
def trainTree(x_train, t_train):
clf = DecisionTreeClassifier(random_state=0)
clf = clf.fit(x_train, t_train)
return clf
def ensureAllClasses(newPred, clf):
for i in range(10):
if i not in clf.classes_:
newPred = np.insert(newPred, i, 0, axis=1)
return newPred
def main(M, n, k):
pred = np.zeros(shape = (endTestIx - startTestIx, 10), dtype = 'float32')
for m in range(M):
x, y, idx, fidx = sample(n, k, x_train, t_train)
clf = trainTree(x, y)
newPred = clf.predict_proba(x_test[startTestIx:endTestIx,fidx])
newPred = ensureAllClasses(newPred, clf)
pred = np.add(pred, newPred)
pred_classes = np.argmax(pred, axis=1)
correct = pred_classes == t_test[startTestIx:endTestIx]
acc = sum(correct)/len(correct)
return acc
Mmax = 100
n = 1000
k = 20
accs = list()
for m in range(1, Mmax):
accs.append(main(m, n, k))
plt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k')
sns.lineplot(range(1,Mmax), accs)
plt.xlabel('Number of trees (M)')
plt.ylabel('Accuracy of predictions (%)')
plt.title('Number of trees vs. accuracy, n = {0}, k = {1}'.format(n, k))
plt.show()
M = 100
n = 1000
kmax = 200
accs = list()
for k in range(1, kmax, 10):
accs.append(main(M, n, k))
plt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k')
sns.lineplot(range(1,kmax,10), accs)
plt.xlabel('Number of features per tree (k)')
plt.ylabel('Accuracy of predictions (%)')
plt.title('Number of features per tree vs. accuracy, M = {0}, n = {1}'.format(M, n))
plt.show()
M = 100
nmax = 5000
k = 50
accs = list()
for n in range(1, nmax, 100):
accs.append(main(M, n, k))
plt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k')
sns.lineplot(range(1, nmax, 100), accs)
plt.xlabel('Number of samples per tree (n)')
plt.ylabel('Accuracy of predictions (%)')
plt.title('Number of samples per tree vs. accuracy, M = {0}, k = {1}'.format(M, k))
plt.show()
M = 100
n = 1000
k = 50
repeats = 50
accs = list()
for i in range(50):
accs.append(main(M, n, k))
avAcc = sum(accs)/len(accs)
print(avAcc)
| true | true |
f71ff26a2521e900d339ce88a0f2dc11b89982ac | 8,979 | py | Python | models/layers/mesh_pool.py | yamaguchi1024/MeshCNN | 197530eab2aa4c2419511c1854dcbc662377f340 | [
"MIT"
] | null | null | null | models/layers/mesh_pool.py | yamaguchi1024/MeshCNN | 197530eab2aa4c2419511c1854dcbc662377f340 | [
"MIT"
] | null | null | null | models/layers/mesh_pool.py | yamaguchi1024/MeshCNN | 197530eab2aa4c2419511c1854dcbc662377f340 | [
"MIT"
] | 1 | 2020-10-10T23:31:50.000Z | 2020-10-10T23:31:50.000Z | import torch
import torch.nn as nn
from threading import Thread
from models.layers.mesh_union import MeshUnion
import numpy as np
from heapq import heappop, heapify
class MeshPool(nn.Module):
def __init__(self, target, multi_thread=False):
super(MeshPool, self).__init__()
self.__out_target = target
self.__multi_thread = multi_thread
self.__fe = None
self.__updated_fe = None
self.__meshes = None
self.__merge_edges = [-1, -1]
def __call__(self, fe, meshes):
return self.forward(fe, meshes)
def forward(self, fe, meshes):
self.__updated_fe = [[] for _ in range(len(meshes))]
pool_threads = []
self.__fe = fe
self.__meshes = meshes
# iterate over batch
for mesh_index in range(len(meshes)):
if self.__multi_thread:
pool_threads.append(Thread(target=self.__pool_main, args=(mesh_index,)))
pool_threads[-1].start()
else:
self.__pool_main(mesh_index)
if self.__multi_thread:
for mesh_index in range(len(meshes)):
pool_threads[mesh_index].join()
out_features = torch.cat(self.__updated_fe).view(len(meshes), -1, self.__out_target)
return out_features
def __pool_main(self, mesh_index):
mesh = self.__meshes[mesh_index]
queue = self.__build_queue(self.__fe[mesh_index, :, :mesh.edges_count], mesh.edges_count)
# recycle = []
# last_queue_len = len(queue)
last_count = mesh.edges_count + 1
mask = np.ones(mesh.edges_count, dtype=np.uint8)
edge_groups = MeshUnion(mesh.edges_count, self.__fe.device)
while mesh.edges_count > self.__out_target:
value, edge_id = heappop(queue)
edge_id = int(edge_id)
if mask[edge_id]:
self.__pool_edge(mesh, edge_id, mask, edge_groups)
mesh.clean(mask, edge_groups)
fe = edge_groups.rebuild_features(self.__fe[mesh_index], mask, self.__out_target)
self.__updated_fe[mesh_index] = fe
def __pool_edge(self, mesh, edge_id, mask, edge_groups):
if self.has_boundaries(mesh, edge_id):
return False
elif self.__clean_side(mesh, edge_id, mask, edge_groups, 0)\
and self.__clean_side(mesh, edge_id, mask, edge_groups, 2) \
and self.__is_one_ring_valid(mesh, edge_id):
self.__merge_edges[0] = self.__pool_side(mesh, edge_id, mask, edge_groups, 0)
self.__merge_edges[1] = self.__pool_side(mesh, edge_id, mask, edge_groups, 2)
mesh.merge_vertices(edge_id)
mask[edge_id] = False
MeshPool.__remove_group(mesh, edge_groups, edge_id)
mesh.edges_count -= 1
return True
else:
return False
def __clean_side(self, mesh, edge_id, mask, edge_groups, side):
if mesh.edges_count <= self.__out_target:
return False
invalid_edges = MeshPool.__get_invalids(mesh, edge_id, edge_groups, side)
while len(invalid_edges) != 0 and mesh.edges_count > self.__out_target:
self.__remove_triplete(mesh, mask, edge_groups, invalid_edges)
if mesh.edges_count <= self.__out_target:
return False
if self.has_boundaries(mesh, edge_id):
return False
invalid_edges = self.__get_invalids(mesh, edge_id, edge_groups, side)
return True
@staticmethod
def has_boundaries(mesh, edge_id):
for edge in mesh.gemm_edges[edge_id]:
if edge == -1 or -1 in mesh.gemm_edges[edge]:
return True
return False
@staticmethod
def __is_one_ring_valid(mesh, edge_id):
v_a = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 0]]].reshape(-1))
v_b = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 1]]].reshape(-1))
shared = v_a & v_b - set(mesh.edges[edge_id])
return len(shared) == 2
def __pool_side(self, mesh, edge_id, mask, edge_groups, side):
info = MeshPool.__get_face_info(mesh, edge_id, side)
key_a, key_b, side_a, side_b, _, other_side_b, _, other_keys_b = info
self.__redirect_edges(mesh, key_a, side_a - side_a % 2, other_keys_b[0], mesh.sides[key_b, other_side_b])
self.__redirect_edges(mesh, key_a, side_a - side_a % 2 + 1, other_keys_b[1], mesh.sides[key_b, other_side_b + 1])
MeshPool.__union_groups(mesh, edge_groups, key_b, key_a)
MeshPool.__union_groups(mesh, edge_groups, edge_id, key_a)
mask[key_b] = False
MeshPool.__remove_group(mesh, edge_groups, key_b)
mesh.remove_edge(key_b)
mesh.edges_count -= 1
return key_a
@staticmethod
def __get_invalids(mesh, edge_id, edge_groups, side):
info = MeshPool.__get_face_info(mesh, edge_id, side)
key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b = info
shared_items = MeshPool.__get_shared_items(other_keys_a, other_keys_b)
if len(shared_items) == 0:
return []
else:
assert (len(shared_items) == 2)
middle_edge = other_keys_a[shared_items[0]]
update_key_a = other_keys_a[1 - shared_items[0]]
update_key_b = other_keys_b[1 - shared_items[1]]
update_side_a = mesh.sides[key_a, other_side_a + 1 - shared_items[0]]
update_side_b = mesh.sides[key_b, other_side_b + 1 - shared_items[1]]
MeshPool.__redirect_edges(mesh, edge_id, side, update_key_a, update_side_a)
MeshPool.__redirect_edges(mesh, edge_id, side + 1, update_key_b, update_side_b)
MeshPool.__redirect_edges(mesh, update_key_a, MeshPool.__get_other_side(update_side_a), update_key_b, MeshPool.__get_other_side(update_side_b))
MeshPool.__union_groups(mesh, edge_groups, key_a, edge_id)
MeshPool.__union_groups(mesh, edge_groups, key_b, edge_id)
MeshPool.__union_groups(mesh, edge_groups, key_a, update_key_a)
MeshPool.__union_groups(mesh, edge_groups, middle_edge, update_key_a)
MeshPool.__union_groups(mesh, edge_groups, key_b, update_key_b)
MeshPool.__union_groups(mesh, edge_groups, middle_edge, update_key_b)
return [key_a, key_b, middle_edge]
@staticmethod
def __redirect_edges(mesh, edge_a_key, side_a, edge_b_key, side_b):
mesh.gemm_edges[edge_a_key, side_a] = edge_b_key
mesh.gemm_edges[edge_b_key, side_b] = edge_a_key
mesh.sides[edge_a_key, side_a] = side_b
mesh.sides[edge_b_key, side_b] = side_a
@staticmethod
def __get_shared_items(list_a, list_b):
shared_items = []
for i in range(len(list_a)):
for j in range(len(list_b)):
if list_a[i] == list_b[j]:
shared_items.extend([i, j])
return shared_items
@staticmethod
def __get_other_side(side):
return side + 1 - 2 * (side % 2)
@staticmethod
def __get_face_info(mesh, edge_id, side):
key_a = mesh.gemm_edges[edge_id, side]
key_b = mesh.gemm_edges[edge_id, side + 1]
side_a = mesh.sides[edge_id, side]
side_b = mesh.sides[edge_id, side + 1]
other_side_a = (side_a - (side_a % 2) + 2) % 4
other_side_b = (side_b - (side_b % 2) + 2) % 4
other_keys_a = [mesh.gemm_edges[key_a, other_side_a], mesh.gemm_edges[key_a, other_side_a + 1]]
other_keys_b = [mesh.gemm_edges[key_b, other_side_b], mesh.gemm_edges[key_b, other_side_b + 1]]
return key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b
    @staticmethod
    def __remove_triplete(mesh, mask, edge_groups, invalid_edges):
        """Remove a degenerate triangle fan of three edges that all share a
        single vertex, masking the edges out and deleting the vertex."""
        # Intersect the endpoint sets of all three edges; exactly one common
        # vertex must remain (asserted below).
        vertex = set(mesh.edges[invalid_edges[0]])
        for edge_key in invalid_edges:
            vertex &= set(mesh.edges[edge_key])
            mask[edge_key] = False
            MeshPool.__remove_group(mesh, edge_groups, edge_key)
        # All three edges are gone in one step.
        mesh.edges_count -= 3
        vertex = list(vertex)
        assert(len(vertex) == 1)
        mesh.remove_vertex(vertex[0])
    def __build_queue(self, features, edges_count):
        """Build a min-heap of ``[squared_norm, edge_id]`` pairs so edges
        with the smallest feature norm are collapsed first."""
        # delete edges with smallest norm: rank each edge by the squared
        # L2 norm of its feature column.
        squared_magnitude = torch.sum(features * features, 0)
        if squared_magnitude.shape[-1] != 1:
            squared_magnitude = squared_magnitude.unsqueeze(-1)
        # Edge ids are stored as floats so they can be concatenated with the
        # magnitudes into one tensor; heap ties break on the id.
        edge_ids = torch.arange(edges_count, device=squared_magnitude.device, dtype=torch.float32).unsqueeze(-1)
        heap = torch.cat((squared_magnitude, edge_ids), dim=-1).tolist()
        heapify(heap)
        return heap
    @staticmethod
    def __union_groups(mesh, edge_groups, source, target):
        """Merge edge ``source`` into ``target`` in both the union-find
        structure and the mesh's own group bookkeeping."""
        edge_groups.union(source, target)
        mesh.union_groups(source, target)
    @staticmethod
    def __remove_group(mesh, edge_groups, index):
        """Drop edge group ``index`` from both the union-find structure and
        the mesh's own group bookkeeping."""
        edge_groups.remove_group(index)
        mesh.remove_group(index)
| 44.014706 | 155 | 0.646397 | import torch
import torch.nn as nn
from threading import Thread
from models.layers.mesh_union import MeshUnion
import numpy as np
from heapq import heappop, heapify
class MeshPool(nn.Module):
    """Edge-collapse pooling layer for edge-based mesh CNNs.

    Collapses the lowest-norm edges of each mesh until ``target`` edges
    remain, merging collapsed edge features through a union-find structure
    (``MeshUnion``) so pooled features can be rebuilt afterwards.
    """
    def __init__(self, target, multi_thread=False):
        # target: number of edges each mesh is pooled down to.
        # multi_thread: pool each mesh of the batch in its own thread.
        super(MeshPool, self).__init__()
        self.__out_target = target
        self.__multi_thread = multi_thread
        self.__fe = None
        self.__updated_fe = None
        self.__meshes = None
        self.__merge_edges = [-1, -1]
    def __call__(self, fe, meshes):
        return self.forward(fe, meshes)
    def forward(self, fe, meshes):
        """Pool every mesh in the batch; returns features reshaped to
        (batch, -1, target)."""
        self.__updated_fe = [[] for _ in range(len(meshes))]
        pool_threads = []
        self.__fe = fe
        self.__meshes = meshes
        for mesh_index in range(len(meshes)):
            if self.__multi_thread:
                pool_threads.append(Thread(target=self.__pool_main, args=(mesh_index,)))
                pool_threads[-1].start()
            else:
                self.__pool_main(mesh_index)
        if self.__multi_thread:
            for mesh_index in range(len(meshes)):
                pool_threads[mesh_index].join()
        out_features = torch.cat(self.__updated_fe).view(len(meshes), -1, self.__out_target)
        return out_features
    def __pool_main(self, mesh_index):
        """Collapse edges of one mesh until it reaches the target count."""
        mesh = self.__meshes[mesh_index]
        queue = self.__build_queue(self.__fe[mesh_index, :, :mesh.edges_count], mesh.edges_count)
        # NOTE(review): last_count is never read afterwards — dead variable.
        last_count = mesh.edges_count + 1
        mask = np.ones(mesh.edges_count, dtype=np.uint8)
        edge_groups = MeshUnion(mesh.edges_count, self.__fe.device)
        while mesh.edges_count > self.__out_target:
            value, edge_id = heappop(queue)
            edge_id = int(edge_id)
            if mask[edge_id]:
                self.__pool_edge(mesh, edge_id, mask, edge_groups)
        mesh.clean(mask, edge_groups)
        fe = edge_groups.rebuild_features(self.__fe[mesh_index], mask, self.__out_target)
        self.__updated_fe[mesh_index] = fe
    def __pool_edge(self, mesh, edge_id, mask, edge_groups):
        """Try to collapse ``edge_id``; returns True when it was collapsed."""
        if self.has_boundaries(mesh, edge_id):
            return False
        elif self.__clean_side(mesh, edge_id, mask, edge_groups, 0)\
            and self.__clean_side(mesh, edge_id, mask, edge_groups, 2) \
            and self.__is_one_ring_valid(mesh, edge_id):
            # Collapse both adjacent faces, then merge the edge's endpoints.
            self.__merge_edges[0] = self.__pool_side(mesh, edge_id, mask, edge_groups, 0)
            self.__merge_edges[1] = self.__pool_side(mesh, edge_id, mask, edge_groups, 2)
            mesh.merge_vertices(edge_id)
            mask[edge_id] = False
            MeshPool.__remove_group(mesh, edge_groups, edge_id)
            mesh.edges_count -= 1
            return True
        else:
            return False
    def __clean_side(self, mesh, edge_id, mask, edge_groups, side):
        """Remove invalid edge triplets around face ``side``; returns False
        once the target edge count is reached or a boundary appears."""
        if mesh.edges_count <= self.__out_target:
            return False
        invalid_edges = MeshPool.__get_invalids(mesh, edge_id, edge_groups, side)
        while len(invalid_edges) != 0 and mesh.edges_count > self.__out_target:
            self.__remove_triplete(mesh, mask, edge_groups, invalid_edges)
            if mesh.edges_count <= self.__out_target:
                return False
            if self.has_boundaries(mesh, edge_id):
                return False
            invalid_edges = self.__get_invalids(mesh, edge_id, edge_groups, side)
        return True
    @staticmethod
    def has_boundaries(mesh, edge_id):
        """True when ``edge_id`` or any neighbour touches a mesh boundary
        (-1 marks a missing gemm neighbour)."""
        for edge in mesh.gemm_edges[edge_id]:
            if edge == -1 or -1 in mesh.gemm_edges[edge]:
                return True
        return False
    @staticmethod
    def __is_one_ring_valid(mesh, edge_id):
        """Collapse is valid only when the endpoints' one-rings share exactly
        two vertices besides the edge's own endpoints."""
        v_a = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 0]]].reshape(-1))
        v_b = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 1]]].reshape(-1))
        shared = v_a & v_b - set(mesh.edges[edge_id])
        return len(shared) == 2
    def __pool_side(self, mesh, edge_id, mask, edge_groups, side):
        """Collapse the face on ``side``: fold edge key_b into key_a and
        return the surviving edge key."""
        info = MeshPool.__get_face_info(mesh, edge_id, side)
        key_a, key_b, side_a, side_b, _, other_side_b, _, other_keys_b = info
        self.__redirect_edges(mesh, key_a, side_a - side_a % 2, other_keys_b[0], mesh.sides[key_b, other_side_b])
        self.__redirect_edges(mesh, key_a, side_a - side_a % 2 + 1, other_keys_b[1], mesh.sides[key_b, other_side_b + 1])
        MeshPool.__union_groups(mesh, edge_groups, key_b, key_a)
        MeshPool.__union_groups(mesh, edge_groups, edge_id, key_a)
        mask[key_b] = False
        MeshPool.__remove_group(mesh, edge_groups, key_b)
        mesh.remove_edge(key_b)
        mesh.edges_count -= 1
        return key_a
    @staticmethod
    def __get_invalids(mesh, edge_id, edge_groups, side):
        """Detect a degenerate configuration (the two faces around
        ``edge_id`` sharing an extra edge); rewire around it and return the
        three edges forming the invalid triplet, or [] when none."""
        info = MeshPool.__get_face_info(mesh, edge_id, side)
        key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b = info
        shared_items = MeshPool.__get_shared_items(other_keys_a, other_keys_b)
        if len(shared_items) == 0:
            return []
        else:
            assert (len(shared_items) == 2)
            middle_edge = other_keys_a[shared_items[0]]
            update_key_a = other_keys_a[1 - shared_items[0]]
            update_key_b = other_keys_b[1 - shared_items[1]]
            update_side_a = mesh.sides[key_a, other_side_a + 1 - shared_items[0]]
            update_side_b = mesh.sides[key_b, other_side_b + 1 - shared_items[1]]
            MeshPool.__redirect_edges(mesh, edge_id, side, update_key_a, update_side_a)
            MeshPool.__redirect_edges(mesh, edge_id, side + 1, update_key_b, update_side_b)
            MeshPool.__redirect_edges(mesh, update_key_a, MeshPool.__get_other_side(update_side_a), update_key_b, MeshPool.__get_other_side(update_side_b))
            MeshPool.__union_groups(mesh, edge_groups, key_a, edge_id)
            MeshPool.__union_groups(mesh, edge_groups, key_b, edge_id)
            MeshPool.__union_groups(mesh, edge_groups, key_a, update_key_a)
            MeshPool.__union_groups(mesh, edge_groups, middle_edge, update_key_a)
            MeshPool.__union_groups(mesh, edge_groups, key_b, update_key_b)
            MeshPool.__union_groups(mesh, edge_groups, middle_edge, update_key_b)
            return [key_a, key_b, middle_edge]
    @staticmethod
    def __redirect_edges(mesh, edge_a_key, side_a, edge_b_key, side_b):
        """Symmetrically cross-link two edge slots in gemm_edges/sides."""
        mesh.gemm_edges[edge_a_key, side_a] = edge_b_key
        mesh.gemm_edges[edge_b_key, side_b] = edge_a_key
        mesh.sides[edge_a_key, side_a] = side_b
        mesh.sides[edge_b_key, side_b] = side_a
    @staticmethod
    def __get_shared_items(list_a, list_b):
        """Flat [i, j, ...] index pairs where list_a[i] == list_b[j]."""
        shared_items = []
        for i in range(len(list_a)):
            for j in range(len(list_b)):
                if list_a[i] == list_b[j]:
                    shared_items.extend([i, j])
        return shared_items
    @staticmethod
    def __get_other_side(side):
        """Partner slot of ``side``: even -> side + 1, odd -> side - 1."""
        return side + 1 - 2 * (side % 2)
    @staticmethod
    def __get_face_info(mesh, edge_id, side):
        """Neighbour edges of ``edge_id`` on face ``side`` and their own
        opposite-face neighbours (see tuple layout in the return)."""
        key_a = mesh.gemm_edges[edge_id, side]
        key_b = mesh.gemm_edges[edge_id, side + 1]
        side_a = mesh.sides[edge_id, side]
        side_b = mesh.sides[edge_id, side + 1]
        # Opposite face: round the slot to its even base, add 2, wrap mod 4.
        other_side_a = (side_a - (side_a % 2) + 2) % 4
        other_side_b = (side_b - (side_b % 2) + 2) % 4
        other_keys_a = [mesh.gemm_edges[key_a, other_side_a], mesh.gemm_edges[key_a, other_side_a + 1]]
        other_keys_b = [mesh.gemm_edges[key_b, other_side_b], mesh.gemm_edges[key_b, other_side_b + 1]]
        return key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b
    @staticmethod
    def __remove_triplete(mesh, mask, edge_groups, invalid_edges):
        """Remove three edges that share one vertex, then drop the vertex."""
        vertex = set(mesh.edges[invalid_edges[0]])
        for edge_key in invalid_edges:
            vertex &= set(mesh.edges[edge_key])
            mask[edge_key] = False
            MeshPool.__remove_group(mesh, edge_groups, edge_key)
        mesh.edges_count -= 3
        vertex = list(vertex)
        assert(len(vertex) == 1)
        mesh.remove_vertex(vertex[0])
    def __build_queue(self, features, edges_count):
        """Min-heap of [squared_norm, edge_id]: smallest-norm edges first."""
        squared_magnitude = torch.sum(features * features, 0)
        if squared_magnitude.shape[-1] != 1:
            squared_magnitude = squared_magnitude.unsqueeze(-1)
        edge_ids = torch.arange(edges_count, device=squared_magnitude.device, dtype=torch.float32).unsqueeze(-1)
        heap = torch.cat((squared_magnitude, edge_ids), dim=-1).tolist()
        heapify(heap)
        return heap
    @staticmethod
    def __union_groups(mesh, edge_groups, source, target):
        """Merge edge ``source`` into ``target`` in both bookkeeping sides."""
        edge_groups.union(source, target)
        mesh.union_groups(source, target)
    @staticmethod
    def __remove_group(mesh, edge_groups, index):
        """Drop edge group ``index`` from both bookkeeping sides."""
        edge_groups.remove_group(index)
        mesh.remove_group(index)
| true | true |
f71ff2713f88d105f4975f0cee61ae6ef8e14fed | 896 | py | Python | nn_dataflow/nns/mlp_s.py | joeshow79/nn_dataflow | 279440452148ebf327992bd178a37cd5fd5330c5 | [
"BSD-3-Clause"
] | null | null | null | nn_dataflow/nns/mlp_s.py | joeshow79/nn_dataflow | 279440452148ebf327992bd178a37cd5fd5330c5 | [
"BSD-3-Clause"
] | null | null | null | nn_dataflow/nns/mlp_s.py | joeshow79/nn_dataflow | 279440452148ebf327992bd178a37cd5fd5330c5 | [
"BSD-3-Clause"
] | null | null | null | """ $lic$
Copyright (C) 2016-2019 by The Board of Trustees of Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
from nn_dataflow.core import Network
from nn_dataflow.core import InputLayer, FCLayer
'''
MLP-S
PRIME, 2016
'''
# MLP-S topology (PRIME, 2016): 784 -> 500 -> 250 -> 10, fully connected.
NN = Network('MLP-S')
NN.set_input_layer(InputLayer(784, 1))
for _name, _nin, _nout in (('fc1', 784, 500),
                           ('fc2', 500, 250),
                           ('fc3', 250, 10)):
    NN.add(_name, FCLayer(_nin, _nout))
| 27.151515 | 79 | 0.756696 |
from nn_dataflow.core import Network
from nn_dataflow.core import InputLayer, FCLayer
# MLP-S network definition (PRIME, 2016): three fully-connected layers.
NN = Network('MLP-S')
# 784-dimensional input (28x28 flattened), single channel.
NN.set_input_layer(InputLayer(784, 1))
NN.add('fc1', FCLayer(784, 500))
NN.add('fc2', FCLayer(500, 250))
# 10-way output layer.
NN.add('fc3', FCLayer(250, 10))
| true | true |
f71ff2d8ade82949986924ccc275d50947e1700f | 3,753 | py | Python | nexuscasc/config/k8s_config_handler.py | vjda/nexus3-casc-cli | 6d33503c19f75b73b656293141911e083331faf0 | [
"MIT"
] | 7 | 2020-06-30T08:15:53.000Z | 2022-03-22T10:28:09.000Z | nexuscasc/config/k8s_config_handler.py | vjda/nexus3-casc-cli | 6d33503c19f75b73b656293141911e083331faf0 | [
"MIT"
] | 1 | 2021-09-27T03:24:46.000Z | 2021-09-27T03:24:46.000Z | nexuscasc/config/k8s_config_handler.py | vjda/nexus3-casc-cli | 6d33503c19f75b73b656293141911e083331faf0 | [
"MIT"
] | 1 | 2020-12-07T14:55:30.000Z | 2020-12-07T14:55:30.000Z | import base64
import re
from dataclasses import dataclass
from enum import Enum
from typing import Union, List
from kubernetes import client, config
from kubernetes.client import V1ConfigMapList, V1SecretList, CoreV1Api, V1Secret, V1ConfigMap
from nexuscasc.logger import Logger
class ResourceType(Enum):
    """Kind of Kubernetes resource being watched for configuration."""
    SECRET = 0
    CONFIGMAP = 1
@dataclass
class WatchedResource:
    """Bookkeeping record for one resource whose version is tracked."""
    name: str  # metadata.name of the ConfigMap/Secret
    version: str  # last observed metadata.resource_version
    type: ResourceType  # SECRET or CONFIGMAP
class K8sConfigHandler:
    """Finds labelled ConfigMaps/Secrets in a namespace and tracks their
    ``resource_version`` so callers can detect configuration changes."""

    # Class-level defaults kept for backward compatibility with any code
    # that reads them off the class; real values are set per instance.
    v1: CoreV1Api = None
    watch_list: List[WatchedResource] = list()

    def __init__(self, local: bool = False):
        """Load kube config and build the CoreV1 API client.

        :param local: load the local kubeconfig when True, otherwise the
            in-cluster service-account configuration.
        """
        if local:
            config.load_kube_config()
        else:
            config.load_incluster_config()
        self.v1 = client.CoreV1Api()
        # Bug fix: watch_list previously existed only as a mutable class
        # attribute, so all instances shared (and mutated) the same list.
        # Give every instance its own list instead.
        self.watch_list = list()

    @staticmethod
    def filter_resources(
            resources: Union[V1ConfigMapList, V1SecretList],
            label_value: str = None
    ) -> List[Union[V1ConfigMap, V1Secret]]:
        """Return items whose labels carry ``label_value``; all items when
        ``label_value`` is None."""
        matches = list()
        for res in resources.items:
            if label_value is None:
                matches.append(res)
            # any() replaces the previous len(list(filter(...))) > 0 idiom.
            elif any(res.metadata.labels[key] == label_value for key in res.metadata.labels):
                matches.append(res)
        return matches

    def find_config_maps(self, namespace: str, label: str, label_value: str = None) -> List[V1ConfigMap]:
        """List ConfigMaps matching ``label`` (and optionally ``label_value``)."""
        config_maps = self.v1.list_namespaced_config_map(namespace=namespace, label_selector=label)
        return self.filter_resources(config_maps, label_value)

    def find_secrets(self, namespace: str, label: str, label_value: str = None) -> List[V1Secret]:
        """List Secrets matching ``label`` (and optionally ``label_value``)."""
        secrets = self.v1.list_namespaced_secret(namespace=namespace, label_selector=label)
        return self.filter_resources(secrets, label_value)

    @staticmethod
    def extract_yaml_strings_from_resources(resources: List[Union[V1ConfigMap, V1Secret]]) -> List[str]:
        """Collect YAML documents stored under ``*.yml``/``*.yaml`` data keys.

        Secret values are base64-decoded; ConfigMap values are taken as-is.
        """
        yaml_str = list()
        for res in resources:
            for k in filter(lambda key: re.search("\\.yml|\\.yaml$", key), res.data.keys()):
                if type(res) == V1Secret:
                    Logger.debug(f"Found yaml in key '{k}' for secret '{res.metadata.name}'")
                    yaml_str.append(base64.b64decode(res.data[k]).decode())
                else:
                    Logger.debug(f"Found yaml in key '{k}' for configmap '{res.metadata.name}'")
                    yaml_str.append(res.data[k])
        return yaml_str

    def any_resource_has_changed(self, resources: List[Union[V1ConfigMap, V1Secret]]) -> bool:
        """Return True when any resource is new or has a new resource_version.

        On the first call every resource is registered and True is returned.
        Afterwards the scan stops at the first detected change; remaining
        changes are picked up by subsequent calls.
        """
        has_changed = False
        if len(self.watch_list) == 0:
            has_changed = True
            for res in resources:
                self.watch_resource(res)
        else:
            for res in resources:
                r_name = res.metadata.name
                r_type = ResourceType.SECRET if type(res) == V1Secret else ResourceType.CONFIGMAP
                watched_resource = next(filter(lambda r: r_name == r.name and r_type == r.type, self.watch_list), None)
                if watched_resource is None:
                    # Not seen before: start tracking it and report a change.
                    self.watch_resource(res)
                    has_changed = True
                    break
                elif watched_resource.version != res.metadata.resource_version:
                    watched_resource.version = res.metadata.resource_version
                    has_changed = True
                    break
        return has_changed

    def watch_resource(self, resource: Union[V1ConfigMap, V1Secret]):
        """Start tracking ``resource`` by name, type and resource_version."""
        self.watch_list.append(
            WatchedResource(
                name=resource.metadata.name,
                version=resource.metadata.resource_version,
                type=ResourceType.SECRET if type(resource) == V1Secret else ResourceType.CONFIGMAP
            ))
| 38.295918 | 119 | 0.625633 | import base64
import re
from dataclasses import dataclass
from enum import Enum
from typing import Union, List
from kubernetes import client, config
from kubernetes.client import V1ConfigMapList, V1SecretList, CoreV1Api, V1Secret, V1ConfigMap
from nexuscasc.logger import Logger
class ResourceType(Enum):
    # Tuple unpacking inside an Enum body creates two members:
    # SECRET = 0, CONFIGMAP = 1.
    SECRET, CONFIGMAP = range(2)
@dataclass
class WatchedResource:
    """Bookkeeping record for one resource whose version is tracked."""
    name: str  # metadata.name of the ConfigMap/Secret
    version: str  # last observed metadata.resource_version
    type: ResourceType  # SECRET or CONFIGMAP
class K8sConfigHandler:
    """Finds labelled ConfigMaps/Secrets in a namespace and tracks their
    resource_version so callers can detect configuration changes.

    NOTE(review): ``v1`` and ``watch_list`` are mutable class attributes, so
    every instance shares the same watch_list — confirm whether more than
    one instance is ever created before relying on this.
    """
    v1: CoreV1Api = None
    watch_list: List[WatchedResource] = list()
    def __init__(self, local: bool = False):
        # local=True loads the developer kubeconfig; otherwise the
        # in-cluster service-account configuration is used.
        if local:
            config.load_kube_config()
        else:
            config.load_incluster_config()
        self.v1 = client.CoreV1Api()
    @staticmethod
    def filter_resources(
            resources: Union[V1ConfigMapList, V1SecretList],
            label_value: str = None
    ) -> List[Union[V1ConfigMap, V1Secret]]:
        """Return items whose labels carry label_value; all items when None."""
        matches = list()
        for res in resources.items:
            if label_value is None:
                matches.append(res)
            elif len(list(filter(lambda x: res.metadata.labels[x] == label_value, res.metadata.labels.keys()))) > 0:
                matches.append(res)
        return matches
    def find_config_maps(self, namespace: str, label: str, label_value: str = None) -> List[V1ConfigMap]:
        """List ConfigMaps matching ``label`` (and optionally ``label_value``)."""
        config_maps = self.v1.list_namespaced_config_map(namespace=namespace, label_selector=label)
        return self.filter_resources(config_maps, label_value)
    def find_secrets(self, namespace: str, label: str, label_value: str = None) -> List[V1Secret]:
        """List Secrets matching ``label`` (and optionally ``label_value``)."""
        secrets = self.v1.list_namespaced_secret(namespace=namespace, label_selector=label)
        return self.filter_resources(secrets, label_value)
    @staticmethod
    def extract_yaml_strings_from_resources(resources: List[Union[V1ConfigMap, V1Secret]]) -> List[str]:
        """Collect YAML documents under *.yml/*.yaml keys; Secret values are
        base64-decoded, ConfigMap values are taken as-is."""
        yaml_str = list()
        for res in resources:
            for k in filter(lambda key: re.search("\\.yml|\\.yaml$", key), res.data.keys()):
                if type(res) == V1Secret:
                    Logger.debug(f"Found yaml in key '{k}' for secret '{res.metadata.name}'")
                    yaml_str.append(base64.b64decode(res.data[k]).decode())
                else:
                    Logger.debug(f"Found yaml in key '{k}' for configmap '{res.metadata.name}'")
                    yaml_str.append(res.data[k])
        return yaml_str
    def any_resource_has_changed(self, resources: List[Union[V1ConfigMap, V1Secret]]) -> bool:
        """True when any resource is new or has a new resource_version.

        First call registers everything and returns True; later calls stop
        at the first change found.
        """
        has_changed = False
        if len(self.watch_list) == 0:
            has_changed = True
            for res in resources:
                self.watch_resource(res)
        else:
            for res in resources:
                r_name = res.metadata.name
                r_type = ResourceType.SECRET if type(res) == V1Secret else ResourceType.CONFIGMAP
                watched_resource = next(filter(lambda r: r_name == r.name and r_type == r.type, self.watch_list), None)
                if watched_resource is None:
                    self.watch_resource(res)
                    has_changed = True
                    break
                elif watched_resource.version != res.metadata.resource_version:
                    watched_resource.version = res.metadata.resource_version
                    has_changed = True
                    break
        return has_changed
    def watch_resource(self, resource: Union[V1ConfigMap, V1Secret]):
        """Start tracking ``resource`` by name, type and resource_version."""
        self.watch_list.append(
            WatchedResource(
                name=resource.metadata.name,
                version=resource.metadata.resource_version,
                type=ResourceType.SECRET if type(resource) == V1Secret else ResourceType.CONFIGMAP
            ))
| true | true |
f71ff2dfd267b7ca272fcc6a2a50017e19cd8ff1 | 3,423 | py | Python | tests/commands/spot/test_limit_order_cmd.py | mpetrinidev/bnb-cli | d10fc36b6a1e2dd34597d9f31ea143019ba6f4b9 | [
"MIT"
] | 5 | 2021-04-02T20:49:19.000Z | 2021-06-11T06:22:24.000Z | tests/commands/spot/test_limit_order_cmd.py | mpetrinidev/bnb-cli | d10fc36b6a1e2dd34597d9f31ea143019ba6f4b9 | [
"MIT"
] | 2 | 2021-06-13T20:22:09.000Z | 2021-06-14T23:09:14.000Z | tests/commands/spot/test_limit_order_cmd.py | mpetrinidev/bnc-cli | d10fc36b6a1e2dd34597d9f31ea143019ba6f4b9 | [
"MIT"
] | null | null | null | import datetime
import os
from unittest.mock import Mock
from bnc.cli import cli
from bnc.utils.utils import json_to_str
from tests.commands.common import read_json_test_file, get_headers
from tests.commands.common_fixtures import *
def get_json_filename():
    """Absolute path of the shared new_order.json response fixture."""
    tests_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(tests_dir, 'res', 'new_order.json')
@pytest.fixture(scope='session')
def data():
    """Session-scoped fixture: parsed contents of new_order.json."""
    return read_json_test_file(get_json_filename())
@pytest.mark.parametrize("params", [
    ['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621],
    ['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621],
    ['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621, '-ncoid', "custom_id"],
    ['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621,
     '--new_client_order_id', "custom_id"],
    ['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621, '-iq', 0.20],
    ['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621,
     '--iceberg_qty', 0.20]
])
def test_new_order_limit_return_full_resp(runner, params, mock_default_deps, data):
    """A LIMIT order with the default response type prints the FULL payload."""
    # Stub out the HTTP POST so no real exchange request is made.
    mock_response = Mock(status_code=200, elapsed=datetime.datetime.now(), headers=get_headers())
    mock_response.json.return_value = data['limit_full']
    mock_default_deps.patch('bnc.builder.requests.post', return_value=mock_response)
    result = runner.invoke(cli, params)
    assert result.exit_code == 0
    assert result.output == json_to_str(data['limit_full']) + '\n'
@pytest.mark.parametrize("params", [
    ['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621, '-nort', 'ACK'],
    ['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621,
     '--new_order_resp_type', 'ACK']
])
def test_new_order_limit_return_ack_resp(runner, params, mock_default_deps, data):
    """A LIMIT order with -nort ACK prints the ACK payload."""
    # Stub out the HTTP POST so no real exchange request is made.
    mock_response = Mock(status_code=200, elapsed=datetime.datetime.now(), headers=get_headers())
    mock_response.json.return_value = data['limit_ack']
    mock_default_deps.patch('bnc.builder.requests.post', return_value=mock_response)
    result = runner.invoke(cli, params)
    assert result.exit_code == 0
    assert result.output == json_to_str(data['limit_ack']) + '\n'
@pytest.mark.parametrize("params", [
    ['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621, '-nort', 'RESULT'],
    ['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621,
     '--new_order_resp_type', 'RESULT']
])
def test_new_order_limit_return_result_resp(runner, params, mock_default_deps, data):
    """A LIMIT order with -nort RESULT prints the RESULT payload.

    Bug fix: this function was named ``test_new_order_limit_return_ack_resp``,
    identical to the ACK test above; the later definition shadowed the
    earlier one, so pytest never collected or ran the ACK test.
    """
    # Stub out the HTTP POST so no real exchange request is made.
    mock_response = Mock(status_code=200, elapsed=datetime.datetime.now(), headers=get_headers())
    mock_response.json.return_value = data['limit_result']
    mock_default_deps.patch('bnc.builder.requests.post', return_value=mock_response)
    result = runner.invoke(cli, params)
    assert result.exit_code == 0
    assert result.output == json_to_str(data['limit_result']) + '\n'
| 48.211268 | 138 | 0.650891 | import datetime
import os
from unittest.mock import Mock
from bnc.cli import cli
from bnc.utils.utils import json_to_str
from tests.commands.common import read_json_test_file, get_headers
from tests.commands.common_fixtures import *
def get_json_filename():
    """Absolute path of the shared new_order.json response fixture."""
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'res', 'new_order.json')
@pytest.fixture(scope='session')
def data():
    """Session-scoped fixture: parsed contents of new_order.json."""
    return read_json_test_file(get_json_filename())
@pytest.mark.parametrize("params", [
    ['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621],
    ['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621],
    ['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621, '-ncoid', "custom_id"],
    ['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621,
     '--new_client_order_id', "custom_id"],
    ['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621, '-iq', 0.20],
    ['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621,
     '--iceberg_qty', 0.20]
])
def test_new_order_limit_return_full_resp(runner, params, mock_default_deps, data):
    """A LIMIT order with the default response type prints the FULL payload."""
    # Stub out the HTTP POST so no real exchange request is made.
    mock_response = Mock(status_code=200, elapsed=datetime.datetime.now(), headers=get_headers())
    mock_response.json.return_value = data['limit_full']
    mock_default_deps.patch('bnc.builder.requests.post', return_value=mock_response)
    result = runner.invoke(cli, params)
    assert result.exit_code == 0
    assert result.output == json_to_str(data['limit_full']) + '\n'
@pytest.mark.parametrize("params", [
    ['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621, '-nort', 'ACK'],
    ['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621,
     '--new_order_resp_type', 'ACK']
])
def test_new_order_limit_return_ack_resp(runner, params, mock_default_deps, data):
    """A LIMIT order with -nort ACK prints the ACK payload."""
    # Stub out the HTTP POST so no real exchange request is made.
    mock_response = Mock(status_code=200, elapsed=datetime.datetime.now(), headers=get_headers())
    mock_response.json.return_value = data['limit_ack']
    mock_default_deps.patch('bnc.builder.requests.post', return_value=mock_response)
    result = runner.invoke(cli, params)
    assert result.exit_code == 0
    assert result.output == json_to_str(data['limit_ack']) + '\n'
@pytest.mark.parametrize("params", [
    ['spot', 'new_order', 'limit', '-sy', 'LTCBTC', '-si', 'BUY', '-tif', 'GTC', '-q', 1, '-p', 0.003621, '-nort', 'RESULT'],
    ['spot', 'new_order', 'limit', '--symbol', 'LTCBTC', '--side', 'BUY', '--time_in_force', 'GTC', '--quantity', 1, '--price', 0.003621,
     '--new_order_resp_type', 'RESULT']
])
def test_new_order_limit_return_result_resp(runner, params, mock_default_deps, data):
    """A LIMIT order with -nort RESULT prints the RESULT payload.

    Bug fix: this function was named ``test_new_order_limit_return_ack_resp``,
    identical to the ACK test above; the later definition shadowed the
    earlier one, so pytest never collected or ran the ACK test.
    """
    # Stub out the HTTP POST so no real exchange request is made.
    mock_response = Mock(status_code=200, elapsed=datetime.datetime.now(), headers=get_headers())
    mock_response.json.return_value = data['limit_result']
    mock_default_deps.patch('bnc.builder.requests.post', return_value=mock_response)
    result = runner.invoke(cli, params)
    assert result.exit_code == 0
    assert result.output == json_to_str(data['limit_result']) + '\n'
| true | true |
f71ff3f9334377468b2b24b73aa343bc3c717efb | 2,218 | py | Python | src/odontology/core/migrations/0009_chapter_tariff.py | nanomolina/JP | 248a47bced4dac850f85d28968ddf279cd123400 | [
"Apache-2.0"
] | 2 | 2016-06-23T15:35:29.000Z | 2022-01-11T00:55:21.000Z | src/odontology/core/migrations/0009_chapter_tariff.py | nanomolina/JP | 248a47bced4dac850f85d28968ddf279cd123400 | [
"Apache-2.0"
] | 27 | 2016-06-24T12:28:01.000Z | 2022-01-13T00:37:25.000Z | src/odontology/core/migrations/0009_chapter_tariff.py | nanomolina/JP | 248a47bced4dac850f85d28968ddf279cd123400 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-09-02 20:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the Chapter and Tariff models (tariff entries grouped by chapter).

    Auto-generated; do not edit field definitions after it has been applied.
    """
    dependencies = [
        ('core', '0008_auto_20160801_1937'),
    ]
    operations = [
        migrations.CreateModel(
            name='Chapter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                ('number', models.PositiveSmallIntegerField()),
                ('date', models.DateField(blank=True, null=True)),
                ('date_created', models.DateField(auto_now_add=True)),
                ('date_modified', models.DateField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Tariff',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('index', models.PositiveSmallIntegerField(blank=True, null=True)),
                ('sub_index', models.PositiveSmallIntegerField(blank=True, null=True)),
                ('name', models.CharField(blank=True, max_length=250)),
                ('variable_cost', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Costo variable')),
                ('fixed_cost', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Costo fijo')),
                ('workshop_cost', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Costo taller')),
                ('total_cost', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Costo total')),
                ('fees', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Honorarios')),
                ('total_tariff', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Total arancel')),
                ('chapter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Chapter')),
            ],
        ),
    ]
| 50.409091 | 131 | 0.625338 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the Chapter and Tariff models (tariff entries grouped by chapter).

    Auto-generated; do not edit field definitions after it has been applied.
    """
    dependencies = [
        ('core', '0008_auto_20160801_1937'),
    ]
    operations = [
        migrations.CreateModel(
            name='Chapter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                ('number', models.PositiveSmallIntegerField()),
                ('date', models.DateField(blank=True, null=True)),
                ('date_created', models.DateField(auto_now_add=True)),
                ('date_modified', models.DateField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Tariff',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('index', models.PositiveSmallIntegerField(blank=True, null=True)),
                ('sub_index', models.PositiveSmallIntegerField(blank=True, null=True)),
                ('name', models.CharField(blank=True, max_length=250)),
                ('variable_cost', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Costo variable')),
                ('fixed_cost', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Costo fijo')),
                ('workshop_cost', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Costo taller')),
                ('total_cost', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Costo total')),
                ('fees', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Honorarios')),
                ('total_tariff', models.DecimalField(decimal_places=2, default=0, max_digits=12, verbose_name=b'Total arancel')),
                ('chapter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Chapter')),
            ],
        ),
    ]
| true | true |
f71ff457dfbaab5f3f5847da75668942d7052f7a | 3,330 | py | Python | dockerdjango/dockerdjango/settings.py | ducanhvna/DockerDjango | ab5dcb801691fa1ef15a38f05e75fe58066c61ae | [
"MIT"
] | null | null | null | dockerdjango/dockerdjango/settings.py | ducanhvna/DockerDjango | ab5dcb801691fa1ef15a38f05e75fe58066c61ae | [
"MIT"
] | 9 | 2019-12-04T23:29:22.000Z | 2022-02-10T12:19:23.000Z | dockerdjango/dockerdjango/settings.py | ducanhvna/DockerDjango | ab5dcb801691fa1ef15a38f05e75fe58066c61ae | [
"MIT"
] | null | null | null | """
Django settings for dockerdjango project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control — rotate it and load
# it from an environment variable before any production use.
SECRET_KEY = 'gc(_1u30_&as3g*xqy8k@u$aj22*@_#d$ylw@s8cjfjmz%ukq('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): empty ALLOWED_HOSTS only works with DEBUG=True; add the real
# host names before deploying.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dockerdjango.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'dockerdjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#     }
# }
# MySQL backend; HOST 'db' is presumably the docker-compose service name —
# confirm against the compose file.
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables.
DATABASES = {
    'default':{
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'my-app-db',
        'USER': 'root',
        'PASSWORD': 'password',
        'HOST': 'db',
        'PORT': 3306
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| 25.419847 | 91 | 0.678378 |
"""Django settings for the dockerdjango project (Docker + MySQL deployment).

Secrets and connection details can be overridden through environment
variables so they do not have to be baked into the image; the literal
defaults below keep local development working unchanged.
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY WARNING: keep the secret key used in production secret!
# The literal is only a development fallback; set DJANGO_SECRET_KEY in the
# container environment for real deployments.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY',
    'gc(_1u30_&as3g*xqy8k@u$aj22*@_#d$ylw@s8cjfjmz%ukq(')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

# Order matters: security first, session before auth, clickjacking last.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'dockerdjango.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'dockerdjango.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# MySQL served by the 'db' docker-compose service by default.  Every value
# can be overridden via the environment so credentials stay out of the
# image.  (Previous sqlite fallback, kept for reference:
#   {'ENGINE': 'django.db.backends.sqlite3',
#    'NAME': os.path.join(BASE_DIR, 'db.sqlite3')})
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': os.environ.get('MYSQL_DATABASE', 'my-app-db'),
        'USER': os.environ.get('MYSQL_USER', 'root'),
        'PASSWORD': os.environ.get('MYSQL_PASSWORD', 'password'),
        'HOST': os.environ.get('MYSQL_HOST', 'db'),
        'PORT': int(os.environ.get('MYSQL_PORT', '3306')),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| true | true |
f71ff4f192b3057091ebc889bf78714a2e21a9d6 | 2,455 | py | Python | examples/pybullet/examples/quadruped_setup_playback.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 9,136 | 2015-01-02T00:41:45.000Z | 2022-03-31T15:30:02.000Z | examples/pybullet/examples/quadruped_setup_playback.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 2,424 | 2015-01-05T08:55:58.000Z | 2022-03-30T19:34:55.000Z | examples/pybullet/examples/quadruped_setup_playback.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 2,921 | 2015-01-02T10:19:30.000Z | 2022-03-31T02:48:42.000Z | import pybullet as p
import pybullet_data

# Attach to an already-running physics server and make pybullet's bundled
# data files (plane.urdf, quadruped model, ...) discoverable.
p.connect(p.SHARED_MEMORY)
p.setAdditionalSearchPath(pybullet_data.getDataPath())

# Ground plane slightly below the origin, then the minitaur quadruped with
# a free (non-fixed) base at its recorded pose.
p.loadURDF("plane.urdf", 0.000000, 0.000000, -.300000, 0.000000, 0.000000,
           0.000000, 1.000000)
quadruped = p.loadURDF("quadruped/minitaur.urdf",
                       [-0.000046, -0.000068, 0.200774],
                       [-0.000701, 0.000387, -0.000252, 1.000000],
                       useFixedBase=False)

# Recorded setup pose: one target angle per joint, in joint-index order.
jointPositions = [
    0.000000, 1.531256, 0.000000, -2.240112, 1.527979, 0.000000, -2.240646,
    1.533105, 0.000000, -2.238254, 1.530335, 0.000000, -2.238298, 0.000000,
    -1.528038, 0.000000, 2.242656, -1.525193, 0.000000, 2.244008, -1.530011,
    0.000000, 2.240683, -1.528687, 0.000000, 2.240517
]
for joint in range(p.getNumJoints(quadruped)):
  p.resetJointState(quadruped, joint, jointPositions[joint])
  # A zero-force velocity motor disables the default joint motor so the
  # pose is not fought by the controller.
  p.setJointMotorControl2(bodyIndex=quadruped, jointIndex=joint,
                          controlMode=p.VELOCITY_CONTROL, force=0)

# Close each leg's linkage with a point-to-point constraint between the two
# link indices, then cap the constraint force.
for parentLink, childLink in ((3, 6), (16, 19), (9, 12), (22, 25)):
  cid = p.createConstraint(1, parentLink, 1, childLink, p.JOINT_POINT2POINT,
                           [0.000000, 0.000000, 0.000000],
                           [0.000000, 0.005000, 0.200000],
                           [0.000000, 0.010000, 0.200000],
                           [0.000000, 0.000000, 0.000000, 1.000000],
                           [0.000000, 0.000000, 0.000000, 1.000000])
  p.changeConstraint(cid, maxForce=500.000000)

p.setGravity(0.000000, 0.000000, 0.000000)
p.stepSimulation()
p.disconnect()
| 51.145833 | 98 | 0.608147 | import pybullet as p
import pybullet_data
# Attach to an already-running physics server and make pybullet's bundled
# data files (plane.urdf, quadruped model, ...) discoverable.
p.connect(p.SHARED_MEMORY)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
# Ground plane slightly below the origin.
objects = [
    p.loadURDF("plane.urdf", 0.000000, 0.000000, -.300000, 0.000000, 0.000000, 0.000000, 1.000000)
]
# Minitaur quadruped with a free (non-fixed) base at its recorded pose.
objects = [
    p.loadURDF("quadruped/minitaur.urdf", [-0.000046, -0.000068, 0.200774],
               [-0.000701, 0.000387, -0.000252, 1.000000],
               useFixedBase=False)
]
ob = objects[0]
# Recorded setup pose: one target angle per joint, in joint-index order.
jointPositions = [
    0.000000, 1.531256, 0.000000, -2.240112, 1.527979, 0.000000, -2.240646, 1.533105, 0.000000,
    -2.238254, 1.530335, 0.000000, -2.238298, 0.000000, -1.528038, 0.000000, 2.242656, -1.525193,
    0.000000, 2.244008, -1.530011, 0.000000, 2.240683, -1.528687, 0.000000, 2.240517
]
for ji in range(p.getNumJoints(ob)):
  p.resetJointState(ob, ji, jointPositions[ji])
  # A zero-force velocity motor disables the default joint motor so the
  # pose is not fought by the controller.
  p.setJointMotorControl2(bodyIndex=ob, jointIndex=ji, controlMode=p.VELOCITY_CONTROL, force=0)
# Point-to-point constraints closing the legs' linkages.
# NOTE(review): body unique id 1 is hardcoded below -- presumably the
# minitaur's id on the shared-memory server; confirm it matches `ob`.
cid0 = p.createConstraint(1, 3, 1, 6, p.JOINT_POINT2POINT, [0.000000, 0.000000, 0.000000],
                          [0.000000, 0.005000, 0.200000], [0.000000, 0.010000, 0.200000],
                          [0.000000, 0.000000, 0.000000, 1.000000],
                          [0.000000, 0.000000, 0.000000, 1.000000])
p.changeConstraint(cid0, maxForce=500.000000)
cid1 = p.createConstraint(1, 16, 1, 19, p.JOINT_POINT2POINT, [0.000000, 0.000000, 0.000000],
                          [0.000000, 0.005000, 0.200000], [0.000000, 0.010000, 0.200000],
                          [0.000000, 0.000000, 0.000000, 1.000000],
                          [0.000000, 0.000000, 0.000000, 1.000000])
p.changeConstraint(cid1, maxForce=500.000000)
cid2 = p.createConstraint(1, 9, 1, 12, p.JOINT_POINT2POINT, [0.000000, 0.000000, 0.000000],
                          [0.000000, 0.005000, 0.200000], [0.000000, 0.010000, 0.200000],
                          [0.000000, 0.000000, 0.000000, 1.000000],
                          [0.000000, 0.000000, 0.000000, 1.000000])
p.changeConstraint(cid2, maxForce=500.000000)
cid3 = p.createConstraint(1, 22, 1, 25, p.JOINT_POINT2POINT, [0.000000, 0.000000, 0.000000],
                          [0.000000, 0.005000, 0.200000], [0.000000, 0.010000, 0.200000],
                          [0.000000, 0.000000, 0.000000, 1.000000],
                          [0.000000, 0.000000, 0.000000, 1.000000])
p.changeConstraint(cid3, maxForce=500.000000)
# Zero gravity, a single simulation step, then detach from the server.
p.setGravity(0.000000, 0.000000, 0.000000)
p.stepSimulation()
p.disconnect()
| true | true |
f71ff582921b55fc70764b796e2441625d14a39b | 3,212 | py | Python | quati/dataset/corpora/imdb.py | onenoc/quati | ba372b2ad14076294af62cbcbc27e1b3ca8421c1 | [
"MIT"
] | 2 | 2021-01-30T21:20:36.000Z | 2021-01-30T22:15:07.000Z | quati/dataset/corpora/imdb.py | onenoc/quati | ba372b2ad14076294af62cbcbc27e1b3ca8421c1 | [
"MIT"
] | null | null | null | quati/dataset/corpora/imdb.py | onenoc/quati | ba372b2ad14076294af62cbcbc27e1b3ca8421c1 | [
"MIT"
] | 1 | 2021-01-18T23:12:18.000Z | 2021-01-18T23:12:18.000Z | from itertools import chain
from pathlib import Path
import nltk
import torchtext
from quati.dataset.fields.words import WordsField
from quati.dataset.fields.tags import TagsField
from quati.dataset.corpora.corpus import Corpus
def create_single_file_for_pos_and_neg(corpus_path):
    """Merge the IMDB ``pos/`` and ``neg/`` review files into one file.

    Creates (once) a ``data.txt`` inside ``corpus_path`` where each line is
    ``<label> <review text>``, with label ``1`` for positive and ``0`` for
    negative reviews.  Every ``<br>`` tag variant is padded with spaces so
    it survives whitespace tokenization as a single token.

    Args:
        corpus_path: root directory containing the ``pos/`` and ``neg/``
            sub-directories.

    Returns:
        Path to the merged ``data.txt`` file.
    """
    new_file_path = Path(corpus_path, 'data.txt')
    # do not create this file again if it is already there
    if not new_file_path.exists():
        neg_files = sorted(Path(corpus_path, 'neg').glob('*.txt'))
        pos_files = sorted(Path(corpus_path, 'pos').glob('*.txt'))
        with new_file_path.open('w', encoding='utf8') as new_file:
            for file_path in chain(neg_files, pos_files):
                content = file_path.read_text(encoding='utf8').strip()
                # Normalize every <br> spelling to a space-padded ' <br> '.
                content = content.replace('<br>', ' <br> ')
                content = content.replace('<br >', ' <br> ')
                content = content.replace('<br />', ' <br> ')
                content = content.replace('<br/>', ' <br> ')
                # Bug fix: label by the immediate parent directory instead of
                # substring-matching 'pos' anywhere in the full path, which
                # mislabeled *all* files when corpus_path contained 'pos'.
                label = '1' if file_path.parent.name == 'pos' else '0'
                new_file.write(label + ' ' + content + '\n')
    return new_file_path
class IMDBCorpus(Corpus):
    """IMDB sentiment corpus: one document per example, binary target."""

    task = 'doc'

    @staticmethod
    def create_fields_tuples():
        # Punctuation-aware word tokenizer; spacy's 'en' model would also
        # work but needs a separate download (python3 -m spacy download en).
        word_tokenizer = nltk.WordPunctTokenizer()
        return [
            ('words', WordsField(tokenize=word_tokenizer.tokenize)),
            ('target', TagsField()),
        ]

    def read(self, corpus_path):
        """Load the corpus rooted at ``corpus_path``.

        Merges the ``pos/`` and ``neg/`` folders into a single ``data.txt``
        (one ``label text`` line per review) in the root directory and opens
        it.  Returns the corpus itself when lazy, otherwise the fully
        materialized list of examples.

        Args:
            corpus_path: path to the root directory where ``pos/`` and
                ``neg/`` are located.
        """
        merged_path = create_single_file_for_pos_and_neg(corpus_path)
        self.corpus_path = str(merged_path)
        self.open(self.corpus_path)
        return self if self.lazy is True else list(self)

    def _read(self, file):
        # Each non-empty line is "<label> <token> <token> ...".
        for raw_line in file:
            tokens = raw_line.strip().split()
            if not tokens:
                continue
            label, text = tokens[0], ' '.join(tokens[1:])
            yield self.make_torchtext_example(text, label)

    def make_torchtext_example(self, text, label=None):
        # Drop the target when this corpus was configured without a target
        # field (e.g. unlabeled prediction data).
        ex = {'words': text, 'target': label}
        if 'target' not in self.fields_dict.keys():
            del ex['target']
        assert ex.keys() == self.fields_dict.keys()
        return torchtext.data.Example.fromdict(ex, self.fields_dict)
if __name__ == '__main__':
    # Smoke-test the reader against a local copy of the IMDB test split.
    from quati.dataset.corpora.test_corpus import quick_test
    quick_test(
        IMDBCorpus,
        '../../../data/corpus/imdb/test/',
        lazy=True,
    )
| 33.113402 | 76 | 0.59589 | from itertools import chain
from pathlib import Path
import nltk
import torchtext
from quati.dataset.fields.words import WordsField
from quati.dataset.fields.tags import TagsField
from quati.dataset.corpora.corpus import Corpus
def create_single_file_for_pos_and_neg(corpus_path):
    """Merge the IMDB ``pos/`` and ``neg/`` review files into one file.

    Creates (once) a ``data.txt`` inside ``corpus_path`` where each line is
    ``<label> <review text>``, with label ``1`` for positive and ``0`` for
    negative reviews.  Every ``<br>`` tag variant is padded with spaces so
    it survives whitespace tokenization as a single token.

    Args:
        corpus_path: root directory containing the ``pos/`` and ``neg/``
            sub-directories.

    Returns:
        Path to the merged ``data.txt`` file.
    """
    new_file_path = Path(corpus_path, 'data.txt')
    if not new_file_path.exists():
        neg_files = sorted(Path(corpus_path, 'neg').glob('*.txt'))
        pos_files = sorted(Path(corpus_path, 'pos').glob('*.txt'))
        with new_file_path.open('w', encoding='utf8') as new_file:
            for file_path in chain(neg_files, pos_files):
                content = file_path.read_text(encoding='utf8').strip()
                content = content.replace('<br>', ' <br> ')
                content = content.replace('<br >', ' <br> ')
                content = content.replace('<br />', ' <br> ')
                content = content.replace('<br/>', ' <br> ')
                # Bug fix: label by the immediate parent directory instead of
                # substring-matching 'pos' anywhere in the full path, which
                # mislabeled *all* files when corpus_path contained 'pos'.
                label = '1' if file_path.parent.name == 'pos' else '0'
                new_file.write(label + ' ' + content + '\n')
    return new_file_path
class IMDBCorpus(Corpus):
    """IMDB sentiment corpus: one document per example, binary target."""

    # Document-classification task identifier.
    task = 'doc'

    @staticmethod
    def create_fields_tuples():
        """Return the (name, Field) tuples used to process raw examples."""
        tokenizer = nltk.WordPunctTokenizer()
        fields_tuples = [
            ('words', WordsField(tokenize=tokenizer.tokenize)),
            ('target', TagsField())
        ]
        return fields_tuples

    def read(self, corpus_path):
        """Merge pos/ and neg/ into data.txt, open it, and return the
        corpus (lazy) or the materialized example list (eager)."""
        new_file_path = create_single_file_for_pos_and_neg(corpus_path)
        self.corpus_path = str(new_file_path)
        self.open(self.corpus_path)
        if self.lazy is True:
            return self
        else:
            return list(self)

    def _read(self, file):
        # Each non-empty line is "<label> <token> <token> ...".
        for line in file:
            line = line.strip().split()
            if line:
                label = line[0]
                text = ' '.join(line[1:])
                yield self.make_torchtext_example(text, label)

    def make_torchtext_example(self, text, label=None):
        """Wrap one (text, label) pair as a torchtext Example."""
        ex = {'words': text, 'target': label}
        # Drop the target when this corpus was configured without one.
        if 'target' not in self.fields_dict.keys():
            del ex['target']
        assert ex.keys() == self.fields_dict.keys()
        return torchtext.data.Example.fromdict(ex, self.fields_dict)
if __name__ == '__main__':
    # Smoke-test the reader against a local copy of the IMDB test split.
    from quati.dataset.corpora.test_corpus import quick_test
    quick_test(
        IMDBCorpus,
        '../../../data/corpus/imdb/test/',
        lazy=True,
    )
| true | true |
f71ff58b38aa884ffd4cc5997958ad2274a7a77b | 491 | py | Python | tests/system/test_base.py | aalmazanarbs/memstatsbeat | 554be8f67c385f4e9aeccfdc09b838075c8e1714 | [
"Apache-2.0"
] | null | null | null | tests/system/test_base.py | aalmazanarbs/memstatsbeat | 554be8f67c385f4e9aeccfdc09b838075c8e1714 | [
"Apache-2.0"
] | null | null | null | tests/system/test_base.py | aalmazanarbs/memstatsbeat | 554be8f67c385f4e9aeccfdc09b838075c8e1714 | [
"Apache-2.0"
] | null | null | null | from memstatsbeat import BaseTest
import os
class Test(BaseTest):

    def test_base(self):
        """Start memstatsbeat, then stop it and require a clean exit."""
        log_glob = os.path.abspath(self.working_dir) + "/log/*"
        self.render_config_template(path=log_glob)

        proc = self.start_beat()
        # Wait for the startup banner before shutting the beat down.
        self.wait_until(lambda: self.log_contains("memstatsbeat is running"))
        assert proc.kill_and_wait() == 0
| 24.55 | 77 | 0.641548 | from memstatsbeat import BaseTest
import os
class Test(BaseTest):
    def test_base(self):
        """Start memstatsbeat normally, then stop it and expect exit code 0."""
        self.render_config_template(
            path=os.path.abspath(self.working_dir) + "/log/*"
        )
        memstatsbeat_proc = self.start_beat()
        # Wait for the startup banner before shutting the beat down.
        self.wait_until(lambda: self.log_contains("memstatsbeat is running"))
        exit_code = memstatsbeat_proc.kill_and_wait()
        assert exit_code == 0
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.